hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
d92522c94e17430f94254dced6800a868fcfd052 | 30,022 | py | Python | transfer/trainers.py | 0e4e6d01/non-parallel-text-style-transfer-using-self-attn-discriminator | c24a47cc96033cf960ed272810b9b7226f25e899 | [
"Apache-2.0"
] | null | null | null | transfer/trainers.py | 0e4e6d01/non-parallel-text-style-transfer-using-self-attn-discriminator | c24a47cc96033cf960ed272810b9b7226f25e899 | [
"Apache-2.0"
] | null | null | null | transfer/trainers.py | 0e4e6d01/non-parallel-text-style-transfer-using-self-attn-discriminator | c24a47cc96033cf960ed272810b9b7226f25e899 | [
"Apache-2.0"
] | null | null | null | import os
import time
import csv
import pickle
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data.dataloader import DataLoader
from torch.nn.utils import clip_grad_norm_ as clip_grad_norm
from sklearn.metrics import accuracy_score
import matplotlib.pyplot as plt
from utils import tokenization, optimization, constants, misc
from utils.data import *
from utils.evaluator import BLEUEvaluator
def get_transfer_data(data_dir, data_name):
"""
args:
data_dir: str
data_name: str
return:
data: dict of {"src_str": list of str, "lab": list of int}
"""
src_0, src_1 = [], []
with open(os.path.join(data_dir, data_name+".0"), 'r') as f:
for line in f.readlines():
src_0.append(line.strip())
with open(os.path.join(data_dir, data_name+".1"), 'r') as f:
for line in f.readlines():
src_1.append(line.strip())
lab_0 = [0] * len(src_0)
lab_1 = [1] * len(src_1)
src = src_0 + src_1
lab = lab_0 + lab_1
assert len(src) == len(lab)
data = {"src_str": src, "lab": lab}
print("%s data has been loaded" % data_name)
for l, count in enumerate(np.bincount(data["lab"])):
print("number of label %d: %d" % (l, count))
return data
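# Illustrative shape of the returned dict (the file contents here are assumed):
# with "train.0" containing "good movie" and "train.1" containing "bad movie",
# get_transfer_data(data_dir, "train") returns
# {"src_str": ["good movie", "bad movie"], "lab": [0, 1]}.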
def load_and_cache_data(args, data_name, tokenizer):
"""
return:
data: dict of {"src_str": list of str,
"src_ind": list of int,
"lab": list of int}
"""
sos_str = "_sos" if args.use_sos else ""
eos_str = "_eos" if args.use_eos else ""
mask_str = "_mask" if "mask" in args.vocab_file_name else ""
cached_data_file = os.path.join(
args.data_dir,
f"cached_transfer_{data_name}{sos_str}{eos_str}{mask_str}"
)
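    # e.g. with data_name="train", use_sos and use_eos set, and a vocab file
    # whose name lacks "mask", this becomes "<data_dir>/cached_transfer_train_sos_eos".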
if os.path.exists(cached_data_file) and not args.overwrite_cache:
print("Loading data from cached data file %s" % cached_data_file)
data = torch.load(cached_data_file)
else:
print("Creating cached data file from data at %s" % cached_data_file)
data = get_transfer_data(args.data_dir, data_name)
index_src = []
str_src = []
sos_id, eos_id = tokenizer.SOS_ID, tokenizer.EOS_ID
sos_token, eos_token = tokenizer.SOS_TOKEN, tokenizer.EOS_TOKEN
if args.use_sos and args.use_eos:
for text in data['src_str']:
index_src.append([sos_id] + tokenizer.encode(text) + [eos_id])
str_src.append(' '.join([sos_token, text, eos_token]))
elif args.use_sos:
for text in data['src_str']:
index_src.append([sos_id] + tokenizer.encode(text))
str_src.append(' '.join([sos_token, text]))
elif args.use_eos:
for text in data['src_str']:
index_src.append(tokenizer.encode(text) + [eos_id])
str_src.append(' '.join([text, eos_token]))
else:
for text in data['src_str']:
index_src.append(tokenizer.encode(text))
str_src.append(text)
data['src_ind'] = index_src
data['src_str'] = str_src
torch.save(data, cached_data_file)
return data
def lambda_schedule(num_iter, start=0.0, stop=1.0, ratio=0.1):
    # Linear warm-up from `start` to `stop` over the first `ratio` fraction
    # of the iterations, constant at `stop` afterwards.
    lambdas = np.ones(num_iter) * stop
    progress_interval = num_iter * ratio
    for i in range(int(progress_interval)):
        lambdas[i] = start + (stop - start) * (i / progress_interval)
    return lambdas
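# For example (illustrative values): lambda_schedule(10, stop=1.0, ratio=0.5)
# yields 0.0, 0.2, 0.4, 0.6, 0.8 over the first five steps, then 1.0 afterwards.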
class BasicTrainer:
"""
Basic Trainer
"""
def __init__(self, args, model, train_data=None, dev_data=None, test_data=None,
tokenizer=None):
self.args = args
self.model = model
self.optimizer = None
self.scheduler = None
self.train_dataloader = self.get_dataloader(train_data, "train")\
if train_data else None
self.dev_dataloader = self.get_dataloader(dev_data, "dev")\
if dev_data else None
self.test_dataloader = self.get_dataloader(test_data, "test")\
if test_data else None
if self.train_dataloader:
self.optimizer, self.scheduler = self.get_optimizer()
def get_dataloader(self, data, data_name):
args = self.args
if data_name == "train":
shuffle = args.shuffle
batch_size = args.batch_size
else:
shuffle = False
            batch_size = args.batch_size
        dataset = ClassifierDataset(data["src_ind"], data["lab"])
        dataloader = DataLoader(dataset=dataset,
                                batch_size=batch_size,
shuffle=shuffle,
num_workers=args.num_workers,
collate_fn=ClassifierPaddingCollate)
return dataloader
def get_optimizer(self):
args = self.args
model = self.model
train_dataloader = self.train_dataloader
optimizer = optimization.get_optim(args, model.parameters())
num_steps = len(train_dataloader) * args.num_train_epochs
args.num_steps = num_steps
print("Total number of steps: %d" % num_steps)
decay_step = len(train_dataloader) * args.decay_epoch
if args.decay_epoch > 0:
print("Step when lr starts to decay: %d" % decay_step)
scheduler = optimization.get_constant_schedule_with_linear_decay(
optimizer, decay_step=decay_step, num_training_steps=num_steps
)
else:
scheduler = optimization.get_constant_schedule(optimizer)
return optimizer, scheduler
def save_checkpoint(self, path):
# torch.save(self.args, os.path.join(path, "args.pt"))
torch.save(self.model.state_dict(), os.path.join(path, "model_state_dict.pt"))
# torch.save(self.optimizer.state_dict(), os.path.join(path, "optimizer_state_dict.pt"))
# torch.save(self.scheduler.state_dict(), os.path.join(path, "scheduler_state_dict.pt"))
return
def train(self):
raise NotImplementedError()
def evaluate(self):
raise NotImplementedError()
def test(self):
raise NotImplementedError()
def save_train_result(self, train_record, eval_record):
args = self.args
train_loss_record = train_record
eval_bleu_record, eval_gs_record = eval_record
best_bleu = np.max(eval_bleu_record)
step_of_best_bleu = eval_gs_record[np.argmax(eval_bleu_record)]
print("best BLEU: %.4f in step %d" % (best_bleu, step_of_best_bleu))
with open(os.path.join(args.output_dir, "training_result.log"), 'w') as f:
f.write("best BLEU: %.4f in step %d" % (best_bleu, step_of_best_bleu))
plt.figure()
plt.xlabel("step")
plt.ylabel("BLEU")
plt.plot(eval_gs_record, eval_bleu_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "bleu.pdf"), format='pdf') # bbox_inches='tight'
plt.figure()
plt.xlabel("step")
plt.ylabel("loss")
plt.plot(list(range(len(train_loss_record))), train_loss_record)
# plt.plot(eval_gs_record, eval_loss_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "loss.pdf"), format='pdf')
return best_bleu, step_of_best_bleu
class TransferModelTrainer(BasicTrainer):
def __init__(self, args, model, train_data=None, dev_data=None,
test_data=None, **kwargs):
super().__init__(
args, model, train_data, dev_data, test_data
)
self.tokenizer = kwargs["tokenizer"]
if self.args.cls_model_path:
print(f"Load classifier model form {self.args.cls_model_path}")
self.model.classifier.load_state_dict(
torch.load(
os.path.join(self.args.cls_model_path, "model_state_dict.pt")
)
)
self.model.freeze_cls()
# args.cls_weight = 0.05
# args.ca_weight = 0.0
# args.bt_weight = 1.0
self.use_caw_schedule = False
del self.optimizer
del self.scheduler
if self.train_dataloader:
params = []
for k, v in self.model.named_parameters():
# print("%s: %s" % (k, str(v.shape)))
if "classifier" in k or "lm" in k:
print("not optimize %s" % k)
else:
print("add params of %s to optimizer" % k)
params.append(v)
self.optimizer, self.scheduler\
= self.get_optimizer(params)
# torch.autograd.set_detect_anomaly(True)
self.clf_model = torch.load(args.cnn_clf_path).to(args.device)
self.clf_model.eval()
self.dev_ref_path_list = getattr(args, "dev_ref_path_list", None)
self.test_ref_path_list = getattr(args, "test_ref_path_list", None)
if self.test_ref_path_list is None:
self.test_ref_path_list = self.args.ref_list
print("self.dev_ref_path_list is")
print(self.dev_ref_path_list)
print("self.test_ref_path_list is")
print(self.test_ref_path_list)
if not self.args.use_bpe:
self.dev_data_path_list = [
[os.path.join(self.args.data_dir, f"dev.{i}")] for i in range(2)
]
self.test_data_path_list = [
[os.path.join(self.args.data_dir, f"test.{i}")] for i in range(2)
]
else:
self.dev_data_path_list = [
[os.path.join(self.args.data_dir, f"self_ref.dev.{i}")] for i in range(2)
]
self.test_data_path_list = [
[os.path.join(self.args.data_dir, f"self_ref.test.{i}")] for i in range(2)
]
print("self.dev_data_path_list is")
print(self.dev_data_path_list)
print("self.test_data_path_list is")
print(self.test_data_path_list)
def get_optimizer(self, params=None):
args = self.args
if params is None:
print("return because params is None")
return None, None
# params = self.model.parameters()
train_dataloader = self.train_dataloader
optimizer = optimization.get_optim(args, params)
num_steps = len(train_dataloader) * args.num_train_epochs // args.grad_accum_interval
args.num_steps = num_steps
print("Total number of steps: %d" % num_steps)
decay_step = len(train_dataloader) * args.decay_epoch
if args.decay_epoch > 0:
print("Step when lr starts to decay: %d" % decay_step)
scheduler = optimization.get_constant_schedule_with_linear_decay(
optimizer, decay_step=decay_step, num_training_steps=num_steps
)
else:
scheduler = optimization.get_constant_schedule(optimizer)
return optimizer, scheduler
def train(self, train_dataloader=None):
print("\n### TRAINING BEGINS ###")
args = self.args
model = self.model
optimizer = self.optimizer
scheduler = self.scheduler
train_dataloader = train_dataloader if train_dataloader else self.train_dataloader
model.train()
loss_record = [] # loss at global_step 0, 1, 2 ...
dev_metric_record = []
global_step_record_for_eval = []
global_step = 0
pad_id = args.pad_id
grad_accum_interval = args.grad_accum_interval
log_loss = 0.0
num_iters_per_epoch = len(train_dataloader)
normalizer = min(num_iters_per_epoch, grad_accum_interval)
cls_w = args.cls_weight
print("cls_w is", cls_w)
if self.use_caw_schedule:
start = 0.0
stop = args.ca_weight
ratio = 0.5
ca_w_list = lambda_schedule(args.num_steps,
start=start, stop=stop, ratio=ratio)
print(f"ca_w uses schedule (start={start}, stop={stop}, ratio={ratio})")
ca_w = ca_w_list[0]
else:
ca_w = args.ca_weight
print("ca_w is", ca_w)
bt_w = args.bt_weight
print("bt_w is", bt_w)
model.zero_grad()
if args.freeze_emb_at_beginning:
model.freeze_emb()
start_time = time.time()
for ep in range(args.num_train_epochs):
if ep == args.unfreeze_at_ep and args.freeze_emb_at_beginning:
model.unfreeze_emb()
for step, batch in enumerate(train_dataloader):
src, lab, src_len = batch
# print(f"ep:{ep}, step: {step}, src.shape[1] is", src.shape[1])
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src = sorted_src.to(args.device)
sorted_src_len = sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
try:
sorted_src_pad_mask = sorted_src==pad_id
sorted_loss_tuple, sorted_output_tuple,\
                        sorted_align = model(sorted_src, sorted_src_len,
sorted_lab, sorted_src_pad_mask)
sorted_rec_loss, sorted_bt_loss,\
sorted_src_cls_loss, sorted_soft_out_cls_loss,\
sorted_out_cls_loss, sorted_ca_loss = sorted_loss_tuple
sorted_output, sorted_output_len = sorted_output_tuple
rec_loss = sorted_rec_loss.mean()
bt_loss = sorted_bt_loss.mean()
src_cls_loss = sorted_src_cls_loss.mean()
soft_out_cls_loss = sorted_soft_out_cls_loss.mean()
out_cls_loss = sorted_out_cls_loss.mean()
ca_loss = sorted_ca_loss.mean()
loss = rec_loss + bt_w * bt_loss\
+ cls_w * soft_out_cls_loss + ca_w * ca_loss
loss /= normalizer
loss.backward()
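                    # Step the optimizer every `grad_accum_interval` mini-batches;
                    # when the interval is longer than one epoch, step once at the
                    # end of each epoch instead.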
if (step+1) % grad_accum_interval == 0 or\
(grad_accum_interval >= num_iters_per_epoch and
(step+1) == num_iters_per_epoch):
g = clip_grad_norm(model.parameters(), args.max_grad_norm)
optimizer.step()
scheduler.step()
model.zero_grad()
loss_record.append(log_loss)
# global_step += 1
log_loss = 0.0
if global_step > 0 and global_step % args.log_interval == 0:
print(
f"epoch: {ep} "\
f"step: {global_step} "\
f"loss: {loss.item() * normalizer:.4f} "\
f"rec_loss: {rec_loss.item():.4f} "\
f"bt_loss: {bt_loss.item():.4f} "\
f"src_cls_loss: {src_cls_loss.item():.4f} "\
f"soft_out_cls_loss: {soft_out_cls_loss.item():.4f} "\
f"out_cls_loss: {out_cls_loss.item():.4f} "\
f"ca_loss: {ca_loss.item():.4f} "\
f"||g||: {g:.2f} "\
f"ca_w: {ca_w:.4f} "\
f"time: {misc.timeBetween(start_time, time.time())}"
)
if global_step > 0 and global_step % args.eval_interval == 0:
print("\neval model at step: %d" % global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
org_output_dir = args.output_dir
args.output_dir = checkpoint_output_dir
print("dev")
dev_metric = self.evaluate()
dev_metric_record.append(dev_metric)
global_step_record_for_eval.append(global_step)
args.output_dir = org_output_dir
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
model.train()
global_step += 1
                        if self.use_caw_schedule:
                            ca_w = ca_w_list[min(global_step, len(ca_w_list) - 1)]
else:
log_loss += loss.item()
                except RuntimeError as e:
                    if 'out of memory' in str(e):
                        print('|| WARNING: ran out of memory ||\n')
                        if hasattr(torch.cuda, 'empty_cache'):
                            torch.cuda.empty_cache()
                    else:
                        print('|| WARNING: failed to train ||\n')
                    raise e
# gpu_profile(frame=sys._getframe(), event='line', arg=None)
print("### TRAINING ENDS ###\n")
print("\neval model at step: %d" % global_step)
checkpoint_output_dir = os.path.join(args.output_dir, "checkpoint-%d" % global_step)
if not os.path.exists(checkpoint_output_dir):
os.mkdir(checkpoint_output_dir)
org_output_dir = args.output_dir
args.output_dir = checkpoint_output_dir
print("dev")
dev_metric = self.evaluate()
dev_metric_record.append(dev_metric)
global_step_record_for_eval.append(global_step)
args.output_dir = org_output_dir
print("Save checkpoint at %s" % checkpoint_output_dir)
self.save_checkpoint(checkpoint_output_dir)
train_record = loss_record
eval_record = (dev_metric_record, global_step_record_for_eval)
with open(os.path.join(args.output_dir, "record.pt"), "wb") as f:
pickle.dump({"train": train_record, "eval": eval_record}, f)
self.save_train_result(train_record, eval_record)
return train_record, eval_record
def evaluate(self, eval_dataloader=None, data_path_list=None, ref_path_list=None, data_name="dev"):
eval_dataloader = eval_dataloader if eval_dataloader else self.dev_dataloader
ref_path_list = ref_path_list if ref_path_list else self.dev_ref_path_list
data_path_list = data_path_list if data_path_list else self.dev_data_path_list
args = self.args
model = self.model
tokenizer = self.tokenizer
clf_model = self.clf_model
model.eval()
num_data = 0
total_loss = 0
total_rec_loss = 0
total_bt_loss = 0
total_src_cls_loss = 0
total_soft_out_cls_loss = 0
total_out_cls_loss = 0
total_ca_loss = 0
outputs_list = []
outputs_len_list = []
lab_list = []
clf_preds_list = []
cls_w = args.cls_weight
ca_w = args.ca_weight
bt_w = args.bt_weight
pad_id = args.pad_id
start_time = time.time()
with torch.no_grad():
for step, batch in enumerate(eval_dataloader):
src, lab, src_len = batch
num_data += src.shape[0]
# print(f"ep:{ep}, step: {step}, src.shape[1] is", src.shape[1])
sorted_src_len, indices = torch.sort(src_len, dim=0, descending=True)
_, resorted_indices = torch.sort(indices, dim=0)
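                # `indices` sorts the batch by descending length; sorting `indices`
                # itself gives the inverse permutation, used below to restore the
                # model outputs to the original batch order.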
sorted_src = torch.index_select(src, dim=0, index=indices)
sorted_lab = torch.index_select(lab, dim=0, index=indices)
sorted_src = sorted_src.to(args.device)
sorted_src_len = sorted_src_len.to(args.device)
sorted_lab = sorted_lab.to(args.device)
resorted_indices = resorted_indices.to(args.device)
try:
sorted_src_pad_mask = sorted_src==pad_id
sorted_loss_tuple, sorted_outputs_tuple,\
                        sorted_align = model(sorted_src, sorted_src_len,
sorted_lab, sorted_src_pad_mask)
sorted_rec_loss, sorted_bt_loss,\
sorted_src_cls_loss, sorted_soft_out_cls_loss,\
sorted_out_cls_loss, sorted_ca_loss = sorted_loss_tuple
sorted_outputs, sorted_outputs_len = sorted_outputs_tuple
# shape of sorted_outputs is [batch_size, max_len]
outputs = torch.index_select(sorted_outputs, dim=0, index=resorted_indices)
outputs_len = torch.index_select(sorted_outputs_len, dim=0, index=resorted_indices)
clf_preds = torch.argmax(clf_model(outputs), dim=-1)
rec_loss = sorted_rec_loss.sum()
bt_loss = sorted_bt_loss.sum()
src_cls_loss = sorted_src_cls_loss.sum()
soft_out_cls_loss = sorted_soft_out_cls_loss.sum()
out_cls_loss = sorted_out_cls_loss.sum()
ca_loss = sorted_ca_loss.sum()
loss = rec_loss + bt_w * bt_loss\
+ cls_w * soft_out_cls_loss + ca_w * ca_loss
total_rec_loss += rec_loss.item()
total_bt_loss += bt_loss.item()
total_src_cls_loss += src_cls_loss.item()
total_soft_out_cls_loss += soft_out_cls_loss.item()
total_out_cls_loss += out_cls_loss.item()
total_ca_loss += ca_loss.item()
total_loss += loss.item()
outputs_list.extend(
[x.squeeze(0) for x in torch.split(outputs, split_size_or_sections=1, dim=0)]
)
outputs_len_list.extend(
[x.squeeze(0) for x in torch.split(outputs_len, split_size_or_sections=1, dim=0)]
)
lab_list.extend(
[x.squeeze(0) for x in torch.split(lab, split_size_or_sections=1, dim=0)]
)
clf_preds_list.extend(
[x.squeeze(0).item() for x in torch.split(clf_preds, split_size_or_sections=1, dim=0)]
)
except RuntimeError as e:
if 'out of memory' in str(e):
print('|| WARNING: ran out of memory ||\n')
if hasattr(torch.cuda, 'empty_cache'):
torch.cuda.empty_cache()
else:
print('|| WARNING: fail to train ||\n')
raise e
eval_loss = total_loss / num_data
eval_rec_loss = total_rec_loss / num_data
eval_bt_loss = total_bt_loss / num_data
eval_src_cls_loss = total_src_cls_loss / num_data
eval_soft_out_cls_loss = total_soft_out_cls_loss / num_data
eval_out_cls_loss = total_out_cls_loss / num_data
eval_ca_loss = total_ca_loss / num_data
inv_lab_list = 1-np.array(lab_list)
# print("clf_preds_list is")
# print(clf_preds_list)
eval_acc = accuracy_score(inv_lab_list, np.array(clf_preds_list)) * 100.0
transfer_file_names = [
os.path.join(args.output_dir, f"{data_name}.0.tsf"),
os.path.join(args.output_dir, f"{data_name}.1.tsf")
]
transfer_files = [
open(transfer_file_names[0], 'w'),
open(transfer_file_names[1], 'w')
]
count = 0
# print(f"len(outputs_list): {len(outputs_list)}, len(outputs_len_list): {len(outputs_len_list)}")
for output, output_len, l in zip(outputs_list, outputs_len_list, lab_list):
# print("output is", output)
text = tokenizer.decode(output, include_sos_eos=False)
if output_len < args.max_decoding_len:
pass
if args.use_bpe:
text = text.replace("@@ ", "")
text = text.strip("@@")
transfer_files[l].write(text+'\n')
count += 1
transfer_files[0].close()
transfer_files[1].close()
        if count != num_data:
            raise RuntimeError(f"count: {count}, total_num: {num_data}")
bleu_evaluator = BLEUEvaluator()
if ref_path_list is not None:
bleu_score_021 = bleu_evaluator.score(ref_path_list[0], transfer_file_names[0])
bleu_score_120 = bleu_evaluator.score(ref_path_list[1], transfer_file_names[1])
bleu_score = (bleu_score_021 + bleu_score_120) / 2
else:
bleu_score = None
if data_path_list is not None:
self_bleu_score_021 = bleu_evaluator.score(data_path_list[0], transfer_file_names[0])
self_bleu_score_120 = bleu_evaluator.score(data_path_list[1], transfer_file_names[1])
self_bleu_score = (self_bleu_score_021 + self_bleu_score_120) / 2
else:
self_bleu_score = None
print("==============================")
if ref_path_list is not None:
print(
f"BLEU: {bleu_score:.4f} "\
f"(0->1:{bleu_score_021:.4f}, 1->0:{bleu_score_120:.4f}) ",
end='',
)
if data_path_list is not None:
print(
f"self-BLEU: {self_bleu_score:.4f} "\
f"(0->1:{self_bleu_score_021:.4f}, 1->0:{self_bleu_score_120:.4f}) ",
end='',
)
print(
f"acc: {eval_acc:.4f}\n"\
f"loss: {eval_loss:.4f} "\
f"rec_loss: {eval_rec_loss:.4f} "\
f"bt_loss: {eval_bt_loss:.4f} "\
f"src_cls_loss: {eval_src_cls_loss:.4f} "\
f"soft_out_cls_loss: {eval_soft_out_cls_loss:.4f} "\
f"out_cls_loss: {eval_out_cls_loss:.4f} "\
f"ca_loss: {eval_ca_loss:.4f} "\
f"time: {misc.timeBetween(start_time, time.time())}"
)
print("==============================\n")
return (bleu_score, self_bleu_score, eval_acc)
def test(self, test_dataloader=None, data_path_list=None, ref_path_list=None):
test_dataloader = test_dataloader if test_dataloader else self.test_dataloader
ref_path_list = ref_path_list if ref_path_list else self.test_ref_path_list
data_path_list = data_path_list if data_path_list else self.test_data_path_list
return self.evaluate(test_dataloader, data_path_list, ref_path_list, "test")
def save_train_result(self, train_record, eval_record):
args = self.args
train_loss_record = train_record
dev_metric_record, eval_gs_record = eval_record
dev_unzip = list(zip(*dev_metric_record))
dev_bleu_record, dev_self_bleu_record, dev_acc_record = np.array(dev_unzip[0]),\
np.array(dev_unzip[1]), np.array(dev_unzip[2])
if (dev_bleu_record!=None).all():
best_dev_bleu = np.max(dev_bleu_record)
step_of_best_dev_bleu = eval_gs_record[np.argmax(dev_bleu_record)]
print("best dev BLEU: %.4f in step %d" % (best_dev_bleu, step_of_best_dev_bleu))
fig = plt.figure()
ax_1 = fig.add_subplot(111)
ax_2 = ax_1.twinx()
ax_1.set_xlabel("step")
ax_1.set_ylabel("(self-)BLEU")
ax_2.set_ylabel("Acc")
line_list = []
line_label_list = []
if (dev_bleu_record!=None).all():
# l, = ax_1.plot(eval_gs_record, dev_bleu_record, '-', c='#1f77b4', label="dev BLEU")
l, = ax_1.plot(eval_gs_record, dev_bleu_record, '-', c='#1f77b4')
line_list.append(l)
line_label_list.append("dev BLEU")
# l, = ax_1.plot(eval_gs_record, dev_self_bleu_record, ':', c='#1f77b4', label="dev self-BLEU")
l, = ax_1.plot(eval_gs_record, dev_self_bleu_record, ':', c='#1f77b4')
line_list.append(l)
line_label_list.append("dev self-BLEU")
# l, = ax_2.plot(eval_gs_record, dev_acc_record, '--', c='#1f77b4', label="dev acc")
l, = ax_2.plot(eval_gs_record, dev_acc_record, '--', c='#1f77b4')
line_list.append(l)
line_label_list.append("dev acc")
plt.legend(line_list, line_label_list)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "bleu_and_acc.pdf"), format='pdf') # bbox_inches='tight'
plt.close()
plt.figure()
plt.xlabel("step")
plt.ylabel("loss")
plt.plot(list(range(len(train_loss_record))), train_loss_record)
# plt.plot(eval_gs_record, eval_loss_record)
plt.tight_layout()
plt.savefig(os.path.join(args.output_dir, "loss.pdf"), format='pdf')
plt.close()
| 42.047619 | 112 | 0.558824 | 3,779 | 30,022 | 4.111934 | 0.082826 | 0.024712 | 0.020593 | 0.015316 | 0.595083 | 0.507047 | 0.433297 | 0.380784 | 0.368042 | 0.336122 | 0 | 0.011509 | 0.340117 | 30,022 | 713 | 113 | 42.106592 | 0.772853 | 0.05053 | 0 | 0.337413 | 0 | 0 | 0.087746 | 0.021303 | 0 | 0 | 0 | 0 | 0.003497 | 1 | 0.02972 | false | 0.001748 | 0.026224 | 0 | 0.08042 | 0.078671 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d925f20fe6fe0eccd5e8c08b7081757ad19c44be | 3,414 | py | Python | privatefsbot.py | l0k9j8/fstgbot | 6b20d28466ecc97e09f0a3919d43a3c4d1a82357 | [
"MIT"
] | null | null | null | privatefsbot.py | l0k9j8/fstgbot | 6b20d28466ecc97e09f0a3919d43a3c4d1a82357 | [
"MIT"
] | null | null | null | privatefsbot.py | l0k9j8/fstgbot | 6b20d28466ecc97e09f0a3919d43a3c4d1a82357 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
import logging
logging.basicConfig(level=logging.DEBUG,
format='%(asctime)s - %(name)s - %(levelname)s - %(message)s')
logger = logging.getLogger(__name__)
from telegram import Updater
from commands import history, cat, cd, get, ls, pwd, save
from settings import ACCESS_LIST, BOT_TOCKEN
from utils import on_error_decorator
@on_error_decorator
def on_ls(bot, update):
path = update.message.text[3:].strip()
user = update.message.from_user['username']
bot.sendMessage(update.message.chat_id, text='<pre>%s</pre>' % ls(user, path), parse_mode='HTML')
@on_error_decorator
def on_start(bot, update):
    user = update.message.from_user['username']
    if user not in ACCESS_LIST:
        bot.sendMessage(update.message.chat_id, text='<b>I am not your mommy!</b>', parse_mode='HTML')
    else:
        bot.sendMessage(update.message.chat_id, text=pwd(user))
def on_error(_, update, error):
    logger.warning('Update "%s" caused error "%s"' % (update, error))
@on_error_decorator
def on_cd(bot, update):
path = update.message.text[3:].strip()
user = update.message.from_user['username']
bot.sendMessage(update.message.chat_id, text='<pre>%s</pre>' % cd(user, path), parse_mode='HTML')
@on_error_decorator
def on_get(bot, update):
path = update.message.text[4:].strip()
user = update.message.from_user['username']
f, f_type = get(user, path)
    # Pick the sender matching the file type, falling back to a plain document.
    sender = {'video': bot.sendVideo,
              'audio': bot.sendAudio,
              'image': bot.sendPhoto}.get(f_type)
    if sender is not None:
        sender(update.message.chat_id, f)
    else:
        bot.sendDocument(update.message.chat_id, f, filename=path)
@on_error_decorator
def on_pwd(bot, update):
user = update.message.from_user['username']
bot.sendMessage(update.message.chat_id, text=pwd(user))
@on_error_decorator
def on_history(bot, update):
user = update.message.from_user['username']
bot.sendMessage(update.message.chat_id, text=history(user))
@on_error_decorator
def on_message(bot, update):
if hasattr(update.message, 'document'):
bot.sendMessage(update.message.chat_id,
text=save(update.message.from_user['username'],
bot.getFile(update.message.document.file_id),
update.message.document.file_name))
@on_error_decorator
def on_cat(bot, update):
path = update.message.text[4:].strip()
user = update.message.from_user['username']
bot.sendMessage(update.message.chat_id, text='<pre>%s</pre>' % cat(user, path), parse_mode='HTML')
def run_bot():
updater = Updater(BOT_TOCKEN)
updater.dispatcher.addErrorHandler(on_error)
updater.dispatcher.addTelegramCommandHandler("start", on_start)
updater.dispatcher.addTelegramCommandHandler("ls", on_ls)
updater.dispatcher.addTelegramCommandHandler("cd", on_cd)
updater.dispatcher.addTelegramCommandHandler("get", on_get)
updater.dispatcher.addTelegramCommandHandler("cat", on_cat)
updater.dispatcher.addTelegramCommandHandler("pwd", on_pwd)
updater.dispatcher.addTelegramCommandHandler("history", on_history)
updater.dispatcher.addTelegramMessageHandler(on_message)
updater.start_polling()
updater.idle()
if __name__ == '__main__':
run_bot()
| 34.14 | 102 | 0.668131 | 425 | 3,414 | 5.174118 | 0.211765 | 0.147794 | 0.077308 | 0.086403 | 0.46703 | 0.387904 | 0.352433 | 0.318781 | 0.318781 | 0.295589 | 0 | 0.001826 | 0.198008 | 3,414 | 99 | 103 | 34.484848 | 0.801315 | 0.012302 | 0 | 0.273973 | 0 | 0 | 0.083383 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136986 | false | 0 | 0.068493 | 0 | 0.205479 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9299b500db19594cc491478d54c215f25629150 | 670 | py | Python | app/platform.py | edwarts/igenweb_supplier | 90e03b7acdedf65ae6b338d39b067bd4d1c0eaad | [
"MIT"
] | null | null | null | app/platform.py | edwarts/igenweb_supplier | 90e03b7acdedf65ae6b338d39b067bd4d1c0eaad | [
"MIT"
] | null | null | null | app/platform.py | edwarts/igenweb_supplier | 90e03b7acdedf65ae6b338d39b067bd4d1c0eaad | [
"MIT"
] | null | null | null | import os
from config import config
def getpath(path):
    base_path = os.path.join(config.upload_path, 'app', 'static', 'upload')
    # Map each known upload category to its folder; unknown values are
    # returned unchanged.
    folders = {
        "licence": os.path.join(base_path, 'licence'),
        "cover": os.path.join(base_path, 'cover'),
        "pieceimg": os.path.join(base_path, 'pieceimg'),
        "light": os.path.join(base_path, 'light'),
    }
    return folders.get(path, path)
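# Example: getpath("cover") returns "<upload_path>/app/static/upload/cover",
# while an unknown key such as "misc" is returned unchanged.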
| 31.904762 | 65 | 0.658209 | 86 | 670 | 4.872093 | 0.244186 | 0.095465 | 0.119332 | 0.152745 | 0.229117 | 0.229117 | 0 | 0 | 0 | 0 | 0 | 0 | 0.237313 | 670 | 20 | 66 | 33.5 | 0.819961 | 0 | 0 | 0 | 0 | 0 | 0.097015 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d92ba4c735d6a176f4e2696a38a3cab4031d7e30 | 5,551 | py | Python | csvfit/fitpt.py | hanKo91/csvfit | 0b07929235f0531ea3b21df2d550390f680edfcf | [
"MIT"
] | null | null | null | csvfit/fitpt.py | hanKo91/csvfit | 0b07929235f0531ea3b21df2d550390f680edfcf | [
"MIT"
] | null | null | null | csvfit/fitpt.py | hanKo91/csvfit | 0b07929235f0531ea3b21df2d550390f680edfcf | [
"MIT"
] | null | null | null | from click.exceptions import FileError
from scipy.optimize import curve_fit
import matplotlib.pyplot as plt
from . import util
import numpy as np
import click
import sys
import csv
import os
def pt1(t, K, T):
""" time-domain solution/formula for
a first-order/pt1 system
Args:
t (float): time
K (float): gain
T (float): time-constant
Returns:
float: f(t)
"""
return K * (1 - np.exp(-t/T))
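# Sanity check of the formula: at t == T the PT1 step response equals
# K * (1 - 1/e), i.e. about 63.2 % of the final value K.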
def pt2(t, K, T):
""" time-domain solution/formula for
a second-order/pt2 system with
critical damping, d = 1, T = T1 = T2
Args:
t (float): time
K (float): gain
T (float): time-constant
Returns:
float: f(t)
"""
return K * (1 - np.exp(-t/T) - ((t/T) * np.exp(-t/T)))
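# For this critically damped PT2, the step response at t == T equals
# K * (1 - 2/e), i.e. about 26.4 % of the final value K.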
def pt1gen(t_arr, K, T, y0 = 0):
""" generate y(t) of PT1 for t in t_arr
Args:
t_arr (list(float)): time array
K (float): gain
T (float): time-constant
Returns:
list(float): y(t) for t in t_arr
"""
return [pt1(t, K, T) + y0 for t in t_arr]
def pt2gen(t_arr, K, T, y0 = 0):
""" generate y(t) of PT2 for t in t_arr
Args:
t_arr (list(float)): time array
K (float): gain
T (float): time-constant
Returns:
list(float): y(t) for t in t_arr
"""
return [pt2(t, K, T) + y0 for t in t_arr]
def pt1fit(t, y, Kg=1, Tg=1):
""" curve_fit of pt1(-like) data
Args:
t (list(float)): time
y (list(float)): output
Kg (float, optional): initial guess for gain. Defaults to 1.
Tg (float, optional): initial guess for time-constant. Defaults to 1.
Returns:
tuple(float,float): best fit -> K_opt, T_opt
"""
if not len(t) == len(y):
return None
# delete offset and normalize
t = [n - t[0] for n in t]
y = [n - y[0] for n in y]
t_norm = [n / max(t) for n in t]
y_norm = [n / max(y) for n in y]
(popt,_) = curve_fit(pt1, t_norm, y_norm, p0=[Kg, Tg], absolute_sigma=True)
K_opt = max(y) * popt[0]
T_opt = max(t) * popt[1]
return (K_opt, T_opt)
def pt2fit(t, y, Kg=1, Tg=1):
""" curve_fit of pt2(-like) data
Args:
t (list(float)): time
y (list(float)): output
Kg (float, optional): initial guess for gain. Defaults to 1.
Tg (float, optional): initial guess for time-constant. Defaults to 1.
Returns:
tuple(float,float): best fit -> K_opt, T_opt
"""
if not len(t) == len(y):
return None
# delete offset and normalize
t = [n - t[0] for n in t]
    y = [n - y[0] for n in y]
t_norm = [n / max(t) for n in t]
y_norm = [n / max(y) for n in y]
(popt,_) = curve_fit(pt2, t_norm, y_norm, p0=[Kg, Tg], absolute_sigma=True)
K_opt = max(y) * popt[0]
T_opt = max(t) * popt[1]
return (K_opt, T_opt)
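def _demo_fit_roundtrip():
    """Minimal self-check sketch, not wired into the CLI. The parameters
    K=2.5 and T=0.8 and the time grid are illustrative assumptions: fitting
    a noiseless PT1 step response should recover them approximately."""
    t = list(np.linspace(0.0, 5.0, 200))
    y = pt1gen(t, 2.5, 0.8)
    K_opt, T_opt = pt1fit(t, y)
    print(f"pt1fit recovered K={K_opt:.3f}, T={T_opt:.3f}")  # expect ~2.5 and ~0.8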
@click.option("--datapath", "-d", help="Path to csv file with target data", type=click.Path(exists=True))
@click.option("--eventspath", "-e", help="Path to csv file with event data", type=click.Path())
@click.option("--outdir", "-o", help="Directory to store output artifacts", type=click.Path(exists=True))
@click.option("--columns", "-c", help="Name of the columns", type=str, multiple=True)
@click.option("--type", "-t", help="PTn type: PT1, PT2", type=str, multiple=True)
@click.option("--show", "-s", help="Show plots", is_flag=True)
@click.command()
def do_fit(datapath, eventspath, outdir, columns, ptn_type, show):
data = []
delimiter = util.get_delimiter(datapath)
with open(datapath, 'r') as data_file:
reader = csv.DictReader(data_file, delimiter=delimiter)
for entry in reader:
data.append(entry)
events = []
delimiter = util.get_delimiter(eventspath)
with open(eventspath, 'r') as events_file:
reader = csv.DictReader(events_file, delimiter=delimiter)
for entry in reader:
events.append(entry)
data_per_event = {}
time_slots = []
for key in list(events[0].keys()):
if key == "<event-name>":
continue
from_index = int(events[0][key])
to_index = int(events[1][key])
time_slots.append(range(from_index, to_index))
data_per_event[key] = data[from_index:to_index]
for col in columns:
ptn_param = []
plt.figure()
plt.plot(util.column(data, col), label="all")
for index, key in enumerate(list(data_per_event.keys())):
if type == "PT1":
ptn_param.append(pt1fit(time_slots[index], util.column(data_per_event[key], col)))
elif type == "PT2":
ptn_param.append(pt2fit(time_slots[index], util.column(data_per_event[key], col)))
print(key, end=": (K_opt, T_opt)=")
print(ptn_param[index])
plt.plot(time_slots[index], util.column(data_per_event[key], col), label=f"{key} : {time_slots[index]}")
plt.legend()
plt.grid('both')
plt.savefig(f"{outdir}/timeslots_{col}.png")
for index, key in enumerate(list(data_per_event.keys())):
plt.figure()
col_data = np.array(util.column(data_per_event[key], col))
col_data -= col_data[0]
plt.plot(col_data, label=f"{key}")
t_arr = range(len(time_slots[index]))
K_opt = ptn_param[index][0]
T_opt = ptn_param[index][1]
if type == "PT1":
plt.plot(pt1gen(t_arr, K_opt, T_opt), "--", label=f"{key} --fit")
elif type == "PT2":
plt.plot(pt2gen(t_arr, K_opt, T_opt), "--", label=f"{key} --fit")
plt.title(f"K_opt: {K_opt}\nT_opt: {T_opt}")
plt.legend()
plt.grid('both')
plt.savefig(f"{outdir}/{col}_{key}_fit.png")
if show:
plt.show()
def main():
if len(sys.argv) == 1:
do_fit.main(["--help"])
else:
do_fit.main()
if __name__ == "__main__":
main()
| 27.755 | 110 | 0.611421 | 914 | 5,551 | 3.588621 | 0.175055 | 0.015854 | 0.017073 | 0.017073 | 0.568293 | 0.564329 | 0.532927 | 0.480488 | 0.480488 | 0.427439 | 0 | 0.014362 | 0.222302 | 5,551 | 199 | 111 | 27.894472 | 0.745425 | 0.233291 | 0 | 0.277778 | 0 | 0 | 0.102902 | 0.013655 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.083333 | 0 | 0.231481 | 0.018519 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d92e5de5ecf3982b6bb7d90e259217262a07f9b5 | 4,435 | py | Python | wireless_emulator/cli.py | Melacon/OpenYuma_WE | f43a25cf99444c29d9fbadfe336182d60e1bc3f4 | [
"Apache-2.0"
] | 1 | 2017-02-24T09:30:21.000Z | 2017-02-24T09:30:21.000Z | wireless_emulator/cli.py | Melacon/OpenYuma_WE | f43a25cf99444c29d9fbadfe336182d60e1bc3f4 | [
"Apache-2.0"
] | null | null | null | wireless_emulator/cli.py | Melacon/OpenYuma_WE | f43a25cf99444c29d9fbadfe336182d60e1bc3f4 | [
"Apache-2.0"
] | 2 | 2018-06-21T13:23:08.000Z | 2021-04-01T06:35:16.000Z | from cmd import Cmd
import sys
from select import poll
import string
from wireless_emulator import *
from wireless_emulator.clean import cleanup
class CLI(Cmd):
prompt = 'WirelessTransportEmulator>'
identchars = string.ascii_letters + string.digits + '_' + '-'
def __init__(self, emulator, stdin=sys.stdin):
self.emulator = emulator
self.inPoller = poll()
self.inPoller.register(stdin)
Cmd.__init__(self)
print( '*** Starting CLI:\n' )
self.run()
def run(self):
while True:
try:
# Make sure no nodes are still waiting
self.cmdloop()
break
except KeyboardInterrupt:
# Output a message - unless it's also interrupted
# pylint: disable=broad-except
try:
print( '\nKeyboard interrupt. Use quit or exit to shotdown the emulator.\n' )
except Exception:
pass
def default(self, line):
"""Called on an input line when the command prefix is not recognized.
Overridden to run shell commands when a node is the first
CLI argument. Past the first CLI argument, node names are
automatically replaced with corresponding IP addrs."""
first, args, line = self.parseline(line)
node = self.emulator.getNeByName(first)
if node is not None:
rest = args.split(' ')
node.executeCommand(args)
else:
print('Node %s not found' % first)
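        # Example: typing "NE-1 ifconfig" at the prompt (where "NE-1" is a
        # hypothetical element name, as listed by print_nodes) would run
        # "ifconfig" inside that network element.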
def emptyline( self ):
"Don't repeat last command when you hit return."
pass
def do_exit(self, _line):
"Exit"
cleanup(self.emulator.configFileName)
return 'exited by user command'
def do_quit(self, line):
"Exit"
return self.do_exit(line)
def do_print_nodes(self, _line):
"Prints the names of all the Network Elements emulated"
print('Available NEs are:')
for neObj in self.emulator.networkElementList:
print('%s' % neObj.uuid)
def do_print_node_info(self, line):
"Prints the information of the specified Network Element"
args = line.split()
if len(args) != 1:
print('ERROR: usage: print_node_info <NE_UUID>')
return
node = self.emulator.getNeByName(args[0])
if node is not None:
print('#########################################')
print('#### Network Element UUID: \'%s\'' % node.uuid)
print('#### Network Element management IP: %s' % node.managementIPAddressString)
print('########### Interfaces: ###########')
for intf in node.interfaceList:
print('Interface: UUID=\'%s\' having IP=%s and Linux Interface Name=\'%s\'' %
(intf.uuid, intf.IP, intf.interfaceName))
print('#########################################')
else:
print('Node %s not found' % args[0])
def do_dump_nodes(self, _line):
"Dumps the information about all of the available Network Elements"
for node in self.emulator.networkElementList:
print('#########################################')
print('#### Network Element UUID: \'%s\'' % node.uuid)
print('#### Network Element management IP: %s' % node.managementIPAddressString)
print('########### Interfaces: ###########')
for intf in node.interfaceList:
print('Interface: UUID=\'%s\' having IP=%s and Linux Interface Name=\'%s\'' %
(intf.uuid, intf.IP, intf.interfaceName))
print('#########################################')
def do_dump_links(self, _line):
"Dumps the links available in the network"
for topo in self.emulator.topologies:
print('#################### %s #####################' % topo.topologyLayer)
for link in topo.linkList:
print('## Link=%d ## \'%s\': \'%s\' <-------> \'%s\':\'%s\'' %
                      (link.linkId, link.interfacesObj[0].getNeName(), link.interfacesObj[0].getInterfaceUuid(),
                       link.interfacesObj[1].getInterfaceUuid(), link.interfacesObj[1].getNeName()))
print('#########################################')
| 39.247788 | 112 | 0.530778 | 460 | 4,435 | 5.05 | 0.358696 | 0.041326 | 0.032716 | 0.016358 | 0.291864 | 0.249247 | 0.193715 | 0.193715 | 0.193715 | 0.193715 | 0 | 0.002259 | 0.30124 | 4,435 | 112 | 113 | 39.598214 | 0.747338 | 0.140699 | 0 | 0.306818 | 0 | 0 | 0.272139 | 0.062687 | 0 | 0 | 0 | 0 | 0 | 1 | 0.113636 | false | 0.022727 | 0.079545 | 0 | 0.261364 | 0.272727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d931c71fea8c07e381405bd85803e56da95fcf53 | 755 | py | Python | azulejo/test/key_binder.py | johnteslade/azulejo | 3b1a35981360513b21f90d96afff10352b6363e6 | [
"MIT"
] | 3 | 2015-07-17T09:35:22.000Z | 2015-11-15T00:13:32.000Z | azulejo/test/key_binder.py | johnteslade/azulejo | 3b1a35981360513b21f90d96afff10352b6363e6 | [
"MIT"
] | 1 | 2015-07-17T09:36:45.000Z | 2015-07-22T20:20:53.000Z | azulejo/test/key_binder.py | johnteslade/azulejo | 3b1a35981360513b21f90d96afff10352b6363e6 | [
"MIT"
] | null | null | null |
class KeyBinderDummy(object):
"""Class used to allow keybindings to be caught and to be actioned."""
def __init__(self):
self.bindings = []
self.saved_obj = None
def bind(self, action, dispatcher, dispatcher_params):
""" Bind a key press """
self.bindings.append({
'action': action,
'dispatcher': dispatcher,
'dispatcher_params': dispatcher_params,
})
def action_key(self, action):
""" Actions a key press by calling the relavent dispatcher """
key_found = [x for x in self.bindings if x['action'] == action]
assert len(key_found) == 1
func = key_found[0]['dispatcher']
func(key_found[0]['dispatcher_params'])
| 23.59375 | 74 | 0.593377 | 88 | 755 | 4.931818 | 0.465909 | 0.147465 | 0.119816 | 0.059908 | 0.105991 | 0 | 0 | 0 | 0 | 0 | 0 | 0.005597 | 0.290066 | 755 | 31 | 75 | 24.354839 | 0.804104 | 0.181457 | 0 | 0 | 0 | 0 | 0.111111 | 0 | 0 | 0 | 0 | 0 | 0.066667 | 1 | 0.2 | false | 0 | 0 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d93384ace79fad1a67f4aac86155075e4bc1666a | 1,179 | py | Python | code/at_offer/dynamic_programming/coding_interview47.py | zhangrong1722/interview | 187a485de0774561eb843d8ee640236adda97b90 | [
"Apache-2.0"
] | 2 | 2020-01-05T07:46:20.000Z | 2020-04-17T02:58:13.000Z | code/at_offer/dynamic_programming/coding_interview47.py | zhangrong1722/interview | 187a485de0774561eb843d8ee640236adda97b90 | [
"Apache-2.0"
] | 1 | 2020-01-05T07:50:26.000Z | 2020-04-28T03:50:08.000Z | code/at_offer/dynamic_programming/coding_interview47.py | zhangrong1722/interview | 187a485de0774561eb843d8ee640236adda97b90 | [
"Apache-2.0"
] | 1 | 2020-04-18T03:58:26.000Z | 2020-04-18T03:58:26.000Z | """
Problem: maximum value of gifts.
On an m x n board, every cell holds a gift with a positive value. Starting from the
top-left corner of the board, you pick up the gift in each cell you visit, moving one
cell right or one cell down at a time, until you reach the bottom-right corner. Given
a board and the gifts on it, compute the maximum total value of gifts you can collect.
For example, on the board [[1, 4], [2, 5]] the best path 1 -> 4 -> 5 collects 10.
Approach: dynamic programming with the recurrence dp[i][j] = max(dp[i-1][j], dp[i][j-1]) + arr[i][j]
"""
class Solution:
def GetGiftMaxValue(self, arr):
if arr is None or len(arr) == 0:
return 0
rows, cols = len(arr), len(arr[0])
results = [[0 for _ in range(cols)] for _ in range(rows)]
for i in range(rows):
for j in range(cols):
                # Boundary cells have no "up" or "left" neighbour; treat them as 0.
                up = results[i - 1][j] if i > 0 else 0
                left = results[i][j - 1] if j > 0 else 0
                results[i][j] = max(up, left) + arr[i][j]
return results[rows - 1][cols - 1]
s = Solution()
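# Expected outputs below: 53 (best path 1 -> 12 -> 5 -> 7 -> 7 -> 16 -> 5),
# then 0 for the None input, and 7 for both the single-row and single-column grids.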
print(s.GetGiftMaxValue([[1, 10, 3, 8], [12, 2, 9, 6], [5, 7, 4, 11], [3, 7, 16, 5]]))
print(s.GetGiftMaxValue(None))
print(s.GetGiftMaxValue([[1, 4, 2]]))
print(s.GetGiftMaxValue([[1], [4], [2]]))
| 33.685714 | 89 | 0.47922 | 156 | 1,179 | 3.608974 | 0.346154 | 0.039076 | 0.079929 | 0.031972 | 0.25222 | 0.25222 | 0.152753 | 0.152753 | 0.152753 | 0.110124 | 0 | 0.051248 | 0.354538 | 1,179 | 34 | 90 | 34.676471 | 0.688568 | 0.167091 | 0 | 0.25 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0.041667 | 0 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d933e1e2f9d405d172ef31e50f4cff727e9bd7de | 218 | py | Python | doubanRequest.py | speedsnail99/PythonDouban | c4a556311632c547162589220433ec59a962a2d6 | [
"MIT"
] | null | null | null | doubanRequest.py | speedsnail99/PythonDouban | c4a556311632c547162589220433ec59a962a2d6 | [
"MIT"
] | null | null | null | doubanRequest.py | speedsnail99/PythonDouban | c4a556311632c547162589220433ec59a962a2d6 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# @File : doubanRequest.py
# @Author: G
# @Date : 2018/8/5
import requests
url = 'https://movie.douban.com'
doubanText = requests.get(url).text
print(doubanText)
| 12.111111 | 35 | 0.642202 | 30 | 218 | 4.666667 | 0.866667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.038674 | 0.169725 | 218 | 17 | 36 | 12.823529 | 0.734807 | 0.444954 | 0 | 0 | 0 | 0 | 0.206897 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0.25 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d934ab92936dea1622b31e73b9513677a43d6b45 | 31,876 | py | Python | tests/shop/test_shop_views.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | null | null | null | tests/shop/test_shop_views.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | 93 | 2017-09-01T22:24:10.000Z | 2021-12-22T14:07:06.000Z | tests/shop/test_shop_views.py | Torniojaws/vortech-backend | f775a97eeae089fa720088d86fe92d40bc5d65bc | [
"MIT"
] | null | null | null | import json
import unittest
from flask_caching import Cache
from app import app, db
from apps.shop.models import (
ShopItems,
ShopCategories,
ShopItemsCategoriesMapping,
ShopItemLogos,
ShopItemsURLMapping
)
from apps.users.models import Users, UsersAccessTokens, UsersAccessLevels, UsersAccessMapping
from apps.utils.time import get_datetime, get_datetime_one_hour_ahead
class TestShopViews(unittest.TestCase):
def setUp(self):
# Clear redis cache completely
cache = Cache()
cache.init_app(app, config={"CACHE_TYPE": "RedisCache"})
with app.app_context():
cache.clear()
self.app = app.test_client()
# Add some categories
cat1 = ShopCategories(
Category="Units",
SubCategory="Tests"
)
cat2 = ShopCategories(
Category="UnitTests",
SubCategory="TestsUnits"
)
db.session.add(cat1)
db.session.add(cat2)
db.session.commit()
self.valid_cats = [cat1.ShopCategoryID, cat2.ShopCategoryID]
# And some 3rd party logos
logo1 = ShopItemLogos(
Image="unittest-spotify.jpg",
Created=get_datetime()
)
logo2 = ShopItemLogos(
Image="unittest-bandcamp.jpg",
Created=get_datetime()
)
logo3 = ShopItemLogos(
Image="unittest-amazon.jpg",
Created=get_datetime()
)
logo4 = ShopItemLogos(
Image="unittest-deezer.jpg",
Created=get_datetime()
)
db.session.add(logo1)
db.session.add(logo2)
db.session.add(logo3)
db.session.add(logo4)
db.session.commit()
self.valid_logo_ids = [
logo1.ShopItemLogoID,
logo2.ShopItemLogoID,
logo3.ShopItemLogoID,
logo4.ShopItemLogoID,
]
# Add three shop items and related data
item1 = ShopItems(
Title="UnitTest ShopItem 1",
Description="UnitTest This is item 1",
Price=15.99,
Currency="EUR",
Image="unittest-shopitem1.jpg",
Created=get_datetime()
)
db.session.add(item1)
db.session.commit()
self.valid_items = [item1.ShopItemID]
item1_cat1 = ShopItemsCategoriesMapping(
ShopItemID=self.valid_items[0],
ShopCategoryID=self.valid_cats[0]
)
item1_cat2 = ShopItemsCategoriesMapping(
ShopItemID=self.valid_items[0],
ShopCategoryID=self.valid_cats[1]
)
db.session.add(item1_cat1)
db.session.add(item1_cat2)
db.session.commit()
item1_url1 = ShopItemsURLMapping(
ShopItemID=self.valid_items[0],
URLTitle="Spotify",
URL="http://www.example.com/spotify",
ShopItemLogoID=self.valid_logo_ids[0]
)
item1_url2 = ShopItemsURLMapping(
ShopItemID=self.valid_items[0],
URLTitle="BandCamp",
URL="http://www.example.com/bandcamp",
ShopItemLogoID=self.valid_logo_ids[1]
)
db.session.add(item1_url1)
db.session.add(item1_url2)
db.session.commit()
# Item 2
item2 = ShopItems(
Title="UnitTest ShopItem 2",
Description="UnitTest This is item 2",
Price=8.49,
Currency="EUR",
Image="unittest-shopitem2.jpg",
Created=get_datetime()
)
db.session.add(item2)
db.session.commit()
self.valid_items.append(item2.ShopItemID)
item2_cat1 = ShopItemsCategoriesMapping(
ShopItemID=self.valid_items[1],
ShopCategoryID=self.valid_cats[0]
)
db.session.add(item2_cat1)
db.session.commit()
item2_url1 = ShopItemsURLMapping(
ShopItemID=self.valid_items[1],
URLTitle="Spotify",
URL="http://www.example.com/spotify",
ShopItemLogoID=self.valid_logo_ids[0]
)
item2_url2 = ShopItemsURLMapping(
ShopItemID=self.valid_items[1],
URLTitle="BandCamp",
URL="http://www.example.com/bandcamp",
ShopItemLogoID=self.valid_logo_ids[1]
)
db.session.add(item2_url1)
db.session.add(item2_url2)
db.session.commit()
# Item 3
item3 = ShopItems(
Title="UnitTest ShopItem 3",
Description="UnitTest This is item 3",
Price=12,
Currency="EUR",
Image="unittest-shopitem3.jpg",
Created=get_datetime()
)
db.session.add(item3)
db.session.commit()
self.valid_items.append(item3.ShopItemID)
item3_cat1 = ShopItemsCategoriesMapping(
ShopItemID=self.valid_items[2],
ShopCategoryID=self.valid_cats[0]
)
item3_cat2 = ShopItemsCategoriesMapping(
ShopItemID=self.valid_items[2],
ShopCategoryID=self.valid_cats[1]
)
db.session.add(item3_cat1)
db.session.add(item3_cat2)
db.session.commit()
item3_url1 = ShopItemsURLMapping(
ShopItemID=self.valid_items[2],
URLTitle="Spotify",
URL="http://www.example.com/spotify",
ShopItemLogoID=self.valid_logo_ids[0]
)
item3_url2 = ShopItemsURLMapping(
ShopItemID=self.valid_items[2],
URLTitle="BandCamp",
URL="http://www.example.com/bandcamp",
ShopItemLogoID=self.valid_logo_ids[1]
)
db.session.add(item3_url1)
db.session.add(item3_url2)
db.session.commit()
# We also need a valid admin user for the add release endpoint test.
user = Users(
Name="UnitTest Admin",
Username="unittest",
Password="password"
)
db.session.add(user)
db.session.commit()
# This is non-standard, but is fine for testing.
self.access_token = "unittest-access-token"
user_token = UsersAccessTokens(
UserID=user.UserID,
AccessToken=self.access_token,
ExpirationDate=get_datetime_one_hour_ahead()
)
db.session.add(user_token)
db.session.commit()
# Define level for admin
if not UsersAccessLevels.query.filter_by(LevelName="Admin").first():
access_level = UsersAccessLevels(
UsersAccessLevelID=4,
LevelName="Admin"
)
db.session.add(access_level)
db.session.commit()
grant_admin = UsersAccessMapping(
UserID=user.UserID,
UsersAccessLevelID=4
)
db.session.add(grant_admin)
db.session.commit()
self.user_id = user.UserID
def tearDown(self):
for cat in ShopCategories.query.filter(ShopCategories.Category.like("Unit%")).all():
db.session.delete(cat)
for logo in ShopItemLogos.query.filter(ShopItemLogos.Image.like("unittest%")).all():
db.session.delete(logo)
for item in ShopItems.query.filter(ShopItems.Title.like("UnitTest%")).all():
db.session.delete(item)
db.session.commit()
user = Users.query.filter_by(UserID=self.user_id).first()
db.session.delete(user)
db.session.commit()
def test_getting_all_shopitems(self):
"""This should return all the shopitems along with their associated data, in ascending
order, ID=1 first."""
response = self.app.get("/api/1.0/shopitems/")
data = json.loads(response.data.decode())
self.assertEqual(200, response.status_code)
self.assertEqual(3, len(data["shopItems"]))
self.assertEqual("UnitTest ShopItem 1", data["shopItems"][0]["title"])
self.assertEqual("UnitTest This is item 1", data["shopItems"][0]["description"])
self.assertEqual(15.99, data["shopItems"][0]["price"])
self.assertEqual("EUR", data["shopItems"][0]["currency"])
self.assertEqual("unittest-shopitem1.jpg", data["shopItems"][0]["image"])
self.assertNotEqual("", data["shopItems"][0]["createdAt"])
self.assertTrue("updatedAt" in data["shopItems"][0])
self.assertEqual(
[self.valid_cats[0], self.valid_cats[1]],
data["shopItems"][0]["categories"]
)
self.assertEqual(2, len(data["shopItems"][0]["urls"]))
self.assertEqual("Spotify", data["shopItems"][0]["urls"][0]["urlTitle"])
self.assertEqual(
"http://www.example.com/spotify",
data["shopItems"][0]["urls"][0]["url"]
)
self.assertEqual(self.valid_logo_ids[0], data["shopItems"][0]["urls"][0]["logoID"])
def test_getting_specific_shopitem(self):
"""Should return the data of the specified shopitem."""
response = self.app.get("/api/1.0/shopitems/{}".format(self.valid_items[2]))
data = json.loads(response.data.decode())
self.assertEqual(200, response.status_code)
self.assertEqual(1, len(data["shopItems"]))
self.assertEqual("UnitTest ShopItem 3", data["shopItems"][0]["title"])
self.assertEqual("UnitTest This is item 3", data["shopItems"][0]["description"])
self.assertEqual(12, data["shopItems"][0]["price"])
self.assertEqual("EUR", data["shopItems"][0]["currency"])
self.assertEqual("unittest-shopitem3.jpg", data["shopItems"][0]["image"])
self.assertNotEqual("", data["shopItems"][0]["createdAt"])
self.assertTrue("updatedAt" in data["shopItems"][0])
self.assertEqual(
[self.valid_cats[0], self.valid_cats[1]],
data["shopItems"][0]["categories"]
)
self.assertEqual(2, len(data["shopItems"][0]["urls"]))
self.assertEqual("Spotify", data["shopItems"][0]["urls"][0]["urlTitle"])
self.assertEqual(
"http://www.example.com/spotify",
data["shopItems"][0]["urls"][0]["url"]
)
self.assertEqual(self.valid_logo_ids[0], data["shopItems"][0]["urls"][0]["logoID"])
self.assertEqual("BandCamp", data["shopItems"][0]["urls"][1]["urlTitle"])
self.assertEqual(
"http://www.example.com/bandcamp",
data["shopItems"][0]["urls"][1]["url"]
)
self.assertEqual(self.valid_logo_ids[1], data["shopItems"][0]["urls"][1]["logoID"])
def test_getting_shopitems_by_category(self):
"""Should return all items that match the subcategory."""
response = self.app.get("/api/1.0/shopitems/category/{}/".format(self.valid_cats[1]))
data = json.loads(response.data.decode())
self.assertEqual(200, response.status_code)
self.assertNotEqual(None, data)
self.assertEqual(2, len(data["shopItems"]))
self.assertEqual("UnitTest ShopItem 1", data["shopItems"][0]["title"])
self.assertEqual("UnitTest ShopItem 3", data["shopItems"][1]["title"])
def test_adding_shopitem(self):
"""Should add the new item and its related data (categories and urls). For URLs, there is
no valid case to reference any existing URLs in the database, so they will be added every
time. However, we can reuse a logo (eg. Spotify), so basically you can pick a logo in the
UI and then the POST data will have an ID."""
response = self.app.post(
"/api/1.0/shopitems/",
data=json.dumps(
dict(
title="UnitTest Post",
description="UnitTest Description",
price=14.95,
currency="EUR",
image="unittest-post.jpg",
categories=[
self.valid_cats[0],
{"category": "UnitTests", "subcategory": "UnitTest New Subcategory"}
],
urls=[
{
"title": "Spotify",
"url": "http://www.example.com/spotify/1",
"logoID": self.valid_logo_ids[0]
},
{
"title": "Amazon",
"url": "http://www.example.com/amazon/123",
"logoID": self.valid_logo_ids[2]
},
]
)
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
data = response.data.decode()
item = ShopItems.query.filter_by(Title="UnitTest Post").first_or_404()
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=item.ShopItemID).all()
urls = ShopItemsURLMapping.query.filter_by(ShopItemID=item.ShopItemID).all()
new_cat = ShopCategories.query.filter_by(
SubCategory="UnitTest New Subcategory").first()
self.assertEqual(201, response.status_code)
self.assertTrue("Location" in data)
self.assertNotEqual(None, item)
self.assertNotEqual(None, cats)
self.assertNotEqual(None, urls)
self.assertEqual("UnitTest Post", item.Title)
self.assertEqual("UnitTest Description", item.Description)
self.assertEqual(14.95, float(item.Price))
self.assertEqual("EUR", item.Currency)
self.assertEqual("unittest-post.jpg", item.Image)
self.assertEqual(2, len(cats))
self.assertEqual("UnitTests", new_cat.Category)
self.assertEqual("UnitTest New Subcategory", new_cat.SubCategory)
self.assertEqual(2, len(urls))
# These appear in insert order. Sorting by title would be a lot of work for little benefit
self.assertEqual("Spotify", urls[0].URLTitle)
self.assertEqual("http://www.example.com/spotify/1", urls[0].URL)
self.assertEqual("Amazon", urls[1].URLTitle)
self.assertEqual("http://www.example.com/amazon/123", urls[1].URL)
def test_updating_shop_item(self):
"""Should replace all existing values with the new updated values."""
response = self.app.put(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
dict(
title="UnitTest Updated Title",
description="UnitTest Updated Description",
price=11.95,
currency="EUR",
image="unittest-update.jpg",
categories=[
self.valid_cats[0],
self.valid_cats[1],
{"category": "UnitTests", "subcategory": "UnitTest New Subcategory"}
],
urls=[
{
"title": "Spotify",
"url": "http://www.example.com/spotify/2",
"logoID": self.valid_logo_ids[0]
},
{
"title": "Amazon MP3",
"url": "http://www.example.com/amazon/124",
"logoID": self.valid_logo_ids[2]
},
{
"title": "BandCamp",
"url": "http://www.example.com/bandcamp/987",
"logoID": self.valid_logo_ids[2]
},
]
)
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
self.assertEqual(200, response.status_code)
self.assertEqual("", response.data.decode())
item = ShopItems.query.filter_by(ShopItemID=self.valid_items[1]).first_or_404()
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
urls = ShopItemsURLMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
new_cat = ShopCategories.query.filter_by(
SubCategory="UnitTest New Subcategory").first()
self.assertNotEqual(None, item)
self.assertNotEqual(None, cats)
self.assertNotEqual(None, urls)
self.assertEqual("UnitTest Updated Title", item.Title)
self.assertEqual("UnitTest Updated Description", item.Description)
self.assertEqual(11.95, float(item.Price))
self.assertEqual("EUR", item.Currency)
self.assertEqual("unittest-update.jpg", item.Image)
self.assertNotEqual("", item.Updated)
self.assertEqual(3, len(cats))
self.assertEqual("UnitTests", new_cat.Category)
self.assertEqual("UnitTest New Subcategory", new_cat.SubCategory)
self.assertEqual(3, len(urls))
# These appear in insert order. Sorting by title would be a lot of work for little benefit
self.assertEqual("Spotify", urls[0].URLTitle)
self.assertEqual("http://www.example.com/spotify/2", urls[0].URL)
self.assertEqual("Amazon MP3", urls[1].URLTitle)
self.assertEqual("http://www.example.com/amazon/124", urls[1].URL)
self.assertEqual("BandCamp", urls[2].URLTitle)
self.assertEqual("http://www.example.com/bandcamp/987", urls[2].URL)
def test_patching_shopitem_add(self):
"""Patch a ShopItems entry with "add" operation."""
response = self.app.patch(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
[
dict({
"op": "add",
"path": "/title",
"value": "UnitTest Patched Title"
}),
dict({
"op": "add",
"path": "/categories",
"value": [self.valid_cats[1]]
}),
dict({
"op": "add",
"path": "/urls",
"value": [
{
"title": "Deezer",
"url": "deezer.com",
"logoID": self.valid_logo_ids[3]
}
]
}),
]
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
item = ShopItems.query.filter_by(ShopItemID=self.valid_items[1]).first_or_404()
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
urls = ShopItemsURLMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
self.assertEqual("UnitTest Patched Title", item.Title)
self.assertEqual(2, len(cats))
self.assertEqual(3, len(urls))
self.assertEqual("Deezer", urls[2].URLTitle)
self.assertEqual("deezer.com", urls[2].URL)
def test_patching_shopitem_copy(self):
"""Patch a ShopItems entry with "copy" operation. There is no possible copy operation for
categories and urls. Trying to do it would throw JsonPatchConflict since you can only copy
to the same resource, ie. on top of itself."""
response = self.app.patch(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
[
dict({
"op": "copy",
"from": "/title",
"path": "/description"
})
]
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
item = ShopItems.query.filter_by(ShopItemID=self.valid_items[1]).first_or_404()
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
self.assertEqual("UnitTest ShopItem 2", item.Description)
def test_patching_shopitem_move(self):
"""Patch a ShopItems entry with "move" operation. Move will by definition empty the source
resource and populate the target resource with the value from source. However, this does
not currently work yet due to SQLAlchemy and JSONPatch incompatibility. Just the value is
replaced. The correct behaviour will be implemented later on."""
response = self.app.patch(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
[
dict({
"op": "move",
"from": "/description",
"path": "/image"
})
]
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
item = ShopItems.query.filter_by(ShopItemID=self.valid_items[1]).first_or_404()
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
self.assertEqual("UnitTest This is item 2", item.Image)
def test_patching_shopitem_remove(self):
"""Patch a ShopItems entry with "remove" operation. This does not work for the base object
due to SQLAlchemy JSONPatch incompatibility. But it does work for the joined tables URLs
and categories."""
response = self.app.patch(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
[
dict({
"op": "remove",
"path": "/title"
}),
dict({
"op": "remove",
"path": "/categories"
}),
dict({
"op": "remove",
"path": "/urls"
})
]
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
urls = ShopItemsURLMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
self.assertEqual([], cats)
self.assertEqual([], urls)
def test_patching_shopitem_replace(self):
"""Patch a ShopItems entry with "replace" operation."""
response = self.app.patch(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
[
dict({
"op": "replace",
"path": "/title",
"value": "UnitTest Patched Title"
}),
dict({
"op": "replace",
"path": "/categories",
"value": [self.valid_cats[1]]
}),
dict({
"op": "replace",
"path": "/urls",
"value": [
{
"title": "Deezer",
"url": "deezer.com",
"logoID": self.valid_logo_ids[3]
}
]
}),
]
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
item = ShopItems.query.filter_by(ShopItemID=self.valid_items[1]).first_or_404()
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
urls = ShopItemsURLMapping.query.filter_by(ShopItemID=self.valid_items[1]).all()
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
self.assertEqual("UnitTest Patched Title", item.Title)
self.assertEqual(1, len(cats))
self.assertEqual(1, len(urls))
self.assertEqual("Deezer", urls[0].URLTitle)
self.assertEqual("deezer.com", urls[0].URL)
def test_deleting_shop_item(self):
"""Should delete the specified shop item and it's mappings."""
response = self.app.delete(
"/api/1.0/shopitems/{}".format(self.valid_items[2]),
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=self.valid_items[2]).all()
urls = ShopItemsURLMapping.query.filter_by(ShopItemID=self.valid_items[2]).all()
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
self.assertEqual([], cats)
self.assertEqual([], urls)
def test_invalid_category_id(self):
"""When an invalid category ID is given, it should be skipped."""
response = self.app.post(
"/api/1.0/shopitems/",
data=json.dumps(
dict(
title="UnitTest Post",
description="UnitTest Description",
price=14.95,
currency="EUR",
image="unittest-post.jpg",
categories=[0],
urls=[
{
"title": "Spotify",
"url": "http://www.example.com/spotify/1",
"logoID": self.valid_logo_ids[0]
}
]
)
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
data = response.data.decode()
item = ShopItems.query.filter_by(Title="UnitTest Post").first_or_404()
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=item.ShopItemID).all()
self.assertEqual(201, response.status_code)
self.assertTrue("Location" in data)
self.assertEqual([], cats)
def test_existing_string_category(self):
"""Should use the existing category and not create a new entry to ShopCategories."""
response = self.app.post(
"/api/1.0/shopitems/",
data=json.dumps(
dict(
title="UnitTest Post",
description="UnitTest Description",
price=14.95,
currency="EUR",
image="unittest-post.jpg",
categories=[
{
"category": "UnitTests",
"subcategory": "TestsUnits"
}
],
urls=[
{
"title": "Spotify",
"url": "http://www.example.com/spotify/1",
"logoID": self.valid_logo_ids[0]
}
]
)
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
data = response.data.decode()
item = ShopItems.query.filter_by(Title="UnitTest Post").first_or_404()
cats = ShopItemsCategoriesMapping.query.filter_by(ShopItemID=item.ShopItemID).all()
category_entries = ShopCategories.query.filter_by(Category="UnitTests").all()
self.assertEqual(201, response.status_code)
self.assertTrue("Location" in data)
self.assertEqual(1, len(cats))
# Should only have one entry for the given values.
self.assertEqual(1, len(category_entries))
def test_patching_categories(self):
"""Patch ShopItems categories with "copy" and "move" operations. There is no possible
operation for categories and urls. Trying to do it would throw JsonPatchConflict since you
can only copy to the same resource, ie. on top of itself."""
response = self.app.patch(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
[
dict({
"op": "copy",
"from": "/categories",
"path": "/categories"
}),
dict({
"op": "move",
"from": "/categories",
"path": "/categories"
})
]
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
def test_patching_urls(self):
"""Patch ShopItems urls with "copy" and "move" operations. There is no possible
operation for categories and urls. Trying to do it would throw JsonPatchConflict since you
can only copy to the same resource, ie. on top of itself."""
response = self.app.patch(
"/api/1.0/shopitems/{}".format(self.valid_items[1]),
data=json.dumps(
[
dict({
"op": "copy",
"from": "/urls",
"path": "/urls"
}),
dict({
"op": "move",
"from": "/urls",
"path": "/urls"
})
]
),
content_type="application/json",
headers={
'User': self.user_id,
'Authorization': self.access_token
}
)
self.assertEqual(204, response.status_code)
self.assertEqual("", response.data.decode())
| 39.159705 | 99 | 0.513019 | 3,020 | 31,876 | 5.318874 | 0.103311 | 0.092448 | 0.033991 | 0.038847 | 0.704787 | 0.670983 | 0.621428 | 0.573679 | 0.550582 | 0.538442 | 0 | 0.019738 | 0.369024 | 31,876 | 813 | 100 | 39.207872 | 0.778899 | 0.080405 | 0 | 0.558952 | 0 | 0 | 0.141731 | 0.01389 | 0 | 0 | 0 | 0 | 0.165939 | 1 | 0.024745 | false | 0.001456 | 0.010189 | 0 | 0.03639 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d935132553f2baab50f6ba2b6d58b4003ca7df5b | 3,728 | py | Python | pyinsar/data_import/uavsar.py | MITeaps/pyinsar | 4d22e3ef90ef842d6b390074a8b5deedc7658a2b | [
"MIT"
] | 8 | 2019-03-15T19:51:27.000Z | 2022-02-16T07:27:36.000Z | pyinsar/data_import/uavsar.py | MITeaps/pyinsar | 4d22e3ef90ef842d6b390074a8b5deedc7658a2b | [
"MIT"
] | 1 | 2022-02-08T03:48:56.000Z | 2022-02-09T01:33:27.000Z | pyinsar/data_import/uavsar.py | MITeaps/pyinsar | 4d22e3ef90ef842d6b390074a8b5deedc7658a2b | [
"MIT"
] | 2 | 2021-01-12T05:32:21.000Z | 2021-01-13T08:35:26.000Z | # The MIT License (MIT)
# Copyright (c) 2017 Massachusetts Institute of Technology
#
# Author: Cody Rude
# This software has been created in projects supported by the US National
# Science Foundation and NASA (PI: Pankratius)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
import re
from collections import OrderedDict
def read_uavsar_metadata(in_file):
'''
Parse UAVSAR metadata
@param in_file: String of Metadata filename or file object (file should end in .ann)
@return OrderedDict of metadata
'''
if isinstance(in_file, str):
with open(in_file, 'r') as info_file:
data_info = info_file.readlines()
else:
data_info = [line.decode() for line in in_file.readlines()]
data_info = [line.strip() for line in data_info]
# Function to convert string to a number
def str_to_number(in_string):
try:
return int(in_string)
        except ValueError:
return float(in_string)
data_name = data_info[0][31:]
meta_data_dict = OrderedDict()
for line in data_info:
# Only work on lines that aren't commented out
        if re.match(r'^[^;]', line) is not None:
            # Get the data type ('&' is text)
            data_type = re.search(r'\s+\((.*)\)\s+=', line).group(1)
            # Remove data type from line
            tmp = re.sub(r'\s+\(.*\)\s+=', ' =', line)
            # Split line into key, value
            split_list = tmp.split('=', maxsplit=1)
            # Remove any trailing comments and strip whitespace
            split_list[1] = re.search(r'[^;]*', split_list[1]).group().strip()
split_list[0] = split_list[0].strip()
#If data type is not a string, parse it as a float or int
if data_type != '&':
# Check if value is N/A
if split_list[1] == 'N/A':
split_list[1] = float('nan')
            # Check for 'Reskew Doppler Near Mid Far' as this
            # entry should be three separate entries
elif split_list[0] == 'Reskew Doppler Near Mid Far':
split_list[0] = 'Reskew Doppler Near'
second_split = split_list[1].split()
split_list[1] = str_to_number(second_split[0])
meta_data_dict['Reskew Doppler Mid'] = str_to_number(second_split[1])
meta_data_dict['Reskew Doppler Far'] = str_to_number(second_split[2])
# Parse value to an int or float
else:
split_list[1] = str_to_number(split_list[1])
# Add key, value pair to dictionary
meta_data_dict[split_list[0]] = split_list[1]
return meta_data_dict
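# A minimal usage sketch (the filename below is hypothetical; UAVSAR annotation
# files end in .ann):
#
#   metadata = read_uavsar_metadata('example_flight.ann')
#   print(metadata['Reskew Doppler Near'])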
| 36.54902 | 89 | 0.635193 | 519 | 3,728 | 4.447013 | 0.39499 | 0.058492 | 0.038995 | 0.022097 | 0.118284 | 0.041594 | 0 | 0 | 0 | 0 | 0 | 0.009648 | 0.277092 | 3,728 | 101 | 90 | 36.910891 | 0.846753 | 0.489002 | 0 | 0.055556 | 0 | 0 | 0.070926 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.055556 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d936d9d6566232424e349431594f08f1e023591e | 1,964 | py | Python | api/utils/responses.py | cakedan/files.gg | 6d8fc06376a69809c0ae0a56ea2a842d6caddb98 | [
"MIT"
] | 8 | 2018-05-03T16:28:30.000Z | 2020-02-02T12:22:36.000Z | api/utils/responses.py | cakedan/files.gg | 6d8fc06376a69809c0ae0a56ea2a842d6caddb98 | [
"MIT"
] | null | null | null | api/utils/responses.py | cakedan/files.gg | 6d8fc06376a69809c0ae0a56ea2a842d6caddb98 | [
"MIT"
] | 1 | 2019-03-20T23:39:25.000Z | 2019-03-20T23:39:25.000Z | import json
from urllib.parse import urlencode
from flask import Response
from werkzeug.http import HTTP_STATUS_CODES
class ApiResponse(Response):
default_status = 200
default_mimetype = 'application/json'
def __init__(self, data=None, status=None, **kwargs):
if data is None:
if kwargs.get('response') is None:
status = 204
else:
if hasattr(data, 'to_dict'):
data = data.to_dict()
kwargs['response'] = json.dumps(data)
if status is not None:
kwargs['status'] = status
super(Response, self).__init__(**kwargs)
class ApiRedirect(ApiResponse):
default_status = 302
def __init__(self, url, query=None, *args, **kwargs):
super(ApiResponse, self).__init__(None, *args, **kwargs)
        if not (300 <= self.status_code <= 399):
            raise ValueError('Invalid status code: redirects must be between 300 and 399 inclusive')
if query:
if '?' in url:
url += '&' + urlencode(query)
else:
url += '?' + urlencode(query)
self.headers.add('location', url)
class ApiError(Exception):
code = 0
message = None
status = 400
def __init__(self, message=None, status=None, *args, **kwargs):
super(Exception, self).__init__()
if status is not None:
self.status = status
if message is not None:
self.message = message
elif self.message is None:
self.message = HTTP_STATUS_CODES.get(self.status, 'Unknown Error')
if kwargs.get('code') is not None:
self.code = kwargs.get('code')
kwargs['data'] = kwargs.pop('metadata', None) or {}
kwargs['data'].update({'code': self.code, 'message': self.message, 'status': self.status})
kwargs['status'] = self.status
self.response = ApiResponse(**kwargs)
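# A minimal usage sketch (hypothetical Flask wiring, not part of this module):
#
#   @app.errorhandler(ApiError)
#   def handle_api_error(error):
#       return error.response
#
#   @app.route('/api/ping')
#   def ping():
#       return ApiResponse({'pong': True})  # 200, application/json
#
#   @app.route('/old-path')
#   def old_path():
#       return ApiRedirect('/new-path', query={'from': 'old'})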
| 28.882353 | 104 | 0.59165 | 232 | 1,964 | 4.857759 | 0.293103 | 0.053239 | 0.031943 | 0.034605 | 0.030169 | 0 | 0 | 0 | 0 | 0 | 0 | 0.01796 | 0.291242 | 1,964 | 67 | 105 | 29.313433 | 0.791667 | 0 | 0 | 0.083333 | 0 | 0 | 0.095723 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.083333 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d937931645a0b4ee10a45ea07357635f954a0273 | 16,675 | py | Python | example/samgraph/multi_gpu/train_gcn.py | SJTU-IPADS/fgnn-artifacts | c96e7ec8204d767152958dc63a764466e90424fd | [
"Apache-2.0"
] | 23 | 2022-01-25T13:28:51.000Z | 2022-03-23T07:05:47.000Z | example/samgraph/multi_gpu/train_gcn.py | SJTU-IPADS/gnnlab | 5c73564e4a9bd5deeff7eed0b923c115ccba34d7 | [
"Apache-2.0"
] | null | null | null | example/samgraph/multi_gpu/train_gcn.py | SJTU-IPADS/gnnlab | 5c73564e4a9bd5deeff7eed0b923c115ccba34d7 | [
"Apache-2.0"
] | 1 | 2022-02-28T18:48:56.000Z | 2022-02-28T18:48:56.000Z | import argparse
import time
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
from dgl.nn.pytorch import GraphConv
import dgl.multiprocessing as mp
from torch.nn.parallel import DistributedDataParallel
import os
import sys
import samgraph.torch as sam
import datetime
from common_config import *
class GCN(nn.Module):
def __init__(self,
in_feats,
n_hidden,
n_classes,
n_layers,
activation,
dropout):
super(GCN, self).__init__()
self.layers = nn.ModuleList()
# input layer
self.layers.append(
GraphConv(in_feats, n_hidden, activation=activation, allow_zero_in_degree=True))
# hidden layers
for _ in range(n_layers - 2):
self.layers.append(
GraphConv(n_hidden, n_hidden, activation=activation, allow_zero_in_degree=True))
# output layer
self.layers.append(
GraphConv(n_hidden, n_classes, allow_zero_in_degree=True))
self.dropout = nn.Dropout(p=dropout)
def forward(self, blocks, features):
h = features
for i, layer in enumerate(self.layers):
if i != 0:
h = self.dropout(h)
h = layer(blocks[i], h)
return h
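# A minimal instantiation sketch (the sizes are hypothetical; forward() consumes
# one DGL block per layer, so len(blocks) must equal n_layers):
#
#   model = GCN(in_feats=100, n_hidden=256, n_classes=47,
#               n_layers=3, activation=F.relu, dropout=0.5)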
def parse_args(default_run_config):
argparser = argparse.ArgumentParser("GCN Training")
add_common_arguments(argparser, default_run_config)
argparser.add_argument('--fanout', nargs='+',
type=int, default=default_run_config['fanout'])
argparser.add_argument('--lr', type=float,
default=default_run_config['lr'])
argparser.add_argument('--dropout', type=float,
default=default_run_config['dropout'])
argparser.add_argument('--weight-decay', type=float,
default=default_run_config['weight_decay'])
return vars(argparser.parse_args())
def get_run_config():
run_config = {}
run_config.update(get_default_common_config(run_mode=RunMode.FGNN))
run_config['sample_type'] = 'khop2'
run_config['fanout'] = [5, 10, 15]
run_config['lr'] = 0.003
run_config['dropout'] = 0.5
run_config['weight_decay'] = 0.0005
run_config.update(parse_args(run_config))
process_common_config(run_config)
assert(run_config['arch'] == 'arch5')
assert(run_config['sample_type'] != 'random_walk')
run_config['num_fanout'] = run_config['num_layer'] = len(
run_config['fanout'])
print_run_config(run_config)
return run_config
def run_init(run_config):
sam.config(run_config)
sam.data_init()
if run_config['validate_configs']:
sys.exit()
def run_sample(worker_id, run_config):
num_worker = run_config['num_sample_worker']
global_barrier = run_config['global_barrier']
ctx = run_config['sample_workers'][worker_id]
print('[Sample Worker {:d}/{:d}] Started with PID {:d}({:s})'.format(
worker_id, num_worker, os.getpid(), torch.cuda.get_device_name(ctx)))
sam.sample_init(worker_id, ctx)
sam.notify_sampler_ready(global_barrier)
num_epoch = sam.num_epoch()
num_step = sam.steps_per_epoch()
if (worker_id == (num_worker - 1)):
num_step = int(num_step - int(num_step /
num_worker) * worker_id)
else:
num_step = int(num_step / num_worker)
epoch_sample_total_times_python = []
epoch_pipeline_sample_total_times_python = []
epoch_sample_total_times_profiler = []
epoch_sample_times = []
epoch_get_cache_miss_index_times = []
epoch_enqueue_samples_times = []
print('[Sample Worker {:d}] run sample for {:d} epochs with {:d} steps'.format(
worker_id, num_epoch, num_step))
# run start barrier
global_barrier.wait()
for epoch in range(num_epoch):
if run_config['pipeline']:
# epoch start barrier 1
global_barrier.wait()
tic = time.time()
for step in range(num_step):
sam.sample_once()
# sam.report_step(epoch, step)
toc0 = time.time()
if not run_config['pipeline']:
# epoch start barrier 2
global_barrier.wait()
# epoch end barrier
global_barrier.wait()
toc1 = time.time()
epoch_sample_total_times_python.append(toc0 - tic)
epoch_pipeline_sample_total_times_python.append(toc1 - tic)
epoch_sample_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochSampleTime))
epoch_get_cache_miss_index_times.append(
sam.get_log_epoch_value(
epoch, sam.KLogEpochSampleGetCacheMissIndexTime)
)
epoch_enqueue_samples_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochSampleSendTime)
)
epoch_sample_total_times_profiler.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochSampleTotalTime)
)
if worker_id == 0:
sam.report_step_average(epoch - 1, step - 1)
print('[Sample Worker {:d}] Avg Sample Total Time {:.4f} | Sampler Total Time(Profiler) {:.4f}'.format(
worker_id, np.mean(epoch_sample_total_times_python[1:]), np.mean(epoch_sample_total_times_profiler[1:])))
# run end barrier
global_barrier.wait()
if worker_id == 0:
sam.report_init()
if worker_id == 0:
test_result = []
test_result.append(('sample_time', np.mean(epoch_sample_times[1:])))
test_result.append(('get_cache_miss_index_time', np.mean(
epoch_get_cache_miss_index_times[1:])))
test_result.append(
('enqueue_samples_time', np.mean(epoch_enqueue_samples_times[1:])))
test_result.append(('epoch_time:sample_total', np.mean(
epoch_sample_total_times_python[1:])))
if run_config['pipeline']:
test_result.append(
('pipeline_sample_epoch_time', np.mean(epoch_pipeline_sample_total_times_python[1:])))
test_result.append(('init:presample', sam.get_log_init_value(sam.kLogInitL2Presample)))
test_result.append(('init:load_dataset:mmap', sam.get_log_init_value(sam.kLogInitL3LoadDatasetMMap)))
test_result.append(('init:load_dataset:copy:sampler', sam.get_log_init_value(sam.kLogInitL3LoadDatasetCopy)))
test_result.append(('init:dist_queue:alloc+push',
sam.get_log_init_value(sam.kLogInitL3DistQueueAlloc)+sam.get_log_init_value(sam.kLogInitL3DistQueuePush)))
test_result.append(('init:dist_queue:pin:sampler', sam.get_log_init_value(sam.kLogInitL3DistQueuePin)))
test_result.append(('init:internal:sampler', sam.get_log_init_value(sam.kLogInitL2InternalState)))
test_result.append(('init:cache:sampler', sam.get_log_init_value(sam.kLogInitL2BuildCache)))
for k, v in test_result:
print('test_result:{:}={:.2f}'.format(k, v))
global_barrier.wait() # barrier for pretty print
    # the trainer prints its results while we wait
sam.shutdown()
def run_train(worker_id, run_config):
ctx = run_config['train_workers'][worker_id]
num_worker = run_config['num_train_worker']
global_barrier = run_config['global_barrier']
train_device = torch.device(ctx)
print('[Train Worker {:d}/{:d}] Started with PID {:d}({:s})'.format(
worker_id, num_worker, os.getpid(), torch.cuda.get_device_name(ctx)))
    # Let the trainer initialize after the sampler:
    # the sampler should presample before trainer initialization
sam.wait_for_sampler_ready(global_barrier)
sam.train_init(worker_id, ctx)
if num_worker > 1:
dist_init_method = 'tcp://{master_ip}:{master_port}'.format(
master_ip='127.0.0.1', master_port='12345')
world_size = num_worker
torch.distributed.init_process_group(backend="nccl",
init_method=dist_init_method,
world_size=world_size,
rank=worker_id,
timeout=datetime.timedelta(seconds=get_default_timeout()))
in_feat = sam.feat_dim()
num_class = sam.num_class()
num_layer = run_config['num_layer']
model = GCN(in_feat, run_config['num_hidden'], num_class,
num_layer, F.relu, run_config['dropout'])
model = model.to(train_device)
if num_worker > 1:
model = DistributedDataParallel(
model, device_ids=[train_device], output_device=train_device)
loss_fcn = nn.CrossEntropyLoss()
loss_fcn = loss_fcn.to(train_device)
optimizer = optim.Adam(
model.parameters(), lr=run_config['lr'], weight_decay=run_config['weight_decay'])
num_epoch = sam.num_epoch()
num_step = sam.steps_per_epoch()
model.train()
epoch_copy_times = []
epoch_convert_times = []
epoch_train_times = []
epoch_total_times_python = []
epoch_train_total_times_profiler = []
epoch_pipeline_train_total_times_python = []
epoch_cache_hit_rates = []
epoch_miss_nbytes = []
epoch_feat_nbytes = []
copy_times = []
convert_times = []
train_times = []
total_times = []
align_up_step = int(
int((num_step + num_worker - 1) / num_worker) * num_worker)
# run start barrier
global_barrier.wait()
print('[Train Worker {:d}] run train for {:d} epochs with {:d} steps'.format(
worker_id, num_epoch, num_step))
run_start = time.time()
for epoch in range(num_epoch):
# epoch start barrier
global_barrier.wait()
tic = time.time()
if run_config['pipeline'] or run_config['single_gpu']:
need_steps = int(num_step / num_worker)
if worker_id < num_step % num_worker:
need_steps += 1
sam.extract_start(need_steps)
for step in range(worker_id, align_up_step, num_worker):
if step < num_step:
t0 = time.time()
if (not run_config['pipeline']) and (not run_config['single_gpu']):
sam.sample_once()
batch_key = sam.get_next_batch()
t1 = time.time()
blocks, batch_input, batch_label = sam.get_dgl_blocks(
batch_key, num_layer)
t2 = time.time()
else:
t0 = t1 = t2 = time.time()
# Compute loss and prediction
batch_pred = model(blocks, batch_input)
loss = loss_fcn(batch_pred, batch_label)
optimizer.zero_grad()
loss.backward()
optimizer.step()
# wait for the train finish then we can free the data safely
event_sync()
if (step + num_worker < num_step):
batch_input = None
batch_label = None
blocks = None
t3 = time.time()
copy_time = sam.get_log_step_value(epoch, step, sam.kLogL1CopyTime)
convert_time = t2 - t1
train_time = t3 - t2
total_time = t3 - t1
sam.log_step(epoch, step, sam.kLogL1TrainTime, train_time)
sam.log_step(epoch, step, sam.kLogL1ConvertTime, convert_time)
sam.log_epoch_add(epoch, sam.kLogEpochConvertTime, convert_time)
sam.log_epoch_add(epoch, sam.kLogEpochTrainTime, train_time)
sam.log_epoch_add(epoch, sam.kLogEpochTotalTime, total_time)
copy_times.append(copy_time)
convert_times.append(convert_time)
train_times.append(train_time)
total_times.append(total_time)
# sam.report_step_average(epoch, step)
# sync the train workers
if num_worker > 1:
torch.distributed.barrier()
toc = time.time()
epoch_total_times_python.append(toc - tic)
# epoch end barrier
global_barrier.wait()
feat_nbytes = sam.get_log_epoch_value(
epoch, sam.kLogEpochFeatureBytes)
miss_nbytes = sam.get_log_epoch_value(
epoch, sam.kLogEpochMissBytes)
epoch_miss_nbytes.append(miss_nbytes)
epoch_feat_nbytes.append(feat_nbytes)
epoch_cache_hit_rates.append(
(feat_nbytes - miss_nbytes) / feat_nbytes)
epoch_copy_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochCopyTime))
epoch_convert_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochConvertTime))
epoch_train_times.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochTrainTime))
epoch_train_total_times_profiler.append(
sam.get_log_epoch_value(epoch, sam.kLogEpochTotalTime))
if worker_id == 0:
print('Epoch {:05d} | Epoch Time {:.4f} | Total Train Time(Profiler) {:.4f} | Copy Time {:.4f}'.format(
epoch, epoch_total_times_python[-1], epoch_train_total_times_profiler[-1], epoch_copy_times[-1]))
# sync the train workers
if num_worker > 1:
torch.distributed.barrier()
print('[Train Worker {:d}] Avg Epoch Time {:.4f} | Train Total Time(Profiler) {:.4f} | Copy Time {:.4f}'.format(
worker_id, np.mean(epoch_total_times_python[1:]), np.mean(epoch_train_total_times_profiler[1:]), np.mean(epoch_copy_times[1:])))
# run end barrier
global_barrier.wait()
run_end = time.time()
    # the sampler prints its init stats and results while we wait
global_barrier.wait() # barrier for pretty print
if worker_id == 0:
sam.report_step_average(epoch - 1, step - 1)
sam.report_init()
test_result = []
test_result.append(('epoch_time:copy_time',
np.mean(epoch_copy_times[1:])))
test_result.append(('convert_time', np.mean(epoch_convert_times[1:])))
test_result.append(('train_time', np.mean(epoch_train_times[1:])))
test_result.append(('epoch_time:train_total', np.mean(
epoch_train_total_times_profiler[1:])))
test_result.append(
('cache_percentage', run_config['cache_percentage']))
test_result.append(('cache_hit_rate', np.mean(
epoch_cache_hit_rates[1:])))
test_result.append(('epoch_feat_nbytes', np.mean(epoch_feat_nbytes[1:])))
test_result.append(('batch_feat_nbytes', np.mean(epoch_feat_nbytes[1:])/(align_up_step/num_worker)))
test_result.append(('epoch_miss_nbytes', np.mean(epoch_miss_nbytes[1:])))
test_result.append(('batch_miss_nbytes', np.mean(epoch_miss_nbytes[1:])/(align_up_step/num_worker)))
test_result.append(('batch_copy_time', np.mean(epoch_copy_times[1:])/(align_up_step/num_worker)))
test_result.append(('batch_train_time', np.mean(epoch_train_total_times_profiler[1:])/(align_up_step/num_worker)))
if run_config['pipeline']:
test_result.append(
('pipeline_train_epoch_time', np.mean(epoch_total_times_python[1:])))
test_result.append(('run_time', run_end - run_start))
test_result.append(('init:load_dataset:copy:trainer', sam.get_log_init_value(sam.kLogInitL3LoadDatasetCopy)))
test_result.append(('init:dist_queue:pin:trainer', sam.get_log_init_value(sam.kLogInitL3DistQueuePin)))
test_result.append(('init:internal:trainer', sam.get_log_init_value(sam.kLogInitL2InternalState)))
test_result.append(('init:cache:trainer', sam.get_log_init_value(sam.kLogInitL2BuildCache)))
for k, v in test_result:
print('test_result:{:}={:.4f}'.format(k, v))
# sam.dump_trace()
sam.shutdown()
if __name__ == '__main__':
run_config = get_run_config()
run_init(run_config)
num_sample_worker = run_config['num_sample_worker']
num_train_worker = run_config['num_train_worker']
# global barrier is used to sync all the sample workers and train workers
run_config['global_barrier'] = mp.Barrier(
num_sample_worker + num_train_worker, timeout=get_default_timeout())
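    # Every sampler and trainer calls wait() on this barrier at the run and epoch
    # boundaries above, so the parties count must be exactly
    # num_sample_worker + num_train_worker; a mismatch would block until the
    # timeout fires.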
workers = []
# sample processes
for worker_id in range(num_sample_worker):
p = mp.Process(target=run_sample, args=(worker_id, run_config))
p.start()
workers.append(p)
# train processes
for worker_id in range(num_train_worker):
p = mp.Process(target=run_train, args=(worker_id, run_config))
p.start()
workers.append(p)
ret = sam.wait_one_child()
if ret != 0:
for p in workers:
p.kill()
for p in workers:
p.join()
if ret != 0:
sys.exit(1)
| 36.891593 | 138 | 0.64054 | 2,115 | 16,675 | 4.715839 | 0.134752 | 0.052336 | 0.048125 | 0.020453 | 0.495087 | 0.434831 | 0.33086 | 0.241929 | 0.177161 | 0.144676 | 0 | 0.009665 | 0.249175 | 16,675 | 451 | 139 | 36.973392 | 0.786981 | 0.043658 | 0 | 0.202381 | 0 | 0.008929 | 0.10284 | 0.025129 | 0 | 0 | 0 | 0 | 0.005952 | 1 | 0.020833 | false | 0 | 0.044643 | 0 | 0.077381 | 0.029762 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d93c3126662cf31eb885a0986f780983d532d782 | 3,453 | py | Python | src/AIC2018_iamai/ReID/ReID_CNN/logger.py | gordonjun2/CenterTrack | 358f94c36ef03b8ae7d15d8a48fbf70fff937e79 | [
"MIT"
] | 2 | 2020-04-13T14:06:23.000Z | 2020-06-10T08:41:28.000Z | src/AIC2018_iamai/ReID/ReID_CNN/logger.py | gordonjun2/CenterTrack | 358f94c36ef03b8ae7d15d8a48fbf70fff937e79 | [
"MIT"
] | null | null | null | src/AIC2018_iamai/ReID/ReID_CNN/logger.py | gordonjun2/CenterTrack | 358f94c36ef03b8ae7d15d8a48fbf70fff937e79 | [
"MIT"
] | null | null | null | import os
import pathlib
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.cm as cm
import pandas as pd
from collections import OrderedDict
class Logger:
def __init__(self, save_dir, prefix=''):
#names = ['epoch',
# 'loss', 'loss_max', 'loss_median', 'loss_min', 'active_loss',
# 'feat_2-norm_max', 'feat_2-norm_median', 'feat_2-norm_min']
self.log = OrderedDict([('epoch', [])])
        self.save_dir = save_dir
pathlib.Path(save_dir).mkdir(parents=True, exist_ok=True)
self.prefix = prefix
def logg(self, d):
for k in d:
if k not in self.log:
self.log[k] = []
self.log[k].append(d[k])
def append_epoch(self, e):
self.log['epoch'].append(e)
def append_loss(self, b_loss):
names = ['loss', 'loss_max', 'loss_median', 'loss_min', 'active_loss']
for n in names:
if n not in self.log: self.log[n] = []
self.log['loss'].append(b_loss.mean())
self.log['loss_max'].append(b_loss.max())
self.log['loss_median'].append(np.median(b_loss))
self.log['loss_min'].append(b_loss.min())
self.log['active_loss'].append((b_loss > 1e-3).mean())
def append_feat(self, b_feat):
names = ['feat_2-norm_max', 'feat_2-norm_median', 'feat_2-norm_min']
for n in names:
if n not in self.log: self.log[n] = []
norm = np.linalg.norm(b_feat, axis=1)
self.log['feat_2-norm_max'].append(norm.max())
self.log['feat_2-norm_median'].append(np.median(norm))
self.log['feat_2-norm_min'].append(norm.min())
def write_log(self):
dataframe = pd.DataFrame(self.log)
dataframe.to_csv(os.path.join(self.save_dir, '%slog.csv' % self.prefix), index=False)
def plot(self):
epoch = np.array(self.log['epoch'])
plt.figure()
labels = ['loss_max', 'loss_median', 'loss_min']
for i, l in enumerate(labels):
data = np.array(self.log[l])
plt.semilogy(epoch, data, label=l, color=cm.Blues(0.25+float(i)*0.25))
data = np.array(self.log['loss'])
plt.semilogy(epoch, data, label='loss', color='r')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('loss')
plt.title('loss vs. epoch')
plt.savefig(os.path.join(self.save_dir, '%sloss.png' % self.prefix))
plt.close()
plt.figure()
data = np.array(self.log['active_loss'])
plt.plot(epoch, data, label='active_loss')
plt.legend()
plt.xlabel('epoch')
plt.ylabel('% of active loss')
plt.title('% of active loss vs. epoch')
plt.savefig(os.path.join(self.save_dir, '%sactive_loss.png' % self.prefix))
plt.close()
plt.figure()
labels = ['feat_2-norm_max', 'feat_2-norm_median', 'feat_2-norm_min']
for i, l in enumerate(labels):
data = np.array(self.log[l])
plt.plot(epoch, data, label=l, color=cm.Blues(0.25+float(i)*0.25))
plt.legend()
plt.xlabel('epoch')
plt.ylabel('2-norm of feature')
plt.title('2-norm of feature vs. epoch')
plt.savefig(os.path.join(self.save_dir, '%sfeature_norm.png' % self.prefix))
plt.close()
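# A minimal usage sketch (hypothetical values; save_dir is any writable path):
#
#   logger = Logger('checkpoints/', prefix='run1_')
#   for epoch in range(num_epochs):
#       logger.append_epoch(epoch)
#       logger.append_loss(batch_losses)    # numpy array of per-sample losses
#       logger.append_feat(batch_features)  # numpy array, shape (batch, dim)
#   logger.write_log()
#   logger.plot()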
| 35.96875 | 94 | 0.56936 | 493 | 3,453 | 3.84787 | 0.194726 | 0.084871 | 0.056932 | 0.0369 | 0.491302 | 0.405377 | 0.373221 | 0.290986 | 0.290986 | 0.250923 | 0 | 0.011513 | 0.270489 | 3,453 | 95 | 95 | 36.347368 | 0.741564 | 0.044889 | 0 | 0.25974 | 0 | 0 | 0.144985 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.090909 | false | 0 | 0.103896 | 0 | 0.207792 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d93f8a137f8c8b7524ee61b15619bc1ddd81fbf9 | 1,401 | py | Python | src/logistic/logistic_sklearn.py | wenfengand/machine_learning_tools | 7233e14ccb2cc32198ee5d73ee2c5952b5947443 | [
"MIT"
] | null | null | null | src/logistic/logistic_sklearn.py | wenfengand/machine_learning_tools | 7233e14ccb2cc32198ee5d73ee2c5952b5947443 | [
"MIT"
] | null | null | null | src/logistic/logistic_sklearn.py | wenfengand/machine_learning_tools | 7233e14ccb2cc32198ee5d73ee2c5952b5947443 | [
"MIT"
] | null | null | null | import numpy as np
import pandas as pd
from sklearn.feature_extraction.text import TfidfVectorizer
from sklearn.preprocessing import LabelBinarizer
from sklearn.linear_model.logistic import LogisticRegression
from sklearn.model_selection import train_test_split, cross_val_score
from sklearn.metrics import classification_report
from os.path import dirname, abspath, join
PROJECT_ROOT = dirname(dirname(dirname(abspath(__file__))))
INPUT_ROOT = join(PROJECT_ROOT, 'input')
SMS_FILE = join(INPUT_ROOT, 'sms', 'SMSSpamCollection')
df = pd.read_csv(SMS_FILE, delimiter='\t', header=None)
x = df[1].values
y = df[0].values
x_train_raw, x_test_raw, y_train, y_test = train_test_split(x,y)
vectorizer = TfidfVectorizer()
x_train = vectorizer.fit_transform(x_train_raw)
x_test = vectorizer.transform(x_test_raw)
lb = LabelBinarizer()
y_train_binarized = lb.fit_transform(y_train).ravel()
y_test_binarized = lb.transform(y_test).ravel()
classifier = LogisticRegression()
classifier.fit(x_train, y_train_binarized)
predictions = classifier.predict(x_test)
precisions = cross_val_score(classifier, x_train, y_train_binarized, cv=5, scoring='precision')
print('Precisions from cross_val_score', precisions)
report = classification_report(y_test_binarized, predictions,\
target_names=['ham', 'spam'], labels=lb.transform(['ham','spam']).reshape(-1))
print('Report from classification_report\n', report)
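# A short extension sketch: classify new, unseen messages with the fitted
# vectorizer and classifier (the example texts below are made up).
new_messages = ['WINNER! Claim your free prize now', 'See you at lunch tomorrow?']
new_predictions = classifier.predict(vectorizer.transform(new_messages))
print('Predictions for new messages:', lb.inverse_transform(new_predictions))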
| 35.923077 | 95 | 0.797288 | 198 | 1,401 | 5.353535 | 0.368687 | 0.051887 | 0.036792 | 0.018868 | 0.066038 | 0 | 0 | 0 | 0 | 0 | 0 | 0.003167 | 0.098501 | 1,401 | 38 | 96 | 36.868421 | 0.836105 | 0 | 0 | 0 | 0 | 0 | 0.082857 | 0.016429 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.275862 | 0 | 0.275862 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d941561113d1d4744ab96a504558e7c214535b01 | 1,289 | py | Python | code/curvature_and_offset.py | amoi9/advanced-lane-finding | 334ebcee8a232e62aa54ed88190dd2333026112c | [
"MIT"
] | null | null | null | code/curvature_and_offset.py | amoi9/advanced-lane-finding | 334ebcee8a232e62aa54ed88190dd2333026112c | [
"MIT"
] | null | null | null | code/curvature_and_offset.py | amoi9/advanced-lane-finding | 334ebcee8a232e62aa54ed88190dd2333026112c | [
"MIT"
] | null | null | null | import numpy as np
from lane_pixel_finder import find_lane_pixels
'''
Calculates the curvature of polynomial functions in meters.
'''
# Define conversions in x and y from pixel space to meters
ym_per_pix = 30/720 # meters per pixel in y dimension
xm_per_pix = 3.7/700 # meters per pixel in x dimension
def measure_curvature_real_with_pixels(img_shape, x, y):
# Generate x and y values for plotting
ploty = np.linspace(0, img_shape[0]-1, img_shape[0])
# Fit a second order polynomial to each using `np.polyfit`
fit_cr = np.polyfit(y*ym_per_pix, x*xm_per_pix, 2)
# Define y-value where we want radius of curvature
# We'll choose the maximum y-value, corresponding to the bottom of the image
y_eval = np.max(ploty)
##### calculation of R_curve (radius of curvature) #####
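    # For a fit x = A*y^2 + B*y + C (with y in meters), the radius of curvature is
    #   R(y) = (1 + (2*A*y + B)^2)^(3/2) / |2*A|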
curverad = ((1 + (2*fit_cr[0]*y_eval*ym_per_pix + fit_cr[1])**2)**1.5) / np.absolute(2*fit_cr[0])
return curverad, fit_cr
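# A minimal usage sketch (synthetic pixel-space lane points, not real detections):
#
#   ploty = np.linspace(0, 719, 720)              # y positions in pixels
#   leftx = 1e-4 * ploty**2 + 0.1 * ploty + 300   # made-up left-lane x positions
#   radius_m, fit_m = measure_curvature_real_with_pixels((720, 1280), leftx, ploty)
#
# The returned fit_m is the meter-space polynomial that measure_offset_real()
# below expects for its left_fit/right_fit arguments.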
def measure_offset_real(img_shape, left_fit, right_fit):
    y = ym_per_pix * img_shape[0]
    left_x = left_fit[0] * y**2 + left_fit[1] * y + left_fit[2]
    right_x = right_fit[0] * y**2 + right_fit[1] * y + right_fit[2]
    lane_center_pos = (left_x + right_x) / 2
return lane_center_pos - img_shape[1] / 2 * xm_per_pix | 40.28125 | 101 | 0.699767 | 233 | 1,289 | 3.630901 | 0.364807 | 0.049645 | 0.037825 | 0.037825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.034549 | 0.191621 | 1,289 | 32 | 102 | 40.28125 | 0.777351 | 0.297905 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.125 | 0 | 0.375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d941a0519c73ad134015de68bcfb2d050dd83e9d | 816 | py | Python | notes/23 - exceptions/basic-input-example.py | hSpiels/ICS3-Python-Notes | 5cb06623d6714a62ff20550d635c1fd3f7d27ea2 | [
"MIT"
] | 3 | 2022-02-10T19:06:28.000Z | 2022-03-25T17:55:56.000Z | notes/23 - exceptions/basic-input-example.py | hSpiels/ICS3-Python-Notes | 5cb06623d6714a62ff20550d635c1fd3f7d27ea2 | [
"MIT"
] | null | null | null | notes/23 - exceptions/basic-input-example.py | hSpiels/ICS3-Python-Notes | 5cb06623d6714a62ff20550d635c1fd3f7d27ea2 | [
"MIT"
] | 17 | 2020-09-15T16:40:23.000Z | 2022-03-22T17:52:32.000Z | #-----------------------------------------------------------------------------
# Name: Catching Exceptions (try-except.py)
# Purpose: To provide example of a simple input loop using try-catch
#
# Author: Mr. Brooks
# Created: 01-Oct-2020
# Updated: 01-March-2021
#-----------------------------------------------------------------------------
while True: #Start an infinite loop
value = input('Enter a number between -100 and 100: ') #Get a value from the user
try:
value = int(value) #Convert the value to an int
except Exception as err:
print(f'Something went wrong: {err}') #You should probably add a nicer error message
else:
#No exception was thrown, so break out of the infinite loop
break
print (value) | 32.64 | 92 | 0.502451 | 93 | 816 | 4.408602 | 0.709677 | 0.058537 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.029221 | 0.245098 | 816 | 25 | 93 | 32.64 | 0.636364 | 0.645833 | 0 | 0 | 0 | 0 | 0.231047 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0 | 0 | 0 | 0.222222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d944f0a9c7214dc418886b1718145909d59eb408 | 2,791 | py | Python | python_examples/example_truncated_normal.py | KristerSJakobsson/pygosolnp | 5a890d67782ff04f521644daeaef2f7708959e79 | [
"BSL-1.0"
] | null | null | null | python_examples/example_truncated_normal.py | KristerSJakobsson/pygosolnp | 5a890d67782ff04f521644daeaef2f7708959e79 | [
"BSL-1.0"
] | null | null | null | python_examples/example_truncated_normal.py | KristerSJakobsson/pygosolnp | 5a890d67782ff04f521644daeaef2f7708959e79 | [
"BSL-1.0"
] | null | null | null | ############################
# This example shows how to run pygosolnp with Truncated Normal distribution using Numpy and Scipy
############################
from typing import List, Optional
# NumPy's random module provides the PCG64 bit generator, which has better
# statistical properties than the legacy Mersenne Twister
from numpy.random import Generator, PCG64
# Note that this script depends on scipy, which is not a requirement for pygosolnp
from scipy.stats import truncnorm
import pygosolnp
# The Sampling class is an abstract class that can be inherited and customized as you please
class TruncatedNormalSampling(pygosolnp.sampling.Sampling):
def __init__(self,
parameter_lower_bounds: List[float],
parameter_upper_bounds: List[float],
seed: Optional[int]):
self.__generator = Generator(PCG64(seed))
self.__parameter_lower_bounds = parameter_lower_bounds
self.__parameter_upper_bounds = parameter_upper_bounds
def generate_sample(self, sample_size: int) -> List[float]:
# This function returns random starting values for one sample
return truncnorm.rvs(a=self.__parameter_lower_bounds,
b=self.__parameter_upper_bounds,
size=sample_size,
random_state=self.__generator)
# The Permutation Function has unique solution f(x) = 0 when x_i = i
def permutation_function(data):
n = 4
b = 0.5
result1 = 0
for index1 in range(1, n + 1):
result2 = 0
for index2 in range(1, n + 1):
result2 += ((pow(index2, index1) + b) * (pow(data[index2 - 1] / index2, index1) - 1))
result1 += pow(result2, 2)
return result1
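# Sanity check (an added assertion, not part of the original example): at the
# documented optimum x_i = i every inner term vanishes, so f(x) is exactly 0.
assert permutation_function([1.0, 2.0, 3.0, 4.0]) == 0.0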
parameter_lower_bounds = [-4.0] * 4
parameter_upper_bounds = [4.0] * 4
if __name__ == '__main__':
# Instantiate sampling object
sampling = TruncatedNormalSampling(
parameter_lower_bounds=parameter_lower_bounds,
parameter_upper_bounds=parameter_upper_bounds,
seed=99)
# Note that the seed variable to pygosolnp.solve is ignored due to the custom sampling
results = pygosolnp.solve(
obj_func=permutation_function,
par_lower_limit=parameter_lower_bounds,
par_upper_limit=parameter_upper_bounds,
number_of_restarts=6,
number_of_simulations=20000,
pysolnp_max_major_iter=25,
pysolnp_tolerance=1E-9,
start_guess_sampling=sampling)
print(results.best_solution)
# Best solution: [2.651591117309446, 1.7843343303461394, 3.8557508243271172, 2.601788248290573]
# Objective function value: 101.48726054338877
# The result is poor: the truncated normal sampler concentrated the starting points
# near 0, which is a bad starting region for the permutation function
| 36.723684 | 103 | 0.685776 | 348 | 2,791 | 5.272989 | 0.431034 | 0.061035 | 0.087193 | 0.039237 | 0.105722 | 0.105722 | 0 | 0 | 0 | 0 | 0 | 0.062384 | 0.230383 | 2,791 | 75 | 104 | 37.213333 | 0.791899 | 0.320315 | 0 | 0 | 0 | 0 | 0.004376 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068182 | false | 0 | 0.090909 | 0.022727 | 0.227273 | 0.022727 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d945b415451fdc9d37b82bd626b439e042bbaee1 | 11,757 | py | Python | SAM/Classifiers/classifier_svm.py | lucaspuvis/SAM | 159427d0b2a7fdd353b96c13085f926df096f309 | [
"CC-BY-4.0"
] | 3 | 2019-05-14T17:22:54.000Z | 2020-07-05T15:39:11.000Z | SAM/Classifiers/classifier_svm.py | lucaspuvis/SAM | 159427d0b2a7fdd353b96c13085f926df096f309 | [
"CC-BY-4.0"
] | null | null | null | SAM/Classifiers/classifier_svm.py | lucaspuvis/SAM | 159427d0b2a7fdd353b96c13085f926df096f309 | [
"CC-BY-4.0"
] | null | null | null | import argparse, joblib, csv, sys, os
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.patches as mpatches
import pandas as pd
from mpl_toolkits.mplot3d import Axes3D
from yellowbrick.text import TSNEVisualizer
from sklearn.cluster import KMeans
from sklearn.svm import SVC, LinearSVC
from sklearn.pipeline import Pipeline
from sklearn.decomposition import PCA
from sklearn.metrics import confusion_matrix, f1_score
from sklearn.model_selection import train_test_split, StratifiedKFold, GridSearchCV, learning_curve
from sklearn.feature_extraction.text import TfidfVectorizer
'''
SVM classifier
'''
# GLOBALS
# Paths
dir_path = os.path.dirname(os.path.realpath(__file__))
data_path = dir_path + '/TrainingData/training_data_all.csv'
test_data_path = dir_path + '/TrainingData/HypothesisData.csv'
model_path = dir_path + '/Model/svm_pipeline.joblib'
stop_words_path = dir_path + '/TrainingData/stop_words_da.txt'
# Rest
# Train our SVM model
def train_model(X, y, auto_split=False):
# Create data processing and classifier pipeline
svm_pipeline = Pipeline([
('tfidf', TfidfVectorizer(ngram_range=(1,10),
analyzer='char_wb',
stop_words=load_stop_words(),
use_idf=False,
smooth_idf=True,
sublinear_tf=False
)),
('svm', LinearSVC(C=3))
])
# Parameters for Grid Search. This is used for finding the best values for processing and classifying
parameters = {#'tfidf__stop_words':(load_stop_words(), None),
# 'tfidf__smooth_idf':(True, False),
# 'tfidf__sublinear_tf':(True, False),
}
out = open('svm_f1score.txt', 'w+')
    skf = StratifiedKFold(n_splits=4, shuffle=True)
if auto_split is True:
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=0.1)
clf = GridSearchCV(svm_pipeline, parameters, cv=skf.split(X_train, y_train), verbose=2, return_train_score=True, n_jobs=-1)
clf.fit(X_train, y_train)
clf = clf.best_estimator_
cm = confusion_matrix(y_test, clf.predict(X_test))
plt.figure()
plot_confusion_matrix(cm)
y_pred = clf.predict(X_test)
f_score = f1_score(y_true=y_test, y_pred=y_pred, average='weighted')
        svm_score = clf.score(X_test, y_test)
        out.write('{}, {}\n'.format(svm_score, f_score))
else:
clf = GridSearchCV(svm_pipeline, parameters, cv=skf.split(X, y), verbose=2, return_train_score=True, n_jobs=-1)
X_test, y_test = load_test_dataset(squish_classes=True)
clf.fit(X, y)
clf = clf.best_estimator_
cm = confusion_matrix(y_test, clf.predict(X_test))
plot_confusion_matrix(cm)
svm_score = clf.score(X_test, y_test)
y_pred = clf.predict(X_test)
f_score = f1_score(y_true=y_test, y_pred=y_pred, average='weighted')
out.write('{}, {}\n'.format(svm_score, f_score))
print(cm)
print('SVM Accuracy: {}'.format(round(svm_score*100, 4)))
print('SVM F1 Score: {}'.format(round(f_score*100, 4)))
    out.close()
    joblib.dump(clf, model_path)
    return clf
def load_dataset(encoding='utf8', squish_classes=True):
'''
    Loads the training data as sentences and labels (no train/test splitting is done here)
Parameters
-----------
encoding: The encoding of the file loaded. Default is UTF-8
Returns
-------
X: The sentences,
y: The labels
'''
csv_reader = csv.reader(open(data_path, encoding=encoding))
X, y = [], []
    # Save the comments and their labels in separate lists
for row in csv_reader:
X.append(row[1])
if squish_classes:
if int(row[0]) < 0:
y.append(-1)
elif int(row[0]) > 0:
y.append(1)
else:
y.append(0)
else:
y.append(row[0])
y = np.asarray(y)
X = np.asarray(X)
return X, y
def load_test_dataset(encoding='utf-8-sig', squish_classes=True):
'''
    Loads the held-out test data as sentences and labels
Parameters
-----------
encoding: The encoding of the file loaded. Default is UTF-8
Returns
-------
X: The sentences,
y: The labels
'''
csv_reader = csv.reader(open(test_data_path, encoding=encoding))
X, y = [], []
    # Save the comments and their labels in separate lists
for row in csv_reader:
X.append(row[1])
if squish_classes:
if int(row[0]) < 0:
y.append(-1)
elif int(row[0]) > 0:
y.append(1)
else:
y.append(0)
else:
y.append(row[0])
y = np.asarray(y)
X = np.asarray(X)
return X, y
# Get list of stop words
def load_stop_words():
stop_words = []
stop_words_list = open(stop_words_path, 'r')
for word in stop_words_list.readlines():
stop_words.append(word.replace('\n', ''))
return stop_words
# <---------------------->
# <- PLOTTING FUNCTIONS ->
# <---------------------->
def plot_data_2d(X_transformed, y):
    # Project the feature space to 2D with PCA
    data2D = PCA(n_components=2).fit_transform(X_transformed.todense())
# Plot the datapoints with different colors depending on label
for i in range(0, len(data2D)):
if int(y[i]) < 0:
plt.plot(data2D[i, 0], data2D[i, 1], "yo")
elif int(y[i]) == 0:
plt.plot(data2D[i, 0], data2D[i, 1], "bo")
else:
plt.plot(data2D[i, 0], data2D[i, 1], "co")
# Labels for the plot
negative_plt = mpatches.Patch(color='yellow', label='Negative')
neutral_plt = mpatches.Patch(color='blue', label='Neutral')
positive_plt = mpatches.Patch(color='cyan', label='Positive')
plt.legend(handles=[positive_plt, neutral_plt, negative_plt])
plt.show()
def plot_data_3d(X_transformed, y):
'''
    Projects the transformed corpus to 3D with PCA and scatter-plots it by label
Parameters
-----------
X_transformed: The corpus transformed to a feature space,
y: The labels
'''
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
data3d = PCA(n_components=3).fit_transform(X_transformed.todense())
# data3d = TSNE(n_components=3).fit_transform(X_transformed.todense())
#
neg_xs, neg_ys, neg_zs = [], [], []
neu_xs, neu_ys, neu_zs = [], [], []
pos_xs, pos_ys, pos_zs = [], [], []
for i in range(0, len(y)):
if y[i] < 0:
neg_xs.append(data3d[i, 0])
neg_ys.append(data3d[i, 1])
neg_zs.append(data3d[i, 2])
        elif y[i] == 0:
neu_xs.append(data3d[i, 0])
neu_ys.append(data3d[i, 1])
neu_zs.append(data3d[i, 2])
else:
pos_xs.append(data3d[i, 0])
pos_ys.append(data3d[i, 1])
pos_zs.append(data3d[i, 2])
ax.scatter(neg_xs, neg_ys, neg_zs, c='b')
ax.scatter(neu_xs, neu_ys, neu_zs, c='r')
ax.scatter(pos_xs, pos_ys, pos_zs, c='g')
ax.set_xlabel('X')
ax.set_ylabel('Y')
ax.set_zlabel('Z')
def plot_confusion_matrix(cm, title='SVM Confusion matrix', cmap=plt.get_cmap('Blues')):
plt.imshow(cm, interpolation='nearest', cmap=cmap)
plt.title(title)
plt.colorbar()
tick_marks = np.arange(3)
plt.xticks(tick_marks, [-1, 0, 1], rotation=45)
plt.yticks(tick_marks, [-1, 0, 1])
plt.tight_layout()
plt.ylabel('True label')
plt.xlabel('Predicted label')
plt.show()
# From https://scikit-learn.org/stable/auto_examples/model_selection/plot_learning_curve.html#sphx-glr-auto-examples-model-selection-plot-learning-curve-py
def plot_learning_curve(estimator, title, X, y, ylim=None, cv=None,
n_jobs=None, train_sizes=np.linspace(.1, 1.0, 10)):
"""
Generate a simple plot of the test and training learning curve.
Parameters
----------
estimator : object type that implements the "fit" and "predict" methods
An object of that type which is cloned for each validation.
title : string
Title for the chart.
X : array-like, shape (n_samples, n_features)
Training vector, where n_samples is the number of samples and
n_features is the number of features.
y : array-like, shape (n_samples) or (n_samples, n_features), optional
Target relative to X for classification or regression;
None for unsupervised learning.
ylim : tuple, shape (ymin, ymax), optional
        Defines minimum and maximum y values plotted.
cv : int, cross-validation generator or an iterable, optional
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 3-fold cross-validation,
- integer, to specify the number of folds.
- :term:`CV splitter`,
- An iterable yielding (train, test) splits as arrays of indices.
For integer/None inputs, if ``y`` is binary or multiclass,
:class:`StratifiedKFold` used. If the estimator is not a classifier
or if ``y`` is neither binary nor multiclass, :class:`KFold` is used.
Refer :ref:`User Guide <cross_validation>` for the various
cross-validators that can be used here.
n_jobs : int or None, optional (default=None)
Number of jobs to run in parallel.
``None`` means 1 unless in a :obj:`joblib.parallel_backend` context.
``-1`` means using all processors. See :term:`Glossary <n_jobs>`
for more details.
train_sizes : array-like, shape (n_ticks,), dtype float or int
Relative or absolute numbers of training examples that will be used to
generate the learning curve. If the dtype is float, it is regarded as a
fraction of the maximum size of the training set (that is determined
by the selected validation method), i.e. it has to be within (0, 1].
Otherwise it is interpreted as absolute sizes of the training sets.
        Note that for classification the number of samples usually has to
        be big enough to contain at least one sample from each class.
        (default: np.linspace(0.1, 1.0, 10))
"""
plt.figure()
plt.title(title)
if ylim is not None:
plt.ylim(*ylim)
plt.xlabel("Training examples")
plt.ylabel("Score")
    train_sizes, train_scores, test_scores = learning_curve(
        estimator, X, y, cv=cv, n_jobs=n_jobs, train_sizes=train_sizes)
train_scores_mean = np.mean(train_scores, axis=1)
train_scores_std = np.std(train_scores, axis=1)
test_scores_mean = np.mean(test_scores, axis=1)
test_scores_std = np.std(test_scores, axis=1)
plt.grid()
plt.fill_between(train_sizes, train_scores_mean - train_scores_std,
train_scores_mean + train_scores_std, alpha=0.1,
color="r")
plt.fill_between(train_sizes, test_scores_mean - test_scores_std,
test_scores_mean + test_scores_std, alpha=0.1, color="g")
plt.plot(train_sizes, train_scores_mean, 'o-', color="r",
label="Training score")
plt.plot(train_sizes, test_scores_mean, 'o-', color="g",
label="Cross-validation score")
plt.legend(loc="best")
return plt
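# Example usage (hypothetical estimator; any scikit-learn classifier with
# fit/predict works):
#   from sklearn.svm import LinearSVC
#   plot_learning_curve(LinearSVC(), "SVC learning curve", X, y, cv=5).show()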
# <---------------------->
# <- SCRIPT STARTS HERE ->
# <---------------------->
# Train model first time
X, y = load_dataset(squish_classes=True)
pipeline = train_model(X, y, auto_split=False)
X_transformed = pipeline.named_steps['tfidf'].transform(X)
# tsne = TSNEVisualizer()
# tsne.fit(X_transformed, y)
# tsne.poof()
| 34.377193 | 155 | 0.623969 | 1,653 | 11,757 | 4.266788 | 0.238355 | 0.017865 | 0.016589 | 0.008507 | 0.346519 | 0.279314 | 0.243017 | 0.216929 | 0.207571 | 0.171558 | 0 | 0.015014 | 0.25219 | 11,757 | 341 | 156 | 34.478006 | 0.787193 | 0.316662 | 0 | 0.278689 | 0 | 0 | 0.054536 | 0.01614 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043716 | false | 0 | 0.076503 | 0 | 0.147541 | 0.016393 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d948e42d9fe9ba9b6716af9941792faae40da7f8 | 1,164 | py | Python | build.py | 2-propanol/BTF_extractor | 0ec5358504ab51aff6256b98f51d29e540012ce8 | [
"Zlib"
] | 1 | 2022-02-16T14:53:26.000Z | 2022-02-16T14:53:26.000Z | build.py | 2-propanol/BTF_extractor | 0ec5358504ab51aff6256b98f51d29e540012ce8 | [
"Zlib"
] | 1 | 2021-02-05T10:04:20.000Z | 2021-04-11T13:45:01.000Z | build.py | 2-propanol/BTF_extractor | 0ec5358504ab51aff6256b98f51d29e540012ce8 | [
"Zlib"
] | 1 | 2021-02-04T04:22:19.000Z | 2021-02-04T04:22:19.000Z | import platform
from setuptools import Extension
import numpy
from Cython.Build import cythonize
compile_args = []
link_args = []
pf = platform.system()
if pf == "Windows":
# for MSVC
compile_args = ["/std:c++14", "/DNOMINMAX", "/O2", "/openmp"]
elif pf == "Darwin":
# for clang
compile_args = ["-std=c++14", "-O2", "-march=native", "-Xpreprocessor", "-fopenmp"]
link_args = ["-lomp"]
elif pf == "Linux":
# for gcc
compile_args = ["-std=c++14", "-Ofast", "-march=native", "-fopenmp"]
link_args = ["-fopenmp"]
ext_modules = [
Extension(
name="ubo2014_cy",
sources=["btf_extractor/ubo2014.pyx"],
include_dirs=[numpy.get_include(), "btf_extractor/c_ext"],
define_macros=[("BTF_IMPLEMENTATION", "1"), ("NPY_NO_DEPRECATED_API", "1")],
extra_compile_args=compile_args,
extra_link_args=link_args,
language="c++",
)
]
def build(setup_kwargs):
"""
This function is mandatory in order to build the extensions.
"""
setup_kwargs.update(
{"ext_modules": cythonize(ext_modules)}
)
return setup_kwargs
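# Usage note: this follows the Poetry custom-build convention, in which the
# build backend imports this module and calls ``build(setup_kwargs)`` so the
# Cython extensions are compiled during packaging (an inference from the
# docstring; the pyproject.toml wiring is not shown in this file).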
if __name__ == "__main__":
build({})
| 24.25 | 87 | 0.617698 | 139 | 1,164 | 4.899281 | 0.517986 | 0.096916 | 0.061674 | 0.066079 | 0.07489 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019672 | 0.213918 | 1,164 | 47 | 88 | 24.765957 | 0.72459 | 0.075601 | 0 | 0 | 0 | 0 | 0.248582 | 0.043478 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030303 | false | 0 | 0.121212 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d94b812ea86c3f0c6f03bfe005b3691242fb682f | 1,871 | py | Python | src/compas_plotters/artists/circleartist.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | null | null | null | src/compas_plotters/artists/circleartist.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | null | null | null | src/compas_plotters/artists/circleartist.py | XingxinHE/compas | d2901dbbacdaf4694e5adae78ba8f093f10532bf | [
"MIT"
] | null | null | null | from compas_plotters.artists import Artist
from matplotlib.patches import Circle as CirclePatch
# from matplotlib.transforms import ScaledTranslation
__all__ = ['CircleArtist']
class CircleArtist(Artist):
""""""
zorder = 1000
def __init__(self, circle, linewidth=1.0, linestyle='solid', facecolor=(1.0, 1.0, 1.0), edgecolor=(0, 0, 0), fill=True, alpha=1.0):
super(CircleArtist, self).__init__(circle)
self._mpl_circle = None
self.circle = circle
self.linewidth = linewidth
self.linestyle = linestyle
self.facecolor = facecolor
self.edgecolor = edgecolor
self.fill = fill
self.alpha = alpha
@property
def data(self):
points = [
self.circle.center[:2],
self.circle.center[:2],
self.circle.center[:2],
self.circle.center[:2]
]
points[0][0] -= self.circle.radius
points[1][0] += self.circle.radius
points[2][1] -= self.circle.radius
points[3][1] += self.circle.radius
return points
def update_data(self):
self.plotter.axes.update_datalim(self.data)
def draw(self):
circle = CirclePatch(
self.circle.center[:2],
linewidth=self.linewidth,
linestyle=self.linestyle,
radius=self.circle.radius,
facecolor=self.facecolor,
edgecolor=self.edgecolor,
fill=self.fill,
zorder=self.zorder
)
self._mpl_circle = self.plotter.axes.add_artist(circle)
self.update_data()
def redraw(self):
self._mpl_circle.center = self.circle.center[:2]
self._mpl_circle.set_radius(self.circle.radius)
self._mpl_circle.set_edgecolor(self.edgecolor)
self._mpl_circle.set_facecolor(self.facecolor)
self.update_data()
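# Minimal usage sketch (assumes the artist is registered with a
# compas_plotters Plotter, which supplies the ``plotter`` attribute used in
# draw(); the exact registration API may differ between compas versions):
#
#   from compas.geometry import Plane, Circle
#   artist = CircleArtist(Circle(Plane([0, 0, 0], [0, 0, 1]), 1.0), fill=False)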
| 30.672131 | 135 | 0.611438 | 217 | 1,871 | 5.119816 | 0.230415 | 0.135014 | 0.070207 | 0.091809 | 0.121512 | 0.061206 | 0.061206 | 0.061206 | 0.061206 | 0.061206 | 0 | 0.022794 | 0.273116 | 1,871 | 60 | 136 | 31.183333 | 0.794118 | 0.027258 | 0 | 0.122449 | 0 | 0 | 0.009382 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102041 | false | 0 | 0.040816 | 0 | 0.204082 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d94bae590f7b253620b6f2a82a919c8745ff9eb2 | 1,044 | py | Python | SortedPriorityQueue.py | sidhu177/pythonprog | a75285e9e4d3cd6f1257b9a79dc39e49c68a695d | [
"MIT"
] | 2 | 2019-05-01T04:32:07.000Z | 2019-05-04T02:22:16.000Z | SortedPriorityQueue.py | sidhu177/pythonprog | a75285e9e4d3cd6f1257b9a79dc39e49c68a695d | [
"MIT"
] | null | null | null | SortedPriorityQueue.py | sidhu177/pythonprog | a75285e9e4d3cd6f1257b9a79dc39e49c68a695d | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Tue Sep 18 21:06:14 2018
Taken from Data Structures and Algorithms using Python
"""
class SortedPriorityQueue(PriorityQueueBase):
def __init__(self):
self._data = PositionalList()
def __len__(self):
return len(self._data)
def add(self,key,value):
newest = self._Item(key,value)
walk = self._data.last()
while walk is not None and newest < walk.element():
walk = self._data.before(walk)
if walk is None:
self._data.add_first(newest)
else:
self._data.add_after(walk,newest)
def min(self):
if self.is_empty():
raise Empty('Priority Queue is empty')
p = self._data.first()
item = p.element()
return (item._key,item._value)
def remove_min(self):
if self.is_empty():
raise Empty('Priority queue is empty')
        item = self._data.delete(self._data.first())
return (item._key, item._value) | 29 | 59 | 0.579502 | 131 | 1,044 | 4.412214 | 0.412214 | 0.124567 | 0.041522 | 0.044983 | 0.249135 | 0.17301 | 0.17301 | 0.17301 | 0.17301 | 0.17301 | 0 | 0.017956 | 0.306513 | 1,044 | 36 | 60 | 29 | 0.780387 | 0.109195 | 0 | 0.16 | 0 | 0 | 0.049837 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0 | 0.04 | 0.36 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d94bb0c3552420db1ae043fe4832aefa03e3c1f7 | 14,576 | py | Python | f5/utils/test/test_iapp_parser.py | jputrino/f5-common-python | 64cd019eb22b0e9a49e0c49ebb05f2a23ffa0e49 | [
"Apache-2.0"
] | null | null | null | f5/utils/test/test_iapp_parser.py | jputrino/f5-common-python | 64cd019eb22b0e9a49e0c49ebb05f2a23ffa0e49 | [
"Apache-2.0"
] | null | null | null | f5/utils/test/test_iapp_parser.py | jputrino/f5-common-python | 64cd019eb22b0e9a49e0c49ebb05f2a23ffa0e49 | [
"Apache-2.0"
] | null | null | null | # Copyright 2016 F5 Networks Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
from f5.utils import iapp_parser as ip
import pytest
good_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
brace_in_quote_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for "" the template
}
implementation {
# TMSH"{}{{}}}}}""{{{{}}"implementation code
}
presentation {
# APL"{}{}{{{{{{" presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
no_desc_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules { ltm }
}'''
empty_rm_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules { }
}'''
whitespace_rm_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules {}
}'''
none_rm_templ = '''sys application template good_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
partition <partition name>
requires-modules none
}'''
no_open_brace_templ = '''sys application template no_open_brace_templ {
actions {
definition {
html-help
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl {security role}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
no_close_brace_templ = '''sys application template no_close_brace_template {
actions {
definition {
html-help {
# HTML Help for the template
# Missing closing braces
implementation {
# TMSH implementation code
'''
no_pres_templ = '''sys application template no_pres_templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
role-acl {<security role>}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
no_name_templ = '''sys application template {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
bad_name_templ = '''sys application template bad#updown {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
role-acl {<security role>}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
name_brace_templ = '''sys application template name_next_to_brace{
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
role-acl {security role}
run-as <user context>
}
}
description <template description>
partition <partition name>
}'''
good_attr_templ = '''sys application template good_templ {
actions {
definition {
html-help {}
implementation {}
presentation {}
}
}
description <template description>
partition just_a_partition name
}'''
no_help_templ = '''sys application template good_templ {
actions {
definition {
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm asm }
}'''
dot_name_templ = '''sys application template good.dot.templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
dot_hyphen_name_templ = '''sys application template good.-dot-hyphen.-templ {
actions {
definition {
html-help {
# HTML Help for the template
}
implementation {
# TMSH implementation code
}
presentation {
# APL presentation language
}
role-acl { hello test }
run-as <user context>
}
}
description <template description>
partition <partition name>
requires-modules { ltm }
}'''
good_templ_dict = {
u'name': u'good_templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
brace_in_quote_templ_dict = {
u'name': u'good_templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for "" the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH"{}{{}}}}}""{{{{}}"implementation code',
u'presentation': u'# APL"{}{}{{{{{{" presentation language'
}
}
}
no_help_templ_dict = {
u'name': u'good_templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm', u'asm'],
'actions': {
'definition': {
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
none_rm_templ_dict = {
u'name': u'good_templ',
u'partition': u'<partition name>',
u'requiresModules': u'none',
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
dot_name_templ_dict = {
u'name': u'good.dot.templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
dot_hyphen_name_templ_dict = {
u'name': u'good.-dot-hyphen.-templ',
u'description': u'<template description>',
u'partition': u'<partition name>',
u'requiresModules': [u'ltm'],
'actions': {
'definition': {
u'htmlHelp': u'# HTML Help for the template',
u'roleAcl': [u'hello', u'test'],
u'implementation': u'# TMSH implementation code',
u'presentation': u'# APL presentation language'
}
}
}
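# The fixture below appends a bogus section name to the parser's shared
# template_sections list and removes it again on teardown, so the mutation
# does not leak into other tests.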
@pytest.fixture
def TemplateSectionSetup(request):
def tearDown():
prsr.template_sections.remove('notfound')
request.addfinalizer(tearDown)
prsr = ip.IappParser(good_templ)
prsr.template_sections.append('notfound')
return prsr
def test__init__():
prsr = ip.IappParser(good_templ)
assert prsr.template_str == good_templ
def test__init__error():
prsr = None
with pytest.raises(ip.EmptyTemplateException) as EmptyTemplateExceptInfo:
prsr = ip.IappParser('')
assert EmptyTemplateExceptInfo.value.message == \
'Template empty or None value.'
assert prsr is None
def test_get_section_end_index():
prsr = ip.IappParser(good_templ)
impl_start = prsr._get_section_start_index(u'implementation')
impl_end = prsr._get_section_end_index(u'implementation', impl_start)
templ_impl = unicode('''{
# TMSH implementation code
}''')
assert good_templ[impl_start:impl_end+1] == templ_impl
def test_get_section_start_index_no_open_brace_error():
prsr = ip.IappParser(no_open_brace_templ)
with pytest.raises(ip.NonextantSectionException) as \
NonextantSectionExceptInfo:
prsr._get_section_start_index(u'html-help')
assert NonextantSectionExceptInfo.value.message == \
'Section html-help not found in template'
def test_get_section_end_no_close_brace_error():
prsr = ip.IappParser(no_close_brace_templ)
with pytest.raises(ip.CurlyBraceMismatchException) as \
CurlyBraceMismatchExceptInfo:
help_start = prsr._get_section_start_index(u'html-help')
prsr._get_section_end_index(u'html_help', help_start)
assert CurlyBraceMismatchExceptInfo.value.message == \
'Curly braces mismatch in section html_help.'
def test_get_template_name():
prsr = ip.IappParser(good_templ)
assert prsr._get_template_name() == u'good_templ'
def test_get_template_name_next_to_brace():
prsr = ip.IappParser(name_brace_templ)
assert prsr._get_template_name() == u'name_next_to_brace'
def test_get_template_name_error():
prsr = ip.IappParser(no_name_templ)
with pytest.raises(ip.NonextantTemplateNameException) as \
NonextantTemplateNameExceptInfo:
prsr._get_template_name()
assert NonextantTemplateNameExceptInfo.value.message == \
'Template name not found.'
def test_get_template_name_bad_name_error():
prsr = ip.IappParser(bad_name_templ)
with pytest.raises(ip.NonextantTemplateNameException) as \
NonextantTemplateNameExceptInfo:
prsr._get_template_name()
assert NonextantTemplateNameExceptInfo.value.message == \
'Template name not found.'
def test_get_template_name_with_dot():
prsr = ip.IappParser(dot_name_templ)
assert prsr.parse_template() == dot_name_templ_dict
def test_get_template_name_with_dot_hyphen():
prsr = ip.IappParser(dot_hyphen_name_templ)
assert prsr.parse_template() == dot_hyphen_name_templ_dict
def test_parse_template():
prsr = ip.IappParser(good_templ)
assert prsr.parse_template() == good_templ_dict
def test_parse_template_brace_in_quote():
prsr = ip.IappParser(brace_in_quote_templ)
assert prsr.parse_template() == brace_in_quote_templ_dict
def test_parse_template_no_section_found(TemplateSectionSetup):
with pytest.raises(ip.NonextantSectionException) as \
NonextantSectionExceptInfo:
TemplateSectionSetup.parse_template()
assert 'notfound' in TemplateSectionSetup.template_sections
assert 'Section notfound not found in template' in \
NonextantSectionExceptInfo.value.message
def test_parse_template_no_section_found_not_required():
prsr = ip.IappParser(no_help_templ)
templ_dict = prsr.parse_template()
assert templ_dict == no_help_templ_dict
def test_get_template_attr():
prsr = ip.IappParser(good_attr_templ)
attr = prsr._get_template_attr(u'partition')
assert attr == u'just_a_partition name'
def test_get_template_attr_attr_not_exists():
prsr = ip.IappParser(good_attr_templ)
attr = prsr._get_template_attr(u'bad_attr')
assert attr is None
def test_attr_no_description():
prsr = ip.IappParser(no_desc_templ)
templ_dict = prsr.parse_template()
assert 'description' not in templ_dict
def test_attr_empty_rm_error():
prsr = ip.IappParser(empty_rm_templ)
with pytest.raises(ip.MalformedTCLListException) as ex:
prsr.parse_template()
assert 'requires-modules' in ex.value.message
def test_attr_whitespace_rm_error():
prsr = ip.IappParser(whitespace_rm_templ)
with pytest.raises(ip.MalformedTCLListException) as ex:
prsr.parse_template()
assert 'TCL list for "requires-modules" is malformed. If no elements are '\
'needed "none" should be used without curly braces.' in \
ex.value.message
def test_attr_none_rm():
prsr = ip.IappParser(none_rm_templ)
templ_dict = prsr.parse_template()
assert templ_dict == none_rm_templ_dict
| 26.215827 | 79 | 0.64565 | 1,627 | 14,576 | 5.590658 | 0.110018 | 0.034301 | 0.05321 | 0.029244 | 0.748461 | 0.691953 | 0.657432 | 0.583773 | 0.557828 | 0.547713 | 0 | 0.001011 | 0.253224 | 14,576 | 555 | 80 | 26.263063 | 0.834635 | 0.03787 | 0 | 0.552017 | 0 | 0 | 0.551788 | 0.010208 | 0 | 0 | 0 | 0 | 0.048832 | 1 | 0.048832 | false | 0 | 0.004246 | 0 | 0.055202 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9514413b1dc12beee51ba849953b233bcf53932 | 6,217 | py | Python | tests/test_redis.py | fedej/aio-rom | e84d55b84ca459b930d0cd86fd33f161cb26c7df | [
"MIT"
] | 6 | 2021-03-22T22:12:34.000Z | 2022-02-14T01:30:37.000Z | tests/test_redis.py | fedej/aio-rom | e84d55b84ca459b930d0cd86fd33f161cb26c7df | [
"MIT"
] | 52 | 2021-02-22T16:38:27.000Z | 2022-03-07T18:06:18.000Z | tests/test_redis.py | fedej/aio-rom | e84d55b84ca459b930d0cd86fd33f161cb26c7df | [
"MIT"
] | null | null | null | import os
import sys
from dataclasses import field
from typing import List, Optional, Set, cast
from unittest import skipUnless
from aio_rom import Model
from aio_rom.attributes import RedisModelSet
if sys.version_info >= (3, 8):
from unittest.async_case import IsolatedAsyncioTestCase as TestCase
ASYNCTEST = False
else:
from asynctest import TestCase
ASYNCTEST = True
from aio_rom.fields import Metadata
from aio_rom.session import redis_pool
class Bar(Model, unsafe_hash=True):
field1: int
field2: str
field3: List[int] = field(metadata=Metadata(eager=True), hash=False)
field4: int = 3
class Foo(Model, unsafe_hash=True):
eager_bars: List[Bar] = field(metadata=Metadata(eager=True), hash=False)
lazy_bars: Set[Bar] = field(compare=False, metadata=Metadata(cascade=True))
f1: Optional[str] = None
class FooBar(Model):
foos: Set[Foo] = field(metadata=Metadata(cascade=True, eager=True))
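# In these models, Metadata(eager=True) fields are loaded together with the
# parent, while lazy ones such as Foo.lazy_bars must be fetched explicitly via
# .load(); Metadata(cascade=True) saves referenced models alongside the parent
# (behaviour inferred from the tests below).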
@skipUnless(os.environ.get("CI"), "Redis CI test only")
class RedisIntegrationTestCase(TestCase):
async def asyncSetUp(self) -> None:
self.bar = Bar(1, 123, "value", [1, 2, 3])
async def asyncTearDown(self) -> None:
await Foo.delete_all()
await Bar.delete_all()
await FooBar.delete_all()
if ASYNCTEST:
tearDown = asyncTearDown # type: ignore[assignment]
setUp = asyncSetUp # type: ignore[assignment]
async def test_save(self) -> None:
await self.bar.save()
async with redis_pool() as redis:
field1 = await redis.hget("bar:1", "field1")
field2 = await redis.hget("bar:1", "field2")
field3 = await redis.hget("bar:1", "field3")
field3_value = await redis.lrange("bar:1:field3", 0, -1)
assert "123" == field1
assert "value" == field2
assert "bar:1:field3" == field3
assert ["1", "2", "3"] == field3_value
async def test_get(self) -> None:
await self.bar.save()
bar = await Bar.get(1)
assert self.bar == bar
async def test_get_with_references(self) -> None:
await self.bar.save()
foo = Foo(123, [self.bar], {self.bar})
await foo.save()
gotten_foo = await Foo.get(123)
assert foo == gotten_foo
await cast(RedisModelSet[Bar], gotten_foo.lazy_bars).load()
for bar in gotten_foo.lazy_bars:
assert bar in foo.lazy_bars
assert len(foo.lazy_bars) == len(gotten_foo.lazy_bars)
async def _test_collection_references(self, test_cascade: bool = False) -> None:
await self.bar.save()
foo = Foo(123, [self.bar], {self.bar})
if not test_cascade:
await foo.save()
foobar = FooBar(321, {foo})
await foobar.save()
gotten_foobar = await FooBar.get(321)
assert foobar == gotten_foobar
assert {foo} == gotten_foobar.foos
for gotten_foo in gotten_foobar.foos:
assert 1 == len(gotten_foo.eager_bars)
await cast(RedisModelSet[Bar], gotten_foo.lazy_bars).load()
for bar in gotten_foo.lazy_bars:
assert bar in foo.lazy_bars
async def test_collections(self) -> None:
await self._test_collection_references()
async def test_collection_cascades_references(self) -> None:
await self._test_collection_references(test_cascade=True)
async def test_update_collection_references(self) -> None:
await self.bar.save()
foo = Foo(123, [self.bar], {self.bar})
foobar = FooBar(321, {foo})
await foobar.save()
refreshed = await foobar.refresh()
foo2 = Foo(222, [], set())
refreshed.foos.add(foo2)
await refreshed.save()
gotten_foobar = await FooBar.get(321)
assert refreshed == gotten_foobar
assert {foo, foo2} == gotten_foobar.foos
async def test_update(self) -> None:
await self.bar.save()
await self.bar.update(field2="updated")
async with redis_pool() as redis:
field2 = await redis.hget("bar:1", "field2")
assert "updated" == field2
bar = await Bar.get(1)
assert "updated" == bar.field2
async def test_update_reference(self) -> None:
await self.bar.save()
foo = Foo(123, [self.bar], {self.bar})
await foo.save()
bar2 = Bar(2, 123, "otherbar", [1, 2, 3, 4])
await bar2.save()
foo = await foo.update(lazy_bars={bar2})
async with redis_pool() as redis:
lazy_bars = await redis.smembers("foo:123:lazy_bars")
assert ["2"] == lazy_bars
foo = await foo.update(eager_bars=[bar2])
async with redis_pool() as redis:
eager_bars = await redis.lrange("foo:123:eager_bars", 0, -1)
assert ["2"] == eager_bars
gotten_foo = await Foo.get(123)
assert foo == gotten_foo
async def test_save_again_overrides_previous(self) -> None:
await self.bar.save()
bar = await Bar.get(1)
bar.field2 = "updated"
await bar.save()
async with redis_pool() as redis:
field2 = await redis.hget("bar:1", "field2")
assert "updated" == field2
async def test_delete(self) -> None:
await self.bar.save()
async with redis_pool() as redis:
assert await redis.exists("bar:1")
await self.bar.delete()
assert not await redis.exists("bar:1")
async def test_delete_all(self) -> None:
await self.bar.save()
async with redis_pool() as redis:
await Bar.delete_all()
assert not await redis.keys("bar*")
async def test_lazy_collection_cascade(self) -> None:
foo = Foo(123, [self.bar], {self.bar})
await foo.save()
foo = await Foo.get(123)
other_bar = Bar(2, 124, "value2", [])
foo.lazy_bars.add(other_bar)
await foo.save()
gotten_foo = await Foo.get(123)
assert foo == gotten_foo
await cast(RedisModelSet[Bar], gotten_foo.lazy_bars).load()
await cast(RedisModelSet[Bar], foo.lazy_bars).load()
assert 2 == len(foo.lazy_bars) == len(gotten_foo.lazy_bars)
| 33.605405 | 84 | 0.618144 | 821 | 6,217 | 4.550548 | 0.142509 | 0.044968 | 0.041756 | 0.050054 | 0.442987 | 0.415418 | 0.395343 | 0.327891 | 0.287473 | 0.269272 | 0 | 0.028646 | 0.264436 | 6,217 | 184 | 85 | 33.788043 | 0.788323 | 0.007882 | 0 | 0.378378 | 0 | 0 | 0.034874 | 0 | 0 | 0 | 0 | 0 | 0.168919 | 1 | 0 | false | 0 | 0.074324 | 0 | 0.155405 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d95365e9560d7743acef72009c98362dad09f3a4 | 1,514 | py | Python | data/transcoder_evaluation_gfg/python/DYNAMIC_PROGRAMMING_SET_17_PALINDROME_PARTITIONING.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 241 | 2021-07-20T08:35:20.000Z | 2022-03-31T02:39:08.000Z | data/transcoder_evaluation_gfg/python/DYNAMIC_PROGRAMMING_SET_17_PALINDROME_PARTITIONING.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 49 | 2021-07-22T23:18:42.000Z | 2022-03-24T09:15:26.000Z | data/transcoder_evaluation_gfg/python/DYNAMIC_PROGRAMMING_SET_17_PALINDROME_PARTITIONING.py | mxl1n/CodeGen | e5101dd5c5e9c3720c70c80f78b18f13e118335a | [
"MIT"
] | 71 | 2021-07-21T05:17:52.000Z | 2022-03-29T23:49:28.000Z | # Copyright (c) 2019-present, Facebook, Inc.
# All rights reserved.
#
# This source code is licensed under the license found in the
# LICENSE file in the root directory of this source tree.
#
def f_gold ( str ) :
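    # Minimum cuts for palindrome partitioning: P[i][j] records whether
    # str[i..j] is a palindrome and C[i][j] the minimum number of cuts needed
    # for that substring; the answer is C[0][n - 1].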
n = len ( str )
C = [ [ 0 for i in range ( n ) ] for i in range ( n ) ]
P = [ [ False for i in range ( n ) ] for i in range ( n ) ]
j = 0
k = 0
L = 0
for i in range ( n ) :
P [ i ] [ i ] = True ;
C [ i ] [ i ] = 0 ;
for L in range ( 2 , n + 1 ) :
for i in range ( n - L + 1 ) :
j = i + L - 1
if L == 2 :
P [ i ] [ j ] = ( str [ i ] == str [ j ] )
else :
P [ i ] [ j ] = ( ( str [ i ] == str [ j ] ) and P [ i + 1 ] [ j - 1 ] )
if P [ i ] [ j ] == True :
C [ i ] [ j ] = 0
else :
C [ i ] [ j ] = 100000000
for k in range ( i , j ) :
C [ i ] [ j ] = min ( C [ i ] [ j ] , C [ i ] [ k ] + C [ k + 1 ] [ j ] + 1 )
return C [ 0 ] [ n - 1 ]
#TOFILL
if __name__ == '__main__':
param = [
('ydYdV',),
('4446057',),
('0111',),
('keEj',),
('642861576557',),
('11111000101',),
('ram',),
('09773261',),
('1',),
('AVBEKClFdj',)
]
n_success = 0
for i, parameters_set in enumerate(param):
if f_filled(*parameters_set) == f_gold(*parameters_set):
n_success+=1
print("#Results: %i, %i" % (n_success, len(param))) | 29.115385 | 97 | 0.402246 | 207 | 1,514 | 2.859903 | 0.328502 | 0.094595 | 0.060811 | 0.111486 | 0.165541 | 0.14527 | 0.118243 | 0.081081 | 0.081081 | 0.081081 | 0 | 0.086705 | 0.428666 | 1,514 | 52 | 98 | 29.115385 | 0.597688 | 0.122193 | 0 | 0.047619 | 0 | 0 | 0.067322 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.02381 | false | 0 | 0 | 0 | 0.047619 | 0.02381 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d955de847bc6455510ab5fae7ec28c13dd87bbec | 1,201 | py | Python | Algorithms/0033_Search_in_Rotated_Sorted_Array/Python/Search_in_Rotated_Sorted_Array_Solution_1.py | lht19900714/Leetcode_Solutions | dac7a038329a5c1f8a78e86cc6f49116b963f1fb | [
"MIT"
] | null | null | null | Algorithms/0033_Search_in_Rotated_Sorted_Array/Python/Search_in_Rotated_Sorted_Array_Solution_1.py | lht19900714/Leetcode_Solutions | dac7a038329a5c1f8a78e86cc6f49116b963f1fb | [
"MIT"
] | null | null | null | Algorithms/0033_Search_in_Rotated_Sorted_Array/Python/Search_in_Rotated_Sorted_Array_Solution_1.py | lht19900714/Leetcode_Solutions | dac7a038329a5c1f8a78e86cc6f49116b963f1fb | [
"MIT"
] | null | null | null |
# Space: O(1)
# Time: O(logn)
class Solution:
def search(self, nums, target):
length = len(nums)
if length == 0: return -1
if length == 1: return 0 if nums[0] == target else -1
# First, find out the actual end point of sorted array
left, right = 0, length - 1
while left + 1 < right:
mid = (left + right) // 2
if nums[mid] > nums[right]:
left = mid
else:
right = mid
actual_end_point = right if nums[right] > nums[left] else left
# Second, execute regular binary search for target number
res = self.binary_search(nums, target, 0, actual_end_point)
if res != -1:
return res
else:
return self.binary_search(nums, target, actual_end_point + 1, length - 1)
def binary_search(self, alist, target, start, end):
left, right = start, end
while left <= right:
mid = (left + right) // 2
if alist[mid] == target:
return mid
if alist[mid] < target:
left = mid + 1
else:
right = mid - 1
return -1
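# Example: for nums = [4, 5, 6, 7, 0, 1, 2] the pivot search sets
# actual_end_point = 3 (the last index of the larger sorted run), and
# Solution().search([4, 5, 6, 7, 0, 1, 2], 0) returns 4.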
| 27.930233 | 85 | 0.507077 | 150 | 1,201 | 4 | 0.28 | 0.075 | 0.093333 | 0.056667 | 0.153333 | 0.066667 | 0 | 0 | 0 | 0 | 0 | 0.026243 | 0.397169 | 1,201 | 42 | 86 | 28.595238 | 0.802486 | 0.111574 | 0 | 0.172414 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0 | 0 | 0.241379 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d959215b82b03d107b269df9d66aba263c6dfe42 | 7,807 | py | Python | nxs_libs/interface/workload_manager/simple_policy.py | microsoft/nxs | b271c0637576084b36bd0bd397a673fb348913b3 | [
"MIT"
] | 5 | 2022-03-23T21:27:42.000Z | 2022-03-24T19:57:27.000Z | nxs_libs/interface/workload_manager/simple_policy.py | microsoft/nxs | b271c0637576084b36bd0bd397a673fb348913b3 | [
"MIT"
] | null | null | null | nxs_libs/interface/workload_manager/simple_policy.py | microsoft/nxs | b271c0637576084b36bd0bd397a673fb348913b3 | [
"MIT"
] | 1 | 2022-03-23T21:27:44.000Z | 2022-03-23T21:27:44.000Z | import time
import numpy as np
from typing import Dict, List, Tuple
from nxs_libs.interface.workload_manager import (
NxsBaseWorkloadManagerPolicy,
)
from nxs_types.frontend import FrontendModelPipelineWorkloadReport
from nxs_types.message import (
NxsMsgPinWorkload,
NxsMsgType,
NxsMsgReportInputWorkloads,
NxsMsgUnpinWorkload,
)
from nxs_types.nxs_args import NxsWorkloadManagerArgs
class FrontendWorkloads:
def __init__(self, frontend: str, model_timeout_secs: float) -> None:
self.frontend = frontend
self.model_timeout_secs = model_timeout_secs
self.uuid2throughput: Dict[str, List[float]] = {}
self.uuid2timestamps: Dict[str, List[float]] = {}
# self.uuid2pipelineuuid: Dict[str, str] = {}
# self.uuid2sessionuuid: Dict[str, str] = {}
def add_workload(self, workload: FrontendModelPipelineWorkloadReport):
uuid = f"{workload.pipeline_uuid}_{workload.session_uuid}"
if uuid not in self.uuid2throughput:
self.uuid2throughput[uuid] = []
self.uuid2timestamps[uuid] = []
# self.uuid2pipelineuuid[uuid] = workload.pipeline_uuid
# self.uuid2sessionuuid[uuid] = workload.session_uuid
self.uuid2throughput[uuid].append(workload.fps)
self.uuid2timestamps[uuid].append(time.time())
def _remove_expired(self, uuid: str):
timestamps = self.uuid2timestamps.get(uuid, [])
for idx in range(len(timestamps)):
elapsed = time.time() - timestamps[0]
# print(idx, elapsed, self.model_timeout_secs)
if elapsed < self.model_timeout_secs:
break
self.uuid2throughput[uuid].pop(0)
self.uuid2timestamps[uuid].pop(0)
if not self.uuid2throughput[uuid]:
self.uuid2throughput.pop(uuid)
self.uuid2timestamps.pop(uuid)
# self.uuid2pipelineuuid.pop(uuid)
# self.uuid2sessionuuid.pop(uuid)
# print(f"Removed workload {uuid} from frontend {self.frontend}")
def remove_expired(self):
for uuid in list(self.uuid2throughput.keys()):
self._remove_expired(uuid)
def get_workloads(self) -> Dict[str, float]:
data = {}
self.remove_expired()
for uuid in self.uuid2throughput:
fps = np.sum(self.uuid2throughput[uuid])
if fps > 0:
duration = max(1, time.time() - self.uuid2timestamps[uuid][0])
data[uuid] = float(fps) / duration
return data
class NxsSimpleWorkloadManagerPolicy(NxsBaseWorkloadManagerPolicy):
def __init__(self, args: NxsWorkloadManagerArgs) -> None:
super().__init__(args)
# self.frontend2workloads:Dict[str, FrontendWorkloads] = {}
self.uuid2throughput: Dict[str, List[float]] = {}
self.uuid2timestamps: Dict[str, List[float]] = {}
self.pinned_workloads: Dict[str, float] = {}
self.t0 = time.time()
def add_workload(self, workload: FrontendModelPipelineWorkloadReport) -> bool:
is_new_workload = False
uuid = f"{workload.pipeline_uuid}_{workload.session_uuid}"
if uuid not in self.uuid2throughput:
self.uuid2throughput[uuid] = []
self.uuid2timestamps[uuid] = []
# self.uuid2pipelineuuid[uuid] = workload.pipeline_uuid
# self.uuid2sessionuuid[uuid] = workload.session_uuid
is_new_workload = True
self._log(f"Added new workload {uuid}")
self.uuid2throughput[uuid].append(workload.fps)
self.uuid2timestamps[uuid].append(time.time())
return is_new_workload
def _remove_expired(self, uuid: str):
timestamps = self.uuid2timestamps.get(uuid, [])
for idx in range(len(timestamps)):
elapsed = time.time() - timestamps[0]
# print(idx, elapsed, self.model_timeout_secs)
if elapsed < self.args.model_timeout_secs:
break
self.uuid2throughput[uuid].pop(0)
self.uuid2timestamps[uuid].pop(0)
if not self.uuid2throughput[uuid]:
self.uuid2throughput.pop(uuid)
self.uuid2timestamps.pop(uuid)
# self.uuid2pipelineuuid.pop(uuid)
# self.uuid2sessionuuid.pop(uuid)
# print(f"Removed workload {uuid}")
self._log(f"Removed workload {uuid}")
def remove_expired(self):
for uuid in list(self.uuid2throughput.keys()):
self._remove_expired(uuid)
def get_workloads(self) -> Dict[str, float]:
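        # Returns the average requested FPS per "<pipeline_uuid>_<session_uuid>"
        # key, computed from the reports still inside the model timeout window.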
data = {}
self.remove_expired()
for uuid in self.uuid2throughput:
fps = np.sum(self.uuid2throughput[uuid])
if fps > 0:
duration = max(1, time.time() - self.uuid2timestamps[uuid][0])
data[uuid] = float(fps) / duration
return data
def generate_scheduling_msgs(
self,
) -> List[FrontendModelPipelineWorkloadReport]:
workloads_dict = {}
msgs = []
frontend_workloads_dict = self.get_workloads()
for uuid in frontend_workloads_dict:
if uuid not in workloads_dict:
workloads_dict[uuid] = 0
workloads_dict[uuid] += frontend_workloads_dict[uuid]
# process pinned_workloads
for uuid in self.pinned_workloads:
if uuid not in workloads_dict:
workloads_dict[uuid] = 0
workloads_dict[uuid] += self.pinned_workloads[uuid]
for uuid in workloads_dict:
# print(uuid)
pipeline_uuid, session_uuid = uuid.split("_")
msg = FrontendModelPipelineWorkloadReport(
pipeline_uuid=pipeline_uuid,
session_uuid=session_uuid,
fps=workloads_dict[uuid],
)
msgs.append(msg)
return msgs
def process_msgs(
self, msgs: List[NxsMsgReportInputWorkloads]
) -> Tuple[bool, List[FrontendModelPipelineWorkloadReport]]:
to_schedule = False
scheduling_msgs = []
for msg in msgs:
# print(msg)
if msg.type == NxsMsgType.REGISTER_WORKLOADS:
# frontend_name = msg.data.frontend_name
for workload in msg.data.workload_reports:
if (
self.add_workload(workload)
and self.args.enable_instant_scheduling
):
to_schedule = True
elif msg.type == NxsMsgType.PIN_WORKLOADS:
pin_msg: NxsMsgPinWorkload = msg
uuid = f"{pin_msg.pipeline_uuid}_{pin_msg.session_uuid}"
self.pinned_workloads[uuid] = pin_msg.fps
to_schedule = True
self._log(
f"Pinning workload - pipeline_uuid: {pin_msg.pipeline_uuid} - session_uuid: {pin_msg.session_uuid} - fps: {pin_msg.fps}"
)
elif msg.type == NxsMsgType.UNPIN_WORKLOADS:
unpin_msg: NxsMsgUnpinWorkload = msg
uuid = f"{unpin_msg.pipeline_uuid}_{unpin_msg.session_uuid}"
if uuid in self.pinned_workloads:
self.pinned_workloads.pop(uuid)
self._log(
f"Unpinning workload - pipeline_uuid: {unpin_msg.pipeline_uuid} - session_uuid: {unpin_msg.session_uuid}"
)
if time.time() - self.t0 > self.args.report_workloads_interval:
to_schedule = True
if to_schedule:
# generate scheduling data
scheduling_msgs = self.generate_scheduling_msgs()
# print(scheduling_msgs)
self.t0 = time.time()
return to_schedule, scheduling_msgs
| 35.811927 | 140 | 0.608685 | 800 | 7,807 | 5.7525 | 0.13375 | 0.082573 | 0.049978 | 0.017384 | 0.562364 | 0.511082 | 0.473707 | 0.473707 | 0.473707 | 0.473707 | 0 | 0.011293 | 0.296785 | 7,807 | 217 | 141 | 35.976959 | 0.826958 | 0.103625 | 0 | 0.473333 | 0 | 0.006667 | 0.06594 | 0.040998 | 0 | 0 | 0 | 0 | 0 | 1 | 0.08 | false | 0 | 0.046667 | 0 | 0.173333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d95e1a45cff563a0bc7667c6cb86319a45a18004 | 23,258 | py | Python | examples/grid_convergence.py | H0R5E/SNL-Delft3D-CEC-Verify | 234c0acead13c74bad2979b300671733c7b184f7 | [
"MIT"
] | null | null | null | examples/grid_convergence.py | H0R5E/SNL-Delft3D-CEC-Verify | 234c0acead13c74bad2979b300671733c7b184f7 | [
"MIT"
] | 2 | 2021-12-10T17:17:21.000Z | 2022-02-22T00:25:15.000Z | examples/grid_convergence.py | H0R5E/SNL-Delft3D-CEC-Verify | 234c0acead13c74bad2979b300671733c7b184f7 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import os
import uuid
import platform
import warnings
from pathlib import Path
from collections import defaultdict
from dataclasses import replace
import numpy as np
import pandas as pd
import matplotlib
import matplotlib.pyplot as plt
from convergence import Convergence
from snl_d3d_cec_verify import (MycekStudy,
Report,
Result,
LiveRunner,
Template,
Validate)
from snl_d3d_cec_verify.result import (get_reset_origin,
get_normalised_dims,
get_normalised_data,
get_normalised_data_deficit)
from snl_d3d_cec_verify.text import Spinner
matplotlib.rcParams.update({'font.size': 8})
def main(template_type, max_experiments, omp_num_threads):
# Steps:
#
# 1. Define a series of grid studies, doubling resolution
# 2. Iterate
# 3. Determine U_\infty by running without turbines
# 4. Run with turbines
# 5. Record results
# 6. After 3 runs record asymptotic ratio
# 7. If in asymptotic range stop iterating
# 8. Calculate resolution at desired GCI
# 9. Compute at desired resolution if lower than last iteration
# 10. Make report
# Set grid resolutions and reporting times
grid_resolution = [1 / 2 ** i for i in range(max_experiments)]
sigma = [int(2 / delta) for delta in grid_resolution]
kwargs = {"dx": grid_resolution,
"dy": grid_resolution,
"sigma": sigma,
"restart_interval": 600}
# Choose options based on the template type
if template_type == "fm":
kwargs["stats_interval"] = [240 / (k ** 2) for k in sigma]
elif template_type == "structured":
# Set time step based on flexible mesh runs
dt_init_all = [0.5, 0.25, 0.1, 0.0375, 0.0125]
kwargs["dt_init"] = dt_init_all[:max_experiments]
else:
raise ValueError(f"Template type '{template_type}' unrecognised")
cases = MycekStudy(**kwargs)
template = Template(template_type)
# Use the LiveRunner class to get real time feedback from the Delft3D
# calculation
runner = LiveRunner(get_d3d_bin_path(),
omp_num_threads=omp_num_threads)
u_infty_data = defaultdict(list)
u_wake_data = defaultdict(list)
transect_data = defaultdict(list)
u_infty_convergence = Convergence()
u_wake_convergence = Convergence()
case_counter = 0
run_directory = Path(template_type) / "runs"
run_directory.mkdir(exist_ok=True, parents=True)
report = Report(79, "%d %B %Y")
report_dir = Path(template_type) / "grid_convergence_report"
report_dir.mkdir(exist_ok=True, parents=True)
global_validate = Validate()
ustar_figs = []
ustar_axs = []
gamma_figs = []
gamma_axs = []
for _ in global_validate:
ustar_fig, ustar_ax = plt.subplots(figsize=(5, 3.5), dpi=300)
gamma_fig, gamma_ax = plt.subplots(figsize=(5, 3.5), dpi=300)
ustar_figs.append(ustar_fig)
ustar_axs.append(ustar_ax)
gamma_figs.append(gamma_fig)
gamma_axs.append(gamma_ax)
while True:
if case_counter + 1 > len(cases):
break
case = cases[case_counter]
no_turb_case = replace(case, simulate_turbines=False)
validate = Validate(case)
ncells = get_cells(case)
section = f"{case.dx}m Resolution"
print(section)
no_turb_dir = find_project_dir(run_directory, no_turb_case)
if no_turb_dir is not None:
try:
Result(no_turb_dir)
print("Loading pre-existing simulation at path "
f"'{no_turb_dir}'")
except FileNotFoundError:
no_turb_dir = None
# Determine $U_\infty$ for case, by running without the turbine
if no_turb_dir is None:
print("Simulating without turbine")
no_turb_dir = get_unique_dir(run_directory)
no_turb_dir.mkdir()
template(no_turb_case, no_turb_dir)
case_path = no_turb_dir / "case.yaml"
no_turb_case.to_yaml(case_path)
with Spinner() as spin:
for line in runner(no_turb_dir):
spin(line)
result = Result(no_turb_dir)
u_infty_ds = result.faces.extract_turbine_centre(-1, no_turb_case)
u_infty = u_infty_ds["$u$"].values.take(0)
u_infty_data["resolution (m)"].append(case.dx)
u_infty_data["# cells"].append(ncells)
u_infty_data["$U_\\infty$"].append(u_infty)
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="Insufficient grids for analysis")
u_infty_convergence.add_grids([(case.dx, u_infty)])
turb_dir = find_project_dir(run_directory, case)
if turb_dir is not None:
try:
Result(turb_dir)
print(f"Loading pre-existing simulation at path '{turb_dir}'")
except FileNotFoundError:
turb_dir = None
# Run with turbines
if turb_dir is None:
print("Simulating with turbine")
turb_dir = get_unique_dir(run_directory)
turb_dir.mkdir()
template(case, turb_dir)
case_path = turb_dir / "case.yaml"
case.to_yaml(case_path)
with Spinner() as spin:
for line in runner(turb_dir):
spin(line)
result = Result(turb_dir)
# Collect wake velocity at 1.2D downstream
u_wake_ds = result.faces.extract_turbine_centre(-1,
case,
offset_x=0.84)
u_wake = u_wake_ds["$u$"].values.take(0)
u_wake_data["resolution (m)"].append(case.dx)
u_wake_data["# cells"].append(ncells)
u_wake_data["$U_{1.2D}$"].append(u_wake)
# Record
with warnings.catch_warnings():
warnings.filterwarnings("ignore",
message="Insufficient grids for analysis")
u_wake_convergence.add_grids([(case.dx, u_wake)])
plot_transects(case, validate, result, u_infty, ustar_axs, gamma_axs)
get_transect_error(case,
validate,
result,
u_infty,
transect_data)
case_counter += 1
if case_counter < 3: continue
if abs(1 - u_wake_convergence[0].asymptotic_ratio) < 0.01:
break
if case_counter == max_experiments:
break
gci_required = 0.01
u_infty_exact = u_infty_convergence[0].fine.f_exact
u_infty_gci = u_infty_convergence.get_resolution(gci_required)
err = [abs((f0 / u_infty_exact) - 1) for f0 in u_infty_data["$U_\\infty$"]]
u_infty_data["error"] = err
u_infty_df = pd.DataFrame(u_infty_data)
u_wake_exact = u_wake_convergence[0].fine.f_exact
u_wake_gci = u_wake_convergence.get_resolution(gci_required)
err = [abs((f0 / u_wake_exact) - 1) for f0 in u_wake_data["$U_{1.2D}$"]]
u_wake_data["error"] = err
u_wake_df = pd.DataFrame(u_wake_data)
gamma0_sim = 100 * (1 - u_wake_exact / u_infty_exact)
centreline = global_validate[0]
gamma0_true = 100 * (1 - centreline.data[0] /
                         centreline.attrs["$U_\\infty$"])
gamma0_err = abs((gamma0_sim - gamma0_true) / gamma0_true)
transect_df = pd.DataFrame(transect_data)
transect_grouped = transect_df.groupby(["Transect"])
transect_summary = ""
n_transects = len(global_validate)
lower_first = lambda s: s[:1].lower() + s[1:] if s else ''
for i, transect in enumerate(global_validate):
description = transect.attrs['description']
transect_df = transect_grouped.get_group(description).drop("Transect",
axis=1)
transect_rmse = transect_df.iloc[-1, 1]
transect_summary += (
f"For the {lower_first(description)} transect, the root mean "
"square error at the lowest grid resolution was "
f"{transect_rmse:.4g}.")
if (i + 1) < n_transects:
transect_summary += " "
report.content.add_heading("Summary", level=2)
summary_text = (
f"This is a grid convergence study of {len(cases)} cases. The "
f"case with the finest grid resolution, of {case.dx}m, achieved an "
f"asymptotic ratio of {u_wake_convergence[0].asymptotic_ratio:.4g} "
"(asymptotic range is indicated by a value $\\approx 1$). At zero "
"grid resolution, the normalised velocity deficit measured 1.2 "
f"diameters downstream from the turbine was {gamma0_sim:.4g}\%, a "
f"{gamma0_err * 100:.4g}\% error against the measured value of "
f"{gamma0_true:.4g}\%. ")
summary_text += transect_summary
report.content.add_text(summary_text)
report.content.add_heading("Grid Convergence Studies", level=2)
report.content.add_heading("Free Stream Velocity", level=3)
report.content.add_text(
"This section presents the convergence study for the free stream "
"velocity ($U_\\infty$). For the final case, with grid resolution of "
f"{case.dx}m, an asymptotic ratio of "
f"{u_infty_convergence[0].asymptotic_ratio:.4g} was achieved "
"(asymptotic range is indicated by a value $\\approx 1$). The free "
f"stream velocity at zero grid resolution is {u_infty_exact:.4g}m/s. "
"The grid resolution required for a fine-grid GCI of "
f"{gci_required * 100}\% is {u_infty_gci:.4g}m.")
caption = ("Free stream velocity ($U_\\infty$) per grid resolution "
"with computational cells and error against value at zero grid "
"resolution")
report.content.add_table(u_infty_df,
index=False,
caption=caption)
fig, ax = plt.subplots(figsize=(4, 2.75), dpi=300)
u_infty_df.plot(ax=ax, x="# cells", y="error", marker='x')
plt.yscale("log")
plt.xscale("log")
plot_name = "u_infty_convergence.png"
plot_path = report_dir / plot_name
fig.savefig(plot_path, bbox_inches='tight')
# Add figure with caption
caption = ("Free stream velocity error against value at zero grid "
"resolution per grid resolution ")
report.content.add_image(plot_name, caption, width="3.64in")
report.content.add_heading("Wake Velocity", level=3)
report.content.add_text(
"This section presents the convergence study for the wake centerline "
"velocity measured 1.2 diameters downstream from the turbine "
"($U_{1.2D}$). For the final case, with grid resolution of "
f"{case.dx}m, an asymptotic ratio of "
f"{u_wake_convergence[0].asymptotic_ratio:.4g} was achieved "
"(asymptotic range is indicated by a value $\\approx 1$). The free "
f"stream velocity at zero grid resolution is {u_wake_exact:.4g}m/s. "
"The grid resolution required for a fine-grid GCI of "
f"{gci_required * 100}\% is {u_wake_gci:.4g}m.")
caption = ("Wake centerline velocity 1.2 diameters downstream "
"($U_{1.2D}$) per grid resolution with computational cells and "
"error against value at zero grid resolution")
report.content.add_table(u_wake_df,
index=False,
caption=caption)
fig, ax = plt.subplots(figsize=(4, 2.75), dpi=300)
u_wake_df.plot(ax=ax, x="# cells", y="error", marker='x')
plt.yscale("log")
plt.xscale("log")
plot_name = "u_wake_convergence.png"
plot_path = report_dir / plot_name
fig.savefig(plot_path, bbox_inches='tight')
# Add figure with caption
caption = ("Wake velocity error against value at zero grid resolution "
"per grid resolution ")
report.content.add_image(plot_name, caption, width="3.64in")
report.content.add_heading("Validation", level=3)
report.content.add_text(
"At zero grid resolution, the normalised deficit of $U_{1.2D}$, "
f"($\\gamma_{{0(1.2D)}}$) is {gamma0_sim:.4g}\%, a "
f"{gamma0_err * 100:.4g}\% error against the measured value of "
f"{gamma0_true:.4g}\%.")
report.content.add_heading("Wake Transects", level=2)
report.content.add_text(
"This section presents axial velocity transects along the turbine "
"centreline and at cross-sections along the $y$-axis. Errors are "
"reported relative to the experimental data given in [@mycek2014].")
for i, transect in enumerate(global_validate):
description = transect.attrs['description']
report.content.add_heading(description, level=3)
transect_df = transect_grouped.get_group(description).drop("Transect",
axis=1)
transect_rmse = transect_df.iloc[-1, 1]
report.content.add_text(
"The root mean square error (RMSE) for this transect at the "
f"finest grid resolution of {case.dx}m was {transect_rmse:.4g}.")
caption = ("Root mean square error (RMSE) for the normalised "
"velocity, $u^*_0$, per grid resolution.")
report.content.add_table(transect_df,
index=False,
caption=caption)
transect_true = transect.to_xarray()
major_axis = f"${transect.attrs['major_axis']}^*$"
transect_true_u0 = get_u0(transect_true, transect_true, 0.8)
transect_true_u0.plot(ax=ustar_axs[i],
x=major_axis,
label='Experiment')
ustar_axs[i].legend(loc='center left', bbox_to_anchor=(1, 0.5))
ustar_axs[i].grid()
ustar_axs[i].set_title("")
plot_name = f"transect_u0_{i}.png"
plot_path = report_dir / plot_name
ustar_figs[i].savefig(plot_path, bbox_inches='tight')
# Add figure with caption
caption = ("Normalised velocity, $u^*_0$, (m/s) per grid resolution "
"comparison. Experimental data reverse engineered from "
f"[@mycek2014, fig. {transect.attrs['figure']}].")
report.content.add_image(plot_name, caption, width="5.68in")
transect_true_gamma0 = get_gamma0(transect_true,
transect_true)
transect_true_gamma0.plot(ax=gamma_axs[i],
x=major_axis,
label='Experiment')
gamma_axs[i].legend(loc='center left', bbox_to_anchor=(1, 0.5))
gamma_axs[i].grid()
gamma_axs[i].set_title("")
plot_name = f"transect_gamma0_{i}.png"
plot_path = report_dir / plot_name
gamma_figs[i].savefig(plot_path, bbox_inches='tight')
# Add figure with caption
caption = ("Normalised velocity deficit, $\gamma_0$, (%) per grid "
"resolution comparison. Experimental data reverse "
"engineered from [@mycek2014, fig. "
f"{transect.attrs['figure']}].")
report.content.add_image(plot_name, caption, width="5.68in")
# Add section for the references
report.content.add_heading("References", level=2)
# Add report metadata
os_name = platform.system()
report.title = f"Grid Convergence Study ({os_name})"
report.date = "today"
# Write the report to file
with open(report_dir / "report.md", "wt") as f:
for line in report:
f.write(line)
# Convert file to docx or print report to stdout
try:
import pypandoc
pypandoc.convert_file(f"{report_dir / 'report.md'}",
'docx',
outputfile=f"{report_dir / 'report.docx'}",
extra_args=['-C',
f'--resource-path={report_dir}',
'--bibliography=examples.bib',
'--reference-doc=reference.docx'])
except ImportError:
print(report)
def get_d3d_bin_path():
env = dict(os.environ)
if 'D3D_BIN' in env:
root = Path(env['D3D_BIN'].replace('"', ''))
print('D3D_BIN found')
else:
root = Path("..") / "src" / "bin"
print('D3D_BIN not found')
print(f'Setting bin folder path to {root.resolve()}')
return root.resolve()
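# Example (hypothetical path): point D3D_BIN at the Delft3D binaries before
# running, e.g. on Linux:
#   D3D_BIN="/opt/delft3d/bin" python grid_convergence.py fm --threads 4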
def find_project_dir(path, case):
path = Path(path)
files = list(Path(path).glob("**/case.yaml"))
ignore_fields = ["stats_interval",
"restart_interval"]
for file in files:
test = MycekStudy.from_yaml(file)
if test.is_equal(case, ignore_fields):
return file.parent
return None
def get_unique_dir(path, max_tries=1e6):
parent = Path(path)
for _ in range(int(max_tries)):
name = uuid.uuid4().hex
child = parent / name
if not child.exists(): return child
raise RuntimeError("Could not find unique directory name")
def get_u0(da, transect, factor, case=None):
if case is not None:
da = get_reset_origin(da, (case.turb_pos_x,
case.turb_pos_y,
case.turb_pos_z))
da = get_normalised_dims(da, transect.attrs["$D$"])
da = get_normalised_data(da, factor)
return da
def get_gamma0(da, transect, case=None):
if case is not None:
da = get_reset_origin(da, (case.turb_pos_x,
case.turb_pos_y,
case.turb_pos_z))
da = get_normalised_dims(da, transect.attrs["$D$"])
da = get_normalised_data_deficit(da,
transect.attrs["$U_\\infty$"],
"$\gamma_0$")
return da
def plot_transects(case,
validate,
result,
factor,
ustar_ax,
gamma_ax):
for i, transect in enumerate(validate):
transect_true = transect.to_xarray()
# Compare transect
transect_sim = result.faces.extract_z(-1, **transect)
# Determine plot x-axis
major_axis = f"${transect.attrs['major_axis']}^*$"
# Create and save a u0 figure
transect_sim_u0 = get_u0(transect_sim["$u$"],
transect_true,
factor,
case)
transect_sim_u0.plot(ax=ustar_ax[i],
x=major_axis,
label=f'{case.dx}m')
# Create and save a gamma0 figure
transect_sim_gamma0 = get_gamma0(transect_sim["$u$"],
transect_true,
case)
transect_sim_gamma0.plot(ax=gamma_ax[i],
x=major_axis,
label=f'{case.dx}m')
def get_rmse(estimated, observed):
# NaNs in the simulated series are dropped before comparison; this assumes
# they occur at the tail, otherwise the two series fall out of alignment.
estimated = estimated[~np.isnan(estimated)]
if len(estimated) == 0: return np.nan
observed = observed[:len(estimated)]
return np.sqrt(((estimated - observed) ** 2).mean())
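# Worked example with hypothetical values: estimated = [1.0, 2.0, nan] and
# observed = [1.0, 1.0, 1.0] drops the NaN, leaving errors (0.0, 1.0), so
# the RMSE is sqrt((0 + 1) / 2) ~= 0.707.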
def get_transect_error(case, validate, result, factor, data):
for i, transect in enumerate(validate):
transect_true = transect.to_xarray()
# Compare transect
transect_sim = result.faces.extract_z(-1, **transect)
transect_sim_u0 = get_u0(transect_sim["$u$"],
transect_true,
factor,
case)
transect_true_u0 = get_u0(transect_true,
transect_true,
transect_true.attrs["$U_\infty$"],
case)
# Calculate RMS error and store
rmse = get_rmse(transect_sim_u0.values, transect_true_u0.values)
data["resolution (m)"].append(case.dx)
data["Transect"].append(transect.attrs['description'])
data["RMSE"].append(rmse)
def get_cells(case):
top = (case.x1 - case.x0) * (case.y1 - case.y0) * case.sigma
bottom = case.dx * case.dy
return top / bottom
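# Worked example with a hypothetical case: an 18 m x 6 m domain with
# dx = dy = 0.5 m and 3 sigma layers gives (18 * 6 * 3) / (0.5 * 0.5)
# = 1296 cells.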
def check_positive(value):
ivalue = int(value)
if ivalue <= 0:
msg = f"{value} is an invalid positive int value"
raise argparse.ArgumentTypeError(msg)
return ivalue
if __name__ == "__main__":
import argparse
parser = argparse.ArgumentParser()
subparsers = parser.add_subparsers(dest='MODEL',
required=True)
parent_parser = argparse.ArgumentParser(add_help=False)
parent_parser.add_argument('--experiments',
type=check_positive,
choices=range(3, 6),
default=5,
help=("number of experiments to run - defaults "
"to 5"))
parser_fm = subparsers.add_parser('fm',
parents=[parent_parser],
help='execute flexible mesh model')
parser_fm.add_argument('--threads',
type=check_positive,
default=1,
help=("number of CPU threads to utilise - defaults "
"to 1"))
parser_structured = subparsers.add_parser('structured',
parents=[parent_parser],
help='execute structured model')
args = parser.parse_args()
if "threads" not in args:
args.threads = 1
main(args.MODEL, args.experiments, args.threads)
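# Illustrative invocations (assuming this driver is saved as
# grid_convergence.py and the Delft3D binaries are available on D3D_BIN):
#
#     python grid_convergence.py fm --experiments 5 --threads 4
#     python grid_convergence.py structured --experiments 3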
| 36.003096 | 79 | 0.551896 | 2,686 | 23,258 | 4.567386 | 0.160834 | 0.018585 | 0.027388 | 0.014998 | 0.473264 | 0.422563 | 0.359798 | 0.314232 | 0.304777 | 0.272742 | 0 | 0.0184 | 0.350374 | 23,258 | 645 | 80 | 36.058915 | 0.793567 | 0.046436 | 0 | 0.264706 | 0 | 0 | 0.20645 | 0.023802 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024887 | false | 0 | 0.040724 | 0 | 0.08371 | 0.020362 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d95e400e7414be6bd28582167c1534a6fade0266 | 15,236 | py | Python | GLM/GLM_Model/GLM_Model_GP.py | ys7yoo/npglm | 98cc040fff8a861e2d7e210fef049207f1714b2a | [
"MIT"
] | 9 | 2020-11-20T17:43:36.000Z | 2021-02-26T22:18:59.000Z | GLM/GLM_Model/GLM_Model_GP.py | ys7yoo/npglm | 98cc040fff8a861e2d7e210fef049207f1714b2a | [
"MIT"
] | 1 | 2021-02-04T13:51:17.000Z | 2021-02-04T23:56:07.000Z | GLM/GLM_Model/GLM_Model_GP.py | ys7yoo/npglm | 98cc040fff8a861e2d7e210fef049207f1714b2a | [
"MIT"
] | 1 | 2020-11-22T19:36:35.000Z | 2020-11-22T19:36:35.000Z | import numpy as np
import matplotlib.pyplot as plt
import torch
import scipy
from GLM.GLM_Model import GLM_Model, PyTorchObj
from scipy.optimize import minimize, Bounds
from tqdm import tqdm
class GLM_Model_GP(GLM_Model.GLM_Model):
def __init__(self, params):
super().__init__(params)
self.kernel_prep_dict = None
self.first_time_train_this_covariate = None
self.covariate_training = None
self.total_likelihood = None
self.total_exp = None
self.total_kld = None
def add_covariate(self, covariate):
super().add_covariate(covariate)
self.register_parameter(name=f'{covariate.name}_u', param=covariate.time.time_dict['u'])
self.bound_duration_check(covariate)
def bound_duration_check(self, covariate):
filter_inducing_max = covariate.time.time_dict_t['u']().max()
filter_inducing_min = covariate.time.time_dict_t['u']().min()
inducing_bdd_max = covariate.bounds_params['u'][1]
inducing_bdd_min = covariate.bounds_params['u'][0]
if filter_inducing_max > inducing_bdd_max:
raise ValueError(f'Upper bound for {covariate.name} filter is less than the initial maximum inducing point')
if filter_inducing_min < inducing_bdd_min:
raise ValueError(f'Lower bound for {covariate.name} filter is greater than the initial minimum inducing point')
def train_variational_parameters(self, kernel_prep_dict, i):
self.kernel_prep_dict = kernel_prep_dict
self.update_time_bounds()
for covariate_name, covariate in self.covariates.items():
if covariate.etc_params['use_basis_form']:
continue
if i <= 2 or (i > 2 and i % 2 == 0):
params_to_optimize = [param for param in self.state_dict().keys() if (param.startswith(covariate_name) and
not (param.endswith('_hyper'))) and not (param.endswith('_u'))]
else:
params_to_optimize = [param for param in self.state_dict().keys() if (param.startswith(covariate_name) and
not (param.endswith('_hyper')))]
params_to_optimize.append('baseline')
for name, param in self.named_parameters():
if name not in params_to_optimize:
param.requires_grad = False
else:
param.requires_grad = True
self.update_covariate_gp_objects()
self.set_training_parameters(params_to_optimize)
# self.optimizer = torch.optim.LBFGS(self.training_parameters, lr=1, history_size=10, max_iter=self.params.gp_variational_iter, line_search_fn='strong_wolfe')
# optimizer_closure = self.nll_closure()
self.first_time_train_this_covariate = True
self.covariate_training = covariate_name
self.total_likelihood = torch.zeros(1, dtype=self.params.torch_d_type)
self.total_exp = torch.zeros(self.y.shape[0], dtype=self.params.torch_d_type)
self.total_kld = torch.zeros(1, dtype=self.params.torch_d_type)
maxiter = self.params.gp_variational_iter
with tqdm(total=maxiter) as pbar:
def verbose(xk):
pbar.update(1)
obj = PyTorchObj.PyTorchObjective(self, params_to_optimize, self.scipy_closure)
xL = scipy.optimize.minimize(obj.fun, obj.x0, method='TNC', jac=obj.jac, callback=verbose,
options={'gtol': 1e-6, 'disp': True,
'maxiter': maxiter})
self.update_covariate_design_matrices()
self.update_time_bounds()
print('done')
def add_noise_parameter(self):
for covariate_name, covariate in self.covariates.items():
if covariate.etc_params['use_basis_form']:
continue
covariate.add_noise_param(self)
def train_hyperparameters(self, kernel_prep_dict, i):
self.kernel_prep_dict = kernel_prep_dict
self.update_gp_param_bounds()
if i > 4:
self.add_noise_parameter()
for covariate_name, covariate in self.covariates.items():
if covariate.etc_params['use_basis_form']:
continue
params_to_optimize = [param for param in self.state_dict().keys() if (param.startswith(covariate_name) and
param.endswith('_hyper'))]
for name, param in self.named_parameters():
if name not in params_to_optimize:
param.requires_grad = False
else:
param.requires_grad = True
# params_to_optimize = [param for param in self.state_dict().keys() if (not param.startswith('History') and
# param.endswith('_hyper'))]
self.update_covariate_gp_objects()
self.set_training_parameters(params_to_optimize)
# self.optimizer = torch.optim.LBFGS(self.training_parameters, lr=0.3, history_size=5, max_iter=self.params.gp_hyperparameter_iter, line_search_fn='strong_wolfe')
self.first_time_train_this_covariate = True
self.covariate_training = covariate_name
self.total_likelihood = torch.zeros(1, dtype=self.params.torch_d_type)
self.total_exp = torch.zeros(self.y.shape[0], dtype=self.params.torch_d_type)
self.total_kld = torch.zeros(1, dtype=self.params.torch_d_type)
# optimizer_closure = self.nll_closure_hyper()
# self.zero_grad()
# print(self.optimizer.step(optimizer_closure))
maxiter = self.params.gp_hyperparameter_iter
with tqdm(total=maxiter) as pbar:
def verbose(xk):
pbar.update(1)
obj = PyTorchObj.PyTorchObjective(self, params_to_optimize, self.scipy_closure)
xL = scipy.optimize.minimize(obj.fun, obj.x0, method='TNC', jac=obj.jac, callback=verbose,
options={'gtol': 1e-6, 'disp': True,
'maxiter': maxiter})
print('done')
def scipy_closure(self):
self.zero_grad()
# TODO
self.update_covariate_gp_objects(update_all=False)
loss = self.get_nlog_likelihood()
return loss
def nll_closure(self):
def closure():
self.optimizer.zero_grad()
# TODO
self.update_covariate_gp_objects(update_all=False)
loss = self.get_nlog_likelihood()
loss.backward()
return loss
return closure
def nll_closure_hyper(self):
def closure():
self.optimizer.zero_grad()
# TODO
self.update_covariate_gp_objects(update_all=False)
loss = self.get_nlog_likelihood()
loss.backward()
return loss
return closure
def update_covariate_gp_objects(self, update_all=True):
if update_all:
with torch.no_grad():
for covariate_name, covariate in self.covariates.items():
covariate.gp_obj.update_kernels()
covariate.gp_obj.compute_needed_chol_and_inv(self.kernel_prep_dict)
self.zero_grad()
else:
self.covariates[self.covariate_training].gp_obj.update_kernels()
self.covariates[self.covariate_training].gp_obj.compute_needed_chol_and_inv(self.kernel_prep_dict)
def update_gp_param_bounds(self):
for covariate_name, covariate in self.covariates.items():
covariate.update_gp_param_bounds()
def update_time_bounds(self):
for covariate_name, covariate in self.covariates.items():
covariate.time.update_with_new_bounds('u')
def update_covariate_design_matrices(self):
for covariate_name, covariate in self.covariates.items():
covariate.update_design_matrix()
def get_nlog_likelihood(self, optimize_hyper=False):
total_likelihood = torch.zeros(1, dtype=self.params.torch_d_type)
total_exp = torch.zeros(self.y.shape[0], dtype=self.params.torch_d_type)
total_kld = torch.zeros(1, dtype=self.params.torch_d_type)
for covariate_name, cov in self.covariates.items():
if covariate_name != self.covariate_training and not self.first_time_train_this_covariate:
continue
ll, e_arg, gaussian_term = cov.get_log_likelihood_terms()
total_likelihood += self.y @ ll
total_exp += e_arg
total_kld += gaussian_term
if covariate_name != self.covariate_training and self.first_time_train_this_covariate:
self.total_likelihood += self.y @ ll
self.total_exp += e_arg
self.total_kld += gaussian_term
if self.first_time_train_this_covariate:
total_exp = torch.sum(torch.exp(total_exp + self.baseline * torch.ones(self.y.shape[0], dtype=self.params.torch_d_type)))
total_likelihood = total_likelihood + self.y @ (self.baseline * torch.ones(self.y.shape[0], dtype=self.params.torch_d_type))
nll = -1 * (total_likelihood - self.params.delta * total_exp + total_kld)
self.first_time_train_this_covariate = False
else:
total_exp = torch.sum(torch.exp(total_exp + self.total_exp + self.baseline * torch.ones(self.y.shape[0], dtype=self.params.torch_d_type)))
total_likelihood = total_likelihood + self.total_likelihood + self.y @ (self.baseline * torch.ones(self.y.shape[0], dtype=self.params.torch_d_type))
nll = -1 * (total_likelihood - self.params.delta * total_exp + total_kld + self.total_kld)
return nll
def get_nats_per_bin(self, y, exp_arg):
lambda_0 = torch.sum(y) / (y.shape[0] * self.params.delta)
nats_per_bin = y * exp_arg - self.params.delta * torch.exp(exp_arg)
nats_per_bin = nats_per_bin - (y * torch.log(lambda_0) - self.params.delta * lambda_0 * torch.ones_like(y, dtype=self.params.torch_d_type))
# nats_per_bin = nats_per_bin - (y * np.log(lambda_0) - self.params.delta * lambda_0 * np.ones_like(y))
total_num_spikes = torch.sum(y)
nll_test_mean = torch.sum(nats_per_bin) / total_num_spikes
return nll_test_mean
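# Reading of the metric above: it is the per-spike log-likelihood gain over
# a homogeneous Poisson baseline with rate lambda_0 = sum(y) / (T * delta),
# i.e. sum_t ([y_t f_t - delta e^{f_t}] - [y_t log(lambda_0) - delta lambda_0])
# divided by the total spike count, so it is reported in nats per spike
# (here f_t is the log-rate exp_arg).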
def get_loss(self):
total_likelihood = torch.zeros(1, dtype=self.params.torch_d_type)
total_exp = torch.zeros(self.y.shape[0], dtype=self.params.torch_d_type)
for covariate_name, cov in self.covariates.items():
ll, e_arg = cov.loss()
total_likelihood += self.y @ ll
total_exp += e_arg
total_exp = (total_exp + self.baseline * torch.ones(self.y.shape[0], dtype=self.params.torch_d_type))
total_likelihood = total_likelihood + self.y @ (self.baseline * torch.ones(self.y.shape[0], dtype=self.params.torch_d_type))
# The raw negative log-likelihood is superseded here: the reported loss is
# the nats-per-bin metric relative to a homogeneous Poisson baseline.
loss = self.get_nats_per_bin(self.y, total_exp)
return loss
def get_test_loss(self):
total_likelihood = torch.zeros(1, dtype=self.params.torch_d_type)
total_exp = torch.zeros(self.y_test.shape[0], dtype=self.params.torch_d_type)
for covariate_name, cov in self.covariates.items():
ll, e_arg = cov.test_loss()
total_likelihood += self.y_test @ ll
total_exp += e_arg
total_exp = (total_exp + self.baseline * torch.ones(self.y_test.shape[0], dtype=self.params.torch_d_type))
total_likelihood = total_likelihood + self.y_test @ (self.baseline * torch.ones(self.y_test.shape[0], dtype=self.params.torch_d_type))
# As in get_loss, the reported test loss is the nats-per-bin metric.
loss = self.get_nats_per_bin(self.y_test, total_exp)
return loss
def plot_covariates(self, evolution_df_dict, timer_obj):
timer_obj.time_waste_start()
with torch.no_grad():
nll = self.get_loss()
nll_test = self.get_test_loss()
plt.style.use("ggplot")
fig, axs = plt.subplots(2, int(np.ceil(len(self.covariates.keys())/2)), figsize=(3*len(self.covariates.keys()), 10))
axs = axs.flatten()
for dx, (name, covariate) in enumerate(self.covariates.items()):
if name == 'History':
axs[dx].set_ylim([-7, 2])
plot_mean, plot_std, plot_time, entire_mean, entire_std, entire_time = self.covariates[name].get_values_to_plot()
plot_time, plot_mean, plot_std = zip(*sorted(zip(plot_time, plot_mean, plot_std)))
plot_time = np.array(plot_time)
plot_mean = np.array(plot_mean)
plot_std = np.array(plot_std)
axs[dx].plot(plot_time, plot_mean, label='posterior mean', color='tomato')
axs[dx].fill_between(plot_time, plot_mean - 2 * plot_std, plot_mean + 2 * plot_std, alpha=0.3, color='salmon')
if not covariate.etc_params['use_basis_form']:
axs[dx].plot(self.covariates[name].time.time_dict_t['u']().data.detach().numpy(),
np.zeros(self.covariates[name].time.time_dict['u'].shape[0]),
'o', color='orange', label='inducing points')
axs[dx].axhline(y=0, linestyle='--', zorder=0)
axs[dx].axvline(x=0, linestyle='--', zorder=0)
axs[dx].set_title(name)
axs[dx].legend()
ev_dx = evolution_df_dict[name].shape[0]
evolution_df_dict[name].at[ev_dx, 'plot_mean'] = np.copy(plot_mean)
evolution_df_dict[name].at[ev_dx, 'plot_2std'] = np.copy(2 * plot_std)
evolution_df_dict[name].at[ev_dx, 'plot_time'] = np.copy(plot_time)
evolution_df_dict[name].at[ev_dx, 'entire_mean'] = np.copy(entire_mean)
evolution_df_dict[name].at[ev_dx, 'entire_2std'] = np.copy(2 * entire_std)
evolution_df_dict[name].at[ev_dx, 'entire_time'] = np.copy(entire_time)
evolution_df_dict[name].at[ev_dx, 'nll'] = np.copy(nll.data.detach().numpy())
evolution_df_dict[name].at[ev_dx, 'nll_test'] = np.copy(nll_test.data.detach().numpy())
timer_obj.time_waste_end()
evolution_df_dict[name].at[ev_dx, 'time_so_far'] = timer_obj.get_time()
timer_obj.time_waste_start()
evolution_df_dict[name].to_pickle(f'{self.params.gp_ev_path}_{name}')
plt.subplots_adjust(hspace=1.0)
plt.savefig(self.params.gp_filter_plot_path, dpi=300)
print(f'nll: {nll_test.data.detach().numpy()}')
plt.show()
timer_obj.time_waste_end()
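# Minimal fitting-loop sketch (hypothetical params, covariate objects and
# kernel_prep_dict; the real setup lives outside this module):
#
#     model = GLM_Model_GP(params)
#     for cov in covariates:
#         model.add_covariate(cov)
#     for i in range(n_rounds):
#         model.train_variational_parameters(kernel_prep_dict, i)
#         model.train_hyperparameters(kernel_prep_dict, i)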
| 46.169697 | 174 | 0.616894 | 1,959 | 15,236 | 4.509954 | 0.127106 | 0.044143 | 0.037351 | 0.049802 | 0.68987 | 0.617091 | 0.559932 | 0.535031 | 0.498585 | 0.485456 | 0 | 0.007089 | 0.277829 | 15,236 | 329 | 175 | 46.31003 | 0.795874 | 0.051523 | 0 | 0.448133 | 0 | 0 | 0.03686 | 0.004365 | 0 | 0 | 0 | 0.00304 | 0 | 1 | 0.091286 | false | 0 | 0.029046 | 0 | 0.161826 | 0.012448 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d96094c005965d2c5403a91fbbccf6c6f031c21a | 4,303 | py | Python | src/coreclr/scripts/antigen_unique_issues.py | KirillOsenkov/runtime | 11742903dcc40a55e8688a1c61291459215f8ed0 | [
"MIT"
] | 1 | 2021-06-18T04:59:29.000Z | 2021-06-18T04:59:29.000Z | src/coreclr/scripts/antigen_unique_issues.py | KirillOsenkov/runtime | 11742903dcc40a55e8688a1c61291459215f8ed0 | [
"MIT"
] | 1 | 2021-11-11T02:02:54.000Z | 2021-11-13T00:05:50.000Z | src/coreclr/scripts/antigen_unique_issues.py | KirillOsenkov/runtime | 11742903dcc40a55e8688a1c61291459215f8ed0 | [
"MIT"
] | 1 | 2021-12-03T00:19:45.000Z | 2021-12-03T00:19:45.000Z | #!/usr/bin/env python3
#
## Licensed to the .NET Foundation under one or more agreements.
## The .NET Foundation licenses this file to you under the MIT license.
#
##
# Title: antigen_unique_issues.py
#
# Notes:
#
# Script to identify unique issues from all partitions and print them on console.
#
################################################################################
################################################################################
# sys is also re-exported by the wildcard import below, but import it
# explicitly so the sys.exit() call at the bottom does not depend on that.
import sys
import argparse
import os
from os import walk
from coreclr_arguments import *
import re
parser = argparse.ArgumentParser(description="description")
parser.add_argument("-issues_directory", help="Path to issues directory")
unique_issue_dir_pattern = re.compile(r"\*\*\*\* .*UniqueIssue\d+")
assertion_patterns = [re.compile(r"Assertion failed '(.*)' in '.*' during '(.*)'"),
re.compile(r"Assert failure\(PID \d+ \[0x[0-9a-f]+], Thread: \d+ \[0x[0-9a-f]+]\):(.*)")]
def setup_args(args):
""" Setup the args.
Args:
args (ArgParse): args parsed by arg parser
Returns:
args (CoreclrArguments)
"""
coreclr_args = CoreclrArguments(args, require_built_core_root=False, require_built_product_dir=False,
require_built_test_dir=False, default_build_type="Checked")
coreclr_args.verify(args,
"run_configuration",
lambda unused: True,
"Unable to set run_configuration")
coreclr_args.verify(args,
"issues_directory",
lambda issues_directory: os.path.isdir(issues_directory),
"issues_directory doesn't exist")
return coreclr_args
def print_unique_issues_summary(issues_directory):
"""Merge issues-summary-*-PartitionN.txt files from each partitions
and print unique issues
Args:
issues_directory (string): Issues directory
Returns:
Number of issues found
"""
issues_found = 0
unique_issues_all_partitions = {}
for file_path, dirs, files in walk(issues_directory, topdown=True):
for file_name in files:
if not file_name.startswith("issues-summary-") or "Partition" not in file_name:
continue
issues_summary_file = os.path.join(file_path, file_name)
partition_name = file_path.split(os.sep)[-1]
add_header = True
unique_issues = []
with open(issues_summary_file, 'r') as sf:
contents = sf.read()
unique_issues = list(filter(None, re.split(unique_issue_dir_pattern, contents)))
# Iterate over all unique issues of this partition
for unique_issue in unique_issues:
# Find the matching assertion message
for assertion_pattern in assertion_patterns:
issue_match = re.search(assertion_pattern, unique_issue)
if issue_match is not None:
assert_string = " ".join(issue_match.groups())
# Check if a previous partition has already seen this assert
if assert_string not in unique_issues_all_partitions:
unique_issues_all_partitions[assert_string] = unique_issue
issues_found += 1
if add_header:
print("%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%% {} %%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%%".format(partition_name))
add_header = False
print(unique_issue.strip())
print("------------------------------------")
break
print("===== Found {} unique issues.".format(issues_found))
return issues_found
def main(main_args):
"""Main entrypoint
Args:
main_args ([type]): Arguments to the script
"""
coreclr_args = setup_args(main_args)
issues_directory = coreclr_args.issues_directory
issues_found = print_unique_issues_summary(issues_directory)
return 1 if issues_found > 0 else 0
if __name__ == "__main__":
args = parser.parse_args()
sys.exit(main(args))
| 36.466102 | 166 | 0.563096 | 455 | 4,303 | 5.087912 | 0.349451 | 0.067387 | 0.032829 | 0.032397 | 0.039741 | 0.033693 | 0 | 0 | 0 | 0 | 0 | 0.004225 | 0.284918 | 4,303 | 117 | 167 | 36.777778 | 0.748131 | 0.182198 | 0 | 0.033333 | 0 | 0.016667 | 0.152267 | 0.047488 | 0 | 0 | 0 | 0 | 0.116667 | 1 | 0.05 | false | 0 | 0.083333 | 0 | 0.183333 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d96292f6031d95ffe48e989555808517308f5c23 | 35,592 | py | Python | titer_model/implementation-nextstrain-augur/base/process.py | blab/dengue | 5eacc47fbd77c59e7342d5be4aa81f7d3b4ff0bf | [
"CC-BY-4.0",
"MIT"
] | 4 | 2019-03-31T22:03:48.000Z | 2020-06-16T21:04:24.000Z | titer_model/implementation-nextstrain-augur/base/process.py | emmahodcroft/dengue-antigenic-dynamics | 5eacc47fbd77c59e7342d5be4aa81f7d3b4ff0bf | [
"CC-BY-4.0",
"MIT"
] | 4 | 2018-10-12T02:13:10.000Z | 2019-07-24T02:44:53.000Z | titer_model/implementation-nextstrain-augur/base/process.py | emmahodcroft/dengue-antigenic-dynamics | 5eacc47fbd77c59e7342d5be4aa81f7d3b4ff0bf | [
"CC-BY-4.0",
"MIT"
] | 5 | 2018-09-10T23:14:09.000Z | 2020-12-27T20:57:34.000Z | from __future__ import division, print_function
import argparse
import sys, os, time, gzip, glob
from collections import defaultdict
from base.config import combine_configs
from base.io_util import make_dir, remove_dir, tree_to_json, write_json, myopen
from base.sequences_process import sequence_set
from base.utils import num_date, save_as_nexus, parse_date
from base.tree import tree
# from base.fitness_model import fitness_model
from base.frequencies import alignment_frequencies, tree_frequencies, make_pivots
from base.auspice_export import export_metadata_json, export_frequency_json, export_tip_frequency_json
import numpy as np
from datetime import datetime
import json
from pdb import set_trace
from base.logger import logger
from Bio import SeqIO
from Bio import AlignIO
import cPickle as pickle
def collect_args():
parser = argparse.ArgumentParser(
description = "Process (prepared) JSON(s)",
formatter_class=argparse.ArgumentDefaultsHelpFormatter
)
parser.add_argument('-j', '--json', help="prepared JSON to process")
parser.add_argument('--clean', default=False, action='store_true', help="clean build (remove previous checkpoints)")
parser.add_argument('--tree_method', type=str, default='raxml', choices=["fasttree", "raxml", "iqtree"], help="specify the method used to build the tree")
parser.add_argument('--no_tree', action='store_true', help="do not build a tree")
return parser
class process(object):
"""process influenza virus sequences in mutliple steps to allow visualization in browser
* filtering and parsing of sequences
* alignment
* tree building
* frequency estimation of clades and mutations
* export as json
"""
def __init__(self, config):
""" check config file, make necessary directories, set up logger """
super(process, self).__init__()
self.config = combine_configs("process", config)
# try:
# assert(os.path.basename(os.getcwd()) == self.config["dir"])
# except AssertionError:
# print("Run this script from within the {} directory".format(self.config["dir"]))
# sys.exit(2)
for p in self.config["output"].values():
if not os.path.isdir(p):
os.makedirs(p)
self.log = logger(self.config["output"]["data"], False)
# parse the JSON into different data bits
try:
with open(self.config["in"], 'r') as fh:
data = json.load(fh)
except Exception as e:
self.log.fatal("Error loading JSON. Error: {}".format(e))
self.info = data["info"]
if "time_interval" in data["info"]:
self.info["time_interval"] = [datetime.strptime(x, '%Y-%m-%d').date()
for x in data["info"]["time_interval"]]
self.info["lineage"] = data["info"]["lineage"]
if 'leaves' in data:
self.tree_leaves = data['leaves']
try:
self.colors = data["colors"]
except KeyError:
self.log.notify("* colours have not been set")
self.colors = False
try:
self.lat_longs = data["lat_longs"]
except KeyError:
self.log.notify("* latitude & longitudes have not been set")
self.lat_longs = False
# backwards compatability - set up file_dumps (need to rewrite sometime)
# self.sequence_fname = self.input_data_path+'.fasta'
self.file_dumps = {}
self.output_path = os.path.join(self.config["output"]["data"], self.info["prefix"])
self.file_dumps['seqs'] = self.output_path + '_sequences.pkl.gz'
self.file_dumps['tree'] = self.output_path + '_tree.newick'
self.file_dumps['nodes'] = self.output_path + '_nodes.pkl.gz'
if self.config["clean"] == True:
self.log.notify("Removing intermediate files for a clean build")
for f in glob.glob(self.output_path+"*"):
os.remove(f)
if "reference" in data:
self.seqs = sequence_set(self.log, data["sequences"], data["reference"], self.info["date_format"])
else:
self.log.fatal("No reference provided. Cannot continue.")
# self.seqs = sequence_set(self.log, data["sequences"], False, self.info["date_format"])
# backward compatability
self.reference_seq = self.seqs.reference_seq
self.proteins = self.seqs.proteins
for trait in self.info["traits_are_dates"]:
self.seqs.convert_trait_to_numerical_date(trait, self.info["date_format"])
# Prepare titers if they are available.
if "titers" in data:
self.log.debug("Loaded %i titer measurements" % len(data["titers"]))
# Convert titer dictionary indices from JSON-compatible strings back
# to tuples.
self.titers = {eval(key): value
for key, value in data["titers"].iteritems()}
## usefull flag to set (from pathogen run file) to disable restoring
self.try_to_restore = True
def dump(self):
'''
write the current state to file
'''
self.log.warn("unsure if dump() works")
from cPickle import dump
from Bio import Phylo
for attr_name, fname in self.file_dumps.iteritems():
if hasattr(self,attr_name):
print("dumping",attr_name)
#if attr_name=='seqs': self.seqs.all_seqs = None
with myopen(fname, 'wb') as ofile:
if attr_name=='nodes':
continue
elif attr_name=='tree':
#biopython trees don't pickle well, write as newick + node info
self.tree.dump(fname, self.file_dumps['nodes'])
else:
dump(getattr(self,attr_name), ofile, -1)
def load(self, debug=False):
'''
reconstruct instance from files
'''
self.log.warn("unsure if load() works")
from cPickle import load
for attr_name, fname in self.file_dumps.iteritems():
if attr_name=='tree':
continue
if os.path.isfile(fname):
with myopen(fname, 'r') as ifile:
print('loading',attr_name,'from file',fname)
setattr(self, attr_name, load(ifile))
tree_name = self.file_dumps['tree']
if os.path.isfile(tree_name):
if os.path.isfile(self.file_dumps['nodes']):
node_file = self.file_dumps['nodes']
else:
node_file = None
# load tree, build if no tree file available
self.build_tree(tree_name, node_file, root='none', debug=debug)
def align(self, codon_align=False, debug=False, fill_gaps=False):
'''
(1) Align sequences, remove non-reference insertions
NB step 1 is skipped if a valid aln file is found
(2) Translate
(3) Write to multi-fasta
CODON ALIGNMENT IS NOT IMPLEMENTED
'''
fnameStripped = self.output_path + "_aligned_stripped.mfa"
if self.try_to_restore:
self.seqs.try_restore_align_from_disk(fnameStripped)
if not hasattr(self.seqs, "aln"):
if codon_align:
self.seqs.codon_align()
else:
self.seqs.align(self.config["subprocess_verbosity_level"], debug=debug)
# need to redo everything
self.try_to_restore = False
self.seqs.strip_non_reference()
if fill_gaps:
self.seqs.make_gaps_ambiguous()
else:
self.seqs.make_terminal_gaps_ambiguous()
AlignIO.write(self.seqs.aln, fnameStripped, 'fasta')
if not self.seqs.reference_in_dataset:
self.seqs.remove_reference_from_alignment()
# if outgroup is not None:
# self.seqs.clock_filter(n_iqd=3, plot=False, max_gaps=0.05, root_seq=outgroup)
self.seqs.translate() # creates self.seqs.translations
# save additional translations - disabled for now
# for name, msa in self.seqs.translations.iteritems():
# SeqIO.write(msa, self.output_path + "_aligned_" + name + ".mfa", "fasta")
def get_pivots_via_spacing(self):
try:
time_interval = self.info["time_interval"]
assert("pivot_spacing" in self.config)
except AssertionError:
self.log.fatal("Cannot space pivots without prividing \"pivot_spacing\" in the config")
except KeyError:
self.log.fatal("Cannot space pivots without a time interval in the prepared JSON")
return np.arange(time_interval[1].year+(time_interval[1].month-1)/12.0,
time_interval[0].year+time_interval[0].month/12.0,
self.config["pivot_spacing"])
def restore_mutation_frequencies(self):
if self.try_to_restore:
try:
with open(self.output_path + "_mut_freqs.pickle", 'rb') as fh:
pickle_seqs = pickle.load(fh)
assert(pickle_seqs == set(self.seqs.seqs.keys()))
pickled = pickle.load(fh)
assert(len(pickled) == 3)
self.mutation_frequencies = pickled[0]
self.mutation_frequency_confidence = pickled[1]
self.mutation_frequency_counts = pickled[2]
self.log.notify("Successfully restored mutation frequencies")
return
except IOError:
pass
except AssertionError as err:
self.log.notify("Tried to restore mutation frequencies but failed: {}".format(err))
#no need to remove - we'll overwrite it shortly
self.mutation_frequencies = {}
self.mutation_frequency_confidence = {}
self.mutation_frequency_counts = {}
def estimate_mutation_frequencies(self,
inertia=0.0,
min_freq=0.01,
stiffness=20.0,
pivots=24,
region="global",
include_set={}):
'''
calculate the frequencies of mutation in a particular region
currently the global frequencies should be estimated first
because this defines the set of positions at which frequencies in
other regions are estimated.
'''
if not hasattr(self.seqs, 'aln'):
self.log.warn("Align sequences first")
return
def filter_alignment(aln, region=None, lower_tp=None, upper_tp=None):
from Bio.Align import MultipleSeqAlignment
tmp = aln
if region is not None:
if type(region)==str:
tmp = [s for s in tmp if s.attributes['region']==region]
elif type(region)==list:
tmp = [s for s in tmp if s.attributes['region'] in region]
else:
self.log.warn("region must be string or list")
return
if lower_tp is not None:
tmp = [s for s in tmp if np.mean(s.attributes['num_date'])>=lower_tp]
if upper_tp is not None:
tmp = [s for s in tmp if np.mean(s.attributes['num_date'])<upper_tp]
return MultipleSeqAlignment(tmp)
if not hasattr(self, 'pivots'):
tps = np.array([np.mean(x.attributes['num_date']) for x in self.seqs.seqs.values()])
self.pivots=make_pivots(pivots, tps)
# else:
# self.log.notify('estimate_mutation_frequencies: using self.pivots')
if not hasattr(self, 'mutation_frequencies'):
self.restore_mutation_frequencies()
# loop over nucleotide sequences and translations and calcuate
# region specific frequencies of mutations above a certain threshold
if type(region)==str:
region_name = region
region_match = region
elif type(region)==tuple:
region_name=region[0]
region_match=region[1]
else:
self.log.warn("region must be string or tuple")
return
# loop over different alignment types
for prot, aln in [('nuc',self.seqs.aln)] + self.seqs.translations.items():
if (region_name,prot) in self.mutation_frequencies:
self.log.notify("Skipping Frequency Estimation for region \"{}\", protein \"{}\"".format(region_name, prot))
continue
self.log.notify("Starting Frequency Estimation for region \"{}\", protein \"{}\"".format(region_name, prot))
# determine set of positions that have to have a frequency calculated
if prot in include_set:
tmp_include_set = [x for x in include_set[prot]]
else:
tmp_include_set = []
tmp_aln = filter_alignment(aln, region = None if region=='global' else region_match,
lower_tp=self.pivots[0], upper_tp=self.pivots[-1])
if ('global', prot) in self.mutation_frequencies:
tmp_include_set += set([pos for (pos, mut) in self.mutation_frequencies[('global', prot)]])
time_points = [np.mean(x.attributes['num_date']) for x in tmp_aln]
if len(time_points)==0:
self.log.notify('no samples in region {} (protein: {})'.format(region_name, prot))
self.mutation_frequency_counts[region_name]=np.zeros_like(self.pivots)
continue
# instantiate alignment frequency
aln_frequencies = alignment_frequencies(tmp_aln, time_points, self.pivots,
ws=max(2,len(time_points)//10),
inertia=inertia,
stiffness=stiffness, method='SLSQP')
if prot=='nuc': # if this is a nucleotide alignment, set all non-canonical states to N
A = aln_frequencies.aln
# keep gaps as-is: compare the array A, not the literal 'A', against '-'
A[~((A=='A')|(A=='C')|(A=='G')|(A=='T')|(A=='-'))] = 'N'
aln_frequencies.mutation_frequencies(min_freq=min_freq, include_set=tmp_include_set,
ignore_char='N' if prot=='nuc' else 'X')
self.mutation_frequencies[(region_name,prot)] = aln_frequencies.frequencies
self.mutation_frequency_confidence[(region_name,prot)] = aln_frequencies.calc_confidence()
self.mutation_frequency_counts[region_name]=aln_frequencies.counts
self.log.notify("Saving mutation frequencies (pickle)")
with open(self.output_path + "_mut_freqs.pickle", 'wb') as fh:
pickle.dump(set(self.seqs.seqs.keys()), fh, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump((self.mutation_frequencies,
self.mutation_frequency_confidence,
self.mutation_frequency_counts), fh, protocol=pickle.HIGHEST_PROTOCOL)
def global_frequencies(self, min_freq, average_global=False, inertia=2.0/12, stiffness=2.0*12):
# set pivots and define groups of larger regions for frequency display
pivots = self.get_pivots_via_spacing()
acronyms = set([x[1] for x in self.info["regions"] if x[1]!=""])
region_groups = {str(x):[str(y[0]) for y in self.info["regions"] if y[1] == x] for x in acronyms}
pop_sizes = {str(x):np.sum([y[-1] for y in self.info["regions"] if y[1] == x]) for x in acronyms}
total_popsize = np.sum(pop_sizes.values())
# if global frequencies are to be calculated from the set of sequences, do the following
if average_global==False:
self.estimate_mutation_frequencies(pivots=pivots, min_freq=min_freq,
inertia=np.exp(-inertia), stiffness=stiffness)
for region in region_groups.iteritems():
self.estimate_mutation_frequencies(region=region, min_freq=min_freq,
inertia=np.exp(-inertia), stiffness=stiffness)
return
# ELSE:
# if global frequences are to be calculated from a weighted average of regional ones
# the following applies:
# determine sites whose frequencies need to be computed in all regions
self.seqs.diversity_statistics()
include_set = {}
for prot in ['nuc'] + self.seqs.translations.keys():
include_set[prot] = np.where(np.sum(self.seqs.af[prot][:-2]**2, axis=0)
<np.sum(self.seqs.af[prot][:-2], axis=0)**2-min_freq)[0]
# estimate frequencies in individual regions
for region in region_groups.iteritems():
self.estimate_mutation_frequencies(pivots=pivots, region=region, min_freq=min_freq, include_set=include_set,
inertia=np.exp(-inertia), stiffness=stiffness)
# perform a weighted average of frequencies across the regions to determine
# global frequencies.
# First: compute the weights accounting for seasonal variation and populations size
weights = {region: np.array(self.mutation_frequency_counts[region], dtype = float)
for region in acronyms}
for region in weights: # map maximal count across time to 1.0, weigh by pop size
weights[region] = np.maximum(0.1, weights[region]/weights[region].max())
weights[region]*=pop_sizes[region]
# compute the normalizer
total_weight = np.sum([weights[region] for region in acronyms],axis=0)
# average regional frequencies to calculate global
for prot in ['nuc'] + self.seqs.translations.keys():
gl_freqs, gl_counts, gl_confidence = {}, {}, {}
all_muts = set()
for region in acronyms: # list all unique mutations
all_muts.update(self.mutation_frequencies[(region, prot)].keys())
for mut in all_muts: # compute the weighted average
gl_freqs[mut] = np.sum([self.mutation_frequencies[(region, prot)][mut]*weights[region] for region in acronyms
if mut in self.mutation_frequencies[(region, prot)]], axis=0)/total_weight
gl_confidence[mut] = np.sqrt(np.sum([self.mutation_frequency_confidence[(region, prot)][mut]**2*weights[region]
for region in acronyms
if mut in self.mutation_frequencies[(region, prot)]], axis=0)/total_weight)
gl_counts = np.sum([self.mutation_frequency_counts[region] for region in acronyms
if mut in self.mutation_frequencies[(region, prot)]], axis=0)
# save in mutation_frequency data structure
self.mutation_frequencies[("global", prot)] = gl_freqs
self.mutation_frequency_counts["global"] = gl_counts
self.mutation_frequency_confidence[("global", prot)] = gl_confidence
def save_tree_frequencies(self):
"""
Save tree frequencies to a pickle on disk.
"""
self.log.notify("Saving tree frequencies (pickle)")
with open(self.output_path + "_tree_freqs.pickle", 'wb') as fh:
pickle.dump(set(self.seqs.seqs.keys()), fh, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump((self.tree_frequencies,
self.tree_frequency_confidence,
self.tree_frequency_counts,
self.pivots), fh, protocol=pickle.HIGHEST_PROTOCOL)
def restore_tree_frequencies(self):
try:
assert(self.try_to_restore == True)
with open(self.output_path + "_tree_freqs.pickle", 'rb') as fh:
pickle_seqs = pickle.load(fh)
assert(pickle_seqs == set(self.seqs.seqs.keys()))
pickled = pickle.load(fh)
assert(len(pickled) == 4)
self.tree_frequencies = pickled[0]
self.tree_frequency_confidence = pickled[1]
self.tree_frequency_counts = pickled[2]
self.pivots = pickled[3]
self.log.notify("Successfully restored tree frequencies")
return
except IOError:
pass
except AssertionError as err:
self.log.notify("Tried to restore tree frequencies but failed: {}".format(err))
#no need to remove - we'll overwrite it shortly
self.tree_frequencies = {}
self.tree_frequency_confidence = {}
self.tree_frequency_counts = {}
def estimate_tree_frequencies(self, region='global', pivots=24, stiffness=20.0):
'''
estimate frequencies of clades in the tree, possibly region specific
'''
if not hasattr(self, 'tree_frequencies'):
self.restore_tree_frequencies()
if region in self.tree_frequencies:
self.log.notify("Skipping tree frequency estimation for region: %s" % region)
return
if not hasattr(self, 'pivots'):
tps = np.array([np.mean(x.attributes['num_date']) for x in self.seqs.seqs.values()])
self.pivots=make_pivots(pivots, tps)
self.log.notify('Estimate tree frequencies for %s: using self.pivots' % (region))
# Omit strains sampled prior to the first pivot from frequency calculations.
if region=='global':
node_filter_func = lambda node: node.attr["num_date"] >= self.pivots[0]
else:
node_filter_func = lambda node: (node.attr['region'] == region) and (node.attr["num_date"] >= self.pivots[0])
tree_freqs = tree_frequencies(self.tree.tree, self.pivots, method='SLSQP',
node_filter = node_filter_func,
ws = max(2,self.tree.tree.count_terminals()//10),
stiffness = stiffness)
tree_freqs.estimate_clade_frequencies()
conf = tree_freqs.calc_confidence()
self.tree_frequencies[region] = tree_freqs.frequencies
self.tree_frequency_confidence[region] = conf
self.tree_frequency_counts[region] = tree_freqs.counts
self.save_tree_frequencies()
def build_tree(self):
'''
(1) instantiate a tree object (process.tree)
(2) If newick file doesn't exist or isn't valid: build a newick tree (normally RAxML)
(3) Make a TimeTree
'''
self.tree = tree(aln=self.seqs.aln, proteins=self.proteins, verbose=self.config["subprocess_verbosity_level"])
newick_file = self.output_path + ".newick"
if self.try_to_restore and os.path.isfile(newick_file):  # and self.tree.check_newick(newick_file):
self.log.notify("Newick file \"{}\" can be used to restore".format(newick_file))
else:
self.log.notify("Building newick tree.")
self.tree.build_newick(newick_file, **self.config["newick_tree_options"])
def clock_filter(self):
if self.config["clock_filter"] == False:
return
self.tree.tt.clock_filter(reroot='best', n_iqd=self.config["clock_filter"]["n_iqd"], plot=self.config["clock_filter"]["plot"])
leaves = [x for x in self.tree.tree.get_terminals()]
for n in leaves:
if n.bad_branch:
self.tree.tt.tree.prune(n)
print('pruning leaf ', n.name)
if self.config["clock_filter"]["remove_deep_splits"]:
self.tree.tt.tree.ladderize()
current_root = self.tree.tt.tree.root
if sum([x.branch_length for x in current_root])>0.1 \
and sum([x.count_terminals() for x in current_root.clades[:-1]])<5:
new_root = current_root.clades[-1]
new_root.up=False
self.tree.tt.tree.root = new_root
with open(self.output_path+"_outliers.txt", 'a') as ofile:
for x in current_root.clades[:-1]:
ofile.write("\n".join([leaf.name for leaf in x.get_terminals()])+'\n')
self.tree.tt.prepare_tree()
def timetree_setup_filter_run(self):
def try_restore():
try:
assert(os.path.isfile(self.output_path + "_timetree.new"))
assert(os.path.isfile(self.output_path + "_timetree.pickle"))
except AssertionError:
return False
self.log.notify("Attempting to restore timetree")
with open(self.output_path+"_timetree.pickle", 'rb') as fh:
pickled = pickle.load(fh)
try:
assert(self.config["timetree_options"] == pickled["timetree_options"])
assert(self.config["clock_filter"] == pickled["clock_filter_options"])
#assert(set(self.seqs.sequence_lookup.keys()) == set(pickled["original_seqs"]))
except AssertionError as e:
print(e)
self.log.warn("treetime is out of date - rerunning")
return False
# this (treetime) newick is _after_ clock filtering and remove_outliers_clades
# so these methods should not be rerun here
self.tree.tt_from_file(self.output_path + "_timetree.new", nodefile=None, root=None)
try:
self.tree.restore_timetree_node_info(pickled["nodes"])
except KeyError:
self.log.warn("treetime node info missing - rerunning")
return False
self.log.notify("TreeTime successfully restored.")
return True
if "temporal_confidence" in self.config:
self.config["timetree_options"]["confidence"] = True
self.config["timetree_options"]["use_marginal"] = True
if self.try_to_restore:
success = try_restore()
else:
success = False
if not success:
self.log.notify("Setting up TimeTree")
self.tree.tt_from_file(self.output_path + ".newick", nodefile=None, root="best")
self.log.notify("Running Clock Filter")
self.clock_filter()
self.tree.remove_outlier_clades() # this is deterministic
self.log.notify("Reconstructing Ancestral Sequences, branch lengths & dating nodes")
self.tree.timetree(**self.config["timetree_options"])
# do we ever not want to use timetree?? If so:
# self.tree.ancestral(**kwargs) instead of self.tree.timetree
self.tree.save_timetree(fprefix=self.output_path, ttopts=self.config["timetree_options"], cfopts=self.config["clock_filter"])
self.tree.add_translations()
self.tree.refine()
self.tree.layout()
def matchClades(self, clades, offset=-1):
'''
finds branches in the tree corresponding to named clades by searching for the
oldest node with a particular genotype.
- params
- clades: a dictionary with clade names as keys and lists of genoypes as values
- offset: the offset to be applied to the position specification, typically -1
to conform with counting starting at 0 as opposed to 1
"clade_annotation" is a label to a specific node in the tree that is used to hang a text label in auspice
"clade_membership" is an attribute of every node in the tree that defines clade membership, used as coloring in auspice
'''
def match(node, genotype):
return all([node.translations[gene][pos+offset]==state if gene in node.translations else node.sequence[pos+offset]==state
for gene, pos, state in genotype])
## Label root nodes for each clade as clade_annotation via clades_to_nodes
## NOTE clades_to_nodes is used in the (full) frequencies export
self.clades_to_nodes = {}
for clade_name, genotype in clades.iteritems():
matching_nodes = filter(lambda x:match(x,genotype), self.tree.tree.get_nonterminals())
matching_nodes.sort(key=lambda x:x.numdate if hasattr(x,'numdate') else x.dist2root)
if len(matching_nodes):
self.clades_to_nodes[clade_name] = matching_nodes[0]
self.clades_to_nodes[clade_name].attr['clade_annotation'] = clade_name
else:
print('matchClades: no match found for ', clade_name, genotype)
for allele in genotype:
partial_matches = filter(lambda x:match(x,[allele]), self.tree.tree.get_nonterminals())
print('Found %d partial matches for allele '%len(partial_matches), allele)
## Now preorder traverse the tree with state replacement to set the clade_membership via clade_annotation
for node in self.tree.tree.find_clades():
node.attr['clade_membership'] = 'unassigned'
ordered_clades = sorted(self.clades_to_nodes.keys(), key=lambda name: self.clades_to_nodes[name].numdate)
for clade_annotation in ordered_clades:
for node in self.clades_to_nodes[clade_annotation].find_clades(order='preorder'):
node.attr['clade_membership'] = clade_annotation
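# Example clade specification (hypothetical flu-style genotypes), where each
# genotype entry is a (gene, position, state) triple in 1-based coordinates:
#
#     clades = {"3c2.a": [("HA1", 159, "Y"), ("HA1", 225, "D")],
#               "3c3.a": [("HA1", 159, "S")]}
#     runner.matchClades(clades, offset=-1)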
def annotate_fitness(self):
"""Run the fitness prediction model and annotate the tree's nodes with fitness
values. Returns the resulting fitness model instance.
"""
if not hasattr(self, "tree_frequencies"):
self.log.warn("Could not find tree frequencies.")
return
kwargs = {
"tree": self.tree.tree,
"frequencies": self.tree_frequencies,
"time_interval": self.info["time_interval"],
"pivots": np.around(self.pivots, 2)
}
if "predictors" in self.config:
kwargs["predictor_input"] = self.config["predictors"]
if "epitope_mask" in self.config:
kwargs["epitope_masks_fname"] = self.config["epitope_mask"]
if "epitope_mask_version" in self.config:
kwargs["epitope_mask_version"] = self.config["epitope_mask_version"]
if "tolerance_mask_version" in self.config:
kwargs["tolerance_mask_version"] = self.config["tolerance_mask_version"]
if self.config["subprocess_verbosity_level"] > 0:
kwargs["verbose"] = 1
# imported locally because the module-level import is commented out above
from base.fitness_model import fitness_model
model = fitness_model(**kwargs)
model.predict()
return model
def make_control_json(self, controls):
controls_json = {}
for super_cat, fields in controls.iteritems():
cat_count = {}
for n in self.tree.tree.get_terminals():
tmp = cat_count
for field in fields:
tmp["name"] = field
if field in n.attr:
cat = n.attr[field]
else:
cat='unknown'
if cat in tmp:
tmp[cat]['count']+=1
else:
tmp[cat] = {'count':1, 'subcats':{}}
tmp = tmp[cat]['subcats']
controls_json[super_cat] = cat_count
return controls_json
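# Example controls argument (hypothetical): {"geo": ["region", "country"]}
# produces nested counts of terminal nodes per region and, within each
# region, per country, with missing attributes bucketed under "unknown".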
def auspice_export(self):
'''
export the tree, sequences, frequencies to json files for auspice visualization
'''
prefix = os.path.join(self.config["output"]["auspice"], self.info["prefix"])
indent = 2
## ENTROPY (alignment diversity) ##
if "entropy" in self.config["auspice"]["extra_jsons"]:
self.seqs.export_diversity(fname=prefix+'_entropy.json', indent=indent)
## TREE & SEQUENCES ##
if hasattr(self, 'tree') and self.tree is not None:
self.tree.export(
path = prefix,
extra_attr = self.config["auspice"]["extra_attr"] + ["muts", "aa_muts","attr", "clade"],
indent = indent,
write_seqs_json = "sequences" in self.config["auspice"]["extra_jsons"]
)
## FREQUENCIES ##
if "frequencies" in self.config["auspice"]["extra_jsons"]:
export_frequency_json(self, prefix=prefix, indent=indent)
export_tip_frequency_json(self, prefix=prefix, indent=indent)
## METADATA ##
export_metadata_json(self, prefix=prefix, indent=indent)
def run_geo_inference(self):
if self.config["geo_inference"] == False:
self.log.notify("Not running geo inference")
return
try:
kwargs = {"report_confidence": self.config["geo_inference_options"]["confidence"]}
except KeyError:
kwargs = {}
## try load pickle...
try:
assert(self.try_to_restore == True)
with open(self.output_path + "_mugration.pickle", 'rb') as fh:
options = pickle.load(fh)
restored_data = pickle.load(fh)
assert(options == self.config["geo_inference_options"])
assert(set(restored_data.keys()) == set([x.name for x in self.tree.tree.find_clades()]))
except IOError:
restored_data = False
except AssertionError as err:
restored_data = False
self.log.notify("Tried to restore mutation frequencies but failed: {}".format(err))
# only run geo inference if lat + longs are defined.
if not self.lat_longs or len(self.lat_longs)==0:
self.log.notify("no geo inference - no specified lat/longs")
return
for geo_attr in self.config["geo_inference"]:
try:
self.tree.restore_geo_inference(restored_data, geo_attr, self.config["geo_inference_options"]["confidence"])
self.log.notify("Restored geo inference for {}".format(geo_attr))
except KeyError:
try:
kwargs["root_state"] = self.config["geo_inference_options"]["root_state"][geo_attr]
except KeyError:
pass
self.log.notify("running geo inference for {} with parameters {}".format(geo_attr, kwargs))
self.tree.geo_inference(geo_attr, **kwargs)
# SAVE MUGRATION RESULTS:
attrs = set(self.tree.mugration_attrs)
try:
data = {}
for node in self.tree.tree.find_clades():
assert(len(attrs - set(node.attr.keys()))==0)
data[node.name] = {x:node.attr[x] for x in attrs}
except AssertionError:
self.log.warn("Error saving mugration data - will not be able to restore")
return
with open(self.output_path + "_mugration.pickle", 'wb') as fh:
pickle.dump(self.config["geo_inference_options"], fh, protocol=pickle.HIGHEST_PROTOCOL)
pickle.dump(data, fh, protocol=pickle.HIGHEST_PROTOCOL)
self.log.notify("Saved mugration data (pickle)")
def save_as_nexus(self):
save_as_nexus(self.tree.tree, self.output_path + "_timeTree.nex")
if __name__=="__main__":
print("This shouldn't be called as a script.")
| 46.464752 | 158 | 0.595274 | 4,243 | 35,592 | 4.841857 | 0.151544 | 0.021417 | 0.017718 | 0.006231 | 0.297849 | 0.210524 | 0.164768 | 0.138143 | 0.11692 | 0.106698 | 0 | 0.004458 | 0.300489 | 35,592 | 765 | 159 | 46.52549 | 0.820701 | 0.143515 | 0 | 0.19145 | 0 | 0 | 0.129098 | 0.009005 | 0 | 0 | 0 | 0 | 0.039033 | 1 | 0.04461 | false | 0.005576 | 0.042751 | 0.001859 | 0.130112 | 0.01487 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9629e18c5a751f1f05e83298b1aeab4711e6438 | 1,777 | py | Python | nodux_contabilidad/nodux_contabilidad/doctype/nodux_item_price/nodux_item_price.py | jessica-tandazo/nodux_contabilidad | a9f853e167160b1d883b937d2edbf354fd14d144 | [
"MIT"
] | null | null | null | nodux_contabilidad/nodux_contabilidad/doctype/nodux_item_price/nodux_item_price.py | jessica-tandazo/nodux_contabilidad | a9f853e167160b1d883b937d2edbf354fd14d144 | [
"MIT"
] | null | null | null | nodux_contabilidad/nodux_contabilidad/doctype/nodux_item_price/nodux_item_price.py | jessica-tandazo/nodux_contabilidad | a9f853e167160b1d883b937d2edbf354fd14d144 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
# Copyright (c) 2015, nodux and contributors
# For license information, please see license.txt
from __future__ import unicode_literals
import frappe
from frappe.model.document import Document
from frappe import throw, _
class NoduxItemPriceDuplicateItem(frappe.ValidationError):
# raised when an item appears more than once in the same price list
pass

class NoduxItemPrice(Document):
def validate(self):
self.validate_item()
self.validate_price_list()
self.check_duplicate_item()
self.update_price_list_details()
self.update_item_details()
def validate_item(self):
if not frappe.db.exists("Item", self.item_code):
throw(_("Item {0} not found").format(self.item_code))
def validate_price_list(self):
enabled = frappe.db.get_value("Nodux Price List", self.price_list, "enabled")
if not enabled:
throw(_("Price List {0} is disabled").format(self.price_list))
def check_duplicate_item(self):
if frappe.db.sql("""select name from `tabNodux Item Price`
where item_code=%s and price_list=%s and name!=%s""", (self.item_code, self.price_list, self.name)):
frappe.throw(_("Item {0} appears multiple times in Price List {1}").format(self.item_code, self.price_list),
NoduxItemPriceDuplicateItem)
# def update_price_list_details(self):
# self.buying, self.selling, self.currency = \
# #frappe.db.get_value("Nodux Price List", {"name": self.price_list, "enabled": 1},
# frappe.db.get_value("Nodux Price List", {"name": self.price_list},
# ["buying", "selling", "currency"])
def update_price_list_details(self):
self.buying, self.selling, self.currency = \
frappe.db.get_value("Nodux Price List", {"name": self.price_list, "enabled": 1},
["buying", "selling", "currency"])
def update_item_details(self):
self.item_name, self.item_description = frappe.db.get_value("Item",
self.item_code, ["item_name", "description"])
| 36.265306 | 111 | 0.723692 | 253 | 1,777 | 4.869565 | 0.256917 | 0.138799 | 0.073864 | 0.064935 | 0.355519 | 0.293019 | 0.252435 | 0.228084 | 0.228084 | 0.228084 | 0 | 0.00712 | 0.130557 | 1,777 | 48 | 112 | 37.020833 | 0.790291 | 0.216657 | 0 | 0 | 0 | 0 | 0.204776 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.2 | false | 0 | 0.133333 | 0 | 0.366667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d96823b574d707b8c5884043e9c5fc59a212d82c | 1,900 | py | Python | ebr_board/database/queries.py | eugene-davis/ebr-board | f592a752e17e869a6fd35ef82398f97748dbdc78 | [
"Apache-2.0"
] | null | null | null | ebr_board/database/queries.py | eugene-davis/ebr-board | f592a752e17e869a6fd35ef82398f97748dbdc78 | [
"Apache-2.0"
] | 4 | 2019-08-02T09:35:51.000Z | 2019-08-05T04:45:47.000Z | ebr_board/database/queries.py | LaudateCorpus1/ebr-board | f592a752e17e869a6fd35ef82398f97748dbdc78 | [
"Apache-2.0"
] | 1 | 2021-09-14T03:58:40.000Z | 2021-09-14T03:58:40.000Z | """
Query functions to run against ElasticSearch
"""
# pylint: disable=invalid-name
from ebr_connector.schema.build_results import BuildResults
detailed_build_info = {
"includes": [
"br_build_date_time",
"br_job_name",
"br_job_url_key",
"br_source",
"br_build_id_key",
"br_platform",
"br_product",
"br_status_key",
"br_version_key",
"br_tests_object",
],
"excludes": [
"lhi*",
"br_tests_object.br_tests_passed_object.*",
"br_tests_object.br_tests_failed_object.*",
"br_tests_object.br_tests_skipped_object.*",
"br_tests_object.br_suites_object.*",
],
}
def make_query( # pylint: disable=too-many-arguments
index, combined_filter, includes, excludes, agg=None, size=1, start=0
):
"""
Simplifies the execution and usage of a typical query, including cleaning up the results.
Args:
index: index to search on
combined_filter: combined set of filters to run the query with
includes: list of fields to include on the results (keep as small as possible to improve execution time)
excludes: list of fields to explicitly exclude from the results
size: [Optional] number of results to return. Defaults to 1.
Returns:
List of dicts with results of the query.
"""
search = BuildResults().search(index=index)
search = search.source(includes=includes, excludes=excludes)
if agg:
# .aggs.metric() attaches the aggregation to the search in place, so its
# return value must not be rebound to `search`
search.aggs.metric("fail_count", agg)
search = search.query("bool", filter=[combined_filter])[0:1]
search = search[start : start + size]
response = search.execute()
results = []
if agg:
results = response["aggregations"]["fail_count"]["buckets"]
else:
for hit in response["hits"]["hits"]:
results.append(hit["_source"])
return results
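# Illustrative call (hypothetical index name and filter):
#
#     from elasticsearch_dsl import Q
#     failed = Q("term", br_status_key="FAILURE")
#     hits = make_query("staging*", failed,
#                       detailed_build_info["includes"],
#                       detailed_build_info["excludes"],
#                       size=10)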
| 31.666667 | 113 | 0.647895 | 238 | 1,900 | 4.962185 | 0.436975 | 0.047417 | 0.066046 | 0.050804 | 0.078747 | 0.04403 | 0 | 0 | 0 | 0 | 0 | 0.003487 | 0.245263 | 1,900 | 59 | 114 | 32.20339 | 0.820084 | 0.315789 | 0 | 0.102564 | 0 | 0 | 0.291801 | 0.124598 | 0 | 0 | 0 | 0 | 0 | 1 | 0.025641 | false | 0.025641 | 0.025641 | 0 | 0.076923 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d96a378a19360aa836edef6494a1d0f976078638 | 4,714 | py | Python | src/scripts/vnet/uri/dummy_app.py | amithbraj/vpp | edf1da94dc099c6e2ab1d455ce8652fada3cdb04 | [
"Apache-2.0"
] | 751 | 2017-07-13T06:16:46.000Z | 2022-03-30T09:14:35.000Z | src/scripts/vnet/uri/dummy_app.py | amithbraj/vpp | edf1da94dc099c6e2ab1d455ce8652fada3cdb04 | [
"Apache-2.0"
] | 63 | 2018-06-11T09:48:35.000Z | 2021-01-05T09:11:03.000Z | src/scripts/vnet/uri/dummy_app.py | amithbraj/vpp | edf1da94dc099c6e2ab1d455ce8652fada3cdb04 | [
"Apache-2.0"
] | 479 | 2017-07-13T06:17:26.000Z | 2022-03-31T18:20:43.000Z | #!/usr/bin/env python3
import socket
import sys
import time
import argparse
# action can be reflect or drop
action = "drop"
test = 0
def test_data (data, n_rcvd):
n_read = len(data)
for i in range(n_read):
expected = (n_rcvd + i) & 0xff
# indexing a bytes object already yields an int on Python 3
byte_got = data[i]
if (byte_got != expected):
print("Difference at byte {}. Expected {} got {}"
.format(n_rcvd + i, expected, byte_got))
return n_read
def handle_connection (connection, client_address):
print("Received connection from {}".format(repr(client_address)))
n_rcvd = 0
try:
while True:
data = connection.recv(4096)
if not data:
break
if (test == 1):
n_rcvd += test_data (data, n_rcvd)
if (action != "drop"):
connection.sendall(data)
finally:
connection.close()
def run_tcp_server(ip, port):
print("Starting TCP server {}:{}".format(repr(ip), repr(port)))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_address = (ip, int(port))
sock.bind(server_address)
sock.listen(1)
while True:
connection, client_address = sock.accept()
handle_connection (connection, client_address)
def run_udp_server(ip, port):
print("Starting UDP server {}:{}".format(repr(ip), repr(port)))
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
server_address = (ip, int(port))
sock.bind(server_address)
while True:
data, addr = sock.recvfrom(4096)
if (action != "drop"):
#snd_sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.sendto (data, addr)
def run_server(ip, port, proto):
if (proto == "tcp"):
run_tcp_server(ip, port)
elif (proto == "udp"):
run_udp_server(ip, port)
def prepare_data(power):
buf = []
for i in range (0, pow(2, power)):
buf.append(i & 0xff)
return bytearray(buf)
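# Example: prepare_data(4) returns the 16 bytes 0x00..0x0f; larger powers
# keep cycling through 0x00..0xff, which is the pattern test_data() checks.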
def run_tcp_client(ip, port):
print("Starting TCP client {}:{}".format(repr(ip), repr(port)))
sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
server_address = (ip, int(port))
sock.connect(server_address)
data = prepare_data(16)
n_rcvd = 0
n_sent = len (data)
try:
sock.sendall(data)
timeout = time.time() + 2
while n_rcvd < n_sent and time.time() < timeout:
tmp = sock.recv(1500)
tmp = bytearray (tmp)
n_read = len(tmp)
for i in range(n_read):
if (data[n_rcvd + i] != tmp[i]):
print("Difference at byte {}. Sent {} got {}"
.format(n_rcvd + i, data[n_rcvd + i], tmp[i]))
n_rcvd += n_read
if (n_rcvd < n_sent or n_rcvd > n_sent):
print("Sent {} and got back {}".format(n_sent, n_rcvd))
else:
print("Got back what we've sent!!");
finally:
sock.close()
def run_udp_client(ip, port):
print("Starting UDP client {}:{}".format(repr(ip), repr(port)))
n_packets = 100
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (ip, int(port))
data = prepare_data(10)
try:
for i in range (0, n_packets):
sock.sendto(data, server_address)
finally:
sock.close()
def run_client(ip, port, proto):
if (proto == "tcp"):
run_tcp_client(ip, port)
elif (proto == "udp"):
run_udp_client(ip, port)
def run(mode, ip, port, proto):
if (mode == "server"):
run_server (ip, port, proto)
elif (mode == "client"):
run_client (ip, port, proto)
else:
raise Exception("Unknown mode. Only client and server supported")
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('-m', action='store', dest='mode')
parser.add_argument('-i', action='store', dest='ip')
parser.add_argument('-p', action='store', dest='port')
parser.add_argument('-proto', action='store', dest='proto')
parser.add_argument('-a', action='store', dest='action')
parser.add_argument('-t', action='store', dest='test')
results = parser.parse_args()
    # argparse values arrive as strings (or None when a flag is omitted),
    # so normalise them before use instead of overwriting the defaults
    if results.action:
        action = results.action
    if results.test:
        test = int(results.test)
run(results.mode, results.ip, results.port, results.proto)
#if (len(sys.argv)) < 4:
# raise Exception("Usage: ./dummy_app <mode> <ip> <port> [<action> <test>]")
#if (len(sys.argv) == 6):
# action = sys.argv[4]
# test = int(sys.argv[5])
#run (sys.argv[1], sys.argv[2], int(sys.argv[3]))
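# Example invocations (illustrative; host and port values are placeholders,
# flag names follow the argparse definitions above):
#   ./dummy_app.py -m server -i 0.0.0.0 -p 5000 -proto tcp -a reflect -t 1
#       echo server that reflects every byte and verifies the test pattern
#   ./dummy_app.py -m client -i 127.0.0.1 -p 5000 -proto tcp
#       client that streams the generated pattern and checks the echoed bytes
#   ./dummy_app.py -m server -i 0.0.0.0 -p 5000 -proto udp
#       UDP sink that silently drops whatever it receives (default action)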
| 33.197183 | 83 | 0.591854 | 635 | 4,714 | 4.234646 | 0.209449 | 0.027891 | 0.026776 | 0.040907 | 0.427296 | 0.257717 | 0.213834 | 0.195984 | 0.175902 | 0.159911 | 0 | 0.011491 | 0.261561 | 4,714 | 141 | 84 | 33.432624 | 0.760988 | 0.071065 | 0 | 0.291667 | 0 | 0 | 0.094966 | 0 | 0 | 0 | 0.001831 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.033333 | 0 | 0.133333 | 0.075 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d96a84eccbdb4344152ec7775f2333ab5fdd6d60 | 2,806 | py | Python | getData.py | siddsax/WD-GAN | c5f7d68394ea60760db3eacb5f059ebebef6060d | [
"BSD-3-Clause"
] | null | null | null | getData.py | siddsax/WD-GAN | c5f7d68394ea60760db3eacb5f059ebebef6060d | [
"BSD-3-Clause"
] | null | null | null | getData.py | siddsax/WD-GAN | c5f7d68394ea60760db3eacb5f059ebebef6060d | [
"BSD-3-Clause"
] | null | null | null | import torch
from torch.utils import data
import numpy as np
import os
import cv2
import torchvision.transforms as transforms
from PIL import Image
import random
from PIL import ImageFile
def get_transform(opt):
transform_list = []
if opt.resize_or_crop == 'resize_and_crop':
osize = [opt.loadSize_1, opt.loadSize_2]
transform_list.append(transforms.Resize(osize, Image.BICUBIC))
transform_list.append(transforms.RandomCrop((opt.fineSize_1, opt.fineSize_2 )))
elif opt.resize_or_crop == 'crop':
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'scale_width':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.fineSize)))
elif opt.resize_or_crop == 'scale_width_and_crop':
transform_list.append(transforms.Lambda(
lambda img: __scale_width(img, opt.loadSize)))
transform_list.append(transforms.RandomCrop(opt.fineSize))
elif opt.resize_or_crop == 'none':
transform_list.append(transforms.Lambda(
lambda img: __adjust(img)))
else:
raise ValueError('--resize_or_crop %s is not a valid option.' % opt.resize_or_crop)
# if opt.isTrain and not opt.no_flip:
# print("="*1000)
# # exit()
# transform_list.append(transforms.RandomHorizontalFlip())
transform_list += [transforms.ToTensor()]
# transforms.Normalize((0.5, 0.5, 0.5),
# (0.5, 0.5, 0.5))]
return transforms.Compose(transform_list)
class Dataset(data.Dataset):
'Characterizes a dataset for PyTorch'
def __init__(self, opt):
'Initialization'
self.transform = get_transform(opt)
self.dataroot = opt.dataroot
self.AB_paths = os.listdir(opt.dataroot)
self.train = opt.train
self.opt = opt
def __len__(self):
'Denotes the total number of samples'
return len(self.AB_paths)
def __getitem__(self, index):
AB_path = self.dataroot + '/' + self.AB_paths[index]
AB = Image.open(AB_path).convert('RGB')
if self.train:
w, h = AB.size
w2 = int(w / 2)
B = AB.crop((w2, 0, w, h)).resize((self.opt.loadSize_1, self.opt.loadSize_2), Image.BICUBIC)
else:
B = AB
seed = random.randint(0,2**32)
random.seed(seed)
# B = transforms.ToTensor()(B)
B = self.transform(B)
w_offset = random.randint(0, max(0, self.opt.loadSize_1 - self.opt.fineSize_1 - 1))
h_offset = random.randint(0, max(0, self.opt.loadSize_2 - self.opt.fineSize_2 - 1))
B = B[:, h_offset:h_offset + self.opt.fineSize_2, w_offset:w_offset + self.opt.fineSize_1]
return B, 0
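# Minimal usage sketch (illustrative only): the attribute names below mirror
# what this module reads from `opt`; the dataroot is a placeholder directory
# of paired images. 'resize_and_crop' is chosen because the other modes refer
# to helpers (__scale_width, __adjust) that are not defined in this file.
if __name__ == "__main__":
    from types import SimpleNamespace
    opt = SimpleNamespace(
        dataroot="./datasets/example",
        resize_or_crop="resize_and_crop",
        loadSize_1=286, loadSize_2=286,
        fineSize_1=256, fineSize_2=256,
        train=True,
    )
    loader = data.DataLoader(Dataset(opt), batch_size=4, shuffle=True)
    for images, _ in loader:
        print(images.shape)
        break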
| 35.075 | 104 | 0.63186 | 372 | 2,806 | 4.55914 | 0.268817 | 0.084316 | 0.089623 | 0.136792 | 0.322524 | 0.306604 | 0.288325 | 0.232901 | 0.232901 | 0.159198 | 0 | 0.020932 | 0.250891 | 2,806 | 79 | 105 | 35.518987 | 0.785918 | 0.106914 | 0 | 0.118644 | 0 | 0 | 0.071207 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067797 | false | 0 | 0.152542 | 0 | 0.288136 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d96ff25d9d5722c19fe4236bed106b32c2d92cde | 10,795 | py | Python | nova/virt/powervm/tasks/network.py | zjzh/nova | 7bb21723171c59b93e28f5d508c2b6df39220f13 | [
"Apache-2.0"
] | 1,874 | 2015-01-04T05:18:34.000Z | 2022-03-31T03:30:28.000Z | nova/virt/powervm/tasks/network.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 132 | 2017-03-27T11:31:52.000Z | 2022-03-30T08:45:02.000Z | nova/virt/powervm/tasks/network.py | woraser/nova | fc3890667e4971e3f0f35ac921c2a6c25f72adec | [
"Apache-2.0"
] | 1,996 | 2015-01-04T15:11:51.000Z | 2022-03-31T11:03:13.000Z | # Copyright 2015, 2017 IBM Corp.
#
# All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import eventlet
from oslo_log import log as logging
from pypowervm.tasks import cna as pvm_cna
from pypowervm.wrappers import managed_system as pvm_ms
from pypowervm.wrappers import network as pvm_net
from taskflow import task
from nova import conf as cfg
from nova import exception
from nova.virt.powervm import vif
from nova.virt.powervm import vm
LOG = logging.getLogger(__name__)
CONF = cfg.CONF
SECURE_RMC_VSWITCH = 'MGMTSWITCH'
SECURE_RMC_VLAN = 4094
class PlugVifs(task.Task):
"""The task to plug the Virtual Network Interfaces to a VM."""
def __init__(self, virt_api, adapter, instance, network_infos):
"""Create the task.
Provides 'vm_cnas' - the list of the Virtual Machine's Client Network
Adapters as they stand after all VIFs are plugged. May be None, in
which case the Task requiring 'vm_cnas' should discover them afresh.
:param virt_api: The VirtAPI for the operation.
:param adapter: The pypowervm adapter.
:param instance: The nova instance.
:param network_infos: The network information containing the nova
VIFs to create.
"""
self.virt_api = virt_api
self.adapter = adapter
self.instance = instance
self.network_infos = network_infos or []
self.crt_network_infos, self.update_network_infos = [], []
# Cache of CNAs that is filled on initial _vif_exists() call.
self.cnas = None
super(PlugVifs, self).__init__(
name='plug_vifs', provides='vm_cnas', requires=['lpar_wrap'])
def _vif_exists(self, network_info):
"""Does the instance have a CNA for a given net?
:param network_info: A network information dict. This method expects
it to contain key 'address' (MAC address).
:return: True if a CNA with the network_info's MAC address exists on
the instance. False otherwise.
"""
if self.cnas is None:
self.cnas = vm.get_cnas(self.adapter, self.instance)
vifs = self.cnas
return network_info['address'] in [vm.norm_mac(v.mac) for v in vifs]
def execute(self, lpar_wrap):
# Check to see if the LPAR is OK to add VIFs to.
modifiable, reason = lpar_wrap.can_modify_io()
if not modifiable:
LOG.error("Unable to create VIF(s) for instance in the system's "
"current state. The reason from the system is: %s",
reason, instance=self.instance)
raise exception.VirtualInterfaceCreateException()
# We will have two types of network infos. One is for newly created
# vifs. The others are those that exist, but should be re-'treated'
for network_info in self.network_infos:
if self._vif_exists(network_info):
self.update_network_infos.append(network_info)
else:
self.crt_network_infos.append(network_info)
# If there are no vifs to create or update, then just exit immediately.
if not self.crt_network_infos and not self.update_network_infos:
return []
# For existing VIFs that we just need to update, run the plug but do
# not wait for the neutron event as that likely won't be sent (it was
# already done).
for network_info in self.update_network_infos:
LOG.info("Updating VIF with mac %s for instance.",
network_info['address'], instance=self.instance)
vif.plug(self.adapter, self.instance, network_info, new_vif=False)
# For the new VIFs, run the creates (and wait for the events back)
try:
with self.virt_api.wait_for_instance_event(
self.instance, self._get_vif_events(),
deadline=CONF.vif_plugging_timeout,
error_callback=self._vif_callback_failed):
for network_info in self.crt_network_infos:
LOG.info('Creating VIF with mac %s for instance.',
network_info['address'], instance=self.instance)
new_vif = vif.plug(
self.adapter, self.instance, network_info,
new_vif=True)
if self.cnas is not None:
self.cnas.append(new_vif)
except eventlet.timeout.Timeout:
LOG.error('Error waiting for VIF to be created for instance.',
instance=self.instance)
raise exception.VirtualInterfaceCreateException()
return self.cnas
def _vif_callback_failed(self, event_name, instance):
LOG.error('VIF Plug failure for callback on event %s for instance.',
event_name, instance=self.instance)
if CONF.vif_plugging_is_fatal:
raise exception.VirtualInterfaceCreateException()
def _get_vif_events(self):
"""Returns the VIF events that need to be received for a VIF plug.
In order for a VIF plug to be successful, certain events should be
received from other components within the OpenStack ecosystem. This
method returns the events neutron needs for a given deploy.
"""
# See libvirt's driver.py -> _get_neutron_events method for
# more information.
if CONF.vif_plugging_is_fatal and CONF.vif_plugging_timeout:
return [('network-vif-plugged', network_info['id'])
for network_info in self.crt_network_infos
if not network_info.get('active', True)]
def revert(self, lpar_wrap, result, flow_failures):
if not self.network_infos:
return
LOG.warning('VIF creation being rolled back for instance.',
instance=self.instance)
# Get the current adapters on the system
cna_w_list = vm.get_cnas(self.adapter, self.instance)
for network_info in self.crt_network_infos:
try:
vif.unplug(self.adapter, self.instance, network_info,
cna_w_list=cna_w_list)
except Exception:
LOG.exception("An exception occurred during an unplug in the "
"vif rollback. Ignoring.",
instance=self.instance)
class UnplugVifs(task.Task):
"""The task to unplug Virtual Network Interfaces from a VM."""
def __init__(self, adapter, instance, network_infos):
"""Create the task.
:param adapter: The pypowervm adapter.
:param instance: The nova instance.
:param network_infos: The network information containing the nova
VIFs to create.
"""
self.adapter = adapter
self.instance = instance
self.network_infos = network_infos or []
super(UnplugVifs, self).__init__(name='unplug_vifs')
def execute(self):
# If the LPAR is not in an OK state for deleting, then throw an
# error up front.
lpar_wrap = vm.get_instance_wrapper(self.adapter, self.instance)
modifiable, reason = lpar_wrap.can_modify_io()
if not modifiable:
LOG.error("Unable to remove VIFs from instance in the system's "
"current state. The reason reported by the system is: "
"%s", reason, instance=self.instance)
raise exception.VirtualInterfaceUnplugException(reason=reason)
# Get all the current Client Network Adapters (CNA) on the VM itself.
cna_w_list = vm.get_cnas(self.adapter, self.instance)
# Walk through the VIFs and delete the corresponding CNA on the VM.
for network_info in self.network_infos:
vif.unplug(self.adapter, self.instance, network_info,
cna_w_list=cna_w_list)
class PlugMgmtVif(task.Task):
"""The task to plug the Management VIF into a VM."""
def __init__(self, adapter, instance):
"""Create the task.
Requires 'vm_cnas' from PlugVifs. If None, this Task will retrieve the
VM's list of CNAs.
Provides the mgmt_cna. This may be None if no management device was
created. This is the CNA of the mgmt vif for the VM.
:param adapter: The pypowervm adapter.
:param instance: The nova instance.
"""
self.adapter = adapter
self.instance = instance
super(PlugMgmtVif, self).__init__(
name='plug_mgmt_vif', provides='mgmt_cna', requires=['vm_cnas'])
def execute(self, vm_cnas):
LOG.info('Plugging the Management Network Interface to instance.',
instance=self.instance)
# Determine if we need to create the secure RMC VIF. This should only
# be needed if there is not a VIF on the secure RMC vSwitch
vswitch = None
vswitches = pvm_net.VSwitch.search(
self.adapter, parent_type=pvm_ms.System.schema_type,
parent_uuid=self.adapter.sys_uuid, name=SECURE_RMC_VSWITCH)
if len(vswitches) == 1:
vswitch = vswitches[0]
if vswitch is None:
LOG.warning('No management VIF created for instance due to lack '
'of Management Virtual Switch', instance=self.instance)
return None
# This next check verifies that there are no existing NICs on the
# vSwitch, so that the VM does not end up with multiple RMC VIFs.
if vm_cnas is None:
has_mgmt_vif = vm.get_cnas(self.adapter, self.instance,
vswitch_uri=vswitch.href)
else:
has_mgmt_vif = vswitch.href in [cna.vswitch_uri for cna in vm_cnas]
if has_mgmt_vif:
LOG.debug('Management VIF already created for instance',
instance=self.instance)
return None
lpar_uuid = vm.get_pvm_uuid(self.instance)
return pvm_cna.crt_cna(self.adapter, None, lpar_uuid, SECURE_RMC_VLAN,
vswitch=SECURE_RMC_VSWITCH, crt_vswitch=True)
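# These Task classes are meant to be composed into a TaskFlow flow by the
# driver; a rough, illustrative sketch of such wiring (not a verbatim excerpt):
#     from taskflow import engines
#     from taskflow.patterns import linear_flow
#     flow = linear_flow.Flow('spawn_networking')
#     flow.add(PlugVifs(virt_api, adapter, instance, network_infos))
#     flow.add(PlugMgmtVif(adapter, instance))
#     engines.run(flow, store={'lpar_wrap': lpar_wrap})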
| 41.519231 | 79 | 0.631774 | 1,408 | 10,795 | 4.697443 | 0.224432 | 0.045358 | 0.034472 | 0.031297 | 0.310402 | 0.275627 | 0.239643 | 0.194436 | 0.178561 | 0.166163 | 0 | 0.002378 | 0.298749 | 10,795 | 259 | 80 | 41.679537 | 0.871334 | 0.313201 | 0 | 0.275362 | 0 | 0 | 0.112771 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.072464 | false | 0 | 0.072464 | 0 | 0.224638 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9733cb8ddf1ffbcfc514f4195c8b460a2b0fff8 | 560 | py | Python | typer-cli-python/source_code_step_2/rptodo/cli.py | syberflea/materials | 54f44725b40edf00c1b523d7a85b34a85014d7eb | [
"MIT"
] | 3,682 | 2018-05-07T19:45:24.000Z | 2022-03-31T15:19:10.000Z | typer-cli-python/source_code_step_2/rptodo/cli.py | sribarrow/materials | c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5 | [
"MIT"
] | 148 | 2018-05-15T21:18:49.000Z | 2022-03-21T11:25:39.000Z | typer-cli-python/source_code_step_2/rptodo/cli.py | sribarrow/materials | c17c4a4d6f8487e59eac1df8c88ca92b73d6d2a5 | [
"MIT"
] | 5,535 | 2018-05-25T23:36:08.000Z | 2022-03-31T16:55:52.000Z | """This module provides the RP To-Do CLI."""
from typing import Optional
import typer
from rptodo import __app_name__, __version__
app = typer.Typer()
def _version_callback(value: bool) -> None:
if value:
typer.echo(f"{__app_name__} v{__version__}")
raise typer.Exit()
@app.callback()
def main(
version: Optional[bool] = typer.Option(
None,
"--version",
"-v",
help="Show the application's version and exit.",
callback=_version_callback,
is_eager=True,
)
) -> None:
return
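# Quick check sketch (illustrative; not part of this tutorial step): typer's
# CliRunner can exercise the version callback without installing the package.
#     from typer.testing import CliRunner
#     runner = CliRunner()
#     result = runner.invoke(app, ["--version"])
#     assert __app_name__ in result.stdout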
| 18.666667 | 56 | 0.623214 | 69 | 560 | 4.724638 | 0.565217 | 0.042945 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.255357 | 560 | 29 | 57 | 19.310345 | 0.781775 | 0.067857 | 0 | 0 | 0 | 0 | 0.155039 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.15 | 0.05 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d97515b3fc95c50563dceea347d6cfbeb7c8f9bf | 4,629 | py | Python | surrortg/devices/relay.py | bn102/surrortg-sdk | 5f51515d0fd83741b3359b9a682c0a9afc38886f | [
"MIT"
] | 21 | 2020-11-03T23:41:56.000Z | 2022-03-21T04:11:46.000Z | surrortg/devices/relay.py | bn102/surrortg-sdk | 5f51515d0fd83741b3359b9a682c0a9afc38886f | [
"MIT"
] | 5 | 2021-02-11T14:36:03.000Z | 2021-07-20T11:45:07.000Z | surrortg/devices/relay.py | bn102/surrortg-sdk | 5f51515d0fd83741b3359b9a682c0a9afc38886f | [
"MIT"
] | 11 | 2020-11-13T11:14:33.000Z | 2022-03-21T04:11:51.000Z | import asyncio
import logging
import pigpio
class Relay:
"""Simple to use relay class implemented with pigpio
:param pin: GPIO pin number
:type pin: int
:param on_level_low: Determines the logic level of the on-state.
If set to True, the relay is on when the GPIO pin state is LOW.
Defaults to True.
:type on_level_low: bool, optional
:param initial_state_off: Determines whether the relay should be
set to off-state when initialized. If set to False, the relay is
set to on-state at init. Defaults to True.
:type initial_state_off: bool, optional
:raises RuntimeError: If cannot connect to pigpio daemon
:raises RuntimeError: If methods are called after calling stop
"""
def __init__(self, pin, on_level_low=True, initial_state_off=True):
self._pin = pin
self._on_level_low = on_level_low
self._stopped = False
if on_level_low:
self._on_level = pigpio.LOW
self._off_level = pigpio.HIGH
else:
self._on_level = pigpio.HIGH
self._off_level = pigpio.LOW
self._pi = pigpio.pi()
if not self._pi.connected:
raise RuntimeError("Could not connect to pigpio daemon")
self._pi.set_mode(self._pin, pigpio.OUTPUT)
if initial_state_off:
self.off()
else:
self.on()
def on(self):
"""Turns the relay on"""
self._check_if_stopped()
self._pi.write(self._pin, self._on_level)
def off(self):
"""Turns the relay off"""
self._check_if_stopped()
self._pi.write(self._pin, self._off_level)
def toggle(self):
"""Toggles the relay's state
Turns the relay on if the state was previously off, and vice versa.
"""
self._check_if_stopped()
if self.is_on():
self.off()
else:
self.on()
async def press_once(self, press_time):
"""Turns the relay on and off, waiting press_time seconds in between
:param press_time: Time in seconds to wait between turning the relay
on and off
:type press_time: float or int
"""
assert isinstance(press_time, float) or isinstance(
press_time, int
), "press_time should be float or int"
self._check_if_stopped()
if self.is_on():
logging.warning(
"Relay is already on when pressing once! Will turn relay off "
f"in {press_time} seconds."
)
self.on()
await asyncio.sleep(press_time)
self.off()
def is_on(self):
"""Checks if the relay is turned on
:return: True if the relay is turned on
:rtype: bool
"""
self._check_if_stopped()
return self._pi.read(self._pin) == self._on_level
def is_off(self):
"""Checks if the relay is turned off
:return: True if the relay is turned off
:rtype: bool
"""
self._check_if_stopped()
return not self.is_on()
def on_level_is_low(self):
"""Checks if the relay is on when the GPIO state is LOW
:return: True if the relay is on when the GPIO state is LOW
:rtype: bool
"""
self._check_if_stopped()
return self._on_level_low
def _check_if_stopped(self):
if self._stopped:
raise RuntimeError("Relay already stopped")
def stop(self):
"""Sets the pin to input state and stops pigpio daemon connection"""
self._check_if_stopped()
self._pi.set_pull_up_down(self._pin, pigpio.PUD_OFF)
self._pi.set_mode(self._pin, pigpio.INPUT)
self._pi.stop()
self._stopped = True
if __name__ == "__main__":
async def main():
relay = Relay(26)
print(f"Relay on level is low: {relay.on_level_is_low()}")
print(f"Relay is initially on: {relay.is_on()}")
await asyncio.sleep(0.5)
print("Turning the relay on")
relay.on()
await asyncio.sleep(1)
print("Turning the relay off")
relay.off()
await asyncio.sleep(2)
print("Pressing the relay once for 1 second")
await relay.press_once(1)
await asyncio.sleep(2)
print("Toggle relay state")
relay.toggle()
print(f"Relay is now on: {relay.is_on()}")
await asyncio.sleep(1)
print("Toggle relay state again")
relay.toggle()
print(f"Relay is now off: {relay.is_off()}")
relay.stop()
asyncio.run(main())
| 28.054545 | 78 | 0.597969 | 639 | 4,629 | 4.13302 | 0.195618 | 0.054525 | 0.047709 | 0.054525 | 0.330178 | 0.274896 | 0.230973 | 0.10602 | 0.056797 | 0.056797 | 0 | 0.00314 | 0.311946 | 4,629 | 164 | 79 | 28.22561 | 0.82606 | 0.242817 | 0 | 0.280899 | 0 | 0 | 0.145531 | 0.008067 | 0 | 0 | 0 | 0 | 0.011236 | 1 | 0.101124 | false | 0 | 0.033708 | 0 | 0.179775 | 0.101124 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d976858493e0b3bdb11753531577c643cf5f3d49 | 8,322 | py | Python | voice.py | ImPurpl3/egg | 875f8105140544897e7b81af660e3da864b4cd54 | [
"MIT"
] | null | null | null | voice.py | ImPurpl3/egg | 875f8105140544897e7b81af660e3da864b4cd54 | [
"MIT"
] | null | null | null | voice.py | ImPurpl3/egg | 875f8105140544897e7b81af660e3da864b4cd54 | [
"MIT"
] | 1 | 2021-12-17T01:23:31.000Z | 2021-12-17T01:23:31.000Z | """
MIT License
Copyright (c) 2020 ValkyriaKing711
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
"""
import asyncio
import os
from asyncio import AbstractEventLoop
from datetime import datetime
from typing import TypeVar, Union
import discord
from async_timeout import timeout
from cogs.utils import utils
from discord import (AudioSource, FFmpegPCMAudio, Guild, PCMVolumeTransformer,
TextChannel)
from discord.ext import commands, tasks
from discord.ext.commands import Cog, Context
from youtube_dl import YoutubeDL
utcnow = datetime.utcnow
Y = TypeVar("Y", bound="YTDLSource")
FFMPEG_EXECUTABLE = "ffmpeg"
FFMPEG_OPTIONS = {
"before_options": "-reconnect 1 -reconnect_streamed 1 -reconnect_delay_max 5",
"options": "-vn"
}
ytdl = YoutubeDL({
"format": "bestaudio/best",
"outtmpl": "downloads/%(autonumber)s-%(extractor)s-%(id)s-%(title)s.%(ext)s",
"restrictfilenames": True,
"noplaylist": True,
"nocheckcertificate": True,
"ignoreerrors": False,
"logtostderr": False,
"quiet": False,
"verbose": True,
"no_warnings": True,
"default_search": "auto",
"source_address": "0.0.0.0",
"geo_bypass_country": "FI",
"age_limit": 30
})
class YTDLSource(PCMVolumeTransformer):
def __init__(self, source: AudioSource, *,
data: dict, volume=1.0):
super().__init__(source, volume)
self.data = data
self.title = data.get("title")
self.url = data.get("url")
@classmethod
async def from_query(cls, query: str, *,
loop: AbstractEventLoop = None,
stream: bool = True, partial: bool = False,
ctx: Context = None) -> Union[dict, Y]:
if not stream and partial:
raise ValueError("partial cannot be True when not streaming")
loop = loop or asyncio.get_running_loop()
data = await loop.run_in_executor(
None,
lambda: ytdl.extract_info(query, download=not stream)
)
if "entries" in data:
data = data["entries"][0]
if ctx:
data["context"] = ctx
if partial:
for key in ("formats", "http_headers", "downloader_options", "thumbnails", "url"):
try:
del data[key]
except Exception:
pass
return data
options = FFMPEG_OPTIONS.copy()
if stream:
source = data["url"]
else:
source = ytdl.prepare_filename(data)
data["filename"] = source
options.pop("before_options")
return cls(FFmpegPCMAudio(source, **options), data=data)
@classmethod
async def regather_stream(cls, data: dict, *,
loop: AbstractEventLoop = None) -> Y:
loop = loop or asyncio.get_running_loop()
ctx = data.get("context")
data = await loop.run_in_executor(
None,
lambda: ytdl.extract_info(data["webpage_url"], download=False)
)
if ctx:
data["context"] = ctx
return cls(FFmpegPCMAudio(data["url"]), data=data)
class MusicPlayer:
def __init__(self, ctx: Context):
self.bot: utils.Bot = ctx.bot
self._channel: TextChannel = ctx.channel
self._cog: Cog = ctx.cog
self._guild: Guild = ctx.guild
self.next = asyncio.Event()
self.queue = asyncio.Queue()
self.current = None
self.volume = 1.0
self.first_play_id = None
self.skipped = None
self.player_loop.start() # pylint: disable=no-member
@tasks.loop()
async def player_loop(self):
self.next.clear()
try:
async with timeout(300):
source = await self.queue.get()
except asyncio.TimeoutError:
print("timeout")
return await self.destroy(self._guild)
if not isinstance(source, YTDLSource):
try:
source = await YTDLSource.regather_stream(
source, loop=self.bot.loop
)
except Exception as e:
embed = discord.Embed(
description=f"```css\n{e}\n```",
color=0xF6DECF,
timestamp=utcnow()
)
embed.set_author(
name="An error occurred while processing the track.",
icon_url=self._guild.me.display_avatar.url
)
return await self._channel.send(embed=embed)
ctx = source.data["context"]
source.volume = self.volume
self.current = source
self._guild.voice_client.play(
source,
after=lambda _: self.bot.loop.call_soon_threadsafe(self.next.set)
)
if self.skipped:
embed = discord.Embed(
description=f"**Now playing {self.current.data['title']}**",
color=0xF6DECF,
timestamp=utcnow()
)
embed.set_author(
name=f"Skipped {self.skipped.data['title']}",
icon_url=self.skipped.data["skipper"].display_avatar.url,
url=source.data["webpage_url"]
)
self.skipped = None
if source.data["is_live"]:
duration = "🔴 LIVE"
else:
duration = utils.format_time(source.data["duration"])
embed.add_field(name="Uploader", value=source.data["uploader"])
embed.add_field(name="Duration", value=duration)
embed.add_field(name="Requested by", value=ctx.author.mention)
embed.set_thumbnail(url=source.data["thumbnail"])
await self._channel.send(embed=embed)
elif ctx.message.id != self.first_play_id:
embed = discord.Embed(
color=0xF6DECF, timestamp=utcnow()
)
embed.set_author(
name=f"Now playing {source.title}",
icon_url=ctx.author.display_avatar.url,
url=source.data["webpage_url"]
)
if source.data["is_live"]:
duration = "🔴 LIVE"
else:
duration = utils.format_time(source.data["duration"])
embed.add_field(name="Uploader", value=source.data["uploader"])
embed.add_field(name="Duration", value=duration)
embed.add_field(name="Requested by", value=ctx.author.mention)
embed.set_thumbnail(url=source.data["thumbnail"])
await self._channel.send(embed=embed)
await self.next.wait()
source.cleanup()
self.current = None
filename = source.data.get("filename")
if filename and os.path.isfile(filename):
os.remove(filename)
@player_loop.before_loop
async def wait_until_ready(self):
await self.bot.wait_until_ready()
def destroy(self, guild: Guild):
return self._cog.cleanup(guild)
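# Rough usage sketch (illustrative; the owning Music cog and its player
# bookkeeping live elsewhere in the bot):
#     player = MusicPlayer(ctx)  # typically one per guild, created lazily
#     data = await YTDLSource.from_query(query, loop=bot.loop,
#                                        stream=True, partial=True, ctx=ctx)
#     await player.queue.put(data)  # player_loop regathers the stream when played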
| 32.76378 | 95 | 0.577866 | 921 | 8,322 | 5.121607 | 0.32139 | 0.02756 | 0.016536 | 0.021624 | 0.226415 | 0.206063 | 0.199703 | 0.186559 | 0.160271 | 0.140343 | 0 | 0.005332 | 0.323961 | 8,322 | 253 | 96 | 32.893281 | 0.832741 | 0.131939 | 0 | 0.276243 | 0 | 0.005525 | 0.124946 | 0.017378 | 0 | 0 | 0.003447 | 0 | 0 | 1 | 0.016575 | false | 0.01105 | 0.066298 | 0.005525 | 0.127072 | 0.005525 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d977def1eab47165344401c96a3e6718cbc8e63f | 689 | py | Python | solutions/Bulls and Cows/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | 3 | 2021-06-06T22:03:15.000Z | 2021-06-08T08:49:04.000Z | solutions/Bulls and Cows/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | null | null | null | solutions/Bulls and Cows/solution.py | nilax97/leetcode-solutions | d3c12f2b289662d199510e0431e177bbf3cda121 | [
"MIT"
] | null | null | null | class Solution:
    def getHint(self, secret: str, guess: str) -> str:
        bull = 0  # digits correct in both value and position
        cow = 0   # digits present in secret but guessed in the wrong position
        values = dict()  # tally of secret digits that were not bulls
        # First pass: count bulls and record the remaining secret digits.
        for i in range(len(secret)):
            if secret[i] == guess[i]:
                bull += 1
            elif secret[i] in values:
                values[secret[i]] += 1
            else:
                values[secret[i]] = 1
        # Second pass: each non-bull guess digit still left in the tally is a cow.
        for i in range(len(secret)):
            if secret[i] != guess[i]:
                if guess[i] in values:
                    if values[guess[i]] > 0:
                        cow += 1
                        values[guess[i]] -= 1
return str(bull) + "A" + str(cow) + "B"
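if __name__ == "__main__":
    # Quick self-check using the two classic examples from the problem statement.
    solution = Solution()
    assert solution.getHint("1807", "7810") == "1A3B"
    assert solution.getHint("1123", "0111") == "1A1B"
    print("all test cases passed")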
| 31.318182 | 54 | 0.37881 | 77 | 689 | 3.38961 | 0.324675 | 0.1341 | 0.045977 | 0.084291 | 0.268199 | 0.268199 | 0.268199 | 0.268199 | 0.268199 | 0.268199 | 0 | 0.023188 | 0.499274 | 689 | 21 | 55 | 32.809524 | 0.733333 | 0 | 0 | 0.105263 | 0 | 0 | 0.002903 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d9789ea5dc5332d1b7a15a1afc6b61a382b8814b | 2,392 | py | Python | tap_parquet/streams.py | berenddeboer/tap-parquet | d9c50ea92a68b7777e31ca622468e1dadd86d9ce | [
"Apache-2.0"
] | null | null | null | tap_parquet/streams.py | berenddeboer/tap-parquet | d9c50ea92a68b7777e31ca622468e1dadd86d9ce | [
"Apache-2.0"
] | 4 | 2021-04-02T16:32:14.000Z | 2021-11-09T22:54:03.000Z | tap_parquet/streams.py | berenddeboer/tap-parquet | d9c50ea92a68b7777e31ca622468e1dadd86d9ce | [
"Apache-2.0"
] | 2 | 2021-11-09T06:44:46.000Z | 2021-12-01T12:28:29.000Z | """Stream class for tap-parquet."""
import requests
from copy import deepcopy
from pathlib import Path
from typing import Any, Dict, Optional, Union, List, Iterable
from singer_sdk.streams import Stream
from singer_sdk.typing import (
ArrayType,
BooleanType,
DateTimeType,
IntegerType,
NumberType,
ObjectType,
PropertiesList,
Property,
StringType,
JSONTypeHelper,
)
import pyarrow.parquet as pq
SCHEMAS_DIR = Path(__file__).parent / Path("./schemas")
def get_jsonschema_type(ansi_type: str) -> JSONTypeHelper:
"""Return a JSONTypeHelper object for the given type name."""
if "int" in ansi_type:
return IntegerType()
if "string" in ansi_type:
return StringType()
if "bool" in ansi_type:
return BooleanType()
if "timestamp[ns]" in ansi_type:
return DateTimeType()
raise ValueError(f"Unmappable data type '{ansi_type}'.")
class ParquetStream(Stream):
"""Stream class for Parquet streams."""
@property
def filepath(self) -> str:
"""Return the filepath for the parquet stream."""
return self.config["filepath"]
@property
def schema(self) -> dict:
"""Dynamically detect the json schema for the stream.
This is evaluated prior to any records being retrieved.
"""
properties: List[Property] = []
parquet_schema = pq.ParquetFile(self.filepath).schema_arrow
for i in range(len(parquet_schema.names)):
name, dtype = parquet_schema.names[i], parquet_schema.types[i]
properties.append(Property(name, get_jsonschema_type(str(dtype))))
return PropertiesList(*properties).to_dict()
def get_records(self, partition: Optional[dict] = None) -> Iterable[dict]:
"""Return a generator of row-type dictionary objects."""
try:
parquet_file = pq.ParquetFile(self.filepath)
except Exception as ex:
raise IOError(f"Could not read from parquet file '{self.filepath}': {ex}")
for i in range(parquet_file.num_row_groups):
table = parquet_file.read_row_group(i)
for batch in table.to_batches():
for row in zip(*batch.columns):
yield {
table.column_names[i]: val.as_py()
for i, val in enumerate(row, start=0)
}
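# Example of producing an input file this stream can read (illustrative; any
# parquet file with flat int/string/bool/timestamp columns works):
#     import pyarrow as pa
#     import pyarrow.parquet as pq
#     table = pa.table({"id": [1, 2], "name": ["a", "b"]})
#     pq.write_table(table, "example.parquet")
# Pointing the tap config's "filepath" at that file then makes get_records()
# yield one dict per row.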
| 31.473684 | 86 | 0.637542 | 285 | 2,392 | 5.231579 | 0.417544 | 0.032193 | 0.026828 | 0.042924 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000567 | 0.26296 | 2,392 | 75 | 87 | 31.893333 | 0.84515 | 0.134615 | 0 | 0.037037 | 0 | 0 | 0.066271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.074074 | false | 0 | 0.12963 | 0 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d97defe4ab7d19c2df85621955ea08007777df4a | 354 | py | Python | test/test_format.py | gongso1st/geopy | 9252f4b12197ff3c5e3fae50d9bae74974d5d20f | [
"MIT"
] | 1 | 2019-07-17T14:38:52.000Z | 2019-07-17T14:38:52.000Z | test/test_format.py | gongso1st/geopy | 9252f4b12197ff3c5e3fae50d9bae74974d5d20f | [
"MIT"
] | null | null | null | test/test_format.py | gongso1st/geopy | 9252f4b12197ff3c5e3fae50d9bae74974d5d20f | [
"MIT"
] | 1 | 2020-06-03T01:42:17.000Z | 2020-06-03T01:42:17.000Z |
import unittest
from geopy.point import Point
from geopy.format import format_degrees
class TestFormat(unittest.TestCase):
@unittest.skip("")
def test_format(self):
"""
format_degrees
"""
self.assertEqual(
format_degrees(Point.parse_degrees('-13', '19', 0)),
"-13 19\' 0.0\""
)
| 19.666667 | 64 | 0.584746 | 39 | 354 | 5.179487 | 0.487179 | 0.193069 | 0.049505 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043651 | 0.288136 | 354 | 17 | 65 | 20.823529 | 0.757937 | 0.039548 | 0 | 0 | 0 | 0 | 0.057143 | 0 | 0 | 0 | 0 | 0 | 0.1 | 1 | 0.1 | false | 0 | 0.3 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d97e9bfb4e01d1a5a972e104691a2c436b4de3ca | 653 | py | Python | Sorts.py | marinajacks/nowcoder | 5fafb9b12f56f111737e56358016206023c8067c | [
"MIT"
] | null | null | null | Sorts.py | marinajacks/nowcoder | 5fafb9b12f56f111737e56358016206023c8067c | [
"MIT"
] | null | null | null | Sorts.py | marinajacks/nowcoder | 5fafb9b12f56f111737e56358016206023c8067c | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""
Created on Sun Oct 20 20:52:56 2019
@author: 陈彪, all rights reserved
This is a summary of sorting algorithms: every sorting algorithm is rewritten
here. For each one we first analyse its time complexity, then briefly explain
how it works, implement it in Python, and finally verify it with test cases.
"""
import random
'''First up is bubble sort, the sorting algorithm that is easiest to understand.
Its time complexity is O(N^2), which is also apparent from the code below.
'''
def bubble(a):
for i in range(len(a)):
for j in range(i,len(a)):
            if a[i] > a[j]:
                # idiomatic Python swap without a temporary variable
                a[i], a[j] = a[j], a[i]
return a
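'''For comparison, a sketch of insertion sort: another classic O(N^2) algorithm,
but it runs in O(N) on input that is already nearly sorted.
'''
def insertion(a):
    for i in range(1, len(a)):
        key = a[i]
        j = i - 1
        # shift larger elements one slot to the right until key fits
        while j >= 0 and a[j] > key:
            a[j + 1] = a[j]
            j -= 1
        a[j + 1] = key
    return a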
if __name__=="__main__":
a=[]
for i in range(10):
a.append(random.randint(10,40))
print(a)
print(bubble(a))
print('hello world!') | 15.926829 | 44 | 0.580398 | 90 | 653 | 4.122222 | 0.577778 | 0.032345 | 0.024259 | 0.037736 | 0.06469 | 0 | 0 | 0 | 0 | 0 | 0 | 0.043478 | 0.260337 | 653 | 41 | 45 | 15.926829 | 0.724638 | 0.2634 | 0 | 0 | 0 | 0 | 0.052493 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.0625 | false | 0 | 0.0625 | 0 | 0.1875 | 0.1875 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d980d8c3ac914ab7b6744057703f0c8a2e3c1e3d | 2,711 | py | Python | nederlands.py | rec/neederlands | f5b71a768c9a51a06014a386ffafc8844943e4b2 | [
"Unlicense"
] | 1 | 2020-02-05T17:48:22.000Z | 2020-02-05T17:48:22.000Z | nederlands.py | rec/nederlands | f5b71a768c9a51a06014a386ffafc8844943e4b2 | [
"Unlicense"
] | null | null | null | nederlands.py | rec/nederlands | f5b71a768c9a51a06014a386ffafc8844943e4b2 | [
"Unlicense"
] | null | null | null | import string
WIKI_BESTAND = '/Users/tom/Downloads/\
nlwiktionary-20191020-pages-articles-multistream-index.txt'
WOORD_BESTAND = 'woord-frequenties.txt'
SLECHT_BESTAND = 'slechte-woorden.txt'
BLACKLIST = {i.strip() for i in open(SLECHT_BESTAND)}
AANTAL = 1000000000000000
MIN = 4
MIN_ACHTERVOEGSEL = 4
VOORVOEGSELS = (
'aan',
'achter',
'achterop',
'af',
'be',
'bij',
'binnen',
'boven',
'door',
'er',
'goed',
'her',
'in',
'los',
'mee',
'mis',
'na',
'neer',
'om',
'onder',
'ont',
'op',
'over',
'samen',
'tegen',
'teleur',
'toe',
'tussen',
'uit',
'vast',
'ver',
'vol',
'voor',
'voorbe',
'vrij',
'weer',
'weg',
'zwart',
)
is_woord = set(string.ascii_lowercase).issuperset
def wikitionary():
for lijn in open(WIKI_BESTAND):
_, _, woord = lijn.strip().split(':', maxsplit=2)
if is_woord(woord):
yield woord
def freq():
for lijn in open(WOORD_BESTAND):
woord, _ = lijn.strip().rsplit(maxsplit=1)
yield woord
def werkwoorden(woorden):
alle = set()
resultaat = {}
for woord in woorden:
if not (woord.endswith('en') or woord.endswith('gaan')):
continue
alle.add(woord)
for v in VOORVOEGSELS:
if not woord.startswith(v):
continue
achtervoegsel = woord[len(v):]
if len(achtervoegsel) < MIN_ACHTERVOEGSEL:
continue
if achtervoegsel.startswith('ge') and achtervoegsel != 'geven':
continue
if achtervoegsel in BLACKLIST:
continue
resultaat.setdefault(achtervoegsel, []).append(woord)
for achtervoegsel, lijst in resultaat.items():
if achtervoegsel in alle:
lijst.append(achtervoegsel)
lijst.sort()
resultaat = {k: v for k, v in resultaat.items() if len(v) > 1}
return sorted(resultaat.items(), key=lambda v: len(v[1]), reverse=True)
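# Worked example of the grouping above (assuming none of the words are in the
# blacklist): with "aankomen", "uitkomen" and "komen" in the input, both prefixed
# forms map to the achtervoegsel "komen", and because "komen" itself occurs it is
# appended too, giving resultaat["komen"] == ["aankomen", "komen", "uitkomen"].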
def druck_werkwoorden(werkwoorden):
for i, (k, v) in enumerate(werkwoorden):
print(k)
for j in v:
print(' ', j)
print()
if i > AANTAL:
break
def classic_extract():
wiki = list(wikitionary())
ww = werkwoorden(wiki)
druck_werkwoorden(ww)
print()
print('----------------')
print()
for i, (k, v) in enumerate(ww):
if i > 10:
break
words = set(w for w in wiki if w.endswith(k))
print(k)
for missing in sorted(words.difference(v)):
print(' ', missing)
if __name__ == '__main__':
classic_extract()
| 20.231343 | 75 | 0.54408 | 300 | 2,711 | 4.83 | 0.426667 | 0.008282 | 0.008282 | 0.017943 | 0.023464 | 0.023464 | 0 | 0 | 0 | 0 | 0 | 0.017186 | 0.313169 | 2,711 | 133 | 76 | 20.383459 | 0.76101 | 0 | 0 | 0.130841 | 0 | 0 | 0.082995 | 0.007746 | 0 | 0 | 0 | 0 | 0 | 1 | 0.046729 | false | 0 | 0.009346 | 0 | 0.065421 | 0.074766 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d984472a164601e55d0346ed01540ed2b20dc88d | 832 | py | Python | config/tortoise.py | nythonore/fastapi-async | 82f34dd421e573f96af1953cc1f72be565743df8 | [
"MIT"
] | null | null | null | config/tortoise.py | nythonore/fastapi-async | 82f34dd421e573f96af1953cc1f72be565743df8 | [
"MIT"
] | null | null | null | config/tortoise.py | nythonore/fastapi-async | 82f34dd421e573f96af1953cc1f72be565743df8 | [
"MIT"
] | null | null | null | from tortoise.contrib.fastapi import register_tortoise as config_tortoise
from config.settings import settings
DB_URL = f'postgres://{settings.DB_USERNAME}:{settings.DB_PASSWORD}@{settings.DB_HOST}:{settings.DB_PORT}/{settings.DB_DATABASE}'
TORTOISE_MODULES = ['app.example.model']
# copy the list so that appending 'aerich.models' does not also mutate TORTOISE_MODULES
TORTOISE_ORM_MODULES = TORTOISE_MODULES.copy()
TORTOISE_ORM_MODULES.append('aerich.models')
TORTOISE_ORM = {
'connections': {
'default': DB_URL
},
'apps':
{
'models':
{
'models': TORTOISE_ORM_MODULES,
'default_connection': 'default'
}
}
}
def register_tortoise(app):
config_tortoise(
app,
db_url=DB_URL,
modules={'models': TORTOISE_MODULES},
generate_schemas=False,
add_exception_handlers=True,
)
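# Typical wiring from the application entry point (illustrative; the import
# path assumes this file lives at config/tortoise.py):
#     from fastapi import FastAPI
#     from config.tortoise import register_tortoise
#     app = FastAPI()
#     register_tortoise(app)
# Aerich migrations can then point at the config above, e.g.
#     aerich init -t config.tortoise.TORTOISE_ORM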
| 24.470588 | 129 | 0.638221 | 87 | 832 | 5.793103 | 0.436782 | 0.119048 | 0.107143 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.251202 | 832 | 33 | 130 | 25.212121 | 0.808989 | 0 | 0 | 0 | 0 | 0.037037 | 0.254808 | 0.140625 | 0 | 0 | 0 | 0 | 0 | 1 | 0.037037 | false | 0.037037 | 0.074074 | 0 | 0.111111 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d986985c57ee93c51e109add53b8920f894727ed | 982 | py | Python | setup.py | orange-kao/rpm-s3-mirror | 4a08cdb47de33045c5e5bc8be1c5ee17bc169d56 | [
"Apache-2.0"
] | null | null | null | setup.py | orange-kao/rpm-s3-mirror | 4a08cdb47de33045c5e5bc8be1c5ee17bc169d56 | [
"Apache-2.0"
] | 4 | 2020-05-08T03:36:15.000Z | 2022-03-31T10:51:18.000Z | setup.py | aiven/rpm-s3-mirror | 55f049a92258eed3cc863135a964c10c25a3c25a | [
"Apache-2.0"
] | null | null | null | # Copyright (c) 2020 Aiven, Helsinki, Finland. https://aiven.io/
from setuptools import setup
import version
version = version.get_project_version("rpm_s3_mirror/version.py")
setup(
name="rpm_s3_mirror",
packages=["rpm_s3_mirror"],
version=version,
description="Tool for syncing RPM repositories with S3",
license="Apache 2.0",
author="Aiven",
author_email="willcoe@aiven.io",
url="https://github.com/aiven/rpm-s3-mirror",
install_requires=[
"defusedxml",
"requests",
"python-dateutil",
"boto3",
"lxml",
],
entry_points={
"console_scripts": [
"rpm_s3_mirror = rpm_s3_mirror.__main__:main",
],
},
classifiers=[
"Intended Audience :: Developers",
"Intended Audience :: Information Technology",
"Intended Audience :: System Administrators",
"Programming Language :: Python :: 3.7",
"Natural Language :: English",
],
)
| 26.540541 | 65 | 0.619145 | 103 | 982 | 5.708738 | 0.621359 | 0.05102 | 0.112245 | 0.061224 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.021622 | 0.246436 | 982 | 36 | 66 | 27.277778 | 0.772973 | 0.063136 | 0 | 0.09375 | 0 | 0 | 0.479303 | 0.055556 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.0625 | 0 | 0.0625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d987308ee279d5897c812d0ddad5761b6c09fe3e | 8,668 | py | Python | pendium/filesystem.py | LuRsT/Pendium | f71b3db987853df919c14f0be4238df00852a9a7 | [
"Apache-2.0"
] | 5 | 2015-05-07T21:26:06.000Z | 2016-07-27T11:41:49.000Z | pendium/filesystem.py | LuRsT/Pendium | f71b3db987853df919c14f0be4238df00852a9a7 | [
"Apache-2.0"
] | 9 | 2017-12-21T20:22:16.000Z | 2019-07-24T13:04:35.000Z | pendium/filesystem.py | LuRsT/Pendium | f71b3db987853df919c14f0be4238df00852a9a7 | [
"Apache-2.0"
] | null | null | null | import codecs
from logging import getLogger
import os
from pendium import app
from pendium.plugins import IRenderPlugin
from pendium.plugins import ISearchPlugin
from yapsy.PluginManager import PluginManager
log = getLogger(__name__)
# Populate plugins
lib_path = os.path.abspath(os.path.dirname(__file__))
manager = PluginManager()
manager.setPluginPlaces([os.path.join(lib_path, "plugins")])
manager.setCategoriesFilter(
{"Search": ISearchPlugin, "Render": IRenderPlugin,}
)
manager.collectPlugins()
class PathExists(Exception):
pass
class PathNotFound(Exception):
pass
class CannotRender(Exception):
pass
class NoSearchPluginAvailable(Exception):
pass
class Wiki(object):
def __init__(
self,
basepath,
extensions={},
default_renderer=None,
plugins_config={},
has_vcs=False,
):
self.basepath = basepath
self.extensions = extensions
self.default_renderer = default_renderer
self.has_vcs = has_vcs
self.vcs = None
if self.has_vcs:
try:
from pendium import git_wrapper
self.vcs = git_wrapper.GitWrapper(basepath)
except:
raise Exception("You need to install GitPython")
# Plugin configuration
for name, configuration in plugins_config.items():
for cat in ["Search", "Render"]:
plugin = manager.getPluginByName(name, category=cat)
if not plugin:
continue
msg = "Configuring plugin: %s with :%s" % (name, configuration)
log.debug(msg)
plugin.plugin_object.configure(configuration)
def search(self, term):
best_plugin_score = 0
best_plugin = None
for plugin in manager.getPluginsOfCategory("Search"):
if plugin.plugin_object.search_speed > best_plugin_score:
best_plugin_score = plugin.plugin_object.search_speed
best_plugin = plugin
if best_plugin is None:
raise NoSearchPluginAvailable
log.debug("Searching with %s" % best_plugin.name)
return best_plugin.plugin_object.search(self, term)
def root(self):
return self.get(".")
def get(self, path):
completepath = os.path.normpath(os.path.join(self.basepath, path))
if os.path.isdir(completepath):
return WikiDir(self, path)
else:
return WikiFile(self, path)
def refresh(self):
if not self.has_vcs:
return ""
return self.vcs.refresh()
class WikiPath(object):
def __init__(self, wiki, path):
self.path = path
self.wiki = wiki
self.abs_path = os.path.join(wiki.basepath, path)
self.abs_path = os.path.normpath(self.abs_path)
self.name = os.path.split(self.path)[1]
self.is_node = False
self.is_leaf = False
if not os.path.exists(self.abs_path):
raise PathNotFound(self.abs_path)
def ancestor(self):
if self.path == "":
return None
ancestor_dir = os.path.split(self.path)[0]
return self.wiki.get(ancestor_dir)
def ancestors(self):
if self.ancestor():
return self.ancestor().ancestors() + [self.ancestor()]
return []
def items(self):
if not os.path.isdir(self.abs_path):
self = self.ancestor()
filenames = []
for f in os.listdir(self.abs_path):
if f.find(".") == 0:
continue
if os.path.splitext(f)[1][1:] in app.config["BLACKLIST_EXTENSIONS"]:
continue
complete_path = os.path.join(self.path, f)
filenames.append(self.wiki.get(complete_path))
return sorted(filenames, key=lambda Wiki: Wiki.is_leaf)
@property
def editable(self):
if app.config["EDITABLE"]:
return os.access(self.abs_path, os.W_OK)
return False
def delete(self):
top = self.abs_path
for root, dirs, files in os.walk(top, topdown=False):
for name in files:
log.debug("Will remove FILE: %s", os.path.join(root, name))
os.remove(os.path.join(root, name))
for name in dirs:
log.debug("Will remove DIR: %s", os.path.join(root, name))
os.rmdir(os.path.join(root, name))
if self.is_node:
log.debug("Will remove DIR: %s", self.abs_path)
os.rmdir(self.abs_path)
else:
log.debug("Will remove FILE: %s", self.abs_path)
os.remove(self.abs_path)
if self.wiki.has_vcs:
self.wiki.vcs.delete(path=self.path)
class WikiFile(WikiPath):
def __init__(self, *args, **kwargs):
super(WikiFile, self).__init__(*args, **kwargs)
self.is_leaf = True
self.extension = os.path.splitext(self.name)[1][1:]
self._content = ""
def renderer(self):
for plugin in manager.getPluginsOfCategory("Render"):
log.debug("Testing for plugin %s", plugin.plugin_object.name)
extensions = self.wiki.extensions.get(plugin.plugin_object.name, None)
if extensions is None:
continue # Try the next plugin
if self.extension in extensions:
log.debug(self.extension)
log.debug(plugin.plugin_object.name)
return plugin.plugin_object
# If no renderer found and binary, give up
if self.is_binary:
return None
# If is not binary and we have a default renderer
# return it
if self.wiki.default_renderer:
return self.wiki.default_renderer
return None
@property
def can_render(self):
return bool(self.renderer())
def render(self):
if self.can_render:
renderer = self.renderer()
return renderer.render(self.content())
# No renderer found
if self.is_binary:
return self.content(decode=False)
return self.content()
@property
def is_binary(self):
"""
Return true if the file is binary.
"""
fin = open(self.abs_path, "rb")
try:
CHUNKSIZE = 1024
while 1:
                # keep the raw bytes: decoding arbitrary binary data would
                # raise UnicodeDecodeError before the null-byte check runs
                chunk = fin.read(CHUNKSIZE)
                if b"\0" in chunk:  # Found null byte
return True
if len(chunk) < CHUNKSIZE:
break # Done
finally:
fin.close()
return False
@property
def refs(self):
"""
Special property for Git refs
"""
if self.wiki.has_vcs:
return self.wiki.vcs.file_refs(self.path)
return []
def ref(self, ref):
"""
Update file content with appropriate reference from git to display
older file versions
"""
try:
content = self.wiki.vcs.show(filepath=self.path, ref=ref)
self._content = content.decode("utf8")
return True
except:
return False
def content(self, content=None, decode=True):
"""
Helper method, needs refactoring
"""
if self._content and content is None:
return self._content
fp = open(self.abs_path, "r")
ct = fp.read()
if decode:
ct = ct
fp.close()
if not content:
self._content = ct
return ct
self._content = content
if content == ct:
return ct
def save(self, comment=None):
fp = codecs.open(self.abs_path, "w", "utf-8")
fp.write(self._content)
fp.close()
if self.wiki.has_vcs:
self.wiki.vcs.save(path=self.path, comment=comment)
class WikiDir(WikiPath):
def __init__(self, *args, **kwargs):
super(WikiDir, self).__init__(*args, **kwargs)
self.is_node = True
def create_file(self, filename):
new_abs_path = os.path.join(self.abs_path, filename)
if os.path.exists(new_abs_path):
raise PathExists(new_abs_path)
fp = open(new_abs_path, "w")
fp.close()
return self.wiki.get(os.path.join(self.path, filename))
def create_directory(self, name):
new_abs_path = os.path.join(self.abs_path, name)
if os.path.exists(new_abs_path):
raise PathExists(new_abs_path)
os.makedirs(new_abs_path)
np = self.wiki.get(os.path.join(self.path, name))
return np
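# Usage sketch (illustrative; the "Markdown" key is an assumed plugin name and
# must match a Render plugin discovered by the PluginManager above):
#     wiki = Wiki("/path/to/wiki", extensions={"Markdown": ["md", "markdown"]})
#     page = wiki.get("notes/todo.md")                    # a WikiFile
#     html = page.render() if page.can_render else page.content()
#     for child in wiki.root().items():                   # a WikiDir listing
#         print(child.name, child.is_leaf)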
| 27.782051 | 82 | 0.578565 | 1,034 | 8,668 | 4.72147 | 0.190522 | 0.03728 | 0.040557 | 0.017206 | 0.193773 | 0.122081 | 0.094224 | 0.055715 | 0.032773 | 0.020074 | 0 | 0.002884 | 0.320028 | 8,668 | 311 | 83 | 27.871383 | 0.825416 | 0.043955 | 0 | 0.201794 | 0 | 0 | 0.033015 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.103139 | false | 0.017937 | 0.035874 | 0.008969 | 0.318386 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d988d23ea27ce8a2b5e3d30a08db96c282196fd0 | 3,957 | py | Python | sabcom/helpers.py | blackrhinoabm/sabcom | ec0d9c37e11a8bd49352539f3f16ef322e1b5cf8 | [
"MIT"
] | 6 | 2020-05-21T11:42:27.000Z | 2020-10-20T03:00:22.000Z | sabcom/helpers.py | blackrhinoabm/sabcom | ec0d9c37e11a8bd49352539f3f16ef322e1b5cf8 | [
"MIT"
] | 2 | 2020-04-08T17:45:37.000Z | 2020-09-22T16:13:27.000Z | sabcom/helpers.py | blackrhinoabm/sabcom | ec0d9c37e11a8bd49352539f3f16ef322e1b5cf8 | [
"MIT"
] | 4 | 2020-04-10T14:18:34.000Z | 2020-10-31T16:18:30.000Z | import random
import numpy as np
import pandas as pd
import math
from sklearn import preprocessing
import scipy.stats as stats
def edge_in_cliq(edge, nodes_in_cliq):
if edge[0] in nodes_in_cliq:
return True
else:
return False
def edges_to_remove_neighbourhood(all_edges, neighbourhood_density, nbh_nodes):
neighbourhood_edges = [e for e in all_edges if edge_in_cliq(e, nbh_nodes)]
sample_size = int(len(neighbourhood_edges) * (1-neighbourhood_density))
# sample random edges
chosen_edges = random.sample(neighbourhood_edges, sample_size)
return chosen_edges
def what_neighbourhood(index, neighbourhood_nodes):
for n in neighbourhood_nodes:
if index in neighbourhood_nodes[n]:
return n
raise ValueError('Neighbourhood not found.')
def what_coordinates(neighbourhood_name, dataset):
for x in range(len(dataset)):
if neighbourhood_name in dataset[x]:
return dataset[x][1]['lon'], dataset[x][1]['lat'],
raise ValueError("Corresponding coordinates not found")
def what_informality(neighbourhood_name, dataset):
for x in range(len(dataset)):
if neighbourhood_name in dataset[x]:
try:
return dataset[x][1]['Informal_residential']
except:
return None
raise ValueError("Corresponding informality not found")
def confidence_interval(data, av):
sample_stdev = np.std(data)
sigma = sample_stdev/math.sqrt(len(data))
return stats.t.interval(alpha=0.95, df=24, loc=av, scale=sigma)
def generate_district_data(number_of_agents, path, max_districts=None):
"""
Transforms input data on informal residential, initial infections, and population and transforms it to
a list of organised data for the simulation.
:param number_of_agents: number of agents in the simulation, integer
:param max_districts: (optional) maximum amount of districts simulated, integer
:return: data set containing district data for simulation, list
"""
informal_residential = pd.read_csv('{}/f_informality.csv'.format(path))#.iloc[:-1]
inital_infections = pd.read_csv('{}/f_initial_cases.csv'.format(path), index_col=1)
inital_infections = inital_infections.sort_index()
population = pd.read_csv('{}/f_population.csv'.format(path))
# normalise district informality
x = informal_residential[['Informal_residential']].values.astype(float)
min_max_scaler = preprocessing.MinMaxScaler()
x_scaled = min_max_scaler.fit_transform(x)
informal_residential['Informal_residential'] = pd.DataFrame(x_scaled)
population['Informal_residential'] = informal_residential['Informal_residential']
# determine smallest district based on number of agents
smallest_size = population['Population'].sum() / number_of_agents
# generate data set for model input
districts_data = []
for i in range(len(population)):
if population['Population'].iloc[i] > smallest_size:
districts_data.append(
[int(population['WardID'].iloc[i]), {'Population': population['Population'].iloc[i],
#'lon': population['lon'].iloc[i],
#'lat': population['lat'].iloc[i],
'Informal_residential': population['Informal_residential'].iloc[i],
'Cases_With_Subdistricts':
inital_infections.loc[population['WardID'].iloc[i]][
'Cases'],
},
])
if max_districts is None:
max_districts = len(districts_data) # this can be manually shortened to study dynamics in some districts
return districts_data[:max_districts]
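if __name__ == "__main__":
    # Tiny illustration of the confidence-interval helper on synthetic numbers;
    # generate_district_data additionally needs the f_*.csv input files.
    sample = [0.9, 1.1, 1.0, 0.95, 1.05]
    print(confidence_interval(sample, np.mean(sample)))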
| 39.57 | 120 | 0.645438 | 458 | 3,957 | 5.382096 | 0.312227 | 0.092495 | 0.028398 | 0.061663 | 0.091684 | 0.060041 | 0.060041 | 0.060041 | 0.060041 | 0.060041 | 0 | 0.004108 | 0.261815 | 3,957 | 99 | 121 | 39.969697 | 0.839781 | 0.16275 | 0 | 0.063492 | 0 | 0 | 0.116371 | 0.013745 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.095238 | 0 | 0.349206 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d98cbe93e213130d35e4010a52b1592965b94b18 | 14,115 | py | Python | src/matching/games/hospital_resident.py | drvinceknight/matching | da18fc12c880a1292a04d06824b5c17e68349e83 | [
"MIT"
] | null | null | null | src/matching/games/hospital_resident.py | drvinceknight/matching | da18fc12c880a1292a04d06824b5c17e68349e83 | [
"MIT"
] | null | null | null | src/matching/games/hospital_resident.py | drvinceknight/matching | da18fc12c880a1292a04d06824b5c17e68349e83 | [
"MIT"
] | null | null | null | """ The HR solver and algorithm. """
from matching import Game, Matching
from matching import Player as Resident
from matching.players import Hospital
from .util import delete_pair, match_pair
class HospitalResident(Game):
""" A class for solving instances of the hospital-resident assignment
problem (HR).
In this case, a blocking pair is any resident-hospital pair that satisfies
**all** of the following:
- They are present in each other's preference lists;
- either the resident is unmatched, or they prefer the hospital to their
current match;
- either the hospital is under-subscribed, or they prefer the resident
to at least one of their current matches.
Parameters
----------
residents : list of Player
The residents in the matching game. Each resident must rank a subset of
those in :code:`hospitals`.
hospitals : list of Hospital
The hospitals in the matching game. Each hospital must rank all of (and
only) the residents which rank it.
Attributes
----------
matching : Matching or None
Once the game is solved, a matching is available as a :code:`Matching`
object with the hospitals as keys and their resident matches as values.
Initialises as :code:`None`.
blocking_pairs : list of (Player, Hospital) or None
Initialises as `None`. Otherwise, a list of the resident-hospital
blocking pairs.
"""
def __init__(self, residents=None, hospitals=None):
self.residents = residents
self.hospitals = hospitals
super().__init__()
self._check_inputs()
@classmethod
def create_from_dictionaries(
cls, resident_prefs, hospital_prefs, capacities
):
""" Create an instance of :code:`HospitalResident` from two preference
dictionaries and capacities. """
residents, hospitals = _make_players(
resident_prefs, hospital_prefs, capacities
)
game = cls(residents, hospitals)
return game
def solve(self, optimal="resident"):
""" Solve the instance of HR using either the resident- or
hospital-oriented algorithm. Return the matching. """
self._matching = Matching(
hospital_resident(self.residents, self.hospitals, optimal)
)
return self.matching
def check_validity(self):
""" Check whether the current matching is valid. """
self._check_resident_matching()
self._check_hospital_capacity()
self._check_hospital_matching()
return True
def check_stability(self):
""" Check for the existence of any blocking pairs in the current
matching, thus determining the stability of the matching. """
blocking_pairs = []
for resident in self.residents:
for hospital in self.hospitals:
if (
_check_mutual_preference(resident, hospital)
and _check_resident_unhappy(resident, hospital)
and _check_hospital_unhappy(resident, hospital)
):
blocking_pairs.append((resident, hospital))
self.blocking_pairs = blocking_pairs
return not any(blocking_pairs)
def _check_resident_matching(self):
""" Check that no resident is matched to an unacceptable hospital. """
errors = []
for resident in self.residents:
if (
resident.matching is not None
and resident.matching not in resident.prefs
):
errors.append(
ValueError(
f"{resident} is matched to {resident.matching} but "
"they do not appear in their preference list: "
f"{resident.prefs}."
)
)
if errors:
raise Exception(*errors)
return True
def _check_hospital_capacity(self):
""" Check that no hospital is over-subscribed. """
errors = []
for hospital in self.hospitals:
if len(hospital.matching) > hospital.capacity:
errors.append(
ValueError(
f"{hospital} is matched to {hospital.matching} which "
f"is over their capacity of {hospital.capacity}."
)
)
if errors:
raise Exception(*errors)
return True
def _check_hospital_matching(self):
""" Check that no hospital is matched to an unacceptable resident. """
errors = []
for hospital in self.hospitals:
for resident in hospital.matching:
if resident not in hospital.prefs:
errors.append(
ValueError(
f"{hospital} has {resident} in their matching but "
"they do not appear in their preference list: "
f"{hospital.prefs}."
)
)
if errors:
raise Exception(*errors)
return True
def _check_inputs(self):
""" Raise an error if any of the conditions of the game have been
broken. """
self._check_resident_prefs()
self._check_hospital_prefs()
def _check_resident_prefs(self):
""" Make sure that the residents' preferences are all subsets of the
available hospital names. Otherwise, raise an error. """
errors = []
for resident in self.residents:
if not set(resident.prefs).issubset(set(self.hospitals)):
errors.append(
ValueError(
f"{resident} has ranked a non-hospital: "
f"{set(resident.prefs)} != {set(self.hospitals)}"
)
)
if errors:
raise Exception(*errors)
return True
def _check_hospital_prefs(self):
""" Make sure that every hospital ranks all and only those residents
that have ranked it. Otherwise, raise an error. """
errors = []
for hospital in self.hospitals:
residents_that_ranked = [
res for res in self.residents if hospital in res.prefs
]
if set(hospital.prefs) != set(residents_that_ranked):
errors.append(
ValueError(
f"{hospital} has not ranked all the residents that "
f"ranked it: {set(hospital.prefs)} != "
f"{set(residents_that_ranked)}."
)
)
if errors:
raise Exception(*errors)
return True
def _check_mutual_preference(resident, hospital):
""" Determine whether two players each have a preference of the other. """
return resident in hospital.prefs and hospital in resident.prefs
def _check_resident_unhappy(resident, hospital):
""" Determine whether a resident is unhappy because they are unmatched, or
they prefer the hospital to their current match. """
return resident.matching is None or resident.prefers(
hospital, resident.matching
)
def _check_hospital_unhappy(resident, hospital):
""" Determine whether a hospital is unhappy because they are
under-subscribed, or they prefer the resident to at least one of their
current matches. """
    return len(hospital.matching) < hospital.capacity or any(
        hospital.prefers(resident, match) for match in hospital.matching
    )
def unmatch_pair(resident, hospital):
""" Unmatch a (resident, hospital)-pair. """
resident.unmatch()
hospital.unmatch(resident)
def hospital_resident(residents, hospitals, optimal="resident"):
""" Solve an instance of HR using an adapted Gale-Shapley algorithm. A
unique, stable and optimal matching is found for the given set of residents
and hospitals. The optimality of the matching is found with respect to one
party and is subsequently the worst stable matching for the other.
Parameters
----------
residents : list of Player
The residents in the game. Each resident must rank a non-empty subset
of the elements of ``hospitals``.
hospitals : list of Hospital
The hospitals in the game. Each hospital must rank all the residents
that have ranked them.
optimal : str, optional
Which party the matching should be optimised for. Must be one of
``"resident"`` and ``"hospital"``. Defaults to the former.
Returns
-------
matching : Matching
A dictionary-like object where the keys are the members of
``hospitals``, and the values are their matches ranked by preference.
"""
    if optimal == "resident":
        return resident_optimal(residents, hospitals)
    if optimal == "hospital":
        return hospital_optimal(hospitals)
    raise ValueError('`optimal` must be one of "resident" or "hospital".')
def resident_optimal(residents, hospitals):
""" Solve the instance of HR to be resident-optimal. The algorithm is as
follows:
0. Set all residents to be unmatched, and all hospitals to be totally
unsubscribed.
1. Take any unmatched resident with a non-empty preference list,
:math:`r`, and consider their most preferred hospital, :math:`h`. Match
them to one another.
2. If, as a result of this new matching, :math:`h` is now
over-subscribed, find the worst resident currently assigned to
:math:`h`, :math:`r'`. Set :math:`r'` to be unmatched and remove them
from :math:`h`'s matching. Otherwise, go to 3.
3. If :math:`h` is at capacity (fully subscribed) then find their worst
current match :math:`r'`. Then, for each successor, :math:`s`, to
:math:`r'` in the preference list of :math:`h`, delete the pair
:math:`(s, h)` from the game. Otherwise, go to 4.
4. Go to 1 until there are no such residents left, then end.
"""
free_residents = residents[:]
while free_residents:
resident = free_residents.pop()
hospital = resident.get_favourite()
match_pair(resident, hospital)
if len(hospital.matching) > hospital.capacity:
worst = hospital.get_worst_match()
unmatch_pair(worst, hospital)
free_residents.append(worst)
if len(hospital.matching) == hospital.capacity:
successors = hospital.get_successors()
for successor in successors:
delete_pair(hospital, successor)
if not successor.prefs:
free_residents.remove(successor)
    return {hospital: hospital.matching for hospital in hospitals}
def hospital_optimal(hospitals):
""" Solve the instance of HR to be hospital-optimal. The algorithm is as
follows:
0. Set all residents to be unmatched, and all hospitals to be totally
unsubscribed.
1. Take any hospital, :math:`h`, that is under-subscribed and whose
preference list contains any resident they are not currently assigned
to, and consider their most preferred such resident, :math:`r`.
2. If :math:`r` is currently matched, say to :math:`h'`, then unmatch
them from one another. In any case, match :math:`r` to :math:`h` and go
to 3.
3. For each successor, :math:`s`, to :math:`h` in the preference list of
:math:`r`, delete the pair :math:`(r, s)` from the game.
4. Go to 1 until there are no such hospitals left, then end.
"""
free_hospitals = hospitals[:]
while free_hospitals:
hospital = free_hospitals.pop()
resident = hospital.get_favourite()
if resident.matching:
curr_match = resident.matching
unmatch_pair(resident, curr_match)
if curr_match not in free_hospitals:
free_hospitals.append(curr_match)
match_pair(resident, hospital)
if len(hospital.matching) < hospital.capacity and [
res for res in hospital.prefs if res not in hospital.matching
]:
free_hospitals.append(hospital)
successors = resident.get_successors()
for successor in successors:
delete_pair(resident, successor)
if (
not [
res
for res in successor.prefs
if res not in successor.matching
]
and successor in free_hospitals
):
free_hospitals.remove(successor)
    return {hospital: hospital.matching for hospital in hospitals}
def _make_players(resident_prefs, hospital_prefs, capacities):
""" Make a set of residents and hospitals from the dictionaries given, and
add their preferences. """
resident_dict, hospital_dict = _make_instances(
resident_prefs, hospital_prefs, capacities
)
for resident_name, resident in resident_dict.items():
prefs = [hospital_dict[name] for name in resident_prefs[resident_name]]
resident.set_prefs(prefs)
for hospital_name, hospital in hospital_dict.items():
prefs = [resident_dict[name] for name in hospital_prefs[hospital_name]]
hospital.set_prefs(prefs)
residents = list(resident_dict.values())
hospitals = list(hospital_dict.values())
return residents, hospitals
def _make_instances(resident_prefs, hospital_prefs, capacities):
""" Create ``Player`` (resident) and ``Hospital`` instances for the names in
each dictionary. """
resident_dict, hospital_dict = {}, {}
for resident_name in resident_prefs:
resident = Resident(name=resident_name)
resident_dict[resident_name] = resident
for hospital_name in hospital_prefs:
capacity = capacities[hospital_name]
hospital = Hospital(name=hospital_name, capacity=capacity)
hospital_dict[hospital_name] = hospital
return resident_dict, hospital_dict
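
# A minimal, hedged usage sketch (added for illustration; the preference data
# below is hypothetical and not part of the original module). Note each
# hospital ranks exactly the residents that ranked it, as the checks require:
if __name__ == "__main__":
    resident_prefs = {"A": ["X", "Y"], "B": ["Y"], "C": ["X", "Y"]}
    hospital_prefs = {"X": ["A", "C"], "Y": ["A", "B", "C"]}
    capacities = {"X": 1, "Y": 2}
    game = HospitalResident.create_from_dictionaries(
        resident_prefs, hospital_prefs, capacities
    )
    matching = game.solve(optimal="resident")
    print(matching)  # hospital -> list of matched residents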
| 34.426829 | 80 | 0.616932 | 1,646 | 14,115 | 5.190765 | 0.136695 | 0.022823 | 0.009129 | 0.01264 | 0.384129 | 0.280197 | 0.20529 | 0.168539 | 0.149813 | 0.115871 | 0 | 0.001533 | 0.306624 | 14,115 | 409 | 81 | 34.511002 | 0.871462 | 0.365639 | 0 | 0.278049 | 0 | 0 | 0.064821 | 0.010882 | 0 | 0 | 0 | 0 | 0 | 1 | 0.097561 | false | 0 | 0.019512 | 0 | 0.209756 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d98e4516cf482bd1a8b30548c1119b56db7376b4 | 2,931 | py | Python | solutions/rank-1/prepare_data.py | mattmotoki/ashrae-great-energy-predictor-3-solution-analysis | 8a5260049d4537c57c37a78e77f2fba13c55177d | [
"MIT"
] | 48 | 2020-03-18T11:34:49.000Z | 2022-03-31T18:30:00.000Z | solutions/rank-1/prepare_data.py | mattmotoki/ashrae-great-energy-predictor-3-solution-analysis | 8a5260049d4537c57c37a78e77f2fba13c55177d | [
"MIT"
] | 40 | 2020-03-24T18:17:51.000Z | 2022-03-12T00:30:30.000Z | solutions/rank-1/prepare_data.py | mattmotoki/ashrae-great-energy-predictor-3-solution-analysis | 8a5260049d4537c57c37a78e77f2fba13c55177d | [
"MIT"
] | 24 | 2020-04-18T02:52:47.000Z | 2022-01-22T19:13:16.000Z | #!/usr/bin/env python
# coding: utf-8
# based on public kernel https://www.kaggle.com/corochann/ashrae-feather-format-for-fast-loading
import os
import random
import gc
import tqdm
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
def prepare(root, output):
train_df = pd.read_csv(os.path.join(root, 'train.csv'))
test_df = pd.read_csv(os.path.join(root, 'test.csv'))
building_meta_df = pd.read_csv(os.path.join(root, 'building_metadata.csv'))
sample_submission = pd.read_csv(os.path.join(root, 'sample_submission.csv'))
weather_train_df = pd.read_csv(os.path.join(root, 'weather_train.csv'))
weather_test_df = pd.read_csv(os.path.join(root, 'weather_test.csv'))
train_df['timestamp'] = pd.to_datetime(train_df['timestamp'])
test_df['timestamp'] = pd.to_datetime(test_df['timestamp'])
weather_train_df['timestamp'] = pd.to_datetime(weather_train_df['timestamp'])
weather_test_df['timestamp'] = pd.to_datetime(weather_test_df['timestamp'])
# # Save data in feather format
train_df.to_feather(os.path.join(output,'train.feather'))
test_df.to_feather(os.path.join(output,'test.feather'))
weather_train_df.to_feather(os.path.join(output,'weather_train.feather'))
weather_test_df.to_feather(os.path.join(output,'weather_test.feather'))
building_meta_df.to_feather(os.path.join(output,'building_metadata.feather'))
sample_submission.to_feather(os.path.join(output,'sample_submission.feather'))
# # Read data in feather format
train_df = pd.read_feather(os.path.join(output, 'train.feather'))
weather_train_df = pd.read_feather(os.path.join(output, 'weather_train.feather'))
test_df = pd.read_feather(os.path.join(output, 'test.feather'))
weather_test_df = pd.read_feather(os.path.join(output, 'weather_test.feather'))
building_meta_df = pd.read_feather(os.path.join(output, 'building_metadata.feather'))
sample_submission = pd.read_feather(os.path.join(output, 'sample_submission.feather'))
# # Count zero streak
train_df = train_df.merge(building_meta_df, on='building_id', how='left')
train_df = train_df.merge(weather_train_df, on=['site_id', 'timestamp'], how='left')
    train_df['black_count'] = 0
for bid in train_df.building_id.unique():
df = train_df[train_df.building_id==bid]
for meter in df.meter.unique():
dfm = df[df.meter == meter]
b = (dfm.meter_reading == 0).astype(int)
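            # zero-streak length per reading: the group id advances at every
            # non-zero reading, so a cumulative sum within each group counts
            # consecutive zeros, e.g. readings 0, 0, 5, 0 -> 1, 2, 0, 1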
train_df.loc[(train_df.building_id==bid) & (train_df.meter == meter), 'black_count'] = b.groupby((~b.astype(bool)).cumsum()).cumsum()
#train_df[train_df.building_id == 0].meter_reading.plot()
#train_df[train_df.building_id == 0].black_count.plot()
train_df.to_feather(os.path.join(output, 'train_black.feather'))
if __name__ == '__main__':
root = 'input'
output = 'processed'
prepare(root, output)
| 40.708333 | 145 | 0.716479 | 445 | 2,931 | 4.460674 | 0.197753 | 0.095214 | 0.095718 | 0.111335 | 0.609068 | 0.561713 | 0.467003 | 0.416625 | 0.285139 | 0.117884 | 0 | 0.001974 | 0.13579 | 2,931 | 71 | 146 | 41.28169 | 0.781682 | 0.107813 | 0 | 0 | 0 | 0 | 0.189708 | 0.070661 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022222 | false | 0 | 0.177778 | 0 | 0.2 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d98fb88b15b7a7bd1330a40dd1ecdb89f69e5b99 | 23,531 | py | Python | models/triangular_lattice.py | macthecadillac/Interacting-Fermions | 6122d2a7e67533b28e581929995ce8e2a2ad41fc | [
"BSD-3-Clause"
] | 1 | 2020-07-29T06:06:12.000Z | 2020-07-29T06:06:12.000Z | models/triangular_lattice.py | macthecadillac/Interacting-Fermions | 6122d2a7e67533b28e581929995ce8e2a2ad41fc | [
"BSD-3-Clause"
] | null | null | null | models/triangular_lattice.py | macthecadillac/Interacting-Fermions | 6122d2a7e67533b28e581929995ce8e2a2ad41fc | [
"BSD-3-Clause"
] | null | null | null | import copy
import functools
import os
import numpy as np
from scipy import sparse
from spinsys import constructors, half, dmrg, exceptions
from cffi import FFI
class SiteVector(constructors.PeriodicBCSiteVector):
def __init__(self, ordered_pair, Nx, Ny):
super().__init__(ordered_pair, Nx, Ny)
def angle_with(self, some_site):
"""Returns the angle * 2 between (some_site - self) with the
horizontal. Only works on nearest neighbors
"""
Δx, Δy = some_site - self
if Δx == 0:
if Δy != 0:
return -2 * np.pi / 3
elif Δy == 0:
if Δx != 0:
return 0
else:
return 2 * np.pi / 3
def a1_hop(self, stride):
vec = self.xhop(stride)
if vec == self:
raise exceptions.SameSite
return vec
def a2_hop(self, stride):
vec = self.xhop(-1 * stride).yhop(stride)
if vec == self:
raise exceptions.SameSite
return vec
def a3_hop(self, stride):
vec = self.yhop(-stride)
if vec == self:
raise exceptions.SameSite
return vec
def b1_hop(self, stride):
"""hop in the a1 - a3 aka b1 direction. Useful for second nearest
neighbor coupling interactions
"""
vec = self.xhop(stride).yhop(stride)
if vec == self:
raise exceptions.SameSite
return vec
def b2_hop(self, stride):
vec = self.xhop(-2 * stride).yhop(stride)
if vec == self:
raise exceptions.SameSite
return vec
def b3_hop(self, stride):
vec = self.b1_hop(-stride).b2_hop(-stride)
if vec == self:
raise exceptions.SameSite
return vec
def _neighboring_sites(self, strides, funcs):
neighbors = []
for stride in strides:
for func in funcs:
try:
neighbors.append(func(stride))
except exceptions.SameSite:
continue
return neighbors
@property
def nearest_neighboring_sites(self, all=False):
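        # NOTE: decorated with @property, so `all` can never actually be
        # passed by a caller; the default (forward hops only) is always used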
strides = [1, -1] if all else [1]
funcs = [self.a1_hop, self.a2_hop, self.a3_hop]
return self._neighboring_sites(strides, funcs)
@property
def second_neighboring_sites(self, all=False):
"""with the all option enabled the method will enumerate all
the sites that are second neighbors to the current site.
Otherwise it will only enumerate the sites along the b1, b2
and b3 directions
"""
strides = [1, -1] if all else [1]
funcs = [self.b1_hop, self.b2_hop, self.b3_hop]
return self._neighboring_sites(strides, funcs)
@property
def third_neighboring_sites(self, all=False):
strides = [2, -2] if all else [2]
funcs = [self.a1_hop, self.a2_hop, self.a3_hop]
return self._neighboring_sites(strides, funcs)
class SemiPeriodicBCSiteVector(SiteVector):
"""A version of SiteVector that is periodic only along the x
direction
"""
def __init__(self, ordered_pair, Nx, Ny):
super().__init__(ordered_pair, Nx, Ny)
def diff(self, other):
"""Finds the shortest distance from this site to the other"""
Δx = self.x - other.x
Δy = self.y - other.y
return (Δx, Δy)
def yhop(self, stride):
new_vec = copy.copy(self)
new_y = self.y + stride
if new_y // self.Ny == self.x // self.Ny:
new_vec.y = new_y
else:
raise exceptions.OutOfBoundsError("Hopping off the lattice")
return new_vec
@property
def neighboring_sites(self):
neighbors = []
funcs = [self.xhop, self.yhop]
for Δ in [1, -1]:
for func in funcs:
try:
neighbors.append(func(Δ).lattice_index)
except exceptions.OutOfBoundsError:
continue
try:
neighbors.append(self.xhop(Δ).yhop(-Δ).lattice_index)
except exceptions.OutOfBoundsError:
continue
return neighbors
@functools.lru_cache(maxsize=None)
def _generate_bonds(Nx, Ny):
N = Nx * Ny
vec = SiteVector((0, 0), Nx, Ny)
# range_orders = [set(), set(), set()] # sets de-duplicates the list of bonds
range_orders = [[], [], []]
for i in range(N):
nearest_neighbor = vec.nearest_neighboring_sites
second_neighbor = vec.second_neighboring_sites
third_neighbor = vec.third_neighboring_sites
neighbors = [nearest_neighbor, second_neighbor, third_neighbor]
for leap, bonds in enumerate(range_orders):
for n in neighbors[leap]:
# sort them so identical bonds will always have the same hash
bond = sorted((vec, n))
bonds.append(tuple(bond))
vec = vec.next_site()
return range_orders
@functools.lru_cache(maxsize=None)
def _gen_full_ops(N):
σ_p = constructors.raising()
σ_m = constructors.lowering()
σz = constructors.sigmaz()
p_mats = [half.full_matrix(σ_p, k, N) for k in range(N)]
m_mats = [half.full_matrix(σ_m, k, N) for k in range(N)]
z_mats = [half.full_matrix(σz, k, N) for k in range(N)]
return p_mats, m_mats, z_mats
def _gen_z_pm_ops(N, bonds):
"""generate the H_z and H_pm components of the Hamiltonian"""
H_pm = H_z = 0
p_mats, m_mats, z_mats = _gen_full_ops(N)
for bond in bonds:
site1, site2 = bond
i, j = site1.lattice_index, site2.lattice_index
H_pm += p_mats[i].dot(m_mats[j]) + m_mats[i].dot(p_mats[j])
H_z += z_mats[i].dot(z_mats[j])
return H_pm, H_z
@functools.lru_cache(maxsize=None)
def hamiltonian_dp_components(Nx, Ny):
"""Generate the reusable pieces of the hamiltonian"""
N = Nx * Ny
nearest, second, third = _generate_bonds(Nx, Ny)
H_pm1, H_z1 = _gen_z_pm_ops(N, nearest)
H_pm2, H_z2 = _gen_z_pm_ops(N, second)
H_pm3, H_z3 = _gen_z_pm_ops(N, third)
H_ppmm = H_pmz = 0
p_mats, m_mats, z_mats = _gen_full_ops(N)
for bond in nearest:
site1, site2 = bond
i, j = site1.lattice_index, site2.lattice_index
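        # bond-direction phase factor exp(i * 2θ), where 2θ is what
        # SiteVector.angle_with returns for this nearest-neighbor bond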
γ = np.exp(1j * site1.angle_with(site2))
H_ppmm += \
γ * p_mats[i].dot(p_mats[j]) + \
γ.conj() * m_mats[i].dot(m_mats[j])
H_pmz += 1j * (γ.conj() * z_mats[i].dot(p_mats[j]) -
γ * z_mats[i].dot(m_mats[j]) +
γ.conj() * p_mats[i].dot(z_mats[j]) -
γ * m_mats[i].dot(z_mats[j]))
return H_pm1, H_z1, H_ppmm, H_pmz, H_pm2, H_z2, H_z3, H_pm3
def hamiltonian_dp(Nx, Ny, J_pm=0, J_z=0, J_ppmm=0, J_pmz=0, J2=0, J3=0):
"""Generates hamiltonian for the triangular lattice model with
direct product
Parameters
--------------------
Nx: int
number of sites along the x-direction
Ny: int
number of sites along the y-direction
J_pm: float
J_+- parameter
J_z: float
J_z parameter
J_ppmm: float
J_++-- parameter
J_pmz: float
J_+-z parameter
J2: float
second nearest neighbor interaction parameter
J3: float
third nearest neighbor interaction parameter
Returns
--------------------
H: scipy.sparse.csc_matrix
"""
components = hamiltonian_dp_components(Nx, Ny)
H_pm1, H_z1, H_ppmm, H_pmz, H_pm2, H_z2, H_z3, H_pm3 = components
nearest_neighbor_terms = J_pm * H_pm1 + J_z * H_z1 + J_ppmm * H_ppmm + J_pmz * H_pmz
second_neighbor_terms = third_neighbor_terms = 0
    if J2 != 0:
        second_neighbor_terms = J2 * (H_pm2 + J_z / J_pm * H_z2)
    if J3 != 0:
        third_neighbor_terms = J3 * (H_pm3 + J_z / J_pm * H_z3)
return nearest_neighbor_terms + second_neighbor_terms + third_neighbor_terms
class DMRG_Hamiltonian(dmrg.Hamiltonian):
def __init__(self, Nx, Ny, J_pm=0, J_z=0, J_ppmm=0, J_pmz=0):
self.generators = {
'+': constructors.raising(),
'-': constructors.lowering(),
'z': constructors.sigmaz()
}
self.N = Nx * Ny
self.Nx = Nx
self.Ny = Ny
self.J_pm = J_pm
self.J_z = J_z
self.J_ppmm = J_ppmm
self.J_pmz = J_pmz
super().__init__()
def initialize_storage(self):
        init_block = sparse.csc_matrix(([], ([], [])), shape=(2, 2))
init_ops = self.generators
self.storage = dmrg.Storage(init_block, init_block, init_ops)
def newsite_ops(self, size):
return dict((i, sparse.kron(sparse.eye(size // 2), self.generators[i]))
for i in self.generators.keys())
# TODO: Inconsistent shapes error at runtime
def block_newsite_interaction(self, block_key):
block_side, curr_site = block_key
site = SemiPeriodicBCSiteVector.from_index(curr_site, self.Nx, self.Ny)
neighbors = [i for i in site.neighboring_sites if i < curr_site]
H_pm_new = H_z_new = H_ppmm_new = H_pmz_new = 0
for i in neighbors:
key = (block_side, i + 1)
block_ops = self.storage.get_item(key).ops
site_ops = self.generators
H_pm_new += \
sparse.kron(block_ops['+'], site_ops['-']) + \
sparse.kron(block_ops['-'], site_ops['+'])
H_z_new += sparse.kron(block_ops['z'], site_ops['z'])
H_ppmm_new += \
sparse.kron(block_ops['+'], site_ops['+']) + \
sparse.kron(block_ops['-'], site_ops['-'])
H_pmz_new += \
sparse.kron(block_ops['z'], site_ops['+']) + \
sparse.kron(block_ops['z'], site_ops['-']) + \
sparse.kron(block_ops['+'], site_ops['z']) + \
sparse.kron(block_ops['-'], site_ops['z'])
return self.J_pm * H_pm_new + self.J_z * H_z_new + \
self.J_ppmm * H_ppmm_new + self.J_pmz * H_pmz_new
##########################################################
### FFI wrapper code for functions implemented in Rust ###
##########################################################
ffi = FFI()
modpath = os.path.dirname(__file__)
rootdir = os.path.split(modpath)[0]
rust_dir = os.path.join(rootdir, "rust", "triangular_lattice_ext")
# Only define the following functions if the shared object is compiled or else
# Python is going to throw exceptions on import.
# The header file only exists if the Rust shared object is compiled.
if os.path.exists(os.path.join(rust_dir, "triangular_lattice_ext.h")):
with open(os.path.join(rust_dir, "triangular_lattice_ext.h")) as header:
# remove directives from header file since cffi can't process directives yet
h = [line for line in header.readlines() if not line[0] == "#"]
ffi.cdef(''.join(h))
_lib = ffi.dlopen(os.path.join(rust_dir, "target", "release",
"libtriangular_lattice_ext.so"))
class CoordMatrix:
"""A class that encapsulates the matrix and provides methods that would
help memoery management across the FFI boundary
"""
def __init__(self, mat):
"""Initializer
Parameters
--------------------
mat: CoordMatrix
"""
self.__obj = mat # the pointer to the pointers to the arrays
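            # wrap the Rust-owned buffers without copying; complex128 entries
            # are 16 bytes each and int32 indices 4 bytes each, hence the
            # len * 16 and len * 4 buffer sizes below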
self.data = np.frombuffer(ffi.buffer(mat.data.ptr, mat.data.len * 16),
np.complex128)
self.col = np.frombuffer(ffi.buffer(mat.col.ptr, mat.col.len * 4),
np.int32)
self.row = np.frombuffer(ffi.buffer(mat.row.ptr, mat.row.len * 4),
np.int32)
self.ncols = mat.ncols
self.nrows = mat.nrows
def __enter__(self):
"""For use with context manager"""
return self
def __exit__(self, exc_type, exc_value, traceback):
"""For use with context manager"""
self.data = None
self.col = None
self.row = None
_lib.request_free(self.__obj) # deallocates Rust object
self.__obj = None
def to_csc(self):
"""Returns a CSC matrix"""
return sparse.csc_matrix((self.data, (self.col, self.row)),
shape=(self.nrows, self.ncols))
def to_csr(self):
"""Returns a CSR matrix"""
return sparse.csr_matrix((self.data, (self.col, self.row)),
shape=(self.nrows, self.ncols))
def h_ss_z_consv_k(Nx, Ny, kx, ky, l):
"""construct the H_z matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
            the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
l: int
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.k_h_ss_z(Nx, Ny, kx, ky, l)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def h_ss_xy_consv_k(Nx, Ny, kx, ky, l):
"""construct the H_xy matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
            the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
l: int
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.k_h_ss_xy(Nx, Ny, kx, ky, l)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def h_ss_ppmm_consv_k(Nx, Ny, kx, ky, l):
"""construct the H_ppmm matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
            the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
l: int
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.k_h_ss_ppmm(Nx, Ny, kx, ky, l)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def h_ss_pmz_consv_k(Nx, Ny, kx, ky, l):
"""construct the H_pmz matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
            the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
l: int
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.k_h_ss_pmz(Nx, Ny, kx, ky, l)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def h_sss_chi_consv_k(Nx, Ny, kx, ky):
"""construct the H_chi matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
            the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.k_h_sss_chi(Nx, Ny, kx, ky)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def h_ss_z_consv_k_s(Nx, Ny, kx, ky, nup, l):
"""construct the H_z matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
            the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
nup: int
the total number of sites with a spin-up
l: int
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.ks_h_ss_z(Nx, Ny, kx, ky, nup, l)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def h_ss_xy_consv_k_s(Nx, Ny, kx, ky, nup, l):
"""construct the H_xy matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
the y-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
nup: int
the total number of sites with a spin-up
l: int
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.ks_h_ss_xy(Nx, Ny, kx, ky, nup, l)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def h_sss_chi_consv_k_s(Nx, Ny, kx, ky, nup):
"""construct the H_chi matrix in the given momentum configuration
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
the y-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
nup: int
Returns
--------------------
H: scipy.sparse.csr_matrix
"""
mat = _lib.ks_h_sss_chi(Nx, Ny, kx, ky, nup)
with CoordMatrix(mat) as coordmat:
H = coordmat.to_csr()
return H
def ss_z_consv_k(Nx, Ny, kx, ky, l):
"""construct the Σsz_i * sz_j operators with the given separation
with translational symmetry taken into account
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
l: int
the separation between sites: |i - j|
Returns
--------------------
ss_z: scipy.sparse.csr_matrix
"""
mat = _lib.k_ss_z(Nx, Ny, kx, ky, l)
with CoordMatrix(mat) as coordmat:
op = coordmat.to_csr()
return op
def ss_xy_consv_k(Nx, Ny, kx, ky, l):
"""construct the Σ(sx_i * sx_j + sy_i * sy_j) operators with the given
separation with translational symmetry taken into account
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
l: int
the separation between sites: |i - j|
Returns
--------------------
ss_xy: scipy.sparse.csr_matrix
"""
mat = _lib.k_ss_xy(Nx, Ny, kx, ky, l)
with CoordMatrix(mat) as coordmat:
op = coordmat.to_csr()
return op
def ss_z_consv_k_s(Nx, Ny, kx, ky, nup, l):
"""construct the Σsz_i * sz_j operators with the given separation
with translational symmetry taken into account
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
nup: int
the total number of sites with a spin-up
l: int
the separation between sites: |i - j|
Returns
--------------------
ss_z: scipy.sparse.csr_matrix
"""
mat = _lib.ks_ss_z(Nx, Ny, kx, ky, nup, l)
with CoordMatrix(mat) as coordmat:
op = coordmat.to_csr()
return op
def ss_xy_consv_k_s(Nx, Ny, kx, ky, nup, l):
"""construct the Σ(sx_i * sx_j + sy_i * sy_j) operators with the given
separation with translational symmetry taken into account
Parameters
--------------------
Nx: int
lattice length in the x-direction
Ny: int
lattice length in the y-direction
kx: int
the x-component of lattice momentum * Nx / 2π in a [0, 2π)
Brillouin zone
ky: int
the y-component of lattice momentum * Ny / 2π in a [0, 2π)
Brillouin zone
nup: int
the total number of sites with a spin-up
l: int
the separation between sites: |i - j|
Returns
--------------------
ss_xy: scipy.sparse.csr_matrix
"""
mat = _lib.ks_ss_xy(Nx, Ny, kx, ky, nup, l)
with CoordMatrix(mat) as coordmat:
op = coordmat.to_csr()
return op
def min_necessary_ks(Nx, Ny):
"""Returns the momentum that we absolutely need to compute
Parameters
--------------------
Nx: int
Ny: int
Returns
--------------------
list of ints
"""
ks = []
arrs = []
for kx in range(Nx):
for ky in range(Ny):
arr = np.outer(np.exp(2j * np.pi * kx * np.arange(Nx) / Nx),
np.exp(2j * np.pi * ky * np.arange(Ny) / Ny))
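            # momenta whose Bloch phase arrays are identical or complex
            # conjugates of an earlier one give equivalent sectors, so only
            # one representative per pair is kept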
for arr0 in arrs:
if np.allclose(arr0, arr) or np.allclose(arr0, arr.conjugate()):
break
else:
ks.append((kx, ky))
arrs.append(arr)
return ks
| 32.411846 | 88 | 0.537164 | 3,109 | 23,531 | 3.900289 | 0.116115 | 0.012865 | 0.011875 | 0.015834 | 0.614877 | 0.578921 | 0.541069 | 0.521854 | 0.507834 | 0.492495 | 0 | 0.013106 | 0.344992 | 23,531 | 725 | 89 | 32.456552 | 0.773633 | 0.33662 | 0 | 0.312694 | 0 | 0 | 0.012049 | 0.00738 | 0 | 0 | 0 | 0.001379 | 0 | 1 | 0.133127 | false | 0 | 0.021672 | 0.003096 | 0.287926 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
d99179f5f0c295d6288591b72b99cc96a11e545c | 5,223 | py | Python | python/tvm/tensor_graph/core2/nn/functional/convolution.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 22 | 2022-03-18T07:29:31.000Z | 2022-03-23T14:54:32.000Z | python/tvm/tensor_graph/core2/nn/functional/convolution.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | null | null | null | python/tvm/tensor_graph/core2/nn/functional/convolution.py | QinHan-Erin/AMOS | 634bf48edf4015e4a69a8c32d49b96bce2b5f16f | [
"Apache-2.0"
] | 2 | 2022-03-18T08:26:34.000Z | 2022-03-20T06:02:48.000Z | import tvm
from tvm.tensor_graph.core2.graph.concrete import Compute, Tensor
from .padding import zero_pad2d
######################################################################
# for functional, all states are inputs, data from inside functionals
# can only be constants
######################################################################
def conv2d_nchw(inputs, weight, bias=None, stride=1, padding=0, dilation=1, groups=1,
output_dtype="float32", requires_grad=False):
"""Convolution 2d NCHW layout
Args:
-----------------------------
inputs : Tensor
shape [batch, channel, height, width]
weight : Tensor
shape [out_channel, channel // groups, kernel_height, kernel_width]
bias : (optional:None) Tensor
shape [out_channel]
stride : (optional:1) int or tuple
padding : (optional:0) int or tuple
dilation: (optional:1) int
groups : (optional:1) int
-----------------------------
Returns:
-----------------------------
Tensor
shape [batch, out_channel, output_height, output_width]
-----------------------------
"""
batch_size, in_channel, in_h, in_w = inputs.shape
out_channel, channel_per_group, k_h, k_w = weight.shape
assert channel_per_group * groups == in_channel, "%d vs. %d" % (channel_per_group * groups, in_channel)
out_channel_per_group = out_channel // groups
assert out_channel_per_group * groups == out_channel
stride = (stride, stride) if isinstance(stride, (int, tvm.tir.IntImm)) else stride
padding = (padding, padding) if isinstance(padding, (int, tvm.tir.IntImm)) else padding
dilation = (dilation, dilation) if isinstance(dilation, (int, tvm.tir.IntImm)) else dilation
assert isinstance(stride, tuple) and len(stride) == 2
assert isinstance(padding, tuple) and len(padding) == 2
assert isinstance(dilation, tuple) and len(dilation) == 2
out_h = (in_h + 2 * padding[0] - dilation[0] * (k_h - 1) - 1) // stride[0] + 1
out_w = (in_w + 2 * padding[1] - dilation[1] * (k_w - 1) - 1) // stride[1] + 1
padded = zero_pad2d(inputs, padding=padding, output_dtype=output_dtype, requires_grad=requires_grad)
conv_out_shape = (batch_size, out_channel, out_h, out_w)
if bias is not None:
if groups > 1:
def _inner_conv2d_nchw(padded, weight, bias):
def _for_spatial(b, c, h, w):
def _for_reduce(rc, rw, rh):
return (padded[b, c // out_channel_per_group * channel_per_group + rc,
h * stride[0] + rh * dilation[0], w * stride[1] + rw * dilation[1]]
* weight[c, rc, rh, rw]) + bias[c] / (channel_per_group*k_w*k_h)
return _for_reduce, [channel_per_group, k_w, k_h], "sum"
return _for_spatial
conv_out = Compute(conv_out_shape, output_dtype, padded, weight, bias,
fhint=_inner_conv2d_nchw, name="conv2d_nchw", requires_grad=requires_grad)
return conv_out
else:
def _inner_conv2d_nchw(padded, weight, bias):
def _for_spatial(b, c, h, w):
def _for_reduce(rc, rw, rh):
return (padded[b, rc,
h * stride[0] + rh * dilation[0], w * stride[1] + rw * dilation[1]]
* weight[c, rc, rh, rw]) + bias[c] / (channel_per_group*k_w*k_h)
return _for_reduce, [channel_per_group, k_w, k_h], "sum"
return _for_spatial
conv_out = Compute(conv_out_shape, output_dtype, padded, weight, bias,
fhint=_inner_conv2d_nchw, name="conv2d_nchw", requires_grad=requires_grad)
return conv_out
else:
if groups > 1:
def _inner_conv2d_nchw(padded, weight):
def _for_spatial(b, c, h, w):
def _for_reduce(rc, rw, rh):
return (padded[b, c // out_channel_per_group * channel_per_group + rc,
h * stride[0] + rh * dilation[0], w * stride[1] + rw * dilation[1]]
* weight[c, rc, rh, rw])
return _for_reduce, [channel_per_group, k_w, k_h], "sum"
return _for_spatial
conv_out = Compute(conv_out_shape, output_dtype, padded, weight,
fhint=_inner_conv2d_nchw, name="conv2d_nchw", requires_grad=requires_grad)
return conv_out
else:
def _inner_conv2d_nchw(padded, weight):
def _for_spatial(b, c, h, w):
def _for_reduce(rc, rw, rh):
return (padded[b, rc,
h * stride[0] + rh * dilation[0], w * stride[1] + rw * dilation[1]]
* weight[c, rc, rh, rw])
return _for_reduce, [channel_per_group, k_w, k_h], "sum"
return _for_spatial
conv_out = Compute(conv_out_shape, output_dtype, padded, weight,
fhint=_inner_conv2d_nchw, name="conv2d_nchw", requires_grad=requires_grad)
return conv_out | 45.815789 | 107 | 0.551216 | 645 | 5,223 | 4.193798 | 0.141085 | 0.055453 | 0.083179 | 0.041405 | 0.55268 | 0.531608 | 0.509427 | 0.509427 | 0.509427 | 0.502773 | 0 | 0.016996 | 0.301551 | 5,223 | 114 | 108 | 45.815789 | 0.724507 | 0.125407 | 0 | 0.7 | 0 | 0 | 0.016571 | 0 | 0 | 0 | 0 | 0 | 0.071429 | 1 | 0.185714 | false | 0 | 0.042857 | 0.057143 | 0.457143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
793f2b848c3758a8f7dae311e7d721594f8e8f09 | 3,424 | py | Python | setup.py | HEmile/problog | 576b6fd305f72b12125111c8d4d62cf8a7bbda0f | [
"Apache-2.0"
] | 189 | 2019-05-27T08:20:10.000Z | 2022-03-28T09:29:22.000Z | setup.py | HEmile/problog | 576b6fd305f72b12125111c8d4d62cf8a7bbda0f | [
"Apache-2.0"
] | 60 | 2019-06-11T15:07:48.000Z | 2022-03-25T02:31:23.000Z | setup.py | HEmile/problog | 576b6fd305f72b12125111c8d4d62cf8a7bbda0f | [
"Apache-2.0"
] | 33 | 2019-07-03T13:14:24.000Z | 2022-02-20T01:07:15.000Z | #! /usr/bin/env python
import sys
import os
version_file = os.path.join(
os.path.abspath(os.path.dirname(__file__)), "problog/version.py"
)
version = {}
with open(version_file) as fp:
exec(fp.read(), version)
version = version["version"]
if __name__ == "__main__" and len(sys.argv) == 1:
from problog import setup as problog_setup
problog_setup.install()
elif __name__ == "__main__":
from setuptools import setup, find_packages
from setuptools.command.install import install
class ProbLogInstall(install):
def run(self):
install.run(self)
before_dir = os.getcwd()
sys.path.insert(0, self.install_lib)
from problog import setup as problog_setup
try:
problog_setup.install()
except Exception as err:
print("Optional ProbLog installation failed: %s" % err, file=sys.stderr)
os.chdir(before_dir)
package_data = {
"problog": [
"bin/darwin/cnf2dDNNF_wine",
"bin/darwin/dsharp",
"bin/darwin/maxsatz",
"bin/linux/dsharp",
"bin/linux/maxsatz",
"bin/source/maxsatz/maxsatz2009.c",
"bin/windows/dsharp.exe",
"bin/windows/maxsatz.exe",
"bin/windows/libgcc_s_dw2-1.dll",
"bin/windows/libstdc++-6.dll",
"web/*.py",
"web/editor_local.html" "web/editor_adv.html",
"web/js/problog_editor.js",
"library/*.pl",
"library/*.py",
"library/nlp4plp.d/*",
]
}
setup(
name="problog",
version=version,
description="ProbLog2: Probabilistic Logic Programming toolbox",
url="https://dtai.cs.kuleuven.be/problog",
author="ProbLog team",
author_email="anton.dries@cs.kuleuven.be",
license="Apache Software License",
classifiers=[
"Development Status :: 4 - Beta",
"License :: OSI Approved :: Apache Software License",
"Intended Audience :: Science/Research",
"Programming Language :: Python :: 3",
"Programming Language :: Python :: 3.6",
"Programming Language :: Python :: 3.7",
"Programming Language :: Python :: 3.8",
"Programming Language :: Prolog",
"Topic :: Scientific/Engineering :: Artificial Intelligence",
],
keywords="prolog probabilistic logic",
packages=find_packages(),
extras_require={"sdd": ["pysdd>=0.2.6"]},
entry_points={"console_scripts": ["problog=problog.tasks:main"]},
package_data=package_data,
cmdclass={"install": ProbLogInstall},
)
def increment_release(v):
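    # e.g. "2.1.0.4" -> "2.1.0.5" (illustrative version string)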
v = v.split(".")
if len(v) == 4:
v = v[:3] + [str(int(v[3]) + 1)]
else:
v = v[:4]
return ".".join(v)
def increment_dev(v):
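    # e.g. "2.1.0.4" -> "2.1.0.5.dev1" and "2.1.0.5.dev1" -> "2.1.0.5.dev2"
    # (illustrative version strings)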
v = v.split(".")
if len(v) == 4:
v = v[:3] + [str(int(v[3]) + 1), "dev1"]
else:
v = v[:4] + ["dev" + str(int(v[4][3:]) + 1)]
return ".".join(v)
def increment_version_dev():
v = increment_dev(version)
os.path.dirname(__file__)
with open(version_file, "w") as f:
f.write("version = '%s'\n" % v)
def increment_version_release():
v = increment_release(version)
with open(version_file, "w") as f:
f.write("version = '%s'\n" % v)
| 29.517241 | 88 | 0.561332 | 394 | 3,424 | 4.730964 | 0.388325 | 0.008584 | 0.053648 | 0.055794 | 0.151824 | 0.109442 | 0.109442 | 0.070815 | 0.070815 | 0.070815 | 0 | 0.015226 | 0.290304 | 3,424 | 115 | 89 | 29.773913 | 0.751852 | 0.006133 | 0 | 0.168421 | 0 | 0 | 0.313639 | 0.081717 | 0 | 0 | 0 | 0 | 0 | 1 | 0.052632 | false | 0 | 0.063158 | 0 | 0.147368 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79423433cdcc39041c7fd83b1754e656cc596c82 | 3,178 | py | Python | backend/api/models.py | AndyPaPaLeu/Disfactory | 4afc370ae6b0d526891fce2b1fe0b9c687309ed1 | [
"MIT"
] | null | null | null | backend/api/models.py | AndyPaPaLeu/Disfactory | 4afc370ae6b0d526891fce2b1fe0b9c687309ed1 | [
"MIT"
] | null | null | null | backend/api/models.py | AndyPaPaLeu/Disfactory | 4afc370ae6b0d526891fce2b1fe0b9c687309ed1 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import uuid
from django.conf import settings
from django.contrib.gis.db import models
from django.contrib.gis.geos import Point
from django.contrib.postgres.fields import JSONField
class Factory(models.Model):
"""Factories that are potential to be illegal."""
# List of fact_type & status
    factory_type_list = [
        ("1", "金屬"),  # metal
        ("2-1", "沖床、銑床、車床、鏜孔"),  # stamping, milling, turning, boring
        ("2-2", "焊接、鑄造、熱處理"),  # welding, casting, heat treatment
        ("2-3", "金屬表面處理、噴漆"),  # metal surface treatment, spray painting
        ("3", "塑膠加工、射出"),  # plastic processing, injection molding
        ("4", "橡膠加工"),  # rubber processing
        ("5", "非金屬礦物(石材)"),  # non-metallic minerals (stone)
        ("6", "食品"),  # food
        ("7", "皮革"),  # leather
        ("8", "紡織"),  # textile
        ("9", "其他"),  # others
    ]
    status_list = [
        ("D", "已舉報"),  # reported
        ("F", "資料不齊"),  # insufficient data
        ("A", "待審核"),  # pending review
    ]
# All Features
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
verbose_name="ID",
)
lat = models.FloatField()
lng = models.FloatField()
point = models.PointField(srid=settings.POSTGIS_SRID)
landcode = models.CharField(max_length=50, blank=True, null=True)
name = models.CharField(max_length=50, blank=True, null=True)
factory_type = models.CharField(max_length=3, choices=factory_type_list, default="9")
status = models.CharField(max_length=1, choices=status_list, default="A")
status_time = models.DateTimeField(auto_now_add=True)
created_at = models.DateTimeField(auto_now_add=True)
updated_at = models.DateTimeField(auto_now=True)
def save(self, *args, **kwargs):
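        # build a WGS84 (EPSG:4326) point from lng/lat, then reproject it to
        # the project's configured SRID before persisting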
self.point = Point(self.lng, self.lat, srid=4326)
self.point.transform(settings.POSTGIS_SRID)
super(Factory, self).save(*args, **kwargs)
class ReportRecord(models.Model):
"""Report records send by users.
`ReportRecord` will be queried in advanced by admins from
Citizen of the Earth, Taiwan. They will filter the most recent
records out every a few weeks to catch the bad guys.
"""
id = models.AutoField(primary_key=True)
factory = models.ForeignKey("Factory", on_delete=models.PROTECT)
user_ip = models.GenericIPAddressField(default="192.168.0.1", blank=True, null=True)
action_type = models.CharField(max_length=10) # PUT, POST
action_body = JSONField() # request body
created_at = models.DateTimeField(auto_now_add=True)
contact = models.CharField(max_length=64, blank=True, null=True)
others = models.CharField(max_length=1024, blank=True)
class Image(models.Model):
"""Images of factories that are uploaded by user."""
id = models.UUIDField(
primary_key=True,
default=uuid.uuid4,
editable=False,
)
factory = models.ForeignKey(
"Factory",
on_delete=models.PROTECT,
related_name="images",
blank=True,
null=True,
)
report_record = models.ForeignKey(
"ReportRecord",
on_delete=models.PROTECT,
blank=True,
null=True,
)
image_path = models.URLField(max_length=256) # get from Imgur
created_at = models.DateTimeField(auto_now_add=True)
# the DB saving time
orig_time = models.DateTimeField(blank=True, null=True)
# the actual photo taken time
| 30.854369 | 89 | 0.636249 | 406 | 3,178 | 4.866995 | 0.421182 | 0.036437 | 0.063765 | 0.08502 | 0.278846 | 0.236336 | 0.219636 | 0.219636 | 0.104251 | 0.060729 | 0 | 0.019192 | 0.22939 | 3,178 | 102 | 90 | 31.156863 | 0.787668 | 0.139711 | 0 | 0.226667 | 0 | 0 | 0.050483 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013333 | false | 0 | 0.066667 | 0 | 0.48 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7943f595c674438a1cfec4698c62343f1a8c742b | 656 | py | Python | infrastructure/crypto_ml/utils/_utils.py | ATCUWgithub/CryptoML | 6010c5daf7d985217fa76197b29331457a60a306 | [
"MIT"
] | 1 | 2020-02-18T00:38:16.000Z | 2020-02-18T00:38:16.000Z | infrastructure/crypto_ml/utils/_utils.py | ATCUWgithub/CryptoML | 6010c5daf7d985217fa76197b29331457a60a306 | [
"MIT"
] | null | null | null | infrastructure/crypto_ml/utils/_utils.py | ATCUWgithub/CryptoML | 6010c5daf7d985217fa76197b29331457a60a306 | [
"MIT"
] | 1 | 2020-02-18T00:39:12.000Z | 2020-02-18T00:39:12.000Z | import json as _json
import datetime as _datetime
def parse_timestamp(dataset, time_format="%Y-%m-%dT%H:%M:%S.000Z"):
for d in dataset:
d["timestamp"] = _datetime.datetime.strptime(d["timestamp"], time_format)
return dataset
def load_json(filename, time_format="%Y-%m-%dT%H:%M:%S.000Z"):
dictionary = dict()
with open(filename) as f:
dictionary = _json.load(f)
return parse_timestamp(dictionary, time_format)
def generate_config(dataset):
start_idx = 0
end_idx = len(dataset) - 1
return {
"test_start": dataset[start_idx]["timestamp"],
"test_end": dataset[end_idx]["timestamp"]
}
| 29.818182 | 81 | 0.660061 | 91 | 656 | 4.56044 | 0.406593 | 0.096386 | 0.053012 | 0.057831 | 0.101205 | 0.101205 | 0.101205 | 0.101205 | 0.101205 | 0 | 0 | 0.015209 | 0.198171 | 656 | 21 | 82 | 31.238095 | 0.773764 | 0 | 0 | 0 | 0 | 0 | 0.14939 | 0.067073 | 0 | 0 | 0 | 0 | 0 | 1 | 0.166667 | false | 0 | 0.111111 | 0 | 0.444444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79442688528877f19538302cd834c0bc231e8349 | 959 | py | Python | leetcode/two_numbers_sum.py | clnFind/DayDayAlgorithm | 5644a666a3d84547d8cf00031fc2e30273cc0e9a | [
"Apache-2.0"
] | null | null | null | leetcode/two_numbers_sum.py | clnFind/DayDayAlgorithm | 5644a666a3d84547d8cf00031fc2e30273cc0e9a | [
"Apache-2.0"
] | null | null | null | leetcode/two_numbers_sum.py | clnFind/DayDayAlgorithm | 5644a666a3d84547d8cf00031fc2e30273cc0e9a | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
class Solution(object):
"""
    Given nums = [2, 7, 11, 15] and target = 9,
    because nums[0] + nums[1] = 2 + 7 = 9,
    return [0, 1].
"""
    def twoSum(self, nums, target):
        """
        :type nums: List[int]
        :type target: int
        :rtype: List[int]
        """
        # brute force O(n^2): compare each pair of distinct indices; this also
        # handles duplicate values correctly (e.g. nums=[3, 3], target=6)
        for i in range(len(nums)):
            for j in range(i + 1, len(nums)):
                if nums[i] + nums[j] == target:
                    return i, j
        return None

    def two_sum(self, nums, target):
        # O(n) with a hash map of value -> index; fixes the bug where
        # `target - num` could match the current element itself
        seen = {}
        for i, num in enumerate(nums):
            val = target - num
            if val in seen:
                return seen[val], i
            seen[num] = i
        return None
if __name__ == '__main__':
l = [3, 4, 10, 2, 7]
target = 9
result = Solution().twoSum(l, target)
print(result)
result1 = Solution().two_sum(l, target)
print(result1)
| 21.795455 | 55 | 0.486966 | 127 | 959 | 3.574803 | 0.401575 | 0.013216 | 0.061674 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.040201 | 0.377477 | 959 | 43 | 56 | 22.302326 | 0.720268 | 0.168926 | 0 | 0.086957 | 0 | 0 | 0.010899 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.043478 | 0 | 0.347826 | 0.086957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794467ea5227d786240a4dc2c21fda99810bd1c3 | 1,162 | py | Python | bpcs/bpcs_steg_decode.py | BburnN123/bpcs | f53caede7e202ce07b51890f028b9caf73a22937 | [
"MIT"
] | 20 | 2017-04-25T21:07:24.000Z | 2022-03-30T11:11:47.000Z | bpcs/bpcs_steg_decode.py | BburnN123/bpcs | f53caede7e202ce07b51890f028b9caf73a22937 | [
"MIT"
] | 4 | 2016-04-06T01:19:27.000Z | 2020-09-26T18:38:29.000Z | bpcs/bpcs_steg_decode.py | BburnN123/bpcs | f53caede7e202ce07b51890f028b9caf73a22937 | [
"MIT"
] | 12 | 2017-04-02T23:10:46.000Z | 2022-03-21T03:43:55.000Z | import numpy as np
from .logger import log
from .array_grid import get_next_grid_dims
from .act_on_image import ActOnImage
from .array_message import write_conjugated_message_grids
from .bpcs_steg import arr_bpcs_complexity
def remove_message_from_vessel(arr, alpha, grid_size):
messages = []
nfound, nkept, nleft = 0, 0, 0
complexities = []
for dims in get_next_grid_dims(arr, grid_size):
nfound += 1
grid = arr[tuple(dims)]
cmplx = arr_bpcs_complexity(grid)
if cmplx < alpha:
nleft += 1
continue
complexities.append(cmplx)
nkept += 1
messages.append(grid)
assert nfound == nkept + nleft
log.critical('Found {0} out of {1} grids with complexity above {2}'.format(nkept, nfound, alpha))
return messages
class BPCSDecodeImage(ActOnImage):
def modify(self, alpha):
return remove_message_from_vessel(self.arr, alpha, (8,8))
def decode(infile, outfile, alpha=0.45):
x = BPCSDecodeImage(infile, as_rgb=True, bitplane=True, gray=True, nbits_per_layer=8)
grids = x.modify(alpha)
write_conjugated_message_grids(outfile, grids, alpha)
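
# Hedged usage sketch (the file names are illustrative assumptions):
#
#   decode("vessel.png", "message_out.txt", alpha=0.45)
#
# extracts the message grids hidden by the matching BPCS encoder, keeping only
# the 8x8 bit-plane grids whose complexity exceeds alpha.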
| 33.2 | 101 | 0.692771 | 160 | 1,162 | 4.83125 | 0.4375 | 0.023286 | 0.028461 | 0.03881 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.016447 | 0.215146 | 1,162 | 34 | 102 | 34.176471 | 0.83114 | 0 | 0 | 0 | 0 | 0 | 0.04475 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.1 | false | 0 | 0.2 | 0.033333 | 0.4 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7946dedb29967a5ff96a8d7cd312b2fd2bc51b15 | 6,859 | py | Python | notebooks/02_crash_severity.py | jennan/crash_prediction | 498b59704ed2aca61c78e4eb7c5558abe9edaffc | [
"MIT"
] | 3 | 2020-12-07T04:07:04.000Z | 2021-08-19T10:41:08.000Z | notebooks/02_crash_severity.py | jennan/crash_prediction | 498b59704ed2aca61c78e4eb7c5558abe9edaffc | [
"MIT"
] | 2 | 2020-12-10T19:12:02.000Z | 2020-12-10T19:12:08.000Z | notebooks/02_crash_severity.py | jennan/crash_prediction | 498b59704ed2aca61c78e4eb7c5558abe9edaffc | [
"MIT"
] | 2 | 2021-04-14T14:32:39.000Z | 2021-12-10T10:36:59.000Z | # # Exploration of the crash severity information in CAS data
#
# In this notebook, we will explore the severity of crashes, as it will be the
# target of our predictive models.
from pathlib import Path
import numpy as np
import pandas as pd
import scipy.stats as st
import matplotlib.pyplot as plt
import seaborn as sb
from crash_prediction import cas_data
# set seaborn default style
sb.set()
# But first, we ensure we have the data or download it if needed
dset_path = Path("..") / "data" / "cas_dataset.csv"
if not dset_path.exists():
dset_path.parent.mkdir(parents=True, exist_ok=True)
cas_data.download(dset_path)
# and load it.
dset = pd.read_csv(dset_path)
dset.head()
# The CAS dataset has 4 features that can be associated with the crash severity:
#
# - `crashSeverity`, severity of a crash, determined by the worst injury
# sustained in the crash at time of entry,
# - `fatalCount`, count of the number of fatal casualties associated with this
# crash,
# - `minorInjuryCount`, count of the number of minor injuries associated with
# this crash,
# - `seriousInjuryCount`, count of the number of serious injuries associated
# with this crash.
severity_features = [
"fatalCount",
"seriousInjuryCount",
"minorInjuryCount",
"crashSeverity",
]
fig, axes = plt.subplots(2, 2, figsize=(15, 12))
for ax, feat in zip(axes.flat, severity_features):
counts = dset[feat].value_counts(dropna=False)
counts.plot.bar(ylabel="# crashes", title=feat, ax=ax)
ax.set(yscale="log")
fig.tight_layout()
# To check the geographical distribution, we will focus on Auckland and replace
# the discrete levels of `crashSeverity` with numbers to ease plotting.
dset_auckland = dset[dset["X"].between(174.7, 174.9) & dset["Y"].between(-37, -36.8)]
mapping = {
"Non-Injury Crash": 1,
"Minor Crash": 2,
"Serious Crash": 3,
"Fatal Crash": 4,
}
dset_auckland = dset_auckland.replace({"crashSeverity": mapping})
# Given the data set imbalance, we plot the local maxima to better see the
# location of more severe car crashes.
fig, axes = plt.subplots(2, 2, figsize=(15, 15))
for ax, feat in zip(axes.flat, severity_features):
dset_auckland.plot.hexbin(
"X",
"Y",
feat,
gridsize=500,
reduce_C_function=np.max,
cmap="BuPu",
title=feat,
ax=ax,
sharex=False,
)
ax.set_xticklabels([])
ax.set_yticklabels([])
fig.tight_layout()
# Few remarks coming from these plots:
#
# - fatal counts are (hopefully) very low,
# - crashes with serious injuries are also very sparse,
# - crashes with minor injuries are denser and seem to follow major axes,
# - the crash severity feature looks like the most homogeneous feature, yet
# highlighting some roads more than others.
#
# The crash severity is probably a good go-to target, as it's quite
# interpretable and actionable. The corresponding ML problem is a supervised
# multi-class prediction problem.
# To simplify the problem, we can also just try to predict if a crash is going
# to involve an injury (minor, severe or fatal) or none. Here is how it would
# look like in Auckland
dset_auckland["injuryCrash"] = (dset_auckland["crashSeverity"] > 1) * 1.0
dset_auckland.plot.hexbin(
"X",
"Y",
"injuryCrash",
gridsize=500,
cmap="BuPu",
title="Crash with injury",
sharex=False,
figsize=(10, 10),
)
# Interestingly, the major axes do not pop up as saliently here, as we are
# averaging instead of taking the local maxima.
# This brings us to another question: is the fraction of crashes with injuries
# a constant fraction of the number of crashes in an area? This would imply
# that a simple binomial model can model locally binned data.
# We first discretize space into 0.01° wide cells and count the total number of
# crashes in each cell as well as the number of crashes with injuries.
# +
dset["X_bin"] = pd.cut(
dset["X"], pd.interval_range(dset.X.min(), dset.X.max(), freq=0.01)
)
dset["Y_bin"] = pd.cut(
dset["Y"], pd.interval_range(dset.Y.min(), dset.Y.max(), freq=0.01)
)
counts = (
dset.groupby(["X_bin", "Y_bin"], observed=True).size().reset_index(name="crash")
)
injury_counts = (
dset.groupby(["X_bin", "Y_bin"], observed=True)
.apply(lambda x: (x["crashSeverity"] != "Non-Injury Crash").sum())
.reset_index(name="injury")
)
counts = counts.merge(injury_counts)
# -
# For each number of crashes per cell, we can check the fraction of crashes
# with injuries. Here we see that cells with one or a few crashes have a nearly
# 50/50 chance of injuries, compared to cells with a larger number of
# accidents, where it goes down to about 20%.
injury_fraction = counts.groupby("crash").apply(
lambda x: x["injury"].sum() / x["crash"].sum()
)
ax = injury_fraction.plot(style=".", ylabel="fraction of injuries", figsize=(10, 7))
ax.set_xscale("log")
# Then we can also check how good is a binomial distribution at modeling binned
# data, using it to derive a 95% predictive interval.
ratio = counts["injury"].sum() / counts["crash"].sum()
xs = np.arange(1, counts["crash"].max() + 1)
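# ppf broadcasts the two quantiles against every possible crash count, giving
# a (2, n) array: row 0 is the 2.5% bound, row 1 the 97.5% bound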
pred_intervals = st.binom(xs, ratio).ppf([[0.025], [0.975]])
# +
fig, axes = plt.subplots(1, 2, figsize=(15, 7))
counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[0])
axes[0].fill_between(
xs,
pred_intervals[0],
pred_intervals[1],
alpha=0.3,
color="r",
label="95% equal-tail interval for binomial",
)
axes[0].legend()
counts.plot.scatter(x="crash", y="injury", alpha=0.3, c="b", s=2, ax=axes[1])
axes[1].fill_between(
xs,
pred_intervals[0],
pred_intervals[1],
alpha=0.3,
color="r",
label="95% equal-tail interval for binomial",
)
axes[1].legend()
axes[1].set_xscale("log")
axes[1].set_yscale("log")
# -
# The predictive interval seems to have poor coverage, overshooting the
# high-count regions and being too narrow for the regions with hundreds of
# crashes. We can compute the empirical coverage of these intervals to check this.
counts["covered"] = counts["injury"].between(
pred_intervals[0, counts["crash"] - 1], pred_intervals[1, counts["crash"] - 1]
)
print(f"95% predictive interval has {counts['covered'].mean() * 100:.2f}%.")
print("95% predictive interval coverage per quartile of crash counts:")
mask = counts["crash"] > 1
counts[mask].groupby(pd.qcut(counts.loc[mask, "crash"], 4))["covered"].mean()
# So it turns out that on a macro scale, the coverage of this simple model is
# quite good, but if we split by the number of crashes, the coverage isn't so
# good anymore for the cells with a higher number of crashes.
#
# Hence, including the number of crashes in a vicinity could be a relevant
# predictor for the probability of a crash with injury.
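# As a quick sketch of that idea (the `crash_density` column name below is
# ours, not part of the original data set), we can merge the per-cell crash
# counts back onto the individual records, so that each crash carries the
# total number of crashes observed in its 0.01° cell as a candidate feature.
dset_with_density = dset.merge(
    counts.rename(columns={"crash": "crash_density"})[
        ["X_bin", "Y_bin", "crash_density"]
    ],
    on=["X_bin", "Y_bin"],
    how="left",
)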
# ---
# ## Original computing environment
# !date -R
# !uname -a
# !pip freeze
| 30.896396 | 85 | 0.697478 | 1,064 | 6,859 | 4.446429 | 0.327068 | 0.018601 | 0.022194 | 0.010991 | 0.146269 | 0.113295 | 0.103149 | 0.103149 | 0.09089 | 0.059184 | 0 | 0.021118 | 0.178452 | 6,859 | 221 | 86 | 31.036199 | 0.818279 | 0.460417 | 0 | 0.245614 | 0 | 0 | 0.170799 | 0.006887 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.061404 | 0 | 0.061404 | 0.017544 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794737a97c176c9f701f94c89a9d3fa6ea1cba13 | 601 | py | Python | python/cartpole1.py | lusing/mljs | 4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8 | [
"MIT"
] | null | null | null | python/cartpole1.py | lusing/mljs | 4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8 | [
"MIT"
] | null | null | null | python/cartpole1.py | lusing/mljs | 4c708bb8e0759803ed94ead3e9cfadc3a97d6ed8 | [
"MIT"
] | null | null | null | import gym
def cartpole():
environment = gym.make('CartPole-v1')
environment.reset()
for i in range(1000):
# environment.render()
action = environment.action_space.sample()
observation, reward, done, info = environment.step(action)
print("Step {}:".format(i))
print("action: {}:".format(action))
print('observation: {}'.format(observation))
print('reward: {}'.format(reward))
print('done: {}'.format(done))
print('info: {}'.format(info))
if done:
break
if __name__ == '__main__':
cartpole()
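# Note: this uses the classic Gym API, where `step` returns four values.
# On Gym >= 0.26 (and Gymnasium), `step` instead returns five values
# (observation, reward, terminated, truncated, info) and `reset` returns
# (observation, info), so the unpacking above would need adjusting.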
| 28.619048 | 66 | 0.577371 | 61 | 601 | 5.540984 | 0.459016 | 0.065089 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.011161 | 0.254576 | 601 | 20 | 67 | 30.05 | 0.743304 | 0.033278 | 0 | 0 | 0 | 0 | 0.136442 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.117647 | 0.352941 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794855d07b967464fa463b2ba9dd7683a00f2311 | 3,466 | py | Python | kw3pan/pancakeswap/factory/core/pancakeswap_factory.py | kkristof200/py_web3_pancakeswap | ae9dc7021b7da2365ce675f29f89e103fe44d77f | [
"MIT"
] | 6 | 2021-05-09T12:43:37.000Z | 2021-12-07T01:56:02.000Z | kw3pan/pancakeswap/factory/core/pancakeswap_factory.py | kkristof200/py_web3_pancakeswap | ae9dc7021b7da2365ce675f29f89e103fe44d77f | [
"MIT"
] | null | null | null | kw3pan/pancakeswap/factory/core/pancakeswap_factory.py | kkristof200/py_web3_pancakeswap | ae9dc7021b7da2365ce675f29f89e103fe44d77f | [
"MIT"
] | null | null | null | # ------------------------------------------------------------ Imports ----------------------------------------------------------- #
# System
from typing import Optional
# Pip
from kw3 import WrappedContract, Web3
from kw3.constants import Constants as KW3Constants
# Local
from ._abi import pancakeswap_factory_abi
from ...liquidity_pool import PancakeswapLiquidityPool, PancakeswapBusdLiquidityPool, PancakeswapWbnbLiquidityPool
from ...constants import Constants
# -------------------------------------------------------------------------------------------------------------------------------- #
# --------------------------------------------------- class: PancakeswapFactory -------------------------------------------------- #
class PancakeswapFactory(WrappedContract):
# --------------------------------------------------------- Init --------------------------------------------------------- #
def __init__(
self,
web3: Web3
):
super().__init__(
web3=web3,
address=Constants.ADDRESS_PANCAKESWAP_FACTORY,
abi=pancakeswap_factory_abi
)
# ---------------------------------------------------- Public methods ---------------------------------------------------- #
# Forwarders
def liquidityPoolAddressesLength(self) -> int:
return self.functions.allPairsLength().call()
def liquidityPoolAddressAtIndex(
self,
index: int
) -> str:
return self.functions.allPairs(index).call()
def liquidityPoolAtIndex(
self,
index: int
) -> PancakeswapLiquidityPool:
return PancakeswapBusdLiquidityPool(
web3=self._web3,
address=self.liquidityPoolAddressAtIndex(
index=index
)
)
# Custom
def getPairAddress(
self,
address0: str,
address1: str
) -> Optional[str]:
return self.functions.getPair(
Web3.toChecksumAddress(address0),
Web3.toChecksumAddress(address1)
).call()
def getPair(
self,
address0: str,
address1: str
) -> Optional[PancakeswapLiquidityPool]:
return self.__getPair(
PancakeswapLiquidityPool,
address0=address0,
address1=address1
)
def getWbnbPair(
self,
token_address: str
) -> Optional[PancakeswapWbnbLiquidityPool]:
return self.__getPair(
PancakeswapWbnbLiquidityPool,
address0=KW3Constants.WBNB.ADDRESS,
address1=token_address
)
def getBusdPair(
self,
token_address: str
) -> Optional[PancakeswapBusdLiquidityPool]:
return self.__getPair(
PancakeswapBusdLiquidityPool,
address0=KW3Constants.BUSD.ADDRESS,
address1=token_address
)
# ---------------------------------------------------- Private methods --------------------------------------------------- #
def __getPair(
self,
_type,
address0: str,
address1: str
) -> Optional[PancakeswapLiquidityPool]:
pair_address = self.getPairAddress(address0, address1)
return _type(
self._web3,
pair_address
) if pair_address else None
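
    # --------------------------------------------------------- Example ------------------------------------------------------- #

    # A minimal usage sketch, not part of the original module. It assumes a
    # reachable BSC RPC endpoint (the URL below is a placeholder) and that the
    # re-exported Web3 behaves like web3.py's Web3:
    #
    #   web3 = Web3(Web3.HTTPProvider('https://bsc-dataseed.binance.org'))
    #   factory = PancakeswapFactory(web3)
    #   pool = factory.getWbnbPair('0x...token address...')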
# -------------------------------------------------------------------------------------------------------------------------------- # | 28.409836 | 132 | 0.467398 | 206 | 3,466 | 7.694175 | 0.291262 | 0.037855 | 0.039748 | 0.04164 | 0.126183 | 0.092114 | 0 | 0 | 0 | 0 | 0 | 0.012644 | 0.246971 | 3,466 | 122 | 133 | 28.409836 | 0.594636 | 0.265147 | 0 | 0.308642 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.111111 | false | 0 | 0.074074 | 0.08642 | 0.296296 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794b0eee657db516c725d2d35f15819da5d490ca | 17,648 | py | Python | functions_for_AirBnB.py | dalpengholic/Udacity_Boston-AirBNB-Data | ef918f4ddf8041a9f646e6fe786730f191746c2b | [
"MIT"
] | null | null | null | functions_for_AirBnB.py | dalpengholic/Udacity_Boston-AirBNB-Data | ef918f4ddf8041a9f646e6fe786730f191746c2b | [
"MIT"
] | null | null | null | functions_for_AirBnB.py | dalpengholic/Udacity_Boston-AirBNB-Data | ef918f4ddf8041a9f646e6fe786730f191746c2b | [
"MIT"
] | null | null | null | # The collection of functions for the Boston AirBnB dataset
# import necessary libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
from pandas.tseries.holiday import USFederalHolidayCalendar as calendar #To check holidays in the U.S
import time
import copy
def load_bnb_files():
'''Load AirBnB files'''
df_listing = pd.read_csv('./data/listings.csv')
df_calendar = pd.read_csv('./data/calendar.csv')
return df_listing, df_calendar
# Modify df_calendar for future work
# Special event : marathon, new academic season
def modify_calendar(df_calendar):
'''
    This function creates 'year', 'month', 'day', 'weekday', and 'week_number' columns from the 'date' column of df_calendar
    and removes the '$' string from the 'price' column.
Input : a Pandas dataframe having a date data column
Output : a Pandas dataframe having year, month, day, weekday, us_holiday columns
'''
# Split date column into year, month,day, weekday columns
# The day of the week with Monday=0, Sunday=6
    # Define the weekend as Friday and Saturday (weekday indices 4 and 5)
df_calendar['year'] = pd.DatetimeIndex(df_calendar['date']).year
df_calendar['month'] = pd.DatetimeIndex(df_calendar['date']).month
df_calendar['day'] = pd.DatetimeIndex(df_calendar['date']).day
df_calendar['weekday'] = pd.DatetimeIndex(df_calendar['date']).weekday
df_calendar['week_number'] = pd.DatetimeIndex(df_calendar['date']).week
    # regex=False ensures '$' is treated literally (as a regex, '$' is a no-op anchor)
    df_calendar['price'] = df_calendar['price'].str.replace('$', '', regex=False)
    df_calendar['price'] = df_calendar['price'].str.replace(',', '', regex=False)
    df_calendar['price'] = df_calendar['price'].astype(float)
# Add us_holiday column
cal = calendar()
holidays = cal.holidays(start=df_calendar.date.min(), end=df_calendar.date.max())
df_calendar['us_holiday'] = df_calendar.date.astype('datetime64').isin(holidays)
# Add weekend column #Friday, Saturday
weekend = [4,5]
df_calendar['weekend'] = df_calendar.weekday.isin(weekend)
# Replace values in weekday column
df_calendar['weekday'].replace({0:'Monday', 1:'Tuesday', 2:'Wednesday', 3:'Thursday',4:'Friday', 5:'Saturday', 6:'Sunday'}, inplace=True)
return df_calendar
def add_availabledays_price(df_listing, df_cal_modified):
'''
    This function creates the columns 'unavail_days', 'avail_days_weekends',
    'avail_days_weekdays', 'price_weekend', and 'price_weekday', which are calculated from df_cal_modified, on df_listing.
Input :
- A Pandas dataframe made from 'listings.csv' : df_listing
- A pandas dataframe modified by modify_calendar() : df_cal_modified
Output :
- The modified df_listing dataframe with new 'unavail_days', 'avail_days_weekends',
'avail_days_weekdays', 'price_weekend', and 'price_weekday' columns
'''
id_list = df_listing.id[:]
unavailable_days_array = np.array([])
avail_days_weekends_array = np.array([])
avail_days_weekdays_array = np.array([])
price_weekend_array = np.array([])
price_weekday_array = np.array([])
for i in np.nditer(id_list):
tmp = df_cal_modified[(df_cal_modified.listing_id == i)] # Make a dataframe coming from df_listing with a certain id
available_dict = tmp.available.value_counts().to_dict()
if 'f' in available_dict:
unavailable_days = tmp[tmp.available == 'f'].shape[0]
else:
unavailable_days = 0
if 't' in available_dict:
available_weekends = tmp[(tmp.available == 't') & (tmp.weekend == True)].shape[0]
available_weekdays = tmp[(tmp.available == 't') & (tmp.weekend == False)].shape[0]
price_weekend = tmp[(tmp.weekend == True) & (tmp.available == 't')].price.astype(float).describe()['mean']
price_weekday = tmp[(tmp.weekend == False) & (tmp.available == 't')].price.astype(float).describe()['mean']
else:
available_weekends = 0
available_weekdays = 0
price_weekend = np.nan
price_weekday = np.nan
unavailable_days_array = np.append(unavailable_days_array, unavailable_days)
avail_days_weekends_array = np.append(avail_days_weekends_array, available_weekends)
avail_days_weekdays_array = np.append(avail_days_weekdays_array, available_weekdays)
price_weekend_array = np.append(price_weekend_array, price_weekend)
price_weekday_array = np.append(price_weekday_array, price_weekday)
df_listing['unavail_days'] = pd.Series(unavailable_days_array)
df_listing['avail_days_weekends'] = pd.Series(avail_days_weekends_array)
df_listing['avail_days_weekdays'] = pd.Series(avail_days_weekdays_array)
df_listing['price_weekend'] = pd.Series(price_weekend_array)
df_listing['price_weekday'] = pd.Series(price_weekday_array)
return df_listing
def clean_listing_df(df_listing):
'''
This function aims to make the df_listing dataframe for data analysis by
- removing irrelevant columns
- changing object type columns to numeric columns or manipulating them using one hot encoding
- filling NaN values
    - creating an integrated_score_log column as the natural log of ('review_scores_rating' * 'number_of_reviews' + 1)
Input :
- A Pandas dataframe made from 'listings.csv' : df_listing
Output :
- Cleaned df_listing
'''
    # Drop columns having more than 50% NaN values. There were two reasons why I decided on 50% as the threshold for dropping columns:
    # 1. It keeps the dataframe easy to inspect and the meaning of the columns easy to check.
    # 2. It makes it easy to decide which ones have to be dropped.
    # The candidate columns to be dropped are 'notes', 'neighbourhood_group_cleansed', 'square_feet', 'weekly_price', 'monthly_price', 'security_deposit', 'has_availability', 'license', and 'jurisdiction_names'. Most of them are duplicates of other columns or irrelevant, except the 'security_deposit' column. I didn't impute with the mean or mode of the column because it can distort the real shape. I didn't do one-hot-encoding, to keep the dataframe straightforward. 'security_deposit' has 55 unique values.
df_missing = df_listing.isna().mean()
df_listing_modi1 = df_listing.drop(df_missing[df_missing>0.5].index.to_list(), axis=1)
    # Drop columns related to URLs and other irrelevant columns.
    # The URL and other columns are all unique or useless.
remove_list1 = ['listing_url', 'scrape_id', 'last_scraped', 'thumbnail_url', 'medium_url', 'picture_url', 'xl_picture_url', 'host_url',
'host_thumbnail_url', 'host_picture_url', 'country_code', 'country']
df_listing_modi1.drop(remove_list1, axis=1, inplace=True)
    # Drop columns because of data overlap [city, smart_location], a single value [state],
    # and wrong data [market, calendar_last_scraped]
remove_list2 = ['smart_location', 'state', 'name', 'summary', 'space', 'description','neighborhood_overview',
'transit','access','market','calendar_last_scraped']
df_listing_modi1.drop(remove_list2, axis=1, inplace=True)
# Modify 'house_rules' column to 'house_rules_exist_tf' having True value if there is a rule.
# False value, if there is no rule.
    # House rules are different for every host, so it is not practical to use one-hot-encoding. Instead,
    # the column is changed to a binary type: True if a house has rules, otherwise False.
    # This preserves some information, which is better than just dropping the column.
df_listing_modi1['house_rules_exist_tf']= pd.notna(df_listing_modi1.house_rules)
df_listing_modi1.drop(['house_rules'], axis=1, inplace=True)
    # Remove columns having over 1000 unique string values and irrelevant data
remove_list3 = ['interaction', 'host_name', 'host_since', 'host_about', 'street','first_review','experiences_offered','requires_license',
'last_review','host_location','neighbourhood_cleansed','experiences_offered','requires_license']
df_listing_modi2 = df_listing_modi1.drop(remove_list3, axis=1)
# Change the columns 'host_response_rate', 'host_acceptance_rate' to float type
columns_change_type = ['host_response_rate','host_acceptance_rate', 'price', 'cleaning_fee']
for i in columns_change_type:
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('%', '', regex=False)
        df_listing_modi2[i] = df_listing_modi2[i].str.replace('$', '', regex=False)
        df_listing_modi2[i] = df_listing_modi2[i].str.replace(',', '', regex=False)
        df_listing_modi2[i] = df_listing_modi2[i].astype(float)
# Modify and Split values in 'amenities' column
# Amenities can be one of reason that potential candidate might consider.
    df_listing_modi2.amenities = df_listing_modi2.amenities.str.replace("[{}]", "", regex=True)
df_amenities = df_listing_modi2.amenities.str.get_dummies(sep = ",")
df_amenities = df_amenities.add_prefix('amenities_')
df_listing_modi2 = pd.concat([df_listing_modi2, df_amenities], axis=1)
df_listing_modi2 = df_listing_modi2.drop('amenities', axis=1)
    # Use get_dummies for columns having fewer than 10 unique values
    # It is reasonable to use one-hot-encoding if the number of unique values is less than 10.
    # It doesn't lose information, and it keeps the dataframe simple.
columns_of_object_less10 =[]
for i,j in zip(df_listing_modi2.columns.to_list(), df_listing_modi2.dtypes.to_list()):
if j == object and len(df_listing_modi2[i].value_counts()) < 10 :
columns_of_object_less10.append(i)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_less10, prefix=columns_of_object_less10,
dummy_na=True)
    # Modify the 'extra_people' column to get the boolean 'extra_people_fee_tf' column
    # Instead of dropping, I decided to change the 'extra_people' column to a binary type to preserve some information
df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(str)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].str.replace('$', '', regex=False)
    df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].str.replace(',', '', regex=False)
df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].astype(float)
df_listing_modi2['extra_people'] = df_listing_modi2['extra_people'].replace(to_replace=0, value=np.nan)
df_listing_modi2['extra_people_fee_tf']= pd.notna(df_listing_modi2.extra_people)
df_listing_modi2 = df_listing_modi2.drop('extra_people', axis=1)
# Modify and Split values in 'host_verifications' column
df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("[", "")
df_listing_modi2.host_verifications = df_listing_modi2.host_verifications.str.replace("]", "")
df_host_verifications = df_listing_modi2.host_verifications.str.get_dummies(sep = ",")
df_host_verifications = df_host_verifications.add_prefix('host_verification_')
df_listing_modi2 = pd.concat([df_listing_modi2, df_host_verifications], axis=1)
df_listing_modi2 = df_listing_modi2.drop(['host_verifications'], axis=1)
df_listing_modi2 = df_listing_modi2.drop(['host_neighbourhood'], axis=1)
    # Modify the 'calendar_updated' column
    # Instead of dropping, I decided to change the 'calendar_updated' column to a binary type (updated within a week or not)
    # to preserve some information
df_listing_modi2["calendar_updated_1weekago"] = np.where(df_listing_modi2['calendar_updated'].str.contains(
"days|yesterday|today|a week ago")==True, 'yes', 'more_than_1week')
df_listing_modi2 = df_listing_modi2.drop(['calendar_updated'], axis=1)
# Use get_dummies for the columns 'neighbourhood', 'city', 'zipcode', 'property_type'
tmp = df_listing_modi2.columns.to_list()
tmp1 = df_listing_modi2.dtypes.to_list()
columns_of_object_over10 =[]
for i,j in zip(tmp,tmp1):
if j == object and len(df_listing_modi2[i].value_counts()) > 10 :
columns_of_object_over10.append(i)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=columns_of_object_over10,
prefix=columns_of_object_over10, dummy_na=True)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'],
prefix=['calendar_updated_1weekago','house_rules_exist_tf','extra_people_fee_tf'], dummy_na=True)
df_listing_modi2["host_response_rate_100"] = np.where(df_listing_modi2['host_response_rate'] ==100, True, False)
df_listing_modi2["host_acceptance_rate_100"] = np.where(df_listing_modi2['host_acceptance_rate'] ==100, True, False)
df_listing_modi2 = df_listing_modi2.drop(['host_response_rate','host_acceptance_rate','reviews_per_month'], axis=1)
    # bathrooms, bedrooms, beds, cleaning_fee, review_scores_rating, review_... : fillna with mean value
    # The empty cells are filled with the mean values of the corresponding columns. Because these are numerical columns,
    # I thought imputing with mean values is better than dropping or one-hot-encoding
columns1 = ['bathrooms','bedrooms','beds','cleaning_fee','review_scores_rating','review_scores_accuracy','review_scores_cleanliness','review_scores_checkin',
'review_scores_communication','review_scores_location','review_scores_value']
df_listing_modi2[columns1] = df_listing_modi2[columns1].fillna(df_listing_modi2.mean())
df_listing_modi2.price_weekend.fillna(df_listing_modi2.price, inplace=True)
df_listing_modi2.price_weekday.fillna(df_listing_modi2.price, inplace=True)
df_listing_modi2['integrated_score_log'] = np.log(df_listing_modi2['review_scores_rating']*df_listing_modi2['number_of_reviews']+1)
df_listing_modi2 = pd.get_dummies(df_listing_modi2, columns=['host_response_rate_100','host_acceptance_rate_100'],
prefix=['host_response_rate_100','host_acceptance_rate_100'])
df_listing_modi2 = df_listing_modi2.drop(['id', 'host_id', 'latitude', 'longitude','price','host_listings_count','host_total_listings_count','maximum_nights'], axis=1)
return df_listing_modi2
def conditioning_listing_df(df_listing_modi2):
'''
This function is for conditioning a dataframe returned by the funtion 'clean_listing_df(df_listing)''
Input :
- A Pandas dataframe came from the function 'clean_listing_df(df_listing)''
Output :
- Cleaned df_listing_modi2 : df_listing_modi3
'''
threshold_80 = df_listing_modi2.integrated_score_log.quantile(0.8)
condition = [df_listing_modi2['integrated_score_log'] == 0, df_listing_modi2['integrated_score_log'] >= threshold_80]
label_list = ['poor','high']
df_listing_modi2['y_label'] = np.select(condition, label_list, default='normal')
# Drop columns related to 'y_label' column
# Without dropping, the remained columns affect model's prediction
df_listing_modi3 = df_listing_modi2.drop(['integrated_score_log','number_of_reviews','review_scores_rating', 'review_scores_value',
'review_scores_communication','review_scores_accuracy','review_scores_checkin','review_scores_cleanliness',
'review_scores_location', 'availability_30','availability_60', 'availability_90','availability_365','calculated_host_listings_count'], axis=1)
return df_listing_modi3
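
# A minimal end-to-end sketch of the preprocessing pipeline defined above,
# assuming './data/listings.csv' and './data/calendar.csv' exist:
#
# df_listing, df_calendar = load_bnb_files()
# df_cal_modified = modify_calendar(df_calendar)
# df_listing = add_availabledays_price(df_listing, df_cal_modified)
# df_listing_modi3 = conditioning_listing_df(clean_listing_df(df_listing))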
def investigate(df_listing_scaled, pca, i):
'''
    This function checks, for a given PCA component, which original features are strongly related to it
Input :
- Dataframe : df_listing_scaled a dataframe scaled by StandardScaler()
- pca instance
    - i : The index of the pca component
Output :
    - pos_list : Original features having a positive relationship with the
    corresponding pca component, sorted in order of importance
    - neg_list : Original features having a negative relationship with the
    corresponding pca component, sorted in order of importance
'''
pos_list =[]
neg_list =[]
feature_names = list(df_listing_scaled.columns)
weights_pca = copy.deepcopy(pca.components_[i])
combined = list(zip(feature_names, weights_pca))
combined_sorted= sorted(combined, key=lambda tup: tup[1], reverse=True)
tmp_list = [list(x) for x in combined_sorted]
tmp_list = [(x[0],"{0:.3f}".format(x[1])) for x in tmp_list]
print("positive to pca{}:".format(i), tmp_list[0:10])
print()
print("negative to pca{}:".format(i), tmp_list[-1:-11:-1])
print()
for j in range(0,10):
pos_list.append(tmp_list[j][0])
for k in range(1,11):
neg_list.append(tmp_list[-k][0])
return pos_list, neg_list
def check_difference(pos_list, neg_list, df_listing_poor, df_listing_high):
'''
    Return a dataframe comparing, between the 'high' and 'poor' groups, the mean values
    of the original features that are strongly related to a corresponding pca component.
'''
data_pos = [[df_listing_high[x].mean(), df_listing_poor[x].mean()] for x in pos_list]
data_neg = [[df_listing_high[x].mean(), df_listing_poor[x].mean()] for x in neg_list]
tmp_pos = pd.DataFrame(data=data_pos , index=pos_list, columns=['high', 'poor'])
tmp_neg = pd.DataFrame(data=data_neg , index=neg_list, columns=['high', 'poor'])
tmp_both = pd.concat([tmp_pos, tmp_neg])
tmp_both["difference"] = tmp_both.high - tmp_both.poor
tmp_both["difference"] = tmp_both["difference"].abs()
result = tmp_both.sort_values(by=['difference'], ascending=False)
return result
| 54.807453 | 501 | 0.716228 | 2,438 | 17,648 | 4.889664 | 0.183757 | 0.095126 | 0.099824 | 0.019126 | 0.383022 | 0.28714 | 0.233957 | 0.20812 | 0.178592 | 0.159718 | 0 | 0.016807 | 0.174014 | 17,648 | 321 | 502 | 54.978193 | 0.800988 | 0.301111 | 0 | 0.023529 | 0 | 0 | 0.189481 | 0.04878 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041176 | false | 0 | 0.041176 | 0 | 0.123529 | 0.023529 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794b69e64ae775672890ac0f8ee3c75b24418261 | 2,898 | py | Python | src/junction/markdown/info_panels.py | explody/Junction | 700df9385fceda00d6830816606d8854dc9cef7b | [
"MIT"
] | 16 | 2020-04-28T07:03:26.000Z | 2022-03-05T14:26:40.000Z | src/junction/markdown/info_panels.py | explody/Junction | 700df9385fceda00d6830816606d8854dc9cef7b | [
"MIT"
] | 14 | 2020-03-19T04:32:18.000Z | 2021-03-05T23:54:47.000Z | src/junction/markdown/info_panels.py | explody/Junction | 700df9385fceda00d6830816606d8854dc9cef7b | [
"MIT"
] | 3 | 2021-01-19T18:39:00.000Z | 2022-02-14T23:51:07.000Z | from typing import List, Any
from markdown import Markdown
from markdown.extensions import Extension
from markdown.blockprocessors import BlockProcessor
import re
import xml.etree.ElementTree as etree
class InfoPanelExtension(Extension):
"""Markdown extension for rendering the Confluence info panel macro. Only supports
the "original" info panels AKA info (blue), success (green), warning (yellow), and error (red).
Example:
```
Normal, introductory paragraph.
Warning: info panels like this must be isolated into their own blocks with surrounding blank lines.
This will be a plain old paragraph, and not included in the warning above.
```
"""
def extendMarkdown(self, md: Markdown) -> None:
md.registerExtension(self)
md.parser.blockprocessors.register(
InfoPanelBlockProcessor(
"Info:", "info", "42afc5c4-fb53-4483-9f1a-a87a7ad033e6", md.parser
),
"info-panel",
25,
)
md.parser.blockprocessors.register(
InfoPanelBlockProcessor(
"Success:", "tip", "d60a142d-bc62-4f37-a091-7254c4472bdf", md.parser
),
"success-panel",
25,
)
md.parser.blockprocessors.register(
InfoPanelBlockProcessor(
"Warning:", "note", "9e14a573-943e-4691-919b-a9f6a389da71", md.parser
),
"warning-panel",
25,
)
md.parser.blockprocessors.register(
InfoPanelBlockProcessor(
"Error:", "warning", "2e759c9c-11f1-4959-82e7-901a2dc737d7", md.parser
),
"error-panel",
25,
)
class InfoPanelBlockProcessor(BlockProcessor):
def __init__(
self, prefix: str, name: str, macro_id: str, *args: Any, **kwargs: Any
):
self._prefix = prefix
self._block_re = re.compile(
r"\s*{}.*".format(prefix), re.MULTILINE | re.DOTALL | re.VERBOSE
)
self._name = name
self._macro_id = macro_id
super().__init__(*args, **kwargs)
def test(self, parent: etree.Element, block: str) -> bool:
return bool(self._block_re.match(block))
def run(self, parent: etree.Element, blocks: List[str]) -> None:
        # Note: str.lstrip(prefix) would strip any characters from the prefix
        # character set rather than the prefix itself, so slice it off instead.
        raw_content = blocks.pop(0).lstrip()[len(self._prefix):].lstrip()
info_panel = etree.SubElement(
parent,
"ac:structured-macro",
{
"ac:name": self._name,
"ac:schema-version": "1",
"ac:macro-id": self._macro_id,
},
)
rich_text_body = etree.SubElement(info_panel, "ac:rich-text-body")
self.parser.parseChunk(rich_text_body, raw_content)
info_panel.tail = "\n"
def makeExtension(**kwargs: Any) -> InfoPanelExtension:
return InfoPanelExtension(**kwargs)
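
# A minimal usage sketch (not part of the original module): registering the
# extension and rendering a warning panel to Confluence storage format.
def _example() -> None:
    md = Markdown(extensions=[InfoPanelExtension()])
    print(md.convert("Warning: always take a backup before migrating."))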
| 32.931818 | 103 | 0.596963 | 301 | 2,898 | 5.644518 | 0.448505 | 0.037669 | 0.05415 | 0.072984 | 0.139494 | 0.10771 | 0.10771 | 0 | 0 | 0 | 0 | 0.046715 | 0.29089 | 2,898 | 87 | 104 | 33.310345 | 0.780049 | 0.138716 | 0 | 0.242424 | 0 | 0 | 0.129019 | 0.058608 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075758 | false | 0 | 0.090909 | 0.030303 | 0.227273 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794c1314bf22e9986c1038e23ccfa6cf2ec03b66 | 5,096 | py | Python | ppo.py | ajleite/basic-ppo | e9d823275dda3c376e3e0f7d66e8dfb815b434d8 | [
"MIT"
] | 2 | 2020-06-27T11:44:19.000Z | 2022-01-11T21:23:01.000Z | ppo.py | ajleite/basic-ppo | e9d823275dda3c376e3e0f7d66e8dfb815b434d8 | [
"MIT"
] | null | null | null | ppo.py | ajleite/basic-ppo | e9d823275dda3c376e3e0f7d66e8dfb815b434d8 | [
"MIT"
] | null | null | null | #!/usr/bin/python3
# Copyright 2019 Abe Leite
# Based on "Proximal Policy Optimization Algorithms", Schulman et al 2017
# For the benefit of my fellow CSCI-B 659 students
# While I hope that this code is helpful I will not vouch for its total accuracy;
# my primary aim here is to elucidate the ideas from the paper.
import sys
import tensorflow as tf
import gym
ACTORS = 8
N_CYCLES = 10000
LEARNING_RATE = 0.00025
CYCLE_LENGTH = 128
BATCH_SIZE = CYCLE_LENGTH*ACTORS
CYCLE_EPOCHS = 3
MINIBATCH = 32*ACTORS
GAMMA = 0.99
EPSILON = 0.1
class DiscretePPO:
def __init__(self, V, pi):
''' V and pi are both keras (Sequential)s.
V maps state to single scalar value;
pi maps state to discrete probability distribution on actions. '''
self.V = V
self.pi = pi
self.old_pi = tf.keras.models.clone_model(self.pi)
self.optimizer = tf.keras.optimizers.Adam(LEARNING_RATE)
@tf.function
def pick_action(self, S):
        # pi outputs probabilities (softmax), but tf.random.categorical
        # expects logits, so take the log of the probabilities first.
        return tf.random.categorical(tf.math.log(self.pi(tf.expand_dims(S, axis=0))), 1)[0, 0]
@tf.function
def train_minibatch(self, SARTS_minibatch):
S, A, R, T, S2 = SARTS_minibatch
next_V = tf.where(T, tf.zeros((MINIBATCH,)), self.V(S2))
next_V = tf.stop_gradient(next_V)
advantage = R + GAMMA * next_V - self.V(S)
V_loss = tf.reduce_sum(advantage ** 2)
V_gradient = tf.gradients(V_loss, self.V.weights)
self.optimizer.apply_gradients(zip(V_gradient, self.V.weights))
        # batch_dims=1 selects each row's own action probability; a plain
        # axis=1 gather would instead return a [minibatch, minibatch] matrix.
        ratio = tf.gather(self.pi(S), A, axis=1, batch_dims=1) / tf.gather(self.old_pi(S), A, axis=1, batch_dims=1)
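        # Clipped surrogate objective from the PPO paper: take the elementwise
        # minimum of the unclipped and clipped terms, which removes the
        # incentive to push the ratio outside [1 - epsilon, 1 + epsilon].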
confident_ratio = tf.clip_by_value(ratio, 1-EPSILON, 1+EPSILON)
current_objective = ratio * advantage
confident_objective = confident_ratio * advantage
PPO_objective = tf.where(current_objective < confident_objective, current_objective, confident_objective)
PPO_objective = tf.reduce_mean(PPO_objective)
pi_gradient = tf.gradients(-PPO_objective, self.pi.weights)
self.optimizer.apply_gradients(zip(pi_gradient, self.pi.weights))
@tf.function
def train(self, SARTS_batch):
S, A, R, T, S2 = SARTS_batch
for _ in range(CYCLE_EPOCHS):
# shuffle and split into minibatches!
shuffled_indices = tf.random.shuffle(tf.range(BATCH_SIZE))
num_mb = BATCH_SIZE // MINIBATCH
for minibatch_indices in tf.split(shuffled_indices, num_mb):
mb_SARTS = (tf.gather(S, minibatch_indices),
tf.gather(A, minibatch_indices),
tf.gather(R, minibatch_indices),
tf.gather(T, minibatch_indices),
tf.gather(S2, minibatch_indices))
self.train_minibatch(mb_SARTS)
for old_pi_w, pi_w in zip(self.old_pi.weights, self.pi.weights):
old_pi_w.assign(pi_w)
def train_PPO(agent, envs, render=False):
episode_returns = []
current_episode_returns = [0 for env in envs]
last_s = [env.reset() for env in envs]
for _ in range(N_CYCLES):
SARTS_samples = []
next_last_s = []
next_current_episode_returns = []
for env, s, episode_return in zip(envs, last_s, current_episode_returns):
for _ in range(CYCLE_LENGTH):
a = agent.pick_action(s).numpy()
s2, r, t, _ = env.step(a)
if render:
env.render()
episode_return += r
SARTS_samples.append((s,a,r,t,s2))
if t:
episode_returns.append(episode_return)
print(f'Episode {len(episode_returns):3d}: {episode_return}')
episode_return = 0
s = env.reset()
else:
s = s2
next_last_s.append(s)
next_current_episode_returns.append(episode_return)
last_s = next_last_s
current_episode_returns = next_current_episode_returns
SARTS_batch = [tf.stack(X, axis=0) for X in zip(*SARTS_samples)]
agent.train(SARTS_batch)
def make_agent(env):
obs_shape = env.observation_space.shape
n_actions = env.action_space.n
V = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=obs_shape),
tf.keras.layers.Dense(400, activation='relu'),
tf.keras.layers.Dense(300, activation='relu'),
tf.keras.layers.Dense(1)])
pi = tf.keras.Sequential([tf.keras.layers.InputLayer(input_shape=obs_shape),
tf.keras.layers.Dense(400, activation='relu'),
tf.keras.layers.Dense(300, activation='sigmoid'),
tf.keras.layers.Dense(n_actions, activation='softmax')])
return DiscretePPO(V, pi)
if __name__ == '__main__':
if len(sys.argv) < 2:
        print('Usage: python ppo.py <Env-V*> (--render)')
        sys.exit(1)
envs = [gym.make(sys.argv[1]) for _ in range(ACTORS)]
agent = make_agent(envs[0])
train_PPO(agent, envs, '--render' in sys.argv)
| 40.768 | 113 | 0.615385 | 687 | 5,096 | 4.36099 | 0.28821 | 0.028037 | 0.034713 | 0.036048 | 0.197597 | 0.122163 | 0.082777 | 0.082777 | 0.082777 | 0.082777 | 0 | 0.019325 | 0.279042 | 5,096 | 124 | 114 | 41.096774 | 0.796135 | 0.094388 | 0 | 0.05 | 0 | 0 | 0.029052 | 0.005679 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06 | false | 0 | 0.03 | 0.01 | 0.12 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794c7683b545a543ae42b9c3d18137a15b824634 | 2,620 | py | Python | youtube_dl/views.py | Shovon588/api_collection | f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2 | [
"MIT"
] | null | null | null | youtube_dl/views.py | Shovon588/api_collection | f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2 | [
"MIT"
] | null | null | null | youtube_dl/views.py | Shovon588/api_collection | f348ffa8dc5c4dc69ba4c2a7d145c71e8273e0a2 | [
"MIT"
] | null | null | null | from pytube import YouTube
from rest_framework import status
from rest_framework.response import Response
from rest_framework.views import APIView
from .serializers import YoutubeDLSerializer
from .utils import make_time, make_size
class YoutubeDL(APIView):
serializer_class = YoutubeDLSerializer
def post(self, request):
serializer = self.serializer_class(data=request.data)
if serializer.is_valid():
url = serializer.validated_data.get("url")
try:
file = YouTube(url)
            except Exception:
return Response({
"status": "failed",
"message": "Invalid url",
}, status=status.HTTP_404_NOT_FOUND)
videos = file.streams
thumbnail = file.thumbnail_url
title = file.title
duration = make_time(file.length)
video_res = {
"1080p": None,
"720p": None,
"480p": None,
"360p": None,
"240p": None,
"144p": None
}
aud_size = 0
audio = None
for video in videos:
if video.resolution in video_res and video_res[video.resolution] is None:
video_res[video.resolution] = {"resolution": video.resolution, "video_type": video.subtype,
"size": make_size(video.filesize),
"url": video.url}
if video.type == "audio":
if video.filesize > aud_size:
audio = video
aud_size = video.filesize
video_data = [value for key, value in video_res.items() if value is not None]
audio_data = None
if audio is not None:
audio_type = audio.subtype
size = make_size(audio.filesize)
url = audio.url
audio_data = {"audio_type": audio_type, "size": size, "url": url}
            # audio_data was computed above but never returned; include it
            return Response({
                "status": "success",
                "message": "Got some data.",
                "title": title,
                "duration": duration,
                "thumbnail": thumbnail,
                "video_data": video_data,
                "audio_data": audio_data,
            }, status=status.HTTP_200_OK)
return Response({"status": "failed",
"message": "Something went wrong.",
"error": serializer.errors},
status=status.HTTP_400_BAD_REQUEST)
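
# A minimal routing sketch (a hypothetical urls.py, not part of this file):
#
# from django.urls import path
# from .views import YoutubeDL
#
# urlpatterns = [path('youtube-dl/', YoutubeDL.as_view())]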
| 34.933333 | 111 | 0.500763 | 247 | 2,620 | 5.157895 | 0.327935 | 0.031397 | 0.040031 | 0.040816 | 0.051805 | 0 | 0 | 0 | 0 | 0 | 0 | 0.018831 | 0.412214 | 2,620 | 74 | 112 | 35.405405 | 0.808442 | 0 | 0 | 0.032787 | 0 | 0 | 0.083206 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.016393 | false | 0 | 0.098361 | 0 | 0.196721 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794d44a2cc74842f8b8d00f81d2ce675f076304a | 5,043 | py | Python | coot/data/ht100m_dataset.py | Jabb0/coot-videotext | 2da20a3f3a50b69677e59869b02cbd72945913d9 | [
"Apache-2.0"
] | null | null | null | coot/data/ht100m_dataset.py | Jabb0/coot-videotext | 2da20a3f3a50b69677e59869b02cbd72945913d9 | [
"Apache-2.0"
] | null | null | null | coot/data/ht100m_dataset.py | Jabb0/coot-videotext | 2da20a3f3a50b69677e59869b02cbd72945913d9 | [
"Apache-2.0"
] | null | null | null | import json
import pandas as pd
import numpy as np
from typing import Union, List, Tuple
from pathlib import Path
from timeit import default_timer as timer
from nntrainer import data as nn_data
def _time_to_seconds(time_column):
return pd.to_timedelta(time_column).dt.total_seconds()
class HT100MBaseDataset:
"""
Dataloader for HowTo100M dataset.
Based on the index csv file of the HT100M dataset this builds a wrapper
around the file structure to return individual files.
"""
def __init__(self, dataset_root: Union[str, Path], metadata_name: str,
split=None):
"""
Setup the dataset
Args:
dataset_root: path to the dataset folder
metadata_name: identifier of the metadata to use. Will select the files we want to use.
split: identifier of the split to use or "ALL"/None to use all data
"""
dataset_root = Path(dataset_root)
# Read the CSV file containing information about the videos
# Format is:
# video_id, category_1, category_2, rank, task_id
# This is used as lookup table of the existing videos
csv = dataset_root.joinpath(f"meta_{metadata_name}.csv")
self._metadata_csv = pd.read_csv(csv, usecols=["video_id", "split"], index_col="video_id")
if split is not None and split != nn_data.DataSplitConst.ALL:
self._metadata_csv = self._metadata_csv[self._metadata_csv["split"] == split]
metadata_path = dataset_root.joinpath("metadata.json")
if not metadata_path.exists():
raise RuntimeError(f"metadata.json for HT100M dataset not found! Path: {dataset_root}")
self._metadata = json.load(metadata_path.open("rt", encoding="utf8"))
self._fps = self._metadata["fps"]
self._caption_root = dataset_root.joinpath("captions")
# Get all available caption files
self._keys = self._metadata_csv.index.to_list()
# Check the dataset integrity. I.e. if all caption csv files for every index are available
if not self.check_integrity():
raise RuntimeError("HT100MDataset: There are data_keys for which the features are not available!")
def check_integrity(self) -> bool:
"""
Checks if caption files for all keys exist. This is crucial for the integrity of the dataset.
Returns: True if dataset integrity is correct.
"""
timer_start = timer()
available_keys = set([x.stem for x in self._caption_root.glob("*.csv")])
print(f"Took {timer() - timer_start:.1f} seconds for scanning caption directory. "
f"Found {len(self._keys)} videos.")
missing_keys = set(self._keys).difference(available_keys)
keys_are_missing = len(missing_keys) != 0
if keys_are_missing:
print(f"There are {len(missing_keys)} missing keys. First 10: {list(missing_keys)[:10]}")
return not keys_are_missing
    def _read_caption_csv(self, video_id: str) -> Tuple[List[str], List[float], List[float]]:
cap_csv = pd.read_csv(self._caption_root.joinpath(video_id + ".csv"),
usecols=["start", "end", "text"],
keep_default_na=False)
cap_csv = cap_csv[
# Drop clips that have no subtitles/captions
(cap_csv["text"].str.len() > 0)
]
return (cap_csv['text'].tolist(),
_time_to_seconds(cap_csv["start"]).tolist(),
_time_to_seconds(cap_csv["end"]).tolist())
def __getitem__(self, video_id: str) -> List[str]:
raise NotImplementedError("GetItem cannot be called on BaseDataset")
def __len__(self):
"""
Returns len of dataset. I.e. number of videos.
"""
return len(self._keys)
def keys(self):
return self._keys
def data_keys(self):
return self._keys
class HT100MCaptionDataset(HT100MBaseDataset):
def __getitem__(self, video_id: str) -> List[str]:
sentences, _, _ = self._read_caption_csv(video_id)
return sentences
class HT100MDataset(HT100MBaseDataset):
def __init__(self, dataset_root: Union[str, Path], metadata_name: str, split: str, max_datapoints: int = -1):
super(HT100MDataset, self).__init__(dataset_root, metadata_name, split=split)
        # reduce the dataset size if requested
if max_datapoints > -1:
self._keys = self._keys[:max_datapoints]
print(f"Reduced number of datapoints to {len(self._keys)}")
def __getitem__(self, key: str):
sentences, starts, stops = self._read_caption_csv(key)
# Drop the same items based on the filter as before
return {
"fps": self._fps,
"data_key": key,
"segments": [
{
"text": text,
"start_sec": start,
"stop_sec": end
} for (text, start, end) in zip(sentences, starts, stops)
]
}
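
# A minimal usage sketch (the paths and the metadata identifier below are
# placeholders for a local HowTo100M layout):
#
# dataset = HT100MDataset("/data/howto100m", metadata_name="v1", split="train")
# sample = dataset[dataset.data_keys()[0]]
# print(sample["data_key"], len(sample["segments"]))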
| 37.917293 | 113 | 0.628594 | 646 | 5,043 | 4.673375 | 0.289474 | 0.036436 | 0.024843 | 0.017887 | 0.109308 | 0.094733 | 0.05631 | 0.05631 | 0.035773 | 0.035773 | 0 | 0.011463 | 0.273448 | 5,043 | 132 | 114 | 38.204545 | 0.8125 | 0.197303 | 0 | 0.051948 | 0 | 0 | 0.145417 | 0.012545 | 0 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.090909 | 0.038961 | 0.376623 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794d94442dfccd9fb0860ed1722ed3107bbed462 | 1,244 | py | Python | qiime_16s/combine_collapsed_otu_tables.py | lotrus28/TaboCom | b67d66e4c410375a9efa08c5e637301e78e9204b | [
"Apache-2.0"
] | null | null | null | qiime_16s/combine_collapsed_otu_tables.py | lotrus28/TaboCom | b67d66e4c410375a9efa08c5e637301e78e9204b | [
"Apache-2.0"
] | null | null | null | qiime_16s/combine_collapsed_otu_tables.py | lotrus28/TaboCom | b67d66e4c410375a9efa08c5e637301e78e9204b | [
"Apache-2.0"
] | null | null | null | import sys
import re
import pandas as pd
def combine_otu_tables(path_to_files):
with open(path_to_files) as a:
filenames = a.read().splitlines()
separated = {re.search(r'ERR\d+?(?=_)',x).group(0):pd.read_table(x, sep = '\t', index_col = 1, header = None,engine='python')
for x in filenames}
indices = [list(x.index) for x in list(separated.values())]
all_taxa = sum(indices,[])
all_taxa = list(set(all_taxa))
altogether = pd.DataFrame(None, columns = list(separated.keys()), index = all_taxa)
for pat in separated:
altogether[pat] = separated[pat][0]
altogether = altogether.fillna(0)
altogether['Mean'] = altogether.mean(axis = 1)
    # Parse the version numerically; slicing the version string breaks on
    # versions like '1.3.5' ('1.3.' is not a valid float).
    pd_major_minor = tuple(int(v) for v in pd.__version__.split('.')[:2])
    if pd_major_minor >= (0, 17):
        altogether = altogether.sort_values('Mean', axis=0, ascending=False)
    else:
        altogether = altogether.sort('Mean', axis=0, ascending=False)
    # .ix was removed from pandas; use .iloc for positional indexing
    return altogether.iloc[:, :-1]
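
# The input file passed to combine_otu_tables is expected to contain one
# OTU-table path per line, with each filename carrying an ERR run accession
# followed by an underscore, e.g. /path/to/ERR123456_collapsed.txt (this is
# inferred from the regex above).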
def main():
# list_of_files = 'temp2.txt'
# output = 'combined.txt'
list_of_files = sys.argv[1]
output = sys.argv[2]
combined = combine_otu_tables(list_of_files)
print('Combining all OTU-tables')
combined.to_csv(output, sep = '\t')
if __name__ == "__main__":
main()
| 30.341463 | 129 | 0.639871 | 173 | 1,244 | 4.398844 | 0.445087 | 0.036794 | 0.043364 | 0.047306 | 0.060447 | 0 | 0 | 0 | 0 | 0 | 0 | 0.015228 | 0.208199 | 1,244 | 40 | 130 | 31.1 | 0.75736 | 0.040997 | 0 | 0 | 0 | 0 | 0.055462 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.103448 | 0 | 0.172414 | 0.034483 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794f5243f54f0804ec162bec691a557c23883c30 | 773 | py | Python | shared/charge_controller_tcp_driver/exemple_driver.py | EDF-Lab/EDF | 3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9 | [
"MIT"
] | 16 | 2022-02-11T14:49:04.000Z | 2022-03-30T07:33:45.000Z | shared/charge_controller_tcp_driver/exemple_driver.py | EDF-Lab/EDF | 3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9 | [
"MIT"
] | 1 | 2022-02-16T15:23:50.000Z | 2022-02-21T15:30:21.000Z | shared/charge_controller_tcp_driver/exemple_driver.py | EDF-Lab/EDF | 3ab2d9e1820dfb713bbd54c91ba72d7d32d998f9 | [
"MIT"
] | 1 | 2022-03-24T10:52:28.000Z | 2022-03-24T10:52:28.000Z |
import sys
sys.path.append("..")
import time
from charge_controller_tcp_driver.charge_controller_tcp_client_helper import *
if __name__ == '__main__':
helper = ChargeControllerTCPClientHelper("169.254.43.3", 12500)
time.sleep(3)
helper.set_pwm(100)
print("PWM:", helper.get_pwm())
#time.sleep(10)
#helper.set_ev_state("A")
#print("EV State: ", helper.get_ev_state())
time.sleep(10)
helper.set_pwm(50)
time.sleep(2)
print("PWM:", helper.get_pwm())
#print("EV State: ", helper.get_ev_state())
time.sleep(1)
#helper.set_pwm(50)
#print("PWM:", helper.get_pwm())
time.sleep(10)
helper.set_pwm(30)
time.sleep(2)
print("PWM:", helper.get_pwm())
# print("EV State: ", helper.get_ev_state())
| 24.15625 | 78 | 0.648124 | 111 | 773 | 4.234234 | 0.315315 | 0.134043 | 0.102128 | 0.144681 | 0.551064 | 0.551064 | 0.514894 | 0.514894 | 0.514894 | 0.417021 | 0 | 0.052133 | 0.181113 | 773 | 31 | 79 | 24.935484 | 0.690363 | 0.276843 | 0 | 0.388889 | 0 | 0 | 0.061818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
794f8be8a7920197768cc08897059ca509f8735d | 5,312 | py | Python | tests/test_intent_classification.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | [
"Apache-2.0"
] | 83 | 2021-08-30T02:50:37.000Z | 2022-02-22T09:37:36.000Z | tests/test_intent_classification.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | [
"Apache-2.0"
] | 2 | 2021-09-10T08:44:13.000Z | 2022-01-23T17:33:35.000Z | tests/test_intent_classification.py | BatsResearch/zsl-kg | 9bc4d4537a0f90ee3bbcefdf90ceae6dbcf48572 | [
"Apache-2.0"
] | 6 | 2021-09-10T07:09:41.000Z | 2021-11-07T14:31:33.000Z | import os
from typing import Text
import torch
import unittest
import torch.nn as nn
import torch.optim as optim
from allennlp.models import Model
from allennlp.data.vocabulary import Vocabulary
from zsl_kg.class_encoders.auto_gnn import AutoGNN
from zsl_kg.example_encoders.text_encoder import TextEncoder
from zsl_kg.data.snips import SnipsDataset
from allennlp.data.iterators import BasicIterator
from allennlp.modules.text_field_embedders import BasicTextFieldEmbedder
from zsl_kg.common.graph import NeighSampler
from zsl_kg.knowledge_graph.conceptnet import ConceptNetKG
from allennlp.common.tqdm import Tqdm
class BiLinearModel(Model):
def __init__(
self,
vocab: Vocabulary,
example_encoder: object,
class_encoder: object,
joint_dim: int,
bias: bool = False,
):
super().__init__(vocab)
self.example_encoder = example_encoder
self.class_encoder = class_encoder
self.text_joint = nn.Linear(
self.example_encoder.output_dim, joint_dim, bias=bias
)
self.class_joint = nn.Linear(
self.class_encoder.output_dim, joint_dim, bias=bias
)
def forward(self, batch, node_idx, kg):
encoder_out = self.example_encoder(batch)
text_rep = self.text_joint(encoder_out)
# get label representation
class_out = self.class_encoder(node_idx, kg)
class_rep = self.class_joint(class_out)
logits = torch.matmul(text_rep, class_rep.t())
return logits
class TestIntentClassification(unittest.TestCase):
def setUp(
self,
):
label_maps = {
"train": ["weather", "music", "restaurant"],
"dev": ["search", "movie"],
"test": ["book", "playlist"],
}
data_path = "tests/test_data/datasets/snips/"
datasets = []
for split in ["train", "dev", "test"]:
labels = label_maps[split]
label_to_idx = dict(
[(label, idx) for idx, label in enumerate(labels)]
)
reader = SnipsDataset(label_to_idx)
path = os.path.join(data_path, f"{split}.txt")
_dataset = reader.read(path)
datasets.append(_dataset)
self.train_dataset, self.dev_dataset, self.test_dataset = datasets
vocab = Vocabulary.from_instances(
self.train_dataset + self.dev_dataset + self.test_dataset
)
# create the iterator
self.iterator = BasicIterator(batch_size=32)
self.iterator.index_with(vocab)
print("Loading GloVe...")
# token embed
token_embed_path = os.path.join(data_path, "word_emb.pt")
token_embedding = torch.load(token_embed_path)
print("word embeddings created...")
word_embeddings = BasicTextFieldEmbedder({"tokens": token_embedding})
# create the text encoder
print("Loading the text encoder...")
self.example_encoder = TextEncoder(word_embeddings, 300, 32, 20)
trgcn = {
"input_dim": 300,
"output_dim": 64,
"type": "trgcn",
"gnn": [
{
"input_dim": 300,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(100, mode="topk"),
"fh": 100,
},
{
"input_dim": 64,
"output_dim": 64,
"activation": nn.ReLU(),
"normalize": True,
"sampler": NeighSampler(50, mode="topk"),
},
],
}
self.class_encoder = AutoGNN(trgcn)
self.train_graph = ConceptNetKG.load_from_disk(
"tests/test_data/subgraphs/snips/train_graph"
)
node_to_idx = dict(
[(node, idx) for idx, node in enumerate(self.train_graph.nodes)]
)
#
self.train_nodes = torch.tensor(
[
node_to_idx[node]
for node in [
"/c/en/weather",
"/c/en/music",
"/c/en/restaurant",
]
]
)
self.model = BiLinearModel(
vocab, self.example_encoder, self.class_encoder, joint_dim=20
)
self.optimizer = optim.Adam(
self.model.parameters(), lr=1e-03, weight_decay=5e-04
)
self.loss_function = nn.CrossEntropyLoss()
def test_intent_classification_train(self):
self.model.train()
total_batch_loss = 0.0
generator_tqdm = Tqdm.tqdm(
self.iterator(self.train_dataset, num_epochs=1, shuffle=False),
total=self.iterator.get_num_batches(self.train_dataset),
)
for batch in generator_tqdm:
self.optimizer.zero_grad()
logits = self.model(
batch["sentence"], self.train_nodes, self.train_graph
)
loss = self.loss_function(logits, batch["labels"])
total_batch_loss += loss.item()
loss.backward()
self.optimizer.step()
self.assertLessEqual(total_batch_loss, 100.0)
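
# Assuming the bundled test data under tests/test_data/ is available, this
# module can be run with the standard unittest runner, e.g.:
#
#   python -m unittest tests.test_intent_classification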
| 31.247059 | 77 | 0.573419 | 570 | 5,312 | 5.12807 | 0.301754 | 0.027711 | 0.015395 | 0.015737 | 0.139925 | 0.119398 | 0.093055 | 0.07116 | 0.07116 | 0.040369 | 0 | 0.012864 | 0.326807 | 5,312 | 169 | 78 | 31.431953 | 0.80453 | 0.01506 | 0 | 0.094891 | 0 | 0 | 0.081324 | 0.01416 | 0 | 0 | 0 | 0 | 0.007299 | 1 | 0.029197 | false | 0 | 0.116788 | 0 | 0.167883 | 0.021898 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79509ae0de663c69b13b3aa40296a01c2a31c785 | 5,077 | py | Python | chase/simulation.py | Motwg/WolfAndSheep-2019 | d6c50660368661fddf88dc860caac7236a791beb | [
"MIT"
] | null | null | null | chase/simulation.py | Motwg/WolfAndSheep-2019 | d6c50660368661fddf88dc860caac7236a791beb | [
"MIT"
] | null | null | null | chase/simulation.py | Motwg/WolfAndSheep-2019 | d6c50660368661fddf88dc860caac7236a791beb | [
"MIT"
] | null | null | null | import csv
import json
import logging
import math
import random as ran
def distance(point1, point2):
logging.debug("Args: {0}".format(locals()))
if type(point1) != type(point2):
logging.warning("Types of given arguments are different: {0} != {1}".format(point1, point2))
logging.debug("Returns: {0}".format(((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2) ** 0.5))
return ((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2) ** 0.5
class Animal:
def __init__(self, id, x, y, move_dist):
logging.info("{0}:[{1}, {2}]".format(id, x, y))
self.id = id
self.x = x
self.y = y
self.move_dist = move_dist
def __lt__(self, other):
return self.id < other.id
def move(self, x, y):
logging.info("{0}:[{1}, {2}] => [{3}, {4}]".format(self.id, self.x, self.y, self.x+x, self.y+y))
self.x += x
self.y += y
def move_in_direction(self, direction):
if direction == 0:
self.move(0, self.move_dist)
elif direction == 1:
self.move(0, -self.move_dist)
elif direction == 2:
self.move(self.move_dist, 0)
elif direction == 3:
self.move(-self.move_dist, 0)
elif type(direction) == Animal:
degrees = math.atan2(direction.y-self.y, direction.x-self.x)
self.move(
self.move_dist * math.cos(degrees),
self.move_dist * math.sin(degrees)
)
def move_in_random_direction(self):
self.move_in_direction(ran.randint(0, 3))
def distance(self, animal):
return distance([self.x, self.y], [animal.x, animal.y])
def find_the_closest_animal(self, animals):
dist = self.distance(animals[0])
closest = animals[0]
for animal in animals:
new_dist = distance([self.x, self.y], [animal.x, animal.y])
if dist > new_dist:
dist = new_dist
closest = animal
return closest
def eaten(self):
logging.info("Eaten: {0}:[{1}, {2}]".format(self.id, self.x, self.y))
self.x = None
self.y = None
def get_pos(self):
return [self.x, self.y]
@staticmethod
def generate_animals(animals_number,
move_range,
spawn_range=10.0):
logging.debug("Args: {0}".format(locals()))
new_animals = []
for s in range(animals_number):
new_animals.append(Animal(
s + 1,
ran.random() * spawn_range * 2 - spawn_range,
ran.random() * spawn_range * 2 - spawn_range,
move_range))
logging.debug("Returns: {0}".format(new_animals))
return new_animals
def save_json(json_data, filename='pos.json', save_dir='.'):
logging.debug("Args: {0}".format(locals()))
with open(save_dir+"/"+filename, 'w') as json_file:
json.dump(json_data, json_file)
def save_csv(csv_data=None, filename='alive.csv', opening_parameter='a', save_dir='.'):
logging.debug("Args: {0}".format(locals()))
with open(save_dir+"/"+filename, opening_parameter, newline='') as csv_file:
writer = csv.writer(csv_file, delimiter=',',
quotechar='|', quoting=csv.QUOTE_MINIMAL)
if csv_data is not None:
writer.writerow(csv_data)
def simulate(wolves_sim, sheep_sim, turns_number=50, save_dir='.', wait=False):
logging.debug("Args: {0}".format(locals()))
sheep_eaten = []
    save_csv(None, 'alive.csv', 'w', save_dir)  # overwrites the file
for t in range(turns_number):
for s in sheep_sim:
s.move_in_random_direction()
for w in wolves_sim:
closest = w.find_the_closest_animal(sheep_sim)
if w.distance(closest) <= w.move_dist:
w.x = closest.x
w.y = closest.y
closest.eaten()
sheep_index = closest.id
sheep_eaten.append(closest)
sheep_sim.remove(closest)
else:
w.move_in_direction(closest)
sheep_index = None
print("Turn: {0}\n"
"Wolf position: {1}\n"
"Sheep alive: {2}\n"
"Eaten sheep: {3}".format(t + 1, wolves_sim[0].get_pos(), len(sheep_sim), sheep_index))
        # save json and csv
pos = {
'round_no': t + 1,
'wolf_pos': wolves_sim[0].get_pos(),
'sheep_pos': list(map(Animal.get_pos, sorted(sheep_sim+sheep_eaten)))
}
save_json(pos, 'pos.json', save_dir)
save_csv([t+1, len(sheep_sim)], 'alive.csv', 'a', save_dir)
        # wait for a key press
if wait:
input("Press Enter to continue...")
        # if the sheep population drops to 0 => end of the simulation
if len(sheep_sim) == 0:
logging.info("Wolf ate every sheep. End of simulation.")
break
logging.debug("Returns: {0}".format(sheep_eaten))
return sheep_eaten
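
# A minimal usage sketch (the counts and move ranges below are arbitrary):
#
# if __name__ == '__main__':
#     wolves = Animal.generate_animals(1, move_range=1.0)
#     sheep = Animal.generate_animals(15, move_range=0.5)
#     simulate(wolves, sheep, turns_number=50)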
| 32.33758 | 104 | 0.554855 | 670 | 5,077 | 4.044776 | 0.208955 | 0.038376 | 0.017712 | 0.031365 | 0.279705 | 0.221402 | 0.189299 | 0.135793 | 0.110701 | 0.067159 | 0 | 0.0234 | 0.301359 | 5,077 | 156 | 105 | 32.544872 | 0.740626 | 0.020288 | 0 | 0.058333 | 0 | 0 | 0.081304 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.116667 | false | 0 | 0.041667 | 0.025 | 0.225 | 0.008333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79509e0da59087724c7ad32862f4a10871238e6b | 4,518 | py | Python | anchorgql/runlocal.py | vybenetwork/anchorgql | d8a8a3fa332e0076f20061689951645c0dae1642 | [
"MIT"
] | 1 | 2022-02-20T22:05:26.000Z | 2022-02-20T22:05:26.000Z | anchorgql/runlocal.py | vybenetwork/anchorgql | d8a8a3fa332e0076f20061689951645c0dae1642 | [
"MIT"
] | null | null | null | anchorgql/runlocal.py | vybenetwork/anchorgql | d8a8a3fa332e0076f20061689951645c0dae1642 | [
"MIT"
] | null | null | null | import json
import subprocess
import asyncio
from solana.rpc.async_api import AsyncClient
from solana.publickey import PublicKey
from anchorpy import Program, Provider, Wallet
class bcolors:
HEADER = '\033[95m'
OKBLUE = '\033[94m'
OKCYAN = '\033[96m'
OKGREEN = '\033[92m'
WARNING = '\033[93m'
FAIL = '\033[91m'
ENDC = '\033[0m'
BOLD = '\033[1m'
UNDERLINE = '\033[4m'
def build_and_start_server(project_name, prd_mode):
print(f'{bcolors.OKCYAN}INFO: Starting test for {project_name}')
completed_process_result = subprocess.run(
"npm run prod", shell=True)
if completed_process_result.returncode != 0:
print(
f'{bcolors.FAIL}ERROR: Failed to generate Apollo GraphQL project for project: {project_name}{bcolors.ENDC}')
return False
print(f'{bcolors.OKGREEN}DONE: Project creation successful for project: {project_name}{bcolors.ENDC}')
server_directory = "./src/server"
new_process = subprocess.run(
"npm start", cwd=server_directory, shell=True)
if new_process.returncode != 0:
print(
f'{bcolors.FAIL}ERROR: Failed to start newly generated Apollo GraphQL server for project: {project_name}{bcolors.ENDC}')
return False
print(f'{bcolors.OKGREEN}DONE: Project startup successful for project: {project_name}{bcolors.ENDC}')
return True
def create_project_config(path, content):
with open(path, 'w') as f:
f.write(json.dumps(content))
return
async def check_and_replace_with_new_idl(program_id, idl_path, anchor_provider_url):
try:
client = AsyncClient(anchor_provider_url)
provider = Provider(client, Wallet.local())
program_id = PublicKey(program_id)
idl = await Program.fetch_raw_idl(
program_id, provider
)
except:
await client.close()
return
if idl is not None:
with open(idl_path, 'w') as file:
json.dump(idl, file)
await client.close()
return
def main():
# On Windows, if an error happens where the channels file isn't found, you probably opened the project
# from the wrong directory. Either try reopening the project from the correct directory or play with the
# line below.
# os.chdir('./anchorgql')
with open('channels.json') as config_file:
config = json.load(config_file)
channels_config = config['channels']
results = []
for channel in channels_config:
project_name = channel['PROJECT_NAME']
program_id = channel['PROGRAM_ID']
anchor_provider_url = channel['ANCHOR_PROVIDER_URL']
idl_path = channel['IDL_PATH']
asyncio.run(check_and_replace_with_new_idl(
program_id, idl_path, anchor_provider_url))
content = {
"projectName": project_name,
"protocol": channel["PROTOCOL"],
"network": channel["NETWORK"],
"programID": program_id,
"anchorProviderURL": anchor_provider_url,
"idlPath": idl_path,
"anchorVersion": config['anchorVersion'],
"idl": config['idl'],
"port": config['port'],
"packageJsonTemplateFile": config['packageJsonTemplateFile'],
"indexTemplateFile": config['indexTemplateFile'],
"typeDefTemplateFile": config['typeDefTemplateFile'],
"configFile": config['configFile'],
"testMode": config["testMode"],
"prdMode": config["prdMode"]
}
create_project_config('./src/config.json', content)
passed = build_and_start_server(project_name, config["prdMode"])
results.append({
"projectName": project_name,
"passed": passed
})
print()
print("===================================================")
print("===================================================")
print("===================================================")
print()
print(f'{bcolors.OKBLUE}INFO: Test results:{bcolors.ENDC}')
for result in results:
if result['passed']:
print(
f'{bcolors.OKGREEN}{result["projectName"]}: Passed{bcolors.ENDC}')
else:
print(
f'{bcolors.FAIL}{result["projectName"]}: Failed{bcolors.ENDC}')
print()
print("===================================================")
print("=================== End of Run ====================")
print("===================================================")
if __name__ == '__main__':
main()
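# --- Added sketch: an illustrative channels.json shape, inferred from the
# keys read in main(); every value below is a placeholder assumption.
EXAMPLE_CHANNELS_JSON = {
    "anchorVersion": "0.24.2",
    "idl": None,  # placeholder; the expected shape is not visible in this script
    "port": 4000,
    "packageJsonTemplateFile": "templates/package.json",
    "indexTemplateFile": "templates/index.ts",
    "typeDefTemplateFile": "templates/typeDefs.ts",
    "configFile": "src/config.json",
    "testMode": False,
    "prdMode": True,
    "channels": [
        {
            "PROJECT_NAME": "example-project",
            "PROGRAM_ID": "11111111111111111111111111111111",
            "PROTOCOL": "http",
            "NETWORK": "devnet",
            "ANCHOR_PROVIDER_URL": "https://api.devnet.solana.com",
            "IDL_PATH": "idls/example.json",
        }
    ],
}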
| 36.144 | 132 | 0.588092 | 477 | 4,518 | 5.404612 | 0.32914 | 0.046936 | 0.040341 | 0.032583 | 0.204034 | 0.192397 | 0.169123 | 0.134213 | 0.134213 | 0.102405 | 0 | 0.01275 | 0.236166 | 4,518 | 124 | 133 | 36.435484 | 0.73428 | 0.0529 | 0 | 0.196262 | 0 | 0.018692 | 0.337857 | 0.140384 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028037 | false | 0.037383 | 0.056075 | 0 | 0.233645 | 0.158879 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795164e9b019d5e0233e60502428b4c2cb401ddf | 4,647 | py | Python | scripts/scrape_cgc.py | eklipse2009/ZX-Pokemaster | 113bf2e242347b475cca9eadbae4f1b67f498466 | [
"MIT"
] | 8 | 2018-11-18T00:37:25.000Z | 2020-12-06T13:17:53.000Z | scripts/scrape_cgc.py | eklipse2009/ZX-Pokemaster | 113bf2e242347b475cca9eadbae4f1b67f498466 | [
"MIT"
] | 8 | 2017-08-21T10:07:58.000Z | 2020-03-29T18:23:37.000Z | scripts/scrape_cgc.py | eklipse2009/ZX-Pokemaster | 113bf2e242347b475cca9eadbae4f1b67f498466 | [
"MIT"
] | 1 | 2021-03-04T17:43:36.000Z | 2021-03-04T17:43:36.000Z | import os
import glob
import shutil
import zipfile
from functions.game_name_functions import *
if (os.getcwd().endswith('scripts')):
os.chdir('..')
from classes.scraper import *
def scrape_csscgc():
# if os.path.exists('tosec\\CSSCGC Games'):
# shutil.rmtree('tosec\\CSSCGC Games')
s = Scraper()
template = 'https://www.yoursinclair.co.uk/csscgc/csscgc.cgi?year='
for year in range(1996, 2017):
files_extracted = []
page = template + str(year)
selector = s.loadUrl(page)
games_tables = selector.xpath('//table[@border="1"]').extract_all()
for game_table in games_tables:
cells = Selector(game_table).xpath('//td//text()').extract_all()
game_name = cells[0]
author = cells[2]
if not author.startswith('Mr'):
author = putInitialsToEnd(author)
filenames = list(set(cells[4].split(' ')+[cells[4]]))
format = cells[10]
game_represented = False
for filename in filenames:
if not filename:
continue
filename = os.path.basename(filename)
ext = os.path.splitext(filename)[-1].lower()
tosec_name = '{} ({})({})({})[CSSCGC]{}'.format(game_name, str(year), author, format, ext)
tosec_name = tosec_name.replace('(Spectrum)', '').replace('ZX Spectrum ', '').replace('(48K)', '')
tosec_name = tosec_name.replace('(128K Spectrum)', '(128K)')
tosec_name = tosec_name.replace('(128K-+2)', '(+2)')
tosec_name = tosec_name.replace('(unknown)', '(-)')
tosec_name = getFileSystemFriendlyName(tosec_name)
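# Illustrative result, e.g.: "Some Game (1999)(Smith, J.)(TAP)[CSSCGC].zip"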
src = os.path.join('tosec', 'csscgc scrape', 'CSSCGC' + str(year), filename)
dest = os.path.join('tosec', 'CSSCGC Games', str(year), tosec_name)
# print(src, dest)
if not os.path.exists(src):
# print('File does not exist:', filename, 'Year:', year)
continue
if os.path.exists(dest):
print('Conflict:', tosec_name, filename, 'Year:', year)
continue
os.makedirs(os.path.dirname(dest), exist_ok=True)
if ext == '.zip':
with zipfile.ZipFile(src, 'r') as zf:
files_to_extract = []
conflict = False
for zfname in zf.namelist():
zfname_ext = zfname.split('.')[-1].lower()
if zfname_ext in GAME_EXTENSIONS:
files_to_extract.append(zfname)
for each in GAME_EXTENSIONS:
if len([x for x in files_to_extract if x.endswith(each)])>1:
print('Conflict:', tosec_name, src, files_to_extract, 'Year:', year)
conflict = True
break
if not conflict and files_to_extract:
for file in files_to_extract:
data = zf.read(file)
ext = os.path.splitext(file)[-1].lower()
file_dest = dest.replace('.zip', ext)  # derive a destination per extracted file
with open(file_dest, 'wb+') as output:
output.write(data)
game_represented = True
files_extracted.append(src)
else:
shutil.copy(src, dest)
files_extracted.append(src)
game_represented = True
if not game_represented:
print('Game not represented:', tosec_name, cells[4], 'Year:', year)
for src in glob.glob(os.path.join('tosec', 'csscgc scrape', 'CSSCGC'+str(year), '*')):
filename, ext = os.path.splitext(os.path.basename(src))
if ext[1:] not in GAME_EXTENSIONS+['zip']:
continue
if src in files_extracted:
continue
else:
tosec_name = '{} ({})(-)[CSSCGC]{}'.format(filename.title() , str(year), ext)
dest = os.path.join('tosec', 'CSSCGC Games', str(year), 'unsorted', tosec_name)
os.makedirs(os.path.dirname(dest), exist_ok=True)
shutil.copy(src, dest)
print('Copied: ', src, 'to:', dest, 'Year:', year)
if __name__=='__main__':
scrape_csscgc() | 49.967742 | 114 | 0.497095 | 483 | 4,647 | 4.641822 | 0.252588 | 0.068243 | 0.049955 | 0.032114 | 0.176628 | 0.135593 | 0.109723 | 0.109723 | 0.109723 | 0.042819 | 0 | 0.012375 | 0.374005 | 4,647 | 93 | 115 | 49.967742 | 0.758336 | 0.03314 | 0 | 0.174419 | 0 | 0 | 0.089329 | 0.004901 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011628 | false | 0 | 0.069767 | 0 | 0.081395 | 0.046512 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79526a360c29da4c2b5320e1dc30a9a350d4bff9 | 5,249 | py | Python | molar/backend/database/query.py | aspuru-guzik-group/molar | a3e0c337bd8a41c94b2c25831c95048cc7614f04 | [
"BSD-3-Clause"
] | 4 | 2021-07-20T18:49:44.000Z | 2021-10-15T00:58:12.000Z | molar/backend/database/query.py | aspuru-guzik-group/molar | a3e0c337bd8a41c94b2c25831c95048cc7614f04 | [
"BSD-3-Clause"
] | null | null | null | molar/backend/database/query.py | aspuru-guzik-group/molar | a3e0c337bd8a41c94b2c25831c95048cc7614f04 | [
"BSD-3-Clause"
] | 2 | 2022-01-07T17:57:42.000Z | 2022-01-13T21:00:20.000Z | # std
from typing import Any, Dict, List, Optional, Union
# external
import pkg_resources
import sqlalchemy
from sqlalchemy.orm import aliased, Session
# molar
from molar.backend import schemas
from molar.backend.database.utils import sqlalchemy_to_dict
INFORMATION_QUERY = open(
pkg_resources.resource_filename("molar", "sql/information_query.sql"), "r"
).read()
def resolve_type(type: str, models, alias_registry=None):
if alias_registry is None:
alias_registry = {}
types = type.split(".")
if len(types) == 1:
if isinstance(models, sqlalchemy.orm.attributes.InstrumentedAttribute):
return models[types[0]].astext
type_ = getattr(models, types[0], None)
if type_ is not None:
return type_
if types[0] in alias_registry.keys():
return alias_registry[types[0]]
raise ValueError(f"Type {type} not found in database!")
submodel = getattr(models, types[0], None)
if submodel is None and types[0] in alias_registry.keys():
submodel = alias_registry[types[0]]
if submodel is not None:
return resolve_type(".".join(types[1:]), submodel, alias_registry)
raise ValueError(f"Type {type} not found in database!")
def query_builder(
db: Session,
models,
types: schemas.QueryTypes,
limit: int,
offset: int,
joins: Optional[schemas.QueryJoins] = None,
filters: Optional[schemas.QueryFilters] = None,
order_by: Optional[schemas.QueryOrderBys] = None,
aliases: Optional[schemas.QueryAliases] = None,
):
alias_registry: Dict[str, Any] = {}
# Resolving aliases
if aliases is not None:
if not isinstance(aliases, list):
aliases = [aliases]
for alias in aliases:
alias_registry[alias.alias] = aliased(
resolve_type(alias.type, models), name=alias.alias
)
# Resolving main types
if not isinstance(types, list):
types = [types]
db_objs = []
for type_ in types:
db_obj = resolve_type(type_, models, alias_registry)
db_objs.append(db_obj)
query = db.query(*db_objs)
if joins is not None:
if not isinstance(joins, list):
joins = [joins]
for join in joins:
joined_table = resolve_type(
join.type,
models,
alias_registry,
)
onclause = None
if join.on is not None:
onclause = resolve_type(
join.on.column1, models, alias_registry
) == resolve_type(join.on.column2, models, alias_registry)
query = query.join(
joined_table,
onclause,
isouter=True if join.join_type == "outer" else False,
full=True if join.join_type == "full" else False,
)
if filters is not None:
filters = expand_filters(filters, models, alias_registry)
query = query.filter(filters)
if order_by is not None:
if not isinstance(order_by, list):
order_by = [order_by]
order_bys = []
for ob in order_by:
t = resolve_type(ob.type, models, alias_registry)
if ob.order == "asc":
order_bys.append(t.asc())
else:
order_bys.append(t.desc())
query = query.order_by(*order_bys)
query = query.offset(offset).limit(limit)
return query, db_objs, types
def process_query_output(db_objs, query_results, types):
if len(db_objs) == 1:
return [sqlalchemy_to_dict(db_objs[0], r, types[0]) for r in query_results]
results = []
for result in query_results:
result_dict = {}
for res, db_obj, t in zip(result, db_objs, types):
result_dict.update(sqlalchemy_to_dict(db_obj, res, t, add_table_name=True))
results.append(result_dict)
return results
def expand_filters(filters, models, alias_registry):
if isinstance(filters, schemas.QueryFilterList):
expanded = [expand_filters(f, models, alias_registry) for f in filters.filters]
if filters.op == "and":
return sqlalchemy.and_(*expanded)
elif filters.op == "or":
return sqlalchemy.or_(*expanded)
else:
raise ValueError(f"Filter operator not supported: {filters.op}")
elif isinstance(filters, schemas.QueryFilter):
type = resolve_type(filters.type, models, alias_registry)
operator = filters.op
if filters.op == "==":
operator = "__eq__"
elif filters.op == "!=":
operator = "__ne__"
elif filters.op == ">":
operator = "__gt__"
elif filters.op == "<":
operator = "__lt__"
elif filters.op == ">=":
operator = "__ge__"
elif filters.op == "<=":
operator = "__le__"
# If value is another column
value = filters.value
if isinstance(value, str):
try:
value_type = resolve_type(value, models, alias_registry)
except ValueError:
pass
else:
value = value_type
return getattr(type, operator)(value)
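# --- Added sketch: the elif chain above expressed as a lookup table; an
# equivalent-by-assumption alternative, not part of the original module.
_OPERATOR_MAP = {
    "==": "__eq__",
    "!=": "__ne__",
    ">": "__gt__",
    "<": "__lt__",
    ">=": "__ge__",
    "<=": "__le__",
}

def resolve_operator(op: str) -> str:
    # Unknown operators fall through unchanged, matching the behaviour above.
    return _OPERATOR_MAP.get(op, op)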
| 30.34104 | 87 | 0.594589 | 615 | 5,249 | 4.889431 | 0.209756 | 0.082142 | 0.063186 | 0.034919 | 0.136016 | 0.111074 | 0.027935 | 0.027935 | 0.027935 | 0 | 0 | 0.003838 | 0.30501 | 5,249 | 172 | 88 | 30.517442 | 0.82045 | 0.016003 | 0 | 0.052632 | 0 | 0 | 0.040132 | 0.004847 | 0 | 0 | 0 | 0 | 0 | 1 | 0.030075 | false | 0.007519 | 0.045113 | 0 | 0.150376 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795299febd0881f339bf75a4c01b525d81a4103e | 1,089 | py | Python | fa_management_server/models/role.py | Msms-NJ/fa_management_server | 6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9 | [
"MIT"
] | null | null | null | fa_management_server/models/role.py | Msms-NJ/fa_management_server | 6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9 | [
"MIT"
] | null | null | null | fa_management_server/models/role.py | Msms-NJ/fa_management_server | 6787e35a5ac27c27c61fcaa0f508a78f4dc6e8f9 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
"""Role models."""
from dataclasses import dataclass
from array import array
from .database import Column, Model, SurrogatePK, db, reference_col, relationship
from sqlalchemy.dialects.postgresql import ARRAY
@dataclass
class Role(SurrogatePK, Model):
"""用户角色信息表"""
__tablename__ = "roles"
# Fields returned in JSON responses
name: str
id: str
remarks: str
web_menus: array
update_date: str
# Role data-access scope (data_scope):
# 0 = default, 1 = own data only, 2 = data within the current office, 3 = all data in the system
DATA_SCOPE_DEFAULT = 0
DATA_SCOPE_SELF = 1
DATA_SCOPE_OFFICE = 2
DATA_SCOPE_ALL = 3
# Database column definitions
name = Column(db.String(80), unique=True, nullable=False)
users = relationship("UserRole", back_populates="role")
data_scope = Column(db.SmallInteger, nullable=False)
web_menus = Column(ARRAY(db.String))
def __init__(self, **kwargs):
"""Create instance."""
db.Model.__init__(self, **kwargs)
def __repr__(self):
"""Represent instance as a unique string."""
return "<Role({name})>".format(name=self.name)
| 26.560976 | 81 | 0.673095 | 134 | 1,089 | 5.238806 | 0.537313 | 0.076923 | 0.039886 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012821 | 0.212121 | 1,089 | 40 | 82 | 27.225 | 0.805361 | 0.176309 | 0 | 0 | 0 | 0 | 0.03555 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.916667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79540db7343cd37c04169f2c2a9534f0c0ea7d5c | 1,187 | py | Python | code/math_examples.py | rustam-fork/ml-course-uz | e1554d4c69bf0e421aa596d77aab65639df1ff73 | [
"MIT"
] | 21 | 2018-01-05T09:24:49.000Z | 2021-04-24T03:25:25.000Z | code/math_examples.py | rustam-fork/ml-course-uz | e1554d4c69bf0e421aa596d77aab65639df1ff73 | [
"MIT"
] | 1 | 2019-11-11T18:34:53.000Z | 2019-11-13T15:56:10.000Z | code/math_examples.py | rustam-fork/ml-course-uz | e1554d4c69bf0e421aa596d77aab65639df1ff73 | [
"MIT"
] | 13 | 2018-01-05T10:26:47.000Z | 2022-01-25T07:48:33.000Z | import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import cm
def draw_parabola(steps=50):
x = np.linspace(-4, 4, steps)
plt.plot(x, x ** 2)
plt.axvline(x=0, color='b', linestyle='dashed')
def draw_paraboloid(steps=50):
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
x = np.linspace(-1, 1, steps)
y = np.linspace(-1, 1, steps)
X, Y = np.meshgrid(x, y)
Z = X ** 2 + Y ** 2
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
def draw_mishra_bird():
fig = plt.figure(figsize=(14, 10))
x = np.arange(-10, 1, 0.1)
y = np.arange(-6, 0.5, 0.1)
X, Y = np.meshgrid(x, y)
ax = plt.gca(projection='3d')
Z = np.sin(Y) * np.exp((1 - np.cos(X)) ** 2) + np.cos(X) * np.cos(X) * np.exp((1 - np.sin(Y)) ** 2) + (X - Y) ** 2
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm)
ax.view_init(20, -60)
def draw_hyperbolic_paraboloid():
fig = plt.figure(figsize=(10, 10))
ax = fig.gca(projection='3d')
x = np.linspace(-1, 1, 50)
y = np.linspace(-1, 1, 50)
X, Y = np.meshgrid(x, y)
Z = X ** 2 - Y ** 2
ax.plot_surface(X, Y, Z, cmap=cm.coolwarm) | 27.604651 | 118 | 0.57877 | 215 | 1,187 | 3.144186 | 0.274419 | 0.029586 | 0.022189 | 0.071006 | 0.43787 | 0.378698 | 0.357988 | 0.357988 | 0.357988 | 0.357988 | 0 | 0.065288 | 0.225779 | 1,187 | 43 | 119 | 27.604651 | 0.670294 | 0 | 0 | 0.30303 | 0 | 0 | 0.010943 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.121212 | false | 0 | 0.121212 | 0 | 0.242424 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
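# --- Added sketch: render one surface interactively (assumes a GUI backend).
if __name__ == '__main__':
    draw_hyperbolic_paraboloid()
    plt.show()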
7954a7bbe8ccac9a9d76513832ed91b4c1c715ad | 3,075 | py | Python | tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | tests/onegov/town6/test_views.py | politbuero-kampagnen/onegov-cloud | 20148bf321b71f617b64376fe7249b2b9b9c4aa9 | [
"MIT"
] | null | null | null | import onegov.core
import onegov.org
from tests.shared import utils
def test_view_permissions():
utils.assert_explicit_permissions(onegov.org, onegov.org.OrgApp)
def test_notfound(client):
notfound_page = client.get('/foobar', expect_errors=True)
assert "Seite nicht gefunden" in notfound_page
assert notfound_page.status_code == 404
def test_links(client):
root_url = client.get('/').pyquery('.side-navigation a').attr('href')
client.login_admin()
root_page = client.get(root_url)
new_link = root_page.click("Verknüpfung")
assert "Neue Verknüpfung" in new_link
new_link.form['title'] = 'Google'
new_link.form['url'] = 'https://www.google.ch'
link = new_link.form.submit().follow()
assert "Sie wurden nicht automatisch weitergeleitet" in link
assert 'https://www.google.ch' in link
client.get('/auth/logout')
root_page = client.get(root_url)
assert "Google" in root_page
google = root_page.click("Google", index=0)
assert google.status_code == 302
assert google.location == 'https://www.google.ch'
def test_clipboard(client):
client.login_admin()
page = client.get('/topics/organisation')
assert 'paste-link' not in page
page = page.click(
'Kopieren',
extra_environ={'HTTP_REFERER': page.request.url}
).follow()
assert 'paste-link' in page
page = page.click('Einf').form.submit().follow()
assert '/organisation/organisation' in page.request.url
def test_clipboard_separation(client):
client.login_admin()
page = client.get('/topics/organisation')
page = page.click('Kopieren')
assert 'paste-link' in client.get('/topics/organisation')
# new client (browser) -> new clipboard
client = client.spawn()
client.login_admin()
assert 'paste-link' not in client.get('/topics/organisation')
def test_global_tools(client):
links = client.get('/').pyquery('.globals a')
assert links == []
client.login_admin()
links = client.get('/').pyquery('.globals a')
assert links != []
def test_top_navigation(client):
links = client.get('/').pyquery('.side-navigation a span')
assert links.text() == 'Organisation Themen Kontakt Aktuelles'
def test_announcement(client):
client.login_admin()
color = '#006fbb'
bg_color = '#008263'
text = 'This is an announcement which appears on top of the page'
settings = client.get('/header-settings')
# test default not giving the color
assert settings.form['left_header_announcement_bg_color'].value == (
'#FBBC05'
)
assert settings.form['left_header_announcement_font_color'].value == (
'#000000'
)
settings.form['left_header_announcement'] = text
settings.form['left_header_announcement_bg_color'] = bg_color
settings.form['left_header_announcement_font_color'] = color
page = settings.form.submit().follow()
assert text in page
assert (
f'<div id="announcement" style="color: {color}; '
f'background-color: {bg_color};">'
) in page
| 27.212389 | 74 | 0.67935 | 392 | 3,075 | 5.170918 | 0.293367 | 0.057721 | 0.047361 | 0.054267 | 0.325111 | 0.23483 | 0.174642 | 0.091761 | 0.052294 | 0 | 0 | 0.009592 | 0.186341 | 3,075 | 112 | 75 | 27.455357 | 0.80056 | 0.023089 | 0 | 0.16 | 0 | 0 | 0.272909 | 0.061979 | 0 | 0 | 0 | 0 | 0.28 | 1 | 0.106667 | false | 0 | 0.04 | 0 | 0.146667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795502273dc48fdf684fe2e0b8c17dbaab75cc3f | 8,530 | pyw | Python | main.pyw | Niyco/Cipher-tool | a0689daf8e8a087571d447efe6e98c206364316f | [
"MIT"
] | null | null | null | main.pyw | Niyco/Cipher-tool | a0689daf8e8a087571d447efe6e98c206364316f | [
"MIT"
] | null | null | null | main.pyw | Niyco/Cipher-tool | a0689daf8e8a087571d447efe6e98c206364316f | [
"MIT"
] | null | null | null | import tkinter as tk
from tkinter import filedialog
from Solve_stages import *
from Text_stages import *
from Analysis_stages import *
from Output import *
root = tk.Tk()
root.title("Cipher program")
root.geometry("1500x500")
root.state("zoomed") #apparently windows only
def getOutputText():
text = ""
for stage in stages:
if stage.check_var.get():
if decode_var.get() == 1: #encode is selected
text = stage.encode(text)
else: #decode is selected
text = stage.decode(text)
return text
def updateOutputText():
text = getOutputText()
right_text.delete(1.0, tk.END)
right_text.insert(tk.END,text)
for stage in stages:
if stage.check_var.get():
stage.updateOutputWidget(text, right_text)
def updateStageEditor():
for child in stage_editor.winfo_children():
child.grid_forget()
stages[selected_stage.get()].display()
root.focus_set()
stage_editor = tk.Frame(root, width=10, height=10)#Size is the same as right_text, they will expand equally to fill the space
stage_editor.grid(row=0, column=0, rowspan=4, sticky="NESW")
stage_editor.grid_propagate(0) #stops the contents of the window affecting the size
stages = []
def addStage(stage):
stages.append(stage)
updateStagesFrame()
stages[len(stages)-1].button.select() #select the newly added stage
updateStageEditor()
updateOutputText()
selected_stage = tk.IntVar()
stages_frame = tk.Frame(root)
stages_frame.grid(row=0, column=1, sticky="NS", columnspan=3)
#Radiobuttons to select between encode and decode
decode_var = tk.IntVar()
decodeBox = tk.Radiobutton(root, text="Decode", variable=decode_var,value=-1,command=updateOutputText)
encodeBox = tk.Radiobutton(root, text="Encode", variable=decode_var,value=1,command=updateOutputText)
decode_var.set(-1) #set to decode as default
decodeBox.grid(row=1,column=1,columnspan=3)
encodeBox.grid(row=2,column=1,columnspan=3)
#Up, Delete, and Down buttons
def stageUp():
if len(stages) > 1 and selected_stage.get() > 1:
stages.insert(selected_stage.get()-1, stages.pop(selected_stage.get()))
selected_stage.set(selected_stage.get()-1)
updateStagesFrame()
updateOutputText()
def stageDown():
if len(stages) > 1 and selected_stage.get() < len(stages)-1 and selected_stage.get() != 0:
stages.insert(selected_stage.get()+1, stages.pop(selected_stage.get()))
selected_stage.set(selected_stage.get()+1)
updateStagesFrame()
updateOutputText()
def deleteStage():
if len(stages) > 1 and selected_stage.get() != 0:
stages.pop(selected_stage.get())
selected_stage.set(selected_stage.get()-1)
updateStagesFrame()
updateStageEditor()
updateOutputText()
stage_up_button = tk.Button(root, text = "↑",command=stageUp,takefocus=0)
stage_delete_button = tk.Button(root, text = "×",command=deleteStage,takefocus=0)
stage_down_button = tk.Button(root, text = "↓",command=stageDown,takefocus=0)
stage_up_button.grid(row=3, column=1, sticky="ESW")
stage_delete_button.grid(row=3,column=2, sticky="ESW")
stage_down_button.grid(row=3, column=3, sticky="ESW")
#Shortcuts for selecting the next and previous stage
def stageSelectUp(event):
if selected_stage.get() > 0:
selected_stage.set(selected_stage.get()-1)
updateStagesFrame()
updateStageEditor()
def stageSelectDown(event):
if selected_stage.get() < len(stages) - 1:
selected_stage.set(selected_stage.get()+1)
updateStagesFrame()
updateStageEditor()
root.bind("<Control-Tab>", stageSelectUp)
root.bind("<Control-Shift-Tab>", stageSelectDown)
root.bind("<Control-Prior>", stageSelectUp) #Control + page up
root.bind("<Control-Next>", stageSelectDown) #Control + page down
def updateStagesFrame():
for button in stages_frame.winfo_children():
button.destroy()
for stage_index in range(len(stages)):
stage = stages[stage_index]
stage.button = tk.Radiobutton(stages_frame, text=stage.name, variable = selected_stage, value = stage_index, command=updateStageEditor,
indicatoron = 0, width = 20, takefocus=0)
stage.check_var = tk.BooleanVar()
stage.check_var.set(True)
stage.checkbox = tk.Checkbutton(stages_frame, variable = stage.check_var, command=updateOutputText, takefocus=0)
if stage_index == 0: #Input cannot be disabled, so don't show the checkbox
stage.checkbox.config(state="disabled")
stage.button.grid(column=1, row=stage_index)
stage.checkbox.grid(column=0, row=stage_index)
updateStagesFrame()
right_text = tk.Text(root, takefocus=0, width=10, height=10, font=("Courier", 10))
right_text.grid(row=0, column=4, rowspan=4, sticky="NESW")
right_text.grid_propagate(0)
tk.Grid.columnconfigure(root, 0, weight=1)
tk.Grid.columnconfigure(root, 1, weight=0)
tk.Grid.columnconfigure(root, 2, weight=0)
tk.Grid.columnconfigure(root, 3, weight=0)
tk.Grid.columnconfigure(root, 4, weight=1)
tk.Grid.rowconfigure(root, 0, weight=1)
tk.Grid.rowconfigure(root, 1, weight=0)
tk.Grid.columnconfigure(stage_editor, 0, weight=1)
tk.Grid.rowconfigure(stage_editor, 0, weight=1)
#==========
def add(menu, StageClass): #Helper function to make adding stages neater
menu.add_command(label= StageClass.name,#Takes the name from the class
command=lambda:addStage(StageClass(stage_editor, #passes the stage editor frame to draw to
updateOutputText))) #and a callback for when things change and the output text needs updating
#Functions for file menu operations:
def openCom():
text = ""
try:
with filedialog.askopenfile() as file:
for line in file:
text += line
stages[0].textbox.delete(1.0, tk.END)
stages[0].textbox.insert(tk.END,text)
except AttributeError:#Catch error if the user cancels the dialog
pass
def clearCom():
global stages
stages[0].textbox.delete(1.0, tk.END)
stages = [stages[0]]
selected_stage.set(0)
updateStageEditor()
updateStagesFrame()
def saveCom():
text = getOutputText()
try:
with filedialog.asksaveasfile() as file:
file.write(text)
except AttributeError:
pass
def copyCom():
text = ""
for stage in stages:
text = stage.process(text)
root.clipboard_clear()
root.clipboard_append(text)
root.update()
menu = tk.Menu(root)
file_menu = tk.Menu(menu, tearoff=0)
file_menu.add_command(label="Open", command=openCom)
file_menu.add_command(label="Clear", command = clearCom)
file_menu.add_command(label="Save", command=saveCom)
file_menu.add_command(label="Copy output", command=copyCom)
menu.add_cascade(label="File", menu = file_menu)
ana_menu = tk.Menu(menu, tearoff=0)
add(ana_menu, Length)
add(ana_menu, PlayfairDetect)
add(ana_menu, FrequencyAnalyse)
add(ana_menu, Doubles)
add(ana_menu, Triples)
add(ana_menu, IoC)
add(ana_menu, WordFinder)
add(ana_menu, VigenereKeyword)
add(ana_menu, ColumnarKeyword)
menu.add_cascade(label="Analyse", menu=ana_menu)
text_menu = tk.Menu(menu, tearoff=0)
add(text_menu, Capitalise)
add(text_menu, Lowercase)
add(text_menu, Swapcase)
add(text_menu, Strip)
add(text_menu, RemoveSpaces)
add(text_menu, Reverse)
add(text_menu, Block)
menu.add_cascade(label="Text stage", menu=text_menu)
solve_menu = tk.Menu(menu, tearoff=0)
add(solve_menu, CaesarShift)
add(solve_menu, Substitution)
add(solve_menu, Affine)
add(solve_menu, Vigenere)
#add(solve_menu, Transposition) #this one doesn't work
add(solve_menu, RailFence)
add(solve_menu, Scytale)
add(solve_menu, Morse)
menu.add_cascade(label="Solve stage", menu=solve_menu)
#Functions for the output menu operations
def changeFontSize(change):
currentSize = int(right_text.cget("font").split(" ")[1])
right_text.config(font=("Courier", currentSize + change))
stages[0].textbox.config(font=("Courier", currentSize + change))
output_menu = tk.Menu(menu, tearoff=0)
add(output_menu, OutputHighlight)
add(output_menu, Blank)
output_menu.add_command(label="Increase font size", command=lambda:changeFontSize(1))
output_menu.add_command(label="Decrease font size", command=lambda:changeFontSize(-1))
right_text.tag_configure("highlight", foreground = "red")
menu.add_cascade(label="Output", menu=output_menu)
root.config(menu=menu)
addStage(Input(stage_editor, updateOutputText))
root.mainloop()
| 36.609442 | 149 | 0.710785 | 1,164 | 8,530 | 5.089347 | 0.215636 | 0.054862 | 0.045915 | 0.022957 | 0.292876 | 0.216577 | 0.16813 | 0.124578 | 0.113774 | 0.067016 | 0 | 0.015551 | 0.163189 | 8,530 | 232 | 150 | 36.767241 | 0.813953 | 0.095545 | 0 | 0.199005 | 0 | 0 | 0.034711 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.079602 | false | 0.00995 | 0.029851 | 0 | 0.114428 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7956dd9954a869adae25776f34d9cfad6f7f2ede | 1,912 | py | Python | mp/data/pytorch/domain_prediction_dataset_wrapper.py | MECLabTUDA/OOD-Gen | f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e | [
"MIT"
] | null | null | null | mp/data/pytorch/domain_prediction_dataset_wrapper.py | MECLabTUDA/OOD-Gen | f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e | [
"MIT"
] | null | null | null | mp/data/pytorch/domain_prediction_dataset_wrapper.py | MECLabTUDA/OOD-Gen | f85ea9106ae1425f18e34c9d82fa3ca4925d8d9e | [
"MIT"
] | null | null | null | from mp.data.pytorch.pytorch_dataset import PytorchDataset
from mp.data.datasets.dataset import Instance
import copy
import torch
class DomainPredictionDatasetWrapper(PytorchDataset):
r"""Wraps a PytorchDataset to reuse its instances.x and replacing the labels"""
def __init__(self, pytorch_ds, target_idx):
"""
Args:
pytorch_ds (PytorchSegmentationDataset): the Dataset that need to be wrapped
target_idx (int): the target idx for domain prediction, corresponding to this dataset
"""
class Dummy:
def __init__(self):
self.instances = pytorch_ds.instances
self.hold_out_ixs = []
self.original_ds = pytorch_ds
# Ugly
# noinspection PyTypeChecker
super().__init__(dataset=Dummy(), size=pytorch_ds.size)
# Copy the predictor, but prevent it from reshaping the prediction
self.predictor = copy.copy(pytorch_ds.predictor)
self.predictor.reshape_pred = False
# Create new target as one hot encoded
# self.target = torch.zeros((1, target_cnt), dtype=self.instances[0].y.tensor.dtype)
# self.target[:, target_idx] = 1
self.target = torch.tensor([target_idx], dtype=self.instances[0].y.tensor.dtype)
# Modify instances
self.instances = [Instance(inst.x, self.target, inst.name, inst.class_ix, inst.group_id)
for inst in self.instances]
def get_subject_dataloader(self, subject_ix):
r"""Get a list of input/target pairs equivalent to those if the dataset
was only of subject with index subject_ix. For evaluation purposes.
"""
# Generate the original subject dataloader and replace the target
subject_dataloader = self.original_ds.get_subject_dataloader(subject_ix)
return [(x, self.target) for x, _ in subject_dataloader]
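# --- Added usage sketch: wrap one dataset per source domain so the network
# predicts the domain index instead of the original target (names assumed):
# domain_datasets = [DomainPredictionDatasetWrapper(ds, idx)
#                    for idx, ds in enumerate(source_datasets)]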
| 40.680851 | 97 | 0.671548 | 239 | 1,912 | 5.200837 | 0.41841 | 0.043443 | 0.01609 | 0.030571 | 0.049879 | 0.049879 | 0.049879 | 0 | 0 | 0 | 0 | 0.00278 | 0.247385 | 1,912 | 46 | 98 | 41.565217 | 0.861015 | 0.373954 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.181818 | 0 | 0.454545 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795999b8a086d2a92c7c0d0019a508d781dcdb36 | 4,889 | py | Python | code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 1 | 2021-07-15T07:05:18.000Z | 2021-07-15T07:05:18.000Z | code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | 2 | 2021-07-15T06:12:47.000Z | 2021-07-16T10:05:36.000Z | code/visualization/2020/04/0_0_compression_tucker_sparse_facto_select_lr.py | lucgiffon/psm-nets | dec43c26281febf6e5c8b8f42bfb78098ae7101d | [
"MIT"
] | null | null | null | import pathlib
import pandas as pd
from palmnet.visualization.utils import get_palminized_model_and_df, get_df
import matplotlib.pyplot as plt
import numpy as np
import logging
import plotly.graph_objects as go
import plotly.express as px
from pprint import pprint as pprint
mpl_logger = logging.getLogger('matplotlib')
mpl_logger.setLevel(logging.ERROR)
dataset = {
"Cifar10": "--cifar10",
"Cifar100": "--cifar100",
"SVHN": "--svhn",
"MNIST": "--mnist"
}
basemodels = {
"Cifar100": ["--cifar100-vgg19", "--cifar100-resnet20", "--cifar100-resnet50"],
"Cifar10": ["--cifar10-vgg19"],
"SVHN": ["--svhn-vgg19"],
"MNIST": ["--mnist-lenet"]
}
def show_for_tucker():
# compression_method = ["tucker", "tensortrain"]
# df = df.apply(pd.to_numeric, errors='coerce')
dct_config_lr = dict()
lst_name_trace_low = list()
for dataname in dataset:
df_data = df[df[dataset[dataname]] == 1]
for base_model_name in basemodels[dataname]:
df_model = df_data[df_data[base_model_name] == 1]
for index, row in df_model.iterrows():
fig = go.Figure()
csv_file = pathlib.Path(row["results_dir"]) / row["output_file_csvcbprinter"]
df_csv = pd.read_csv(csv_file)
win_size = 5
lr_values = df_csv["lr"].values
lr_values_log = np.log10(lr_values)
lr_rolling_mean = pd.Series(lr_values_log).rolling(window=win_size).mean().iloc[win_size - 1:].values
loss_rolling_mean = df_csv["loss"].rolling(window=win_size).mean().iloc[win_size - 1:].values
if all(np.isnan(loss_rolling_mean)):
continue
delta_loss = (np.hstack([loss_rolling_mean, [0]]) - np.hstack([[0], loss_rolling_mean]))[1:-1]
delta_loss_rolling_mean = pd.Series(delta_loss).rolling(window=win_size).mean().iloc[win_size - 1:].values
lr_rolling_mean_2x = pd.Series(lr_rolling_mean).rolling(window=win_size).mean().iloc[win_size - 1:].values
lr_rolling_mean_2x_exp = 10 ** lr_rolling_mean_2x
# fig.add_trace(go.Scatter(x=lr_rolling_mean_exp, y=loss_rolling_mean, name="sp_fac {} - hiearchical {}".format(row["--sparsity-factor"], row["--hierarchical"])))
fig.add_trace(go.Scatter(x=lr_rolling_mean_2x_exp[:-1], y=delta_loss_rolling_mean, name=""))
argmin_loss = np.argmin(delta_loss_rolling_mean)
val = lr_rolling_mean_2x_exp[:-1][argmin_loss]
log_val = np.log10(val)
approx = 10 ** np.around(log_val, decimals=0)
sparsity = int(row["--sparsity-factor"])
hierarchical = bool(row["--hierarchical"])
str_hierarchical = " H" if hierarchical else ""
try:
nb_fac = int(row["--nb-factor"])
except ValueError:
nb_fac = None
name_trace = f"tucker_sparse_facto-{dataset[dataname]}-{base_model_name}-Q={nb_fac}-K={sparsity}{str_hierarchical}"
print(len(delta_loss_rolling_mean), name_trace)
if len(delta_loss_rolling_mean) < 10:
lst_name_trace_low.append(name_trace)
continue
dct_config_lr[name_trace] = approx
# title_str = "{}:{} - {} - keep first :{}".format(dataname, base_model_name, "tucker", keep_first)
fig.update_layout(barmode='group',
title=name_trace,
xaxis_title="lr",
yaxis_title="loss",
xaxis_type="log",
xaxis={'type': 'category'},
)
# fig.show()
pprint(dct_config_lr)
pprint(lst_name_trace_low)
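def pick_lr(lr_values, losses, win_size=5):
    # --- Added sketch: a condensed version of the lr-range heuristic above
    # (smooth loss over log10(lr), take the lr where the smoothed loss drop is
    # steepest, round to the nearest decade). Simplified by assumption: the
    # original applies a second rolling mean to both series before the argmin.
    lr_log = pd.Series(np.log10(lr_values)).rolling(win_size).mean().iloc[win_size - 1:].values
    loss_sm = pd.Series(losses).rolling(win_size).mean().iloc[win_size - 1:].values
    delta = np.diff(loss_sm)
    return 10 ** np.around(lr_log[:-1][np.argmin(delta)], decimals=0)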
if __name__ == "__main__":
root_source_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/results/")
expe_path = "2020/04/0_0_compression_tucker_sparse_facto_select_lr"
expe_path_errors = "2020/04/0_0_compression_tucker_sparse_facto_select_lr_errors"
src_results_dir = root_source_dir / expe_path
src_results_dir_errors = root_source_dir / expe_path_errors
get_df_and_assign = lambda x: get_df(x).assign(results_dir=str(x))
df = get_df_and_assign(src_results_dir)
df_errors = get_df_and_assign(src_results_dir_errors)
df = pd.concat([df, df_errors])
df = df.dropna(subset=["failure"])
df = df[df["failure"] == 0]
df = df.drop(columns="oar_id").drop_duplicates()
root_output_dir = pathlib.Path("/home/luc/PycharmProjects/palmnet/reports/figures/")
output_dir = root_output_dir / expe_path / "line_plots"
output_dir.mkdir(parents=True, exist_ok=True)
show_for_tucker() | 40.07377 | 178 | 0.607895 | 617 | 4,889 | 4.473258 | 0.290113 | 0.071739 | 0.054348 | 0.036232 | 0.243478 | 0.190942 | 0.181884 | 0.131159 | 0.131159 | 0.106522 | 0 | 0.022867 | 0.266517 | 4,889 | 122 | 179 | 40.07377 | 0.746793 | 0.074044 | 0 | 0.022472 | 0 | 0 | 0.140867 | 0.072534 | 0 | 0 | 0 | 0 | 0 | 1 | 0.011236 | false | 0 | 0.101124 | 0 | 0.11236 | 0.05618 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795b1f096f5aa18037e59346d95e4b832947c2de | 8,209 | py | Python | spectrocrunch/sources/tests/test_polarization.py | woutdenolf/spectrocrunch | fde4b6e0f462f464ce7af6a942b355d3d8f39f77 | [
"MIT"
] | 3 | 2018-04-16T15:51:36.000Z | 2019-12-16T11:21:05.000Z | spectrocrunch/sources/tests/test_polarization.py | woutdenolf/spectrocrunch | fde4b6e0f462f464ce7af6a942b355d3d8f39f77 | [
"MIT"
] | null | null | null | spectrocrunch/sources/tests/test_polarization.py | woutdenolf/spectrocrunch | fde4b6e0f462f464ce7af6a942b355d3d8f39f77 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import unittest
import cmath
import numpy as np
from scipy import integrate
from .. import polarization
from ...utils import instance
from ...patch import jsonpickle
class test_polarization(unittest.TestCase):
def _equal_params(self, params1, params2):
for k, v in params1.items():
if instance.isstring(v):
self.assertEqual(v, params2[k])
else:
np.testing.assert_allclose(v, params2[k])
def _gen_jones(self, n=20):
x = np.random.uniform(low=-10, high=10, size=4 * n).reshape((n, 4))
for xi in x:
yield polarization.Jones(xi[0] + xi[1] * 1j, xi[2] + xi[3] * 1j)
def _gen_stokes(self, n=20):
x = np.random.uniform(low=-10, high=10, size=3 * n).reshape((n, 3))
for xi in x:
S0 = np.sqrt(sum(xi[1:] ** 2)) * np.random.uniform(low=1, high=1.5)
yield polarization.Stokes(S0, *xi)
def test_convert_representation(self):
def f1(x, attr):
return getattr(x, attr)
def f2(x, attr):
return getattr(x, attr) % 360
attrs = {
"coherency_matrix": f1,
"dop": f1,
"dolp": f1,
"docp": f1,
"hdolp": f1,
"polangle": f2,
}
for J1 in self._gen_jones():
S1 = J1.to_stokes()
J2 = S1.to_jones()
S2 = J2.to_stokes()
J3 = S2.to_jones()
self._equal_params(J2.to_params(), J3.to_params())
self._equal_params(S1.to_params(), S2.to_params())
self.assertEqual(J1.dop, 1)
for attr, f in attrs.items():
a = f(J1, attr)
np.testing.assert_allclose(a, f(S1, attr))
np.testing.assert_allclose(a, f(J2, attr))
np.testing.assert_allclose(a, f(S2, attr))
np.testing.assert_allclose(a, f(J3, attr))
np.testing.assert_allclose(J1.norm, J2.norm)
np.testing.assert_allclose(
J1.phase_difference % 360, J2.phase_difference % 360
)
np.testing.assert_allclose(J2.to_numpy(), J3.to_numpy())
np.testing.assert_allclose(S1.to_numpy(), S2.to_numpy())
np.testing.assert_allclose(S1.to_numpy(), S2.to_numpy())
def test_stokes(self):
for S in self._gen_stokes():
tmp = S.decompose()
Spol, Sunpol = tmp["pol"], tmp["unpol"]
np.testing.assert_allclose(
S.intensity, S.intensity_polarized + S.intensity_unpolarized
)
np.testing.assert_allclose(S.intensity_polarized, Spol.intensity)
np.testing.assert_allclose(S.intensity_unpolarized, Sunpol.intensity)
np.testing.assert_allclose(S.dop, S.intensity_polarized / S.intensity)
np.testing.assert_allclose(
S.coherency_matrix, Spol.coherency_matrix + Sunpol.coherency_matrix
)
J = S.to_jones(allowloss=True)
np.testing.assert_allclose(J.intensity, Spol.intensity)
S2 = polarization.Stokes.from_params(**S.to_params())
np.testing.assert_allclose(S.to_numpy(), S2.to_numpy())
def test_jones(self):
for J in self._gen_jones():
np.testing.assert_allclose(
J.to_numpy(), J.to_stokes().to_jones(phase0=J.phase0).to_numpy()
)
np.testing.assert_allclose(J.coherency_matrix.trace(), J.norm ** 2)
J2 = polarization.Jones.from_params(**J.to_params())
np.testing.assert_allclose(J.to_numpy(), J2.to_numpy())
J.plot_efield(animate=True)
def test_intensity(self):
for J in self._gen_jones():
S = J.to_stokes()
Jparams = J.to_params()
Sparams = S.to_params()
IJ, IS = np.random.uniform(low=1, high=10, size=2)
J.intensity = IJ
S.intensity = IS
Jparams["intensity"] = IJ
Sparams["intensity"] = IS
self._equal_params(J.to_params(), Jparams)
self._equal_params(S.to_params(), Sparams)
for S in self._gen_stokes():
Sparams = S.to_params()
IS = np.random.uniform(low=1, high=10)
S.intensity = IS
Sparams["intensity"] = IS
self._equal_params(S.to_params(), Sparams)
def test_rotate(self):
for J1 in self._gen_jones():
S1 = J1.to_stokes()
azimuth = np.random.uniform(low=0, high=2 * np.pi) # change-of-frame
J2 = J1.rotate(azimuth)
S2 = S1.rotate(azimuth)
self._equal_params(S2.to_params(), J2.to_stokes().to_params())
R = polarization.JonesMatrixRotation(-azimuth)
Ri = polarization.JonesMatrixRotation(azimuth)
np.testing.assert_allclose(
R.dot(J1.coherency_matrix).dot(Ri), J2.coherency_matrix
)
np.testing.assert_allclose(
R.dot(S1.coherency_matrix).dot(Ri), S2.coherency_matrix
)
def test_thomson(self):
for J1 in self._gen_jones():
S1 = J1.to_stokes()
azimuth = np.random.uniform(low=0, high=2 * np.pi)
polar = np.random.uniform(low=0, high=np.pi)
J2 = J1.thomson_scattering(azimuth, polar)
S2 = S1.thomson_scattering(azimuth, polar)
self._equal_params(S2.to_params(), J2.to_stokes().to_params())
angle = polarization.ThomsonRotationAngle(azimuth) # change-of-frame
R = polarization.JonesMatrixRotation(-angle)
Ri = polarization.JonesMatrixRotation(angle)
Mth = polarization.JonesMatrixThomson(polar)
Mthi = Mth
np.testing.assert_allclose(
Mth.dot(R).dot(J1.coherency_matrix).dot(Ri).dot(Mthi),
J2.coherency_matrix,
)
np.testing.assert_allclose(
Mth.dot(R).dot(S1.coherency_matrix).dot(Ri).dot(Mthi),
S2.coherency_matrix,
)
np.testing.assert_allclose(
S2.intensity, S1.thomson_intensity(azimuth, polar)
)
def integrand(azimuth, polar):
return S1.thomson_intensity(
np.degrees(azimuth), np.degrees(polar)
) * np.sin(polar)
thomsonsc = (
integrate.dblquad(
integrand, 0, np.pi, lambda x: 0, lambda x: 2 * np.pi
)[0]
/ S1.intensity
)
np.testing.assert_allclose(thomsonsc, 8 * np.pi / 3)
def test_compton(self):
for S1 in self._gen_stokes():
azimuth = np.random.uniform(low=0, high=2 * np.pi)
polar = np.random.uniform(low=0, high=np.pi)
energy = np.random.uniform(low=5.0, high=20.0)
S2 = S1.compton_scattering(azimuth, polar, energy)
np.testing.assert_allclose(
S2.intensity, S1.compton_intensity(azimuth, polar, energy)
)
def test_serialize(self):
g1 = next(iter(self._gen_jones()))
g2 = jsonpickle.loads(jsonpickle.dumps(g1))
self.assertEqual(g1, g2)
g1 = next(iter(self._gen_stokes()))
g2 = jsonpickle.loads(jsonpickle.dumps(g1))
self.assertEqual(g1, g2)
def test_suite():
"""Test suite including all test suites"""
testSuite = unittest.TestSuite()
testSuite.addTest(test_polarization("test_jones"))
testSuite.addTest(test_polarization("test_stokes"))
testSuite.addTest(test_polarization("test_convert_representation"))
testSuite.addTest(test_polarization("test_intensity"))
testSuite.addTest(test_polarization("test_rotate"))
testSuite.addTest(test_polarization("test_thomson"))
testSuite.addTest(test_polarization("test_compton"))
testSuite.addTest(test_polarization("test_serialize"))
return testSuite
if __name__ == "__main__":
import sys
mysuite = test_suite()
runner = unittest.TextTestRunner()
if not runner.run(mysuite).wasSuccessful():
sys.exit(1)
| 35.081197 | 83 | 0.575466 | 997 | 8,209 | 4.564694 | 0.161484 | 0.053395 | 0.088991 | 0.136454 | 0.489563 | 0.383432 | 0.283894 | 0.165458 | 0.13909 | 0.13909 | 0 | 0.028132 | 0.302838 | 8,209 | 233 | 84 | 35.23176 | 0.76708 | 0.011085 | 0 | 0.233696 | 0 | 0 | 0.023921 | 0.003329 | 0 | 0 | 0 | 0 | 0.168478 | 1 | 0.081522 | false | 0 | 0.043478 | 0.016304 | 0.152174 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795b834e229f484b2777e3dde64e6efd9b1ae8d7 | 1,166 | py | Python | AlphaDDA1/Othello/ringbuffer.py | KazuhisaFujita/AlphaDDA | 664742567883cf3e08c2c53b3bce3112b8cc0560 | [
"MIT"
] | 11 | 2021-11-13T01:43:28.000Z | 2021-12-19T06:40:34.000Z | AlphaZero/Othello66/ringbuffer.py | KazuhisaFujita/AlphaDDA | 664742567883cf3e08c2c53b3bce3112b8cc0560 | [
"MIT"
] | null | null | null | AlphaZero/Othello66/ringbuffer.py | KazuhisaFujita/AlphaDDA | 664742567883cf3e08c2c53b3bce3112b8cc0560 | [
"MIT"
] | null | null | null | #---------------------------------------
#Since : 2019/04/24
#Update: 2019/07/25
# -*- coding: utf-8 -*-
#---------------------------------------
import numpy as np
class RingBuffer:
def __init__(self, buf_size):
self.size = buf_size
self.buf = []
for i in range(self.size):
self.buf.append([])
self.start = 0
self.end = 0
def add(self, el):
self.buf[self.end] = el
self.end = (self.end + 1) % self.size
if self.end == self.start:
self.start = (self.start + 1) % self.size
def Get_buffer(self):
array = []
for i in range(self.size):
buf_num = (self.end - i) % self.size
array.append(self.buf[buf_num])
return array
def Get_buffer_start_end(self):
array = []
for i in range(self.size):
buf_num = (self.start + i) % self.size
if self.buf[buf_num] == []:
return array
array.append(self.buf[buf_num])
return array
def get(self):
val = self.buf[self.start]
self.start = (self.start + 1) % self.size
return val
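# --- Added usage sketch (not part of the original module):
if __name__ == '__main__':
    rb = RingBuffer(3)
    for v in range(5):
        rb.add(v)
    # Keeps the last `size` values; note the order rotates once the buffer wraps.
    print(rb.Get_buffer_start_end())  # -> [3, 4, 2]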
| 26.5 | 53 | 0.482847 | 149 | 1,166 | 3.677852 | 0.261745 | 0.131387 | 0.094891 | 0.131387 | 0.498175 | 0.498175 | 0.419708 | 0.419708 | 0.419708 | 0.288321 | 0 | 0.028025 | 0.326758 | 1,166 | 43 | 54 | 27.116279 | 0.670064 | 0.116638 | 0 | 0.375 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.15625 | false | 0 | 0.03125 | 0 | 0.34375 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795f708e3eddaecd36d179568af03258f48e6ef1 | 8,202 | py | Python | ANOVA.py | AngusNicolson/factorial_experiment_analysis | a499642c38cb22a2ce13b93dda82c622193e7e35 | [
"MIT"
] | null | null | null | ANOVA.py | AngusNicolson/factorial_experiment_analysis | a499642c38cb22a2ce13b93dda82c622193e7e35 | [
"MIT"
] | null | null | null | ANOVA.py | AngusNicolson/factorial_experiment_analysis | a499642c38cb22a2ce13b93dda82c622193e7e35 | [
"MIT"
] | null | null | null | import itertools
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
import pandas as pd
from scipy.stats import f
from scipy.stats import norm
class ANOVA:
"""Analyse DOE experiments using ANOVA. NB: n > 1 for the code to work, where n is the number of repeats.
Model: y = y_average
i.e. all factors have no effect on the response, so each sum of squares measures how much a factor affects the response.
Replace with linear model??"""
def __init__(self, data):
#Initialise variables and define simple statistical values
self.data = data
self.num_factors = len(self.data.columns) - 1
self.factors = list(self.data.columns[:-1])
self.sum_y = data.iloc[:,-1].sum()
self.unique_dict = self.unique_values_dict(self.data)
self.levels = {}
#Determine all interactions between factors
sources_of_variation = []
for interaction_level in range(self.num_factors):
combos = itertools.combinations(self.factors, interaction_level + 1)
for combo in combos:
sources_of_variation.append(self.make_interaction_name(combo))
sources_of_variation.append('Error')
sources_of_variation.append('Total')
#Create ANOVA table
self.table = pd.DataFrame(columns=['Sum of Squares', 'Degrees of Freedom', 'Mean Square', 'F0', 'P-Value'], index=sources_of_variation)
#Needed for functions later, even though the data ends up in the table.
#Code is designed like this because initially more dictionaries were used instead of a pandas dataframe.
self.sum_of_squares = [{}]*self.num_factors
#Determine number of repeats. Must be the same for all measurements.
total = 1
for factor in self.factors:
level = len(self.unique_dict[factor])
self.levels[factor] = level
total = total*level
self.n = len(self.data)/total
self.total = len(self.data)
#Most of the complicated equations are contained within this loop/function
for interaction_level in range(self.num_factors):
self.calculate_interactions(interaction_level + 1)
#Create the table from component parts
#Sum of squares
self.table['Sum of Squares'] = pd.DataFrame(self.sum_of_squares).max()
self.table.loc['Total', 'Sum of Squares'] = (data.iloc[:,-1]**2).sum() - (self.sum_y**2)/(self.total)
prefactor = self.make_prefactor(self.factors)
final_subtotal = (1/(prefactor*self.n)) * (self.data.groupby(self.factors).sum().iloc[:,-1]**2).sum() - (self.sum_y**2)/self.total
self.table.loc['Error', 'Sum of Squares']= self.table.loc['Total', 'Sum of Squares'] - final_subtotal
#Degrees of freedom
self.table.loc['Total', 'Degrees of Freedom'] = self.total - 1
self.table.loc['Error', 'Degrees of Freedom'] = (self.total/self.n) * (self.n - 1)
#Mean square
self.table['Mean Square'] = self.table['Sum of Squares']/self.table['Degrees of Freedom']
#F0
self.table['F0'] = self.table['Mean Square']/self.table.loc['Error', 'Mean Square']
#P-value
self.f_function = f(self.n, self.total/self.n)
self.table['P-Value'] = self.f_function.sf(list(self.table['F0']))
#Remove values which have no meaning. Only calculated in the first place because it was simpler to code.
self.table.iloc[-2:, -2:] = np.NaN
self.table.iloc[-1, -3] = np.NaN
self.table.iloc[:, :-1] = self.table.iloc[:, :-1].astype(float)
#F0 for statistical significance P<0.05
self.calculate_F0_significance_level()
#Residuals for model y = average_y
self.calculate_residuals()
def calculate_interactions(self, interaction_level):
"""Calculates sum of squares and degrees of freedom for a specified interaction level and saves them in the self.table dataframe.
interaction_level = 1 ---> Main factors
interaction_level = 2 ---> 2-factor interactions
interaction_level = 3 ---> 3-factor interactions
..."""
combinations = itertools.combinations(self.factors, interaction_level)
subtotals = {}
effects = {}
for combo in combinations:
interaction_factors = list(combo)
interaction = self.make_interaction_name(interaction_factors)
prefactor = self.make_prefactor(interaction_factors)
self.table.loc[interaction, 'Degrees of Freedom'] = self.calculate_degrees_of_freedom(interaction_factors)
subtotals[interaction] = (1/(prefactor*self.n)) * (self.data.groupby(interaction_factors).sum().iloc[:,-1]**2).sum() - (self.sum_y**2)/self.total
effects[interaction] = subtotals[interaction]
for level in range(interaction_level - 1) :
factor_combos = itertools.combinations(combo, level + 1)
for factor_combo in factor_combos:
name = self.make_interaction_name(factor_combo)
effects[interaction] += -self.sum_of_squares[level][name]
self.sum_of_squares[interaction_level - 1] = effects
def calculate_degrees_of_freedom(self, interaction_factors):
dof = 1
for factor in interaction_factors:
dof = (self.levels[factor] - 1) * dof
return dof
def unique_values_dict(self, df):
unique_dict = {}
for column in df.columns:
unique_dict[column] = df[column].unique()
return unique_dict
def make_prefactor(self, interaction_factors):
#Determine prefactor. Multiply all factor levels together which aren't the main factor
prefactor = 1
for factor in self.factors:
if factor not in interaction_factors:
prefactor = prefactor * self.levels[factor]
return prefactor
def make_interaction_name(self, interaction_factors):
interaction = ''
for factor in interaction_factors:
interaction = interaction + ':' + factor
interaction = interaction[1:]
return interaction
def calculate_F0_significance_level(self, sig=0.05):
self.significance = self.f_function.isf(sig)
def calculate_residuals(self):
self.sigma = np.sqrt(self.table.loc['Error', 'Mean Square'])
tmp_data = self.data.set_index(self.factors)
self.residuals = (tmp_data - tmp_data.groupby(self.factors).mean()).iloc[:, -1].values/self.sigma
def plot_residuals(self):
"""Makes a normal probability plot of residuals"""
residuals = sorted(self.residuals)
df = pd.DataFrame(columns=['Residuals'], data=residuals)
df['Position'] = df.index + 1
df['f'] = (df.Position - 0.375)/(len(df) + 0.25)
df['z'] = norm.ppf(df.f)
plt.figure()
sns.regplot(x='Residuals', y='z', data=df)
plt.show()
def plot_normal(self):
"""Makes a normal probability plot of the response"""
tmp_data = self.data.iloc[:, -1].values
tmp_data.sort()
df = pd.DataFrame(columns=['Response'], data=tmp_data)
df['Position'] = df.index + 1
df['f'] = (df.Position - 0.375)/(len(df) + 0.25)
df['z'] = norm.ppf(df.f)
plt.figure()
sns.regplot(x='Response', y='z', data=df)
plt.show()
def plot_pareto_chart(self):
ANOVA_table = self.table.sort_values(by='F0')
plt.figure()
plt.barh(ANOVA_table.index, ANOVA_table['F0'])
plt.xlabel('F0')
plt.ylabel('Term')
plt.axvline(x = self.significance, linestyle='--')
three_data = pd.read_csv('test_data.csv')
three = ANOVA(three_data)
#Doesn't work for n < 2
five_data = pd.read_csv('example_data.csv')
five_data.drop(columns=['order'], inplace=True)
five = ANOVA(five_data)
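# --- Added note: the expected CSV layout (illustrative names) is one column
# per factor plus a final response column, with n >= 2 replicates per factor
# combination, e.g.:
#   temperature,pressure,yield
#   low,low,28.1
#   low,low,27.4
#   low,high,30.2
#   ...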
| 42.278351 | 157 | 0.617045 | 1,036 | 8,202 | 4.766409 | 0.214286 | 0.04192 | 0.031592 | 0.020251 | 0.237141 | 0.170514 | 0.113609 | 0.076347 | 0.051235 | 0.051235 | 0 | 0.012161 | 0.268105 | 8,202 | 193 | 158 | 42.497409 | 0.810428 | 0.175933 | 0 | 0.136 | 0 | 0 | 0.059467 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.088 | false | 0 | 0.056 | 0 | 0.184 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
795f95b9ee59eba0d720fd1de7316678421773f4 | 6,010 | py | Python | datmo/core/entity/snapshot.py | datmo/datmo | a456d196006b67ce56af96cb4900682eab747bef | [
"MIT"
] | 331 | 2018-03-30T14:33:59.000Z | 2022-01-10T19:43:32.000Z | datmo/core/entity/snapshot.py | KIMS-Github/datmo | a456d196006b67ce56af96cb4900682eab747bef | [
"MIT"
] | 274 | 2018-04-08T17:12:44.000Z | 2020-07-29T02:45:22.000Z | datmo/core/entity/snapshot.py | KIMS-Github/datmo | a456d196006b67ce56af96cb4900682eab747bef | [
"MIT"
] | 28 | 2018-05-03T21:57:22.000Z | 2020-12-31T04:18:42.000Z | import os
from datetime import datetime
from datmo.core.util.json_store import JSONStore
from datmo.core.util.misc_functions import prettify_datetime, printable_object, format_table
class Snapshot():
"""Snapshot is an entity object to represent a version of the model. These snapshots
are the building blocks upon which models can be shared and reproduced.
Snapshots consist of 5 main components which are represented as well in the attributes
listed below
1) Source code
2) Dependency environment
3) Large files not included in source code
4) Configurations of your model, features, data, etc
5) Performance metrics that evaluate your model
Note
----
All attributes of the class in the ``Attributes`` section must be serializable by the DB
Parameters
----------
dictionary : dict
id : str, optional
the id of the entity
(default is None; storage driver has not assigned an id yet)
model_id : str
the parent model id for the entity
message : str
long description of snapshot
code_id : str
code reference associated with the snapshot
environment_id : str
id for environment used to create snapshot
file_collection_id : str
file collection associated with the snapshot
config : dict
key, value pairs of configurations
stats : dict
key, value pairs of metrics and statistics
task_id : str, optional
task id associated with snapshot
(default is None, means no task_id set)
label : str, optional
short description of snapshot
(default is None, means no label set)
visible : bool, optional
True if visible to user via list command else False
(default is True to show users unless otherwise specified)
created_at : datetime.datetime, optional
(default is datetime.utcnow(), at time of instantiation)
updated_at : datetime.datetime, optional
(default is same as created_at, at time of instantiation)
Attributes
----------
id : str or None
the id of the entity
model_id : str
the parent model id for the entity
message : str
long description of snapshot
code_id : str
code reference associated with the snapshot
environment_id : str
id for environment used to create snapshot
file_collection_id : str
file collection associated with the snapshot
config : dict
key, value pairs of configurations
stats : dict
key, value pairs of metrics and statistics
task_id : str or None
task id associated with snapshot
label : str or None
short description of snapshot
visible : bool
True if visible to user via list command else False
created_at : datetime.datetime
updated_at : datetime.datetime
"""
def __init__(self, dictionary):
self.id = dictionary.get('id', None)
self.model_id = dictionary['model_id']
self.message = dictionary['message']
self.code_id = dictionary['code_id']
self.environment_id = dictionary['environment_id']
self.file_collection_id = dictionary['file_collection_id']
self.config = dictionary['config']
self.stats = dictionary['stats']
self.task_id = dictionary.get('task_id', None)
self.label = dictionary.get('label', None)
self.visible = dictionary.get('visible', True)
self.created_at = dictionary.get('created_at', datetime.utcnow())
self.updated_at = dictionary.get('updated_at', self.created_at)
def __eq__(self, other):
return self.id == other.id if other else False
def __str__(self):
if self.label:
final_str = '\033[94m' + "snapshot " + self.id + '\033[0m'
final_str = final_str + '\033[94m' + " (" + '\033[0m'
final_str = final_str + '\033[93m' + '\033[1m' + "label: " + self.label + '\033[0m'
final_str = final_str + '\033[94m' + ")" + '\033[0m' + os.linesep
else:
final_str = '\033[94m' + "snapshot " + self.id + '\033[0m' + os.linesep
final_str = final_str + "Date: " + prettify_datetime(
self.created_at) + os.linesep
table_data = []
if self.task_id:
table_data.append(["Task", "-> " + self.task_id])
table_data.append(["Visible", "-> " + str(self.visible)])
# Components
table_data.append(["Code", "-> " + self.code_id])
table_data.append(["Environment", "-> " + self.environment_id])
table_data.append(["Files", "-> " + self.file_collection_id])
table_data.append(["Config", "-> " + str(self.config)])
table_data.append(["Stats", "-> " + str(self.stats)])
final_str = final_str + format_table(table_data)
final_str = final_str + os.linesep + " " + self.message + os.linesep + os.linesep
return final_str
def __repr__(self):
return self.__str__()
def save_config(self, filepath):
JSONStore(os.path.join(filepath, 'config.json'), self.config)
return
def save_stats(self, filepath):
JSONStore(os.path.join(filepath, 'stats.json'), self.stats)
return
def to_dictionary(self, stringify=False):
attr_dict = self.__dict__
pruned_attr_dict = {
attr: val
for attr, val in attr_dict.items()
if not callable(getattr(self, attr)) and not attr.startswith("__")
}
if stringify:
for key in ["config", "stats", "message", "label"]:
pruned_attr_dict[key] = printable_object(pruned_attr_dict[key])
for key in ["created_at", "updated_at"]:
pruned_attr_dict[key] = prettify_datetime(
pruned_attr_dict[key])
return pruned_attr_dict
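
# --- Illustrative usage sketch (not part of the original file) ---
# A minimal example of building a Snapshot from a plain dictionary and
# serializing it back; the field values below are hypothetical placeholders
# and the datmo utility imports above are assumed to resolve.
if __name__ == "__main__":
    example = Snapshot({
        "model_id": "model_001",
        "message": "first training run",
        "code_id": "code_001",
        "environment_id": "env_001",
        "file_collection_id": "files_001",
        "config": {"learning_rate": 0.01},
        "stats": {"accuracy": 0.95},
    })
    print(example.to_dictionary(stringify=True))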
| 38.280255 | 95 | 0.621631 | 747 | 6,010 | 4.840696 | 0.227577 | 0.033186 | 0.029038 | 0.026549 | 0.333518 | 0.311394 | 0.262721 | 0.235896 | 0.235896 | 0.219027 | 0 | 0.012829 | 0.286689 | 6,010 | 156 | 96 | 38.525641 | 0.830651 | 0.419468 | 0 | 0.030769 | 0 | 0 | 0.111007 | 0 | 0.030769 | 0 | 0 | 0 | 0 | 1 | 0.107692 | false | 0 | 0.061538 | 0.030769 | 0.276923 | 0.030769 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7961d1af5a2c494ba659aefe30c177aba0152b99 | 3,895 | py | Python | ranking/train_LM.py | yzhhome/JDQA | 68e1d0259d316b3577a1f2fafa773b50f1885762 | [
"MIT"
] | 1 | 2021-12-21T10:50:21.000Z | 2021-12-21T10:50:21.000Z | ranking/train_LM.py | kalanile/JDQA | 68e1d0259d316b3577a1f2fafa773b50f1885762 | [
"MIT"
] | null | null | null | ranking/train_LM.py | kalanile/JDQA | 68e1d0259d316b3577a1f2fafa773b50f1885762 | [
"MIT"
] | 1 | 2021-12-21T10:50:20.000Z | 2021-12-21T10:50:20.000Z | '''
@Author: dengzaiyong
@Date: 2021-08-21 15:16:08
@LastEditTime: 2021-08-27 19:37:08
@LastEditors: dengzaiyong
@Description: Train the tfidf, word2vec and fasttext language models
@FilePath: /JDQA/ranking/train_LM.py
'''
import os
from collections import defaultdict
from gensim import models, corpora
import config
import pandas as pd
import jieba
from utils.tools import create_logger
logger = create_logger(config.root_path + '/logs/train_LM.log')
class Trainer(object):
def __init__(self):
self.data = self.data_reader(config.rank_train_file) + \
self.data_reader(config.rank_test_file) + \
self.data_reader(config.rank_dev_file)
self.stopwords = open(config.stopwords_path).readlines()
self.preprocessor()
self.train()
self.saver()
def data_reader(self, path):
"""
        Read the dataset and return all sentences from question1 and question2
"""
sentences = []
df = pd.read_csv(path, sep='\t', encoding='utf-8')
question1 = df['question1'].values
question2 = df['question2'].values
sentences.extend(list(question1))
sentences.extend(list(question2))
return sentences
def preprocessor(self):
"""
        Tokenize the sentences and build the data needed to compute tfidf
"""
logger.info('loading data...')
        # Tokenize every sentence
self.data = [[word for word in jieba.cut(sentence)] for sentence in self.data]
        # Count how many times each word occurs
self.freq = defaultdict(int)
for sentence in self.data:
for word in sentence:
self.freq[word] += 1
        # Filter out words that occur only once
self.data = [[word for word in sentence if self.freq[word] > 1] \
for sentence in self.data]
logger.info('building dictionary...')
        # Build the dictionary
self.dictionary = corpora.Dictionary(self.data)
        # Save the dictionary
self.dictionary.save(config.temp_path + '/model/ranking/ranking.dict')
        # Build the corpus
self.corpus = [self.dictionary.doc2bow(text) for text in self.data]
        # Serialize and save the corpus
corpora.MmCorpus.serialize(config.temp_path + '/model/ranking/ranking.mm', self.corpus)
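    # Note (illustrative): the artifacts saved above can be reloaded later with
    # gensim, e.g. corpora.Dictionary.load(config.temp_path + '/model/ranking/ranking.dict')
    # and corpora.MmCorpus(config.temp_path + '/model/ranking/ranking.mm').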
def train(self):
logger.info('train tfidf model...')
self.tfidf = models.TfidfModel(self.corpus, normalize=True)
logger.info('train word2vec model...')
self.w2v = models.Word2Vec(self.data,
vector_size=config.embed_dim,
window=2,
min_count=2,
sample=6e-5,
min_alpha=0.0007,
alpha=0.03,
workers=4,
negative=15,
epochs=10)
self.w2v.build_vocab(self.data)
self.w2v.train(self.data,
total_examples=self.w2v.corpus_count,
epochs=15,
report_delay=1)
logger.info('train fasttext model...')
self.fast = models.FastText(self.data,
vector_size=config.embed_dim,
window=3,
min_count=1,
epochs=10,
min_n=3,
max_n=6,
word_ngrams=1)
def saver(self):
logger.info(' save tfidf model ...')
self.tfidf.save(os.path.join(config.temp_path, 'model/ranking/tfidf.model'))
logger.info(' save word2vec model ...')
self.w2v.save(os.path.join(config.temp_path, 'model/ranking/w2v.model'))
logger.info(' save fasttext model ...')
self.fast.save(os.path.join(config.temp_path, 'model/ranking/fast.model'))
if __name__ == "__main__":
Trainer() | 32.458333 | 95 | 0.537869 | 416 | 3,895 | 4.920673 | 0.362981 | 0.058622 | 0.034196 | 0.046409 | 0.218368 | 0.175867 | 0.09575 | 0.09575 | 0.058622 | 0 | 0 | 0.030965 | 0.353273 | 3,895 | 120 | 96 | 32.458333 | 0.781659 | 0.077279 | 0 | 0.026316 | 0 | 0 | 0.098161 | 0.035078 | 0 | 0 | 0 | 0 | 0 | 1 | 0.065789 | false | 0 | 0.092105 | 0 | 0.184211 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7962461ca47687b7819e6dc00edee38793e1d6d0 | 4,680 | py | Python | dao/ImageDAO.py | NEU-CSYE6225-SEC03/webservice | 416cff5e3c8c88ce59333393a933ea88b3b8e2c0 | [
"MIT"
] | null | null | null | dao/ImageDAO.py | NEU-CSYE6225-SEC03/webservice | 416cff5e3c8c88ce59333393a933ea88b3b8e2c0 | [
"MIT"
] | null | null | null | dao/ImageDAO.py | NEU-CSYE6225-SEC03/webservice | 416cff5e3c8c88ce59333393a933ea88b3b8e2c0 | [
"MIT"
] | 1 | 2022-03-09T23:46:32.000Z | 2022-03-09T23:46:32.000Z | import uuid
import datetime
import pymysql
from tool.Config import Config
from tool.Logger import Logger
class ImageDAO(object):
def __init__(self, connect_pool):
self.connect_pool = connect_pool
async def userImageExist(self, user_id: str):
selectResult = None
async with self.connect_pool.acquire() as conn:
async with conn.cursor() as cursor:
try:
await cursor.execute("SELECT user_id FROM image WHERE user_id = %s", [user_id, ])
selectResult = await cursor.fetchone()
Logger.getInstance().info('execute sql to determine exist of image by user_id [%s]' % user_id)
except Exception as e:
Logger.getInstance().exception(e)
return selectResult is not None
async def getUserImage(self, user_id: str):
selectResult = None
async with self.connect_pool.acquire() as conn:
async with conn.cursor() as cursor:
try:
await cursor.execute(
"SELECT id, file_name, user_id, url, upload_date FROM image WHERE user_id = %s",
[user_id, ])
Logger.getInstance().info('execute sql to get info of image by user_id[%s]' % user_id)
selectResult = await cursor.fetchone()
except Exception as e:
Logger.getInstance().exception(e)
if selectResult is not None:
return {
'id': selectResult[0],
'file_name': selectResult[1],
'user_id': selectResult[2],
'url': selectResult[3],
'upload_date': selectResult[4].strftime("%Y-%m-%d")
}
else:
return None
async def updateUserImage(self, file_name: str, url: str, user_id: str):
affectRowNum = 0
async with self.connect_pool.acquire() as conn:
async with conn.cursor() as cursor:
try:
affectRowNum = await cursor.execute(
"UPDATE image SET file_name = %s, url = %s, upload_date = %s where user_id = %s",
[file_name,
url,
datetime.datetime.now().strftime("%Y-%m-%d"),
user_id, ])
Logger.getInstance().info('execute sql for updating image info by user_id[%s]' % user_id)
await conn.commit()
except Exception as e:
Logger.getInstance().exception(e)
if affectRowNum:
return True
else:
return False
async def deleteUserImage(self, user_id: str):
affectRowNum = 0
async with self.connect_pool.acquire() as conn:
async with conn.cursor() as cursor:
try:
affectRowNum = await cursor.execute(
"DELETE FROM image WHERE user_id = %s",
[user_id, ]
)
Logger.getInstance().info('execute sql for deleting image info by user_id[%s]' % user_id)
await conn.commit()
except Exception as e:
Logger.getInstance().exception(e)
if affectRowNum:
return True
else:
return False
async def createUserImage(self, file_name: str, url: str, user_id: str):
table = 'image'
data = {
'id': str(uuid.uuid1()),
'file_name': file_name,
'url': url,
'user_id': user_id,
'upload_date': datetime.datetime.now().strftime("%Y-%m-%d"),
}
keys = ', '.join(data.keys())
values = ', '.join(['%s'] * len(data))
insert_sql = "INSERT INTO {table} ({keys}) VALUES ({values})".format(table=table, keys=keys, values=values)
affectRowNum = 0
async with self.connect_pool.acquire() as conn:
async with conn.cursor() as cursor:
try:
affectRowNum = await cursor.execute(insert_sql, tuple(data.values()))
await conn.commit()
Logger.getInstance().info(
                        'execute sql for inserting an image, affectRowNum[{}], insert sql[{}], values[{}]'.format(
affectRowNum, insert_sql, tuple(data.values())))
except Exception as e:
Logger.getInstance().exception(e)
if affectRowNum:
return True, data
else:
return False, data
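
# --- Illustrative usage sketch (not part of the original file) ---
# Assumes connect_pool is an aiomysql-compatible pool; the connection
# parameters and identifiers below are hypothetical placeholders.
if __name__ == '__main__':
    import asyncio
    import aiomysql

    async def demo():
        pool = await aiomysql.create_pool(
            host='127.0.0.1', user='root', password='secret', db='webservice')
        dao = ImageDAO(pool)
        created, data = await dao.createUserImage(
            'avatar.png', 'https://example.com/avatar.png', 'user-123')
        print(created, data['id'])

    asyncio.run(demo())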
| 37.142857 | 115 | 0.519444 | 494 | 4,680 | 4.813765 | 0.192308 | 0.065601 | 0.023549 | 0.03238 | 0.643818 | 0.623633 | 0.594617 | 0.569386 | 0.513457 | 0.43524 | 0 | 0.003114 | 0.382479 | 4,680 | 125 | 116 | 37.44 | 0.819723 | 0 | 0 | 0.461538 | 0 | 0.009615 | 0.141239 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.009615 | false | 0 | 0.048077 | 0 | 0.153846 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7962e2d4ed65e0f87126ca65657b5d805b1ac6cf | 2,363 | py | Python | profiletool.py | SimpleProxy/myproject02 | 13d0c657e2e324af78467eb2edfae2d22669573f | [
"MIT"
] | 1 | 2020-10-21T21:32:42.000Z | 2020-10-21T21:32:42.000Z | profiletool.py | kelvesc/myproject02 | 13d0c657e2e324af78467eb2edfae2d22669573f | [
"MIT"
] | null | null | null | profiletool.py | kelvesc/myproject02 | 13d0c657e2e324af78467eb2edfae2d22669573f | [
"MIT"
] | null | null | null | #!/bin/python3
# -*- coding: utf-8 -*-
# file name: profiletool.py
# standard libraries
from time import sleep
from time import process_time_ns as timer_ns
# to call the respective routines
import subprocess as ps
# local imports
import pyfactorial as pyf
import mathfactorial as mtf
def _vector():
return range(2, 501, 2)
def _mod_asm(num):
ps.run(["./asmmodifier.sh", num])
sleep(0.01)
def user_defined_fac(n):
return pyf.iterative_factorial(n)
def mathlib_defined_fac(n):
return mtf.factorial(n)
def vm_defined_fac(n):
ps.run(["./vm_code/hack_machine/CPUEmulator.sh",
"./vm_code/test/Factorial.tst",
"2&>1 >/dev/null"],
capture_output=True,
text=True)
def test_user_factorial():
results = open("./results/vector_nxt_user.txt", "w")
results.seek(0,2)
totalTime = 0
for num in _vector():
start = timer_ns()
fac = user_defined_fac(int(num))
end = timer_ns()
dt = end - start
totalTime += dt
results.write(f"{num} {dt}\n")
print(f"factorial of {num} took {dt} nanoseconds")
sleep(0.02)
print(f"Total time elapsed: {totalTime} nanoseconds")
results.close()
def test_math_factorial():
results = open("./results/vector_nxt_mathlib.txt", "w")
results.seek(0,2)
totalTime = 0
for num in _vector():
start = timer_ns()
fac = mathlib_defined_fac(int(num))
end = timer_ns()
dt = end - start
totalTime += dt
results.write(f"{num} {dt}\n")
print(f"factorial of {num} took {dt} nanoseconds")
sleep(0.02)
print(f"Total time elapsed: {totalTime} nanoseconds")
results.close()
def test_vm_factorial():
results = open("./results/vector_nxt_vm.txt", "w")
results.seek(0,2)
totalTime = 0
for num in _vector():
_mod_asm(str(num)) # modify asm file
start = timer_ns()
vm_defined_fac(int(num))
end = timer_ns()
dt = end - start
totalTime += dt
results.write(f"{num} {dt}\n")
print(f"factorial of {num} took {dt} nanoseconds")
sleep(0.02)
print(f"Total time elapsed: {totalTime} nanoseconds")
results.close()
if __name__ == "__main__":
test_user_factorial()
test_math_factorial()
test_vm_factorial()
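    # --- Optional post-processing sketch (illustrative) ---
    # Each results file holds "n dt" pairs, so an average runtime could be
    # computed with something like:
    #   times = [int(line.split()[1])
    #            for line in open('./results/vector_nxt_user.txt')]
    #   print(sum(times) / len(times), "ns on average")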
| 22.084112 | 59 | 0.611934 | 321 | 2,363 | 4.317757 | 0.29595 | 0.035354 | 0.02381 | 0.058442 | 0.555556 | 0.555556 | 0.477633 | 0.477633 | 0.477633 | 0.477633 | 0 | 0.017065 | 0.25603 | 2,363 | 106 | 60 | 22.292453 | 0.771331 | 0.060093 | 0 | 0.514286 | 0 | 0 | 0.216802 | 0.069106 | 0 | 0 | 0 | 0 | 0 | 1 | 0.114286 | false | 0 | 0.071429 | 0.042857 | 0.228571 | 0.085714 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7964d5e0d6c5bbff30057dd541992a4595176f15 | 760 | py | Python | urmovie/views/image_view.py | xuyangliu/UR | 8a3c94dd6b6f16bf233167333464c0429ad269d8 | [
"Apache-2.0"
] | null | null | null | urmovie/views/image_view.py | xuyangliu/UR | 8a3c94dd6b6f16bf233167333464c0429ad269d8 | [
"Apache-2.0"
] | null | null | null | urmovie/views/image_view.py | xuyangliu/UR | 8a3c94dd6b6f16bf233167333464c0429ad269d8 | [
"Apache-2.0"
] | null | null | null | # Author:Sunny Liu
from django.shortcuts import HttpResponse
from django.shortcuts import render
from django.shortcuts import redirect
from urmovie import models
from django.views.decorators.csrf import csrf_exempt
import hashlib,os
"""
内容简介:
1.爬虫情况下,对电影封面的添加
"""
@csrf_exempt
def uploadImg(request):
if request.method == 'POST':
print(type(request.FILES.get('img')))
new_img = models.Image(
image_file=request.FILES.get('img'),
image_name = "hahaha.jpg",
)
new_img.save()
return render(request, 'uploadimg.html')
@csrf_exempt
def showImg(request):
imgs = models.Image.objects.all()
content = {
'imgs':imgs,
}
return render(request, 'showimg.html', content) | 24.516129 | 52 | 0.665789 | 93 | 760 | 5.365591 | 0.505376 | 0.08016 | 0.114228 | 0.150301 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001684 | 0.218421 | 760 | 31 | 53 | 24.516129 | 0.838384 | 0.021053 | 0 | 0.086957 | 0 | 0 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.086957 | false | 0 | 0.26087 | 0 | 0.434783 | 0.043478 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7964ebe5d975dfd2d7d9cc2c69f05839abcd1197 | 2,983 | py | Python | fastreid/layers/norm_layers/batch_re_norm2d.py | SZLSP/reid2020NAIC | d0eaee768e0be606417a27ce5ea2b3071b5a9bc2 | [
"Apache-2.0"
] | 2 | 2021-05-12T13:36:46.000Z | 2021-08-15T10:35:08.000Z | fastreid/layers/norm_layers/batch_re_norm2d.py | SZLSP/reid2020NAIC | d0eaee768e0be606417a27ce5ea2b3071b5a9bc2 | [
"Apache-2.0"
] | 1 | 2021-12-28T12:49:49.000Z | 2021-12-28T12:49:49.000Z | fastreid/layers/norm_layers/batch_re_norm2d.py | SZLSP/reid2020NAIC | d0eaee768e0be606417a27ce5ea2b3071b5a9bc2 | [
"Apache-2.0"
] | null | null | null | import torch
import torch.nn as nn
from torch.cuda.amp import custom_fwd
class BatchReNorm2D(nn.Module):
"""Batch Re-Normalization
Parameters
num_features – C from an expected input of size (N, C, H, W)
eps – a value added to the denominator for numerical stability. Default: 1e-5
momentum – the value used for the running_mean and running_var computation. Can be set to None for cumulative moving average (i.e. simple average). Default: 0.1
affine – a boolean value that when set to True, this module has learnable affine parameters. Default: True
r_max - a hyper parameter. The paper used rmax = 1 for the first 5000 training steps, after which these were gradually relaxed to reach rmax=3 at 40k steps.
d_max - a hyper parameter. The paper used dmax = 0 for the first 5000 training steps, after which these were gradually relaxed to reach dmax=5 at 25k steps.
Shape:
Input: (N, C, H, W)
Output: (N, C, H, W) (same shape as input)
Examples:
        >>> m = BatchReNorm2D(100)
>>> input = torch.randn(20, 100, 35, 45)
>>> output = m(input)
"""
def __init__(self, num_features, r_max=1, d_max=0, eps=1e-3, momentum=0.01, affine=True):
super(BatchReNorm2D, self).__init__()
self.affine = affine
if self.affine:
self.weight = nn.Parameter(torch.ones((1, num_features, 1, 1)))
self.bias = nn.Parameter(torch.zeros((1, num_features, 1, 1)))
self.register_buffer('running_var', torch.ones(1, num_features, 1, 1))
self.register_buffer('running_mean', torch.zeros(1, num_features, 1, 1))
self.r_max, self.d_max = r_max, d_max
self.eps, self.momentum = eps, momentum
def update_stats(self, input):
batch_mean = input.mean((0, 2, 3), keepdim=True)
batch_var = input.var((0, 2, 3), keepdim=True)
batch_std = (batch_var + self.eps).sqrt()
running_std = (self.running_var + self.eps).sqrt()
r = torch.clamp(batch_std / running_std, min=1 / self.r_max, max=self.r_max).detach()
d = torch.clamp((batch_mean - self.running_mean) / running_std, min=-self.d_max, max=self.d_max).detach()
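        # Per Batch Renormalization (Ioffe, 2017): r corrects the scale
        # (batch std vs. running std) and d corrects the shift (batch mean
        # vs. running mean); both are clamped and detached so no gradient
        # flows through the correction terms.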
self.running_mean.lerp_(batch_mean, self.momentum)
self.running_var.lerp_(batch_var, self.momentum)
return batch_mean, batch_std, r, d
@custom_fwd(cast_inputs=torch.float32)
def forward(self, input):
if self.training:
with torch.no_grad():
mean, std, r, d = self.update_stats(input)
input = (input - mean) / std * r + d
else:
            mean = self.running_mean
            input = (input - mean) / (self.running_var + self.eps).sqrt()
if self.affine:
return self.weight * input + self.bias
return input
if __name__ == '__main__':
m = BatchReNorm2D(100)
input = torch.randn(20, 100, 35, 45)
output = m(input)
| 41.430556 | 168 | 0.636272 | 448 | 2,983 | 4.09375 | 0.299107 | 0.041985 | 0.026172 | 0.028353 | 0.29771 | 0.29771 | 0.249727 | 0.217012 | 0.176663 | 0.134133 | 0 | 0.034035 | 0.251425 | 2,983 | 71 | 169 | 42.014085 | 0.78549 | 0.325176 | 0 | 0.05 | 0 | 0 | 0.01593 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0 | 0.075 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7966f849a29e53c40e0aa168b93b3cd8e669d4ec | 3,191 | py | Python | Projects/Project 2/program.py | ymirthor/T-215-STY1 | b888da1e88c5aa16eac03353f525e9e0b9d901df | [
"MIT"
] | null | null | null | Projects/Project 2/program.py | ymirthor/T-215-STY1 | b888da1e88c5aa16eac03353f525e9e0b9d901df | [
"MIT"
] | null | null | null | Projects/Project 2/program.py | ymirthor/T-215-STY1 | b888da1e88c5aa16eac03353f525e9e0b9d901df | [
"MIT"
] | null | null | null | from collections import deque as LL
class VM_Manager:
def __init__(self):
self.s_size = 9
self.p_size = 9
self.w_size = 9
self.PM = [None] * 2**19 # PM[524288]
        self.D = [[None] * 2**9 for _ in range(2**10)] # D[1024][512], with independent rows
self.free_frames = LL([i for i in range(2**10)])
self.occupied_frames = [0,1]
def get_free_frame(self):
while True:
frame = self.free_frames.popleft()
if frame not in self.occupied_frames:
return frame
def create_ST(self, s, z, f):
if f >= 0:
self.occupied_frames.append(f)
self.PM[2 * s] = z
PT_idx = 2 * s + 1
self.PM[PT_idx] = f
def create_PT(self, s, p, f):
PT = self.PM[2 * s + 1]
if PT < 0:
self.D[-PT][p] = f
else:
self.occupied_frames.append(f)
self.PM[PT * 512 + p] = f
def translate_VA(self, VA):
s = VA >> (self.p_size + self.w_size)
p = (VA >> self.w_size) & 2 ** self.p_size - 1
w = VA & 2 ** self.w_size - 1
pw = VA & 2 ** (self.p_size + self.w_size) - 1
return s, p, w, pw
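    # Worked example of the split performed by translate_VA (illustrative):
    # with s_size = p_size = w_size = 9, a virtual address packs s|p|w, i.e.
    # VA = (s << 18) | (p << 9) | w. For VA = (2 << 18) | (3 << 9) | 7 = 525831:
    #   s  = 525831 >> 18         -> 2
    #   p  = (525831 >> 9) & 511  -> 3
    #   w  = 525831 & 511         -> 7
    #   pw = 525831 & 262143      -> 1543  (= 3 * 512 + 7)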
def PA(self, s, p, w, pw):
if pw >= self.PM[2 * s]:
return -1
PT = self.PM[2 * s + 1]
if PT < 0:
f1 = self.get_free_frame()
self.PM[f1 * 512 + p] = self.D[-PT][p]
PT = f1
pg = self.PM[PT * 512 + p]
if pg < 0:
f2 = self.get_free_frame()
pg = f2
return pg * 512 + w
def line_input(string):
nested = []
lis = []
for idx, i in enumerate(string.split(), start=1):
lis.append(int(i))
if idx % 3 == 0:
nested.append(lis)
lis = []
return nested
if __name__ == "__main__":
manager_no_dp = VM_Manager()
manager_dp = VM_Manager()
init_dp = open('init-dp.txt','r')
input_dp = open('input-dp.txt', 'r')
init_no_dp = open('init-no-dp.txt','r')
input_no_dp = open('input-no-dp.txt', 'r')
STs_dp = line_input(init_dp.readline())
for ST in STs_dp:
manager_dp.create_ST(*ST)
STs_no_dp = line_input(init_no_dp.readline())
for ST in STs_no_dp:
manager_no_dp.create_ST(*ST)
PTs_dp = line_input(init_dp.readline())
for PT in PTs_dp:
manager_dp.create_PT(*PT)
PTs_no_dp = line_input(init_no_dp.readline())
for PT in PTs_no_dp:
manager_no_dp.create_PT(*PT)
VAs_dp = list(map(int, input_dp.readline().split()))
VAs_no_dp = list(map(int, input_no_dp.readline().split()))
PAs_dp = []
for idx, address in enumerate(VAs_dp, start=1):
spw_pw = manager_dp.translate_VA(address)
PA = manager_dp.PA(*spw_pw)
PAs_dp.append(PA)
PAs_no_dp = []
for idx, address in enumerate(VAs_no_dp, start=1):
spw_pw = manager_no_dp.translate_VA(address)
PA = manager_no_dp.PA(*spw_pw)
PAs_no_dp.append(PA)
print(*PAs_no_dp)
print(*PAs_dp)
with open('output.txt','w') as out:
out.write(' '.join(map(str,PAs_no_dp)) + '\n')
out.write(' '.join(map(str,PAs_dp))) | 27.991228 | 62 | 0.531808 | 506 | 3,191 | 3.12253 | 0.181818 | 0.055696 | 0.028481 | 0.020253 | 0.375949 | 0.327848 | 0.172152 | 0.060759 | 0.060759 | 0 | 0 | 0.032543 | 0.325917 | 3,191 | 114 | 63 | 27.991228 | 0.701999 | 0.007208 | 0 | 0.086957 | 0 | 0 | 0.024953 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076087 | false | 0 | 0.01087 | 0 | 0.152174 | 0.021739 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79671fc83f6656f30c6074c1b351a64eeeecad56 | 3,750 | py | Python | src/utils/common/prediction_helper.py | Supreeth-Shetty/Projectathon---Simplified-AI | 3fc26a58a9370d119811ac4e864af977c21f6c40 | [
"MIT"
] | 8 | 2021-12-23T06:05:00.000Z | 2021-12-26T05:39:00.000Z | src/utils/common/prediction_helper.py | Supreeth-Shetty/Projectathon---Simplified-AI | 3fc26a58a9370d119811ac4e864af977c21f6c40 | [
"MIT"
] | null | null | null | src/utils/common/prediction_helper.py | Supreeth-Shetty/Projectathon---Simplified-AI | 3fc26a58a9370d119811ac4e864af977c21f6c40 | [
"MIT"
] | 2 | 2021-12-23T06:10:11.000Z | 2021-12-23T07:24:28.000Z | import os
from flask import session
from src.utils.common.common_helper import load_project_encdoing, load_project_model, load_project_pca, \
load_project_scaler, read_config
from loguru import logger
from from_root import from_root
from src.utils.databases.mysql_helper import MySqlHelper
from src.preprocessing.preprocessing_helper import Preprocessing
from src.feature_engineering.feature_engineering_helper import FeatureEngineering
import pandas as pd
import numpy as np
config_args = read_config("./config.yaml")
log_path = os.path.join(from_root(), config_args['logs']['logger'], config_args['logs']['generallogs_file'])
logger.add(sink=log_path, format="[{time:YYYY-MM-DD HH:mm:ss.SSS} - {level} - {module} ] - {message}", level="INFO")
mysql = MySqlHelper.get_connection_obj()
"""[Function to make prediction]
"""
def make_prediction(df):
try:
logger.info(f"Started Prediction!!1")
if df is None:
logger.info(f"DataFrame is null")
raise Exception("Data Frame is None")
else:
query_ = f"""Select Name, Input,Output,ActionDate from tblProject_Actions_Reports
Join tblProjectActions on tblProject_Actions_Reports.ProjectActionId=tblProjectActions.Id
where ProjectId={session['pid']}"""
action_performed = mysql.fetch_all(query_)
print(action_performed)
feature_columns = [col for col in df.columns if col != session['target_column']]
df = df.loc[:, feature_columns]
df_org = df
if len(action_performed) > 0:
for action in action_performed:
if action[0] == 'Delete Column':
df = Preprocessing.delete_col(df, action[1].split(","))
elif action[0] == 'Change Data Type':
df = FeatureEngineering.change_data_type(df, action[1], action[2])
elif action[0] == 'Column Name Change':
df = FeatureEngineering.change_column_name(df, action[1], action[2])
elif action[0] == 'Encdoing':
cat_data = Preprocessing.col_seperator(df, 'Categorical_columns')
num_data = Preprocessing.col_seperator(df, 'Numerical_columns')
encoder = load_project_encdoing()
# columns=action[1].split(",")
# df_=df.loc[:,columns]
df_ = encoder.transform(cat_data)
df = pd.concat([df_, num_data], axis=1)
elif action[0] == 'Scalling':
scalar = load_project_scaler()
columns = df.columns
df = scalar.transform(df)
df = pd.DataFrame(df, columns=columns)
elif action[0] == 'PCA':
pca = load_project_pca()
columns = df.columns
df_ = pca.transform(df)
df_ = df_[:, :int(action[1])]
df = pd.DataFrame(df_, columns=[f"Col_{col + 1}" for col in np.arange(0, df_.shape[1])])
elif action[0] == 'Custom Script':
if action[1] is not None:
exec(action[1])
model = load_project_model()
result = model.predict(df)
df_org.insert(loc=0, column=session['target_column'], value=result)
return df_org
else:
pass
return df
except Exception as e:
logger.info('Error in Prediction ' + str(e))
raise Exception(e)
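
# Note (illustrative): make_prediction expects to run inside a Flask request
# context where session['pid'] and session['target_column'] are set; it then
# replays the preprocessing actions recorded for that project before calling
# the saved model's predict().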
| 43.604651 | 117 | 0.560533 | 414 | 3,750 | 4.89372 | 0.342995 | 0.043435 | 0.032577 | 0.015795 | 0.078973 | 0.026654 | 0.026654 | 0.026654 | 0 | 0 | 0 | 0.009259 | 0.3376 | 3,750 | 85 | 118 | 44.117647 | 0.806361 | 0.013333 | 0 | 0.058824 | 0 | 0.014706 | 0.161704 | 0.037695 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014706 | false | 0.014706 | 0.147059 | 0 | 0.191176 | 0.014706 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
796832284ec5beb0d93e3de2098cee7d04cbed89 | 18,718 | py | Python | examples/connections.py | Thinker83/remote-computer-manager | 1ea8353e77fc13a98625d744162f789503a8f400 | [
"MIT"
] | null | null | null | examples/connections.py | Thinker83/remote-computer-manager | 1ea8353e77fc13a98625d744162f789503a8f400 | [
"MIT"
] | null | null | null | examples/connections.py | Thinker83/remote-computer-manager | 1ea8353e77fc13a98625d744162f789503a8f400 | [
"MIT"
] | null | null | null | from computer_communication_framework.base_connection import Connection
import subprocess
import re
import datetime
class BasePbs(Connection):
"""
    This is meant to be a template to create a connection object for a standard PBS/TORQUE cluster. This inherits from the base_connection.Connection class in base_connection.py. It will not define ALL of the abstract classes specified in base_connection.Connection and so you will not be able to create an instance of it. One should create a class that inherits this class and adds all the necessary methods to satisfy the base_connection.Connection abstract methods.
    This is meant to contain the BASIC commands that can be used by programs to control the remote computer (that aren't already included in base_connection.Connection). These are atomic-level commands that form the basis of more complex and specific programs.
Abstract methods that are left out are:
- checkDiskUsage
"""
def __init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email, base_output_path = '/base/output/path', base_runfiles_path = '/base/run/file/path', master_dir = '/master/dir', info_about_cluster = 'Example Cluster Name (ECN): Advanced Computing Research Centre, somewhere.', activate_virtual_environment_list = ['module add python-anaconda-4.2-3.5', 'source activate virtual_environment_name']):
Connection.__init__(self, cluster_user_name, ssh_config_alias, path_to_key, forename_of_user, surname_of_user, user_email)
self.submit_command = 'qsub'
self.information_about_cluster = info_about_cluster
self.base_output_path = base_output_path
self.base_runfiles_path = base_runfiles_path
self.master_dir = master_dir
self.activate_venv_list = activate_virtual_environment_list
# INSTANCE METHODS
def checkQueue(self, job_number):
"""
This function must exist to satisfy the abstract class that it inherits from. In this case it takes a job number and returns a list of all the array numbers of that job still running.
Args:
            job_number (int): PBS assigns a unique integer number to each job. Remember that a job can actually be an array of jobs.
Returns:
output_dict (dict): Has keys 'return_code', 'stdout', and 'stderr'.
"""
# -t flag shows all array jobs related to one job number, if that job is an array.
grep_part_of_cmd = "qstat -tu " + self.user_name + " | grep \'" + str(job_number) + "\' | awk \'{print $1}\' | awk -F \"[][]\" \'{print $2}\'"
output_dict = self.checkSuccess(self.sendCommand([grep_part_of_cmd])) # Remember that all commands should be passed through the "checkSuccess" function that is inherited from the Connection class.
return output_dict
# STUFF FOR THE BCS CHILD CLASS!!!
# no_of_unique_jobs (int): Total amount of jobs to run.
# no_of_repetitions_of_each_job (int): Total amount of repetitions of each job.
# master_dir (str): The directory on the remote computer that you want the submission script to start in.
    def createPbsSubmissionScriptTemplate(self, pbs_job_name, no_of_nodes, no_of_cores, walltime, queue_name, job_array_numbers, outfile_name_and_path, errorfile_name_and_path, initial_message_in_code = None, shebang = "#!/bin/bash"):
"""
This creates a template for a submission script for the cluster however it does not contain any code for specific jobs (basically just the PBS commands and other bits that might be useful for debugging). It puts it all into a list where list[0] will be line number one of the file and list[2] will be line number two of the file etc and returns that list.
Args:
pbs_job_name (str): The name given to the queuing system.
no_of_nodes (int): The number of nodes that the user would like to request.
no_of_cores (int): The number of cores that the user would like to request.
walltime (str): The maximum amount of time the job is allowed to take. Has the form 'HH:MM:SS'.
            queue_name (str): PBS/Torque clusters have a choice of queues and this variable specifies which one to use.
            job_array_numbers (str): The range of array indices passed to the PBS -t flag (e.g. '1-10').
outfile_name_and_path (str): Absolute path and file name of where you want the outfiles of each job array stored.
errorfile_name_and_path (str): Absolute path and file name of where you want to store the errorfiles of each job array stored.
            initial_message_in_code (str): The first comment in the code normally says a little something about where this script came from. NOTE: You do not need to include a '#' to indicate it is a comment.
            initial_message_in_code == None (str): Should the user wish to put a message near the top of the script (maybe an explanation or something) then they can add it here as a string. If its value is None (the default value) then the line is omitted.
Returns:
list_of_pbs_commands (list of strings): Each string represents the line of a submission file and the list as a whole is the beginning of a PBS submission script.
"""
# add the first part of the template to the list
        list_of_pbs_commands = [shebang + "\n", "\n", "# This script was created using Oliver Chalkley's computer_communication_framework library - https://github.com/Oliver-Chalkley/computer_communication_framework." + "\n"]
        # Only want to put the user's initial message if there is one
        if initial_message_in_code is not None:
            list_of_pbs_commands += ["# " + initial_message_in_code + "\n"]
# add the next part of the template
list_of_pbs_commands = ["# Title: " + pbs_job_name + "\n", "# User: " + self.forename_of_user + ", " + self.surename_of_user + ", " + self.user_email + "\n"]
# Only want to put affiliation if there is one
        if self.affiliation is not None:
list_of_pbs_commands += ["# Affiliation: " + self.affiliation + "\n"]
# add the next part of the template to the list
list_of_pbs_commands += ["# Last Updated: " + str(datetime.datetime.now()) + "\n", "\n", "## Job name" + "\n", "#PBS -N " + pbs_job_name + "\n", "\n", "## Resource request" + "\n", "#PBS -l nodes=" + str(no_of_nodes) + ":ppn=" + str(no_of_cores) + ",walltime=" + walltime + "\n", "#PBS -q " + queue_name + "\n", "\n", "## Job array request" + "\n", "#PBS -t " + job_array_numbers + "\n", "\n", "## designate output and error files" + "\n", "#PBS -e " + outfile_name_and_path + "\n", "#PBS -o " + errorfile_name_and_path + "\n", "\n", "# print some details about the job" + "\n", 'echo "The Array ID is: ${PBS_ARRAYID}"' + "\n", 'echo Running on host `hostname`' + "\n", 'echo Time is `date`' + "\n", 'echo Directory is `pwd`' + "\n", 'echo PBS job ID is ${PBS_JOBID}' + "\n", 'echo This job runs on the following nodes:' + "\n", 'echo `cat $PBS_NODEFILE | uniq`' + "\n", "\n"]
return list_of_pbs_commands
    def createStandardSubmissionScript(self, file_name_and_path, list_of_job_specific_code, pbs_job_name, no_of_nodes, no_of_cores, queue_name, job_array_numbers, outfile_name_and_path, errorfile_name_and_path, walltime, initial_message_in_code = None, file_permissions = "700", shebang = "#!/bin/bash"):
"""
This creates a PBS submission script based on the resources you request and the job specific code that you supply. It then writes this code to a file that you specify.
Args:
file_name_and_path (str): Absolute path plus filename that you wish to save the PBS submission script to e.g. /path/to/file/pbs_submission_script.sh.
list_of_job_specific_code (list of strings): Each element of the list contains a string of one line of code. Note: This code is appended to the end of the submission script.
pbs_job_name (str): The name given to this job.
no_of_nodes (int): The number of nodes that the user would like to request.
no_of_cores (int): The number of cores that the user would like to request.
            queue_name (str): PBS/Torque clusters have a choice of queues and this variable specifies which one to use.
            job_array_numbers (str): The range of array indices passed to the PBS -t flag (e.g. '1-10').
outfile_name_and_path (str): Absolute path and file name of where you want the outfiles of each job array stored.
errorfile_name_and_path (str): Absolute path and file name of where you want to store the errorfiles of each job array stored.
walltime (str): The maximum amount of time the job is allowed to take. Has the form 'HH:MM:SS'.
            initial_message_in_code == None (str): Should the user wish to put a message near the top of the script (maybe an explanation or something) then they can add it here as a string. If its value is None (the default value) then the line is omitted.
file_permissions = "700" (str): The file permissions that the user would like the PBS submission script to have. If it is None then it will not attempt to change the settings. The default setting, 700, makes it read, write and executable only to the user. NOTE: For the submission script to work one needs to make it executable.
shebang = "#!/bin/bash" (str): The shebang line tells the operating system what interpreter to use when executing this script. The default interpreter is BASH which is normally found in /bin/bash.
"""
# Create the PBS template
        pbs_script_list = self.createPbsSubmissionScriptTemplate(pbs_job_name, no_of_nodes, no_of_cores, walltime, queue_name, job_array_numbers, outfile_name_and_path, errorfile_name_and_path, initial_message_in_code = initial_message_in_code, shebang = shebang)
# Add the code that is specific to this job
pbs_script_list += list_of_job_specific_code
# write the code to a file
Connection.createLocalFile(file_name_and_path, pbs_script_list, file_permisions = "700")
# change the permissions if neccessary
        if file_permissions is not None:
            subprocess.check_call(["chmod", str(file_permissions), file_name_and_path])
return
# DELETE THIS ONCE EVERYTHING HAS BEEN DONE
# def createStandardSubmissionScript(self, output_filename, pbs_job_name, queue_name, no_of_unique_jobs, no_of_repetitions_of_each_job, master_dir, outfile_name_and_path, errorfile_name_and_path, walltime, initial_message_in_code, list_of_job_specific_code):
# """
# This acts as a template for a submission script for the cluster however it does not contain any code for specific jobs. This code is pass to the function through the list_of_job_specific_code variable.
#
# The format for a submission in this case will be an array of jobs. Here we want to be able to specify a number of unique jobs and then the amount of times we wish to repeat each unique job. This will then split all the jobs across arrays and CPUs on the cluster depending on how many are given. Each unique job has a name and some settings, this is stored on the cluster in 2 files job_names_file and job_settings_file, respectively.
#
# Args:
# output_filename (str): The name of the submission script.
# pbs_job_name (str): The name given to the queuing system.
# queue_name (str): This cluster has a choice of queues and this variable specifies which one to use.
# no_of_unique_jobs (int): Total amount of jobs to run.
# no_of_repetitions_of_each_job (int): Total amount of repetitions of each job.
# master_dir (str): The directory on the remote computer that you want the submission script to start in.
# outfile_name_and_path (str): Absolute path and file name of where you want the outfiles of each job array stored.
# errorfile_name_and_path (str): Absolute path and file name of where you want to store the errorfiles of each job array stored.
# walltime (str): The maximum amount of time the job is allowed to take. Has the form 'HH:MM:SS'.
# initial_message_in_code (str): The first comment in the code normally says a little something about where this script came from. NOTE: You do not need to include a '#' to indicat it is a comment.
# list_of_job_specific_code (list of strings): Each element of the list contains a string of one line of code.
#
# Returns:
# output_dict (dict): Contains details of how it spread the jobs across arrays and CPUs. Has keys, 'no_of_arrays', 'no_of_unique_jobs_per_array_job', 'no_of_repetitions_of_each_job', 'no_of_sims_per_array_job', and 'list_of_rep_dir_names'.
# """
#
# # set job array numbers to None so that we can check stuff has worked later
# job_array_numbers = None
# # The maximum job array size on the cluster.
# max_job_array_size = 500
# # initialise output dict
# output_dict = {}
# # test that a reasonable amount of jobs has been submitted (This is not a hard and fast rule but there has to be a max and my intuition suggestss that it will start to get complicated around this level i.e. queueing and harddisk space etc)
# total_sims = no_of_unique_jobs * no_of_repetitions_of_each_job
# if total_sims > 20000:
# raise ValueError('Total amount of simulations for one batch submission must be less than 20,000, here total_sims=',total_sims)
#
# output_dict['total_sims'] = total_sims
# # spread simulations across array jobs
# if no_of_unique_jobs <= max_job_array_size:
# no_of_unique_jobs_per_array_job = 1
# no_of_arrays = no_of_unique_jobs
# job_array_numbers = '1-' + str(no_of_unique_jobs)
# else:
# # job_array_size * no_of_unique_jobs_per_array_job = no_of_unique_jobs so all the factors of no_of_unique_jobs is
# common_factors = [x for x in range(1, no_of_unique_jobs+1) if no_of_unique_jobs % x == 0]
# # make the job_array_size as large as possible such that it is less than max_job_array_size
# factor_idx = len(common_factors) - 1
# while factor_idx >= 0:
# if common_factors[factor_idx] < max_job_array_size:
# job_array_numbers = '1-' + str(common_factors[factor_idx])
# no_of_arrays = common_factors[factor_idx]
# no_of_unique_jobs_per_array_job = common_factors[(len(common_factors)-1) - factor_idx]
# factor_idx = -1
# else:
# factor_idx -= 1
#
# # raise error if no suitable factors found!
# if job_array_numbers is None:
# raise ValueError('job_array_numbers should have been assigned by now! This suggests that it wasn\'t possible for my algorithm to split the KOs across the job array properly. Here no_of_unique_jobs=', no_of_unique_jobs, ' and the common factors of this number are:', common_factors)
#
# output_dict['no_of_arrays'] = no_of_arrays
# output_dict['no_of_unique_jobs_per_array_job'] = no_of_unique_jobs_per_array_job
# output_dict['no_of_repetitions_of_each_job'] = no_of_repetitions_of_each_job
# # calculate the amount of cores per array job - NOTE: for simplification we only use cores and not nodes (this is generally the fastest way to get through the queue anyway)
# no_of_cores = no_of_repetitions_of_each_job * no_of_unique_jobs_per_array_job
# output_dict['no_of_sims_per_array_job'] = no_of_cores
# output_dict['list_of_rep_dir_names'] = list(range(1, no_of_repetitions_of_each_job + 1))
# no_of_nodes = 1
# # write the script to file
# with open(output_filename, mode='wt', encoding='utf-8') as myfile:
# myfile.write("#!/bin/bash" + "\n")
# myfile.write("\n")
# myfile.write("# This script was created using Oliver Chalkley's computer_communication_framework library - https://github.com/OliCUoB/computer_communication_framework." + "\n")
# myfile.write("# " + initial_message_in_code + "\n")
# myfile.write("# Title: " + pbs_job_name + "\n")
# myfile.write("# User: " + self.forename_of_user + ", " + self.surename_of_user + ", " + self.user_email + "\n")
# if type(self.affiliation) is not None:
# myfile.write("# Affiliation: " + self.affiliation + "\n")
# myfile.write("# Last Updated: " + str(datetime.datetime.now()) + "\n")
# myfile.write("\n")
# myfile.write("## Job name" + "\n")
# myfile.write("#PBS -N " + pbs_job_name + "\n")
# myfile.write("\n")
# myfile.write("## Resource request" + "\n")
# myfile.write("#PBS -l nodes=" + str(no_of_nodes) + ":ppn=" + str(no_of_cores) + ",walltime=" + walltime + "\n")
# myfile.write("#PBS -q " + queue_name + "\n")
# myfile.write("\n")
# myfile.write("## Job array request" + "\n")
# myfile.write("#PBS -t " + job_array_numbers + "\n")
# myfile.write("\n")
# myfile.write("## designate output and error files" + "\n")
# myfile.write("#PBS -e " + outfile_name_and_path + "\n")
# myfile.write("#PBS -o " + errorfile_name_and_path + "\n")
# myfile.write("\n")
# myfile.write("# print some details about the job" + "\n")
# myfile.write('echo "The Array ID is: ${PBS_ARRAYID}"' + "\n")
# myfile.write('echo Running on host `hostname`' + "\n")
# myfile.write('echo Time is `date`' + "\n")
# myfile.write('echo Directory is `pwd`' + "\n")
# myfile.write('echo PBS job ID is ${PBS_JOBID}' + "\n")
# myfile.write('echo This job runs on the following nodes:' + "\n")
# myfile.write('echo `cat $PBS_NODEFILE | uniq`' + "\n")
# myfile.write("\n")
# for line in list_of_job_specific_code:
# myfile.write(line)
#
# # give the file execute permissions
# subprocess.check_call(["chmod", "700", str(output_filename)])
#
# return output_dict
def getJobIdFromSubStdOut(self, stdout):
"""
        When one submits a job to the cluster it returns the job ID to stdout. This function takes that stdout and extracts the job ID so that it can be used to monitor the job if necessary.
Args:
stdout (str): The stdout after submitting a job to the queue.
Returns:
return (int): The job ID of the job submitted which returned stdout.
"""
return int(re.search(r'\d+', stdout).group())
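
# --- Illustrative subclass sketch (not part of the original file) ---
# BasePbs deliberately leaves checkDiskUsage abstract, so a concrete class
# only needs to supply it. The 'quota -s' command below is an assumption
# about what the target cluster provides.
class ExamplePbs(BasePbs):
    def checkDiskUsage(self):
        # Route the query through the inherited helpers, as checkQueue does.
        return self.checkSuccess(self.sendCommand(['quota -s']))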
| 76.713115 | 884 | 0.678865 | 2,811 | 18,718 | 4.320527 | 0.165777 | 0.017456 | 0.029642 | 0.023055 | 0.497242 | 0.449238 | 0.397448 | 0.356608 | 0.339481 | 0.33133 | 0 | 0.003625 | 0.233732 | 18,718 | 243 | 885 | 77.028807 | 0.843129 | 0.737793 | 0 | 0 | 0 | 0.028571 | 0.228827 | 0.018132 | 0.028571 | 0 | 0 | 0 | 0 | 1 | 0.142857 | false | 0 | 0.114286 | 0 | 0.4 | 0.057143 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
796b67b9479d04170cd02e4d71dc7ae51ab5fc75 | 13,795 | py | Python | src/util.py | lambertwang/mastery | 772bdeb10e014391835d267069afc820a113d2b2 | [
"MIT"
] | 1 | 2017-12-01T03:30:34.000Z | 2017-12-01T03:30:34.000Z | src/util.py | lambertwang/mastery | 772bdeb10e014391835d267069afc820a113d2b2 | [
"MIT"
] | 1 | 2017-11-13T18:46:39.000Z | 2017-11-13T18:46:39.000Z | src/util.py | lambertwang/mastery | 772bdeb10e014391835d267069afc820a113d2b2 | [
"MIT"
] | null | null | null | import random
import re
import json
from combat import *
from travel import *
from pdb import set_trace
def load_words(path):
with open(path, 'r') as f:
for line in f:
clean_line = line.strip()
if clean_line and not clean_line[0] == "#":
yield clean_line
class MarkovGenerator:
def __init__(self, words, length):
self.length = length
self.transitions = {}
for word in words:
key = (None,) * length
for char in word:
self.addTransition(key, char)
key = key[1:] + (char,)
self.addTransition(key, None)
def addTransition(self, key, char):
if key not in self.transitions:
self.transitions[key] = []
self.transitions[key].append(char)
def generate(self):
result = []
key = (None,) * self.length
while key in self.transitions:
next_char = random.choice(self.transitions[key])
if next_char is None:
break
result.append(next_char)
key = key[1:] + (next_char,)
return ''.join(result)
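
# --- Illustrative usage (not part of the original file) ---
# A length-2 generator trained on two sample words; generate() walks the
# learned character transitions until it draws the end-of-word marker.
# demo = MarkovGenerator(['castle', 'hamlet'], 2)
# print(demo.generate())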
town_generator = MarkovGenerator(load_words('../data/towns.txt'), 2)
name_generator = MarkovGenerator(load_words('../data/names_male.txt'), 3)
occupation_list = list(load_words('../data/occupations.txt'))
color_list = list(load_words('../data/colors.txt'))
landform_list = list(load_words('../data/landforms.txt'))
weapon_list = list(load_words('../data/weapons.txt'))
with open('../monsters.json', 'r') as monster_file:
monsters_list = json.load(monster_file)
def expand(sentence, **kwargs):
# set_trace()
while True:
matches = list(re.finditer('<([!a-zA-Z0-9:_]*?)>', sentence))
if not matches:
return sentence
for match in reversed(matches):
parts = match.group(1).split(':')
if parts[0][0] == '!':
replacement = kwargs[parts[0][1:]]
else:
replacement = globals()[parts[0]]()
if len(parts) >= 2:
replacement = globals()[parts[1]](replacement)
sentence = sentence[:match.start(0)] + replacement + sentence[match.end(0):]
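
# Template grammar handled by expand() (illustrative): <name> calls the
# module-level function name(), <name:filter> post-processes the result with
# filter(), and <!key> substitutes the keyword argument 'key'. For example,
# expand('<!pc_name> entered <town:title>.', pc_name='Aldous') might yield
# "Aldous entered Bramblewick." (town names are randomly generated).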
def title(words):
return ' '.join((word[0].upper() + word[1:]) for word in words.split(' '))
def sentence(words):
return words[0].upper() + words[1:]
def book_title():
return '# <!pc_name>\'s Journey to Defeat the Evil Wizard <!wiz_name> _(and his many battles along the way)_\n\n'
def chapter_title(title):
return '## <a name="chapter<!chapter_number>"></a> ' + title + '\n\n'
def chapter_title_plain():
return 'Chapter <!chapter_number>: <!town_name> and the <!monster_name:title>'
def town():
return town_generator.generate()
def name():
return name_generator.generate()
def occupation():
return random.choice(occupation_list)
def color():
return random.choice(color_list)
def landform():
return random.choice(landform_list)
def weapon():
return random.choice(weapon_list)
def positive_trait():
return random.choice([
'bold',
'courageous',
'daring',
'epic',
'fearless',
'gallant',
'grand',
'gutsy',
'noble',
'valiant',
'classic',
'elevated',
'bigger than life',
'dauntless',
'doughty',
'exaggerated',
'fire-eating',
'grandiose',
'gritty',
'gutty',
'high-flown',
'impavid',
'inflated',
'intrepid',
'lion-hearted',
'mythological',
'tall standing',
'stouthearted',
'unafraid',
'valorous',
'undaunted'
])
def negative_trait():
return random.choice([
'hideous',
'smelly',
'terrible',
'menacing',
'awful',
'ruinous',
'evil',
'abhorrent',
'abominable',
'appalling',
'awful',
'cruel',
'disgusting',
'dreadful',
'eerie',
'frightful',
'ghastly',
'grim',
'grisly',
'gruesome',
'heinous',
'hideous',
'horrendous',
'horrid',
'lousy',
'nasty',
'scandalous',
'scary',
'shameful',
'shocking',
'terrible',
'terrifying',
'beastly',
'detestable',
'disagreeable',
'execrable',
'fairy',
'fearful',
'loathsome',
'lurid',
'mean',
'obnoxious',
'offensive',
'repellent',
'repulsive',
'revolting',
'sickie',
'ungodly',
'unholy',
'unkind'
])
def pc_name():
return random.choice([
'<!pc_name>',
'the <positive_trait> <!pc_name>',
'<!pc_name> the <positive_trait>',
'our hero',
'the adventurer',
'he',
'he',
'he',
'he'
])
def activity():
return random.choice([
'sat by the side of the road',
'rushed by quickly, ignoring him',
'gazed at him from an open window',
'talked excitedly with what appeared to be a <occupation>',
'slowly carried supplies',
'slept in an alleyway',
'eyed him suspiciously',
'scuttled out of his way',
'stood by a market stall, negotiating with the <occupation>',
'hawked fine imported goods from <town>',
'bit into an apple',
'finished an apple and tossed the core aside',
'ran from person to person, asking if they had seen <name>',
'loaded a market stall with wares',
'threw punches'
])
def town_people_sentence():
return random.choice([
'A <occupation> <activity>.',
'While the <occupation> <activity>, a <occupation> <activity>.',
'Two <occupation>s <activity>.',
'The <occupation> <activity> with a <occupation>.',
'Nearby, a <occupation> <activity>.'
])
def character_attribute():
return random.choice([
'unusual weapons',
'foreboding cloak',
'impressive armor',
'strong forearms',
'well-made boots',
'determined look',
'dangerous demeanor'
])
def number():
return str(random.randint(2, 10))
def building():
return random.choice([
'tavern',
'inn',
'barn',
'church',
'monastery',
'cattle barn',
'stables',
'warehouse'
])
def direction():
return random.choice([
'left',
'right',
'left' # Bias towards left (for some reason)
])
def in_town_directions_end():
return random.choice([
'It\'s just to the <direction>.',
'There\'s a small door.',
'Look for the large hanging sign that reads \"<!armor_name> Fine Supplies\".'
])
def in_town_directions():
return random.choice([
'down the street to the <building> and <direction>. You\'ll see a <building>. It\'s <in_town_directions>',
'past the <building>. <in_town_directions_end>',
'into the market and towards the <building>. Eventually you need to walk <in_town_directions>',
'just a bit further down the street. <in_town_directions_end>'
])
def town_intro():
return (
'<!pc_name> followed a dirt path into the village of <!town_name>. <town_people_sentence> <town_people_sentence> '
'<!pc_name> continued down the path. <town_people_sentence>\n\n'
'Eventually, <!pc_name> arrived at the town square, where he found a <occupation>. ' +
random.choice([
'The man, eying his <character_attribute>, beckoned him forward.\n\n'
'"Not many people around here like you." he said gruffly. "What makes you think you can step foot in these parts?"\n\n',
'<!pc_name> approached him, hoping for some advice.\n\n'
]) +
random.choice([
'"My name is <!pc_name>, and it is my quest to defeat the evil wizard <!wiz_name>." <!pc_name> announced.\n\n',
'"The evil wizard <!wiz_name> has terrorized these lands for far too long. I <!pc_name> have come to destroy him!" <!pc_name> exclaimed.\n\n',
'"Do you remember the glory days before the evil wizard <!wiz_name> took over?" <!pc_name> asked. '
'"I seek to destroy him and restore this kingdom\'s rightful rule!"\n\n'
]) +
'<town_people_sentence> ' +
random.choice([
'The man eyed him thoughtfully',
'He still looked suspicious',
'The man sat in silence for a while',
            'The man quietly reminisced about the past'
]) +
random.choice([
', then finally responded.\n\n',
', but eventually responded.\n\n',
            '. He finally responded.\n\n'
]) +
random.choice([
'"We have waited for your arrival for many years, <!pc_name>. Is there any way I can be of help?"\n\n',
'"Our village of <!town_name> will gladly help you on your quest. What do you need?"\n\n'
]) +
'"My weapons were badly damaged on the way here. Could you point me to your armory to get some new supplies?"\n\n' +
random.choice([
'"<!armor_name> is the best in town. His shop is <in_town_directions> ',
'"The armory is <in_town_directions> You\'ll find <!armor_name>, the best weapons expert we\'ve got. ',
'"<!armor_name> is <in_town_directions> Tell him I sent you. '
]) +
random.choice([
'And here, take a few gold pieces to buy the best." He reached into his pocket and pulled out <number> small coins. '
'"I want that <!wiz_name> gone as much as anybody."\n\n',
'Be careful out there. You\'re not the first to try this adventure. Men stronger than you have vanished or worse."\n\n',
'I\'d show you myself, but I have urgent matters to attend to here in the square."\n\n'
]) +
'<!pc_name> hurried towards the armory. <town_people_sentence> <town_people_sentence> '
'Turning the corner, he saw the armory in front of him. He pushed the door open and walked inside.\n\n'
)
def monster_name():
return random.choice([monster['name'].strip() for monster in monsters_list])
def monster_description(name):
matches = [monster for monster in monsters_list if monster['name'].strip() == name]
if matches and matches[0]['description']:
return matches[0]['description']
else:
return ['The monster ' + name + ' is terrifying for sure, but I honestly don\'t know much about that beast.']
def armory_intro():
return (
random.choice([
'<!armor_name> looked up from his work behind a counter at <!pc_name>.\n\n',
'There was no one there. <!pc_name> cleared his throat and a man ran out from a backroom.\n\n'
]) +
'"I\'m <!pc_name>, a brave adventurer seeking to destroy <!wiz_name>. What dangers lurk nearby?" he asked.\n\n' +
random.choice([
'<!armor_name> grabbed a dusty book from the shelf and flipped through it. Pictures of <monster_name>s and <monster_name>s flew by. '
'Eventually he settled on a page and started to explain.\n\n',
'<!armor_name> lifted up his tunic and pointed to a scar. "You see this?" he asked. "Only one monster can do this kind of damage. The <!monster_name>."\n\n',
'"Brave you say? You may have fought the <monster_name>, or perhaps even the <monster_name>, but that\'s nothing compared to the <!monster_name> we\'ve got."\n\n'
])
)
def armory_explanation():
return random.choice([
'"<!description>" <!armor_name> explained.\n\n',
'The armorer sighed and continued. "<!description>"\n\n',
'<!armor_name> returned to the book of monsters on the desk and pointed at the terrifying illustration. "<!description>"\n\n'
])
def armory_more():
return random.choice([
'<!pc_name> looked surprised. "Incredible! Is there anything else I should know?"\n\n',
'"But my weapons may be too weak. Are there any other ways to defeat the <!monster_name>?" <!pc_name> asked.\n\n',
'<!pc_name> slipped the man <number> coins. "I get the feeling you\'ve been here for a while. Surely you know more than that."\n\n',
'"I could handle that. Tell me again, what makes the <!monster_name> so bad?" <!pc_name> responded.\n\n'
])
def armory_no_more():
return random.choice([
'"That\'s all I can tell you."\n\n',
'"Anything else you need to know can be found it the book. Take your time." He took the book of monsters and handed it to <!pc_name>.\n\n',
'"Look I\'ve got other things to attend to. Do you need weapons or not?" His frusturation was visible.\n\n'
])
def armory_new_weapon(old_weapon):
return (
'As <!pc_name> turned to leave the armory, <!armor_name> called out\n\n' +
random.choice([
'"Before you go, get rid of that useless ' + old_weapon + '. It won\'t make a dent against the carapace of the <!monster_name>." ',
'"Wait, you\'ll need a weapon worthy of your great cause. That rusty ' + old_weapon + ' won\'t do." '
]) +
'\n\n' +
random.choice([
'"Take this <!pc_weapon>. It has served a well over a dozen adventureres before you and it should serve you well too."\n\n',
'"Forged by the finest dwarven smiths in the mountains of <town>, this <!pc_weapon> is the finest display of craftsmanship for miles around."\n\n'
])
)
| 35.01269 | 174 | 0.58137 | 1,748 | 13,795 | 4.497712 | 0.316362 | 0.009921 | 0.0435 | 0.010684 | 0.068685 | 0.022132 | 0.007123 | 0 | 0 | 0 | 0 | 0.002654 | 0.289888 | 13,795 | 393 | 175 | 35.101781 | 0.799918 | 0.003407 | 0 | 0.178161 | 0 | 0.048851 | 0.443288 | 0.025464 | 0 | 0 | 0 | 0 | 0 | 1 | 0.100575 | false | 0 | 0.020115 | 0.083333 | 0.218391 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
796c208b5ef0105c3a346b49387aabac0584232a | 5,937 | py | Python | soc-tools/reporting/report_splitter.py | michalk68/soc-tools | 8d4c8fd53624817c1126c72d757878f305151446 | [
"MIT"
] | null | null | null | soc-tools/reporting/report_splitter.py | michalk68/soc-tools | 8d4c8fd53624817c1126c72d757878f305151446 | [
"MIT"
] | null | null | null | soc-tools/reporting/report_splitter.py | michalk68/soc-tools | 8d4c8fd53624817c1126c72d757878f305151446 | [
"MIT"
] | 1 | 2020-01-25T08:55:41.000Z | 2020-01-25T08:55:41.000Z | import csv
import argparse
import os
class ReportSplitter:
def __init__(self, values, columns, file, output_folder=None, verbose=False, case_insensitive=True,
contains_value=False):
self.values = values
self.columns = columns
self.file = file
self.output_folder = output_folder
self._file_mapping = {}
self._opened_files = []
self.verbose = verbose
self.case_insensitive = case_insensitive
self.contains_value = contains_value
if self.output_folder is None:
self.output_folder = os.getcwd()
def split(self):
if self.verbose:
print("Values used for indexing:")
print(self.values)
print("Columns that will be indexed:")
print(self.columns)
print("File that will be splitted: " + self.file)
print("Output folder: " + self.output_folder)
print("Case insensitivity enabled: " + self.case_insensitive)
print("Value contained in indexed column: " + self.contains_value)
print("Starting...")
try:
self._file_exists(self.file)
self._folder_exists(self.output_folder)
if self.case_insensitive:
                values = self._values_to_lowercase(self.values)
else:
values = self.values
with open(self.file) as csvfile:
reader = csv.DictReader(csvfile)
self._verify_column_names(reader.fieldnames)
self._create_files(reader.fieldnames, values)
# Reading row by row
for row in reader:
# For each row checking columns that contain indexed data
for column in self.columns:
if self.case_insensitive:
column_value = row[column].lower()
else:
column_value = row[column]
# If indexed value in the column, writing this line to appropriate file
if self.contains_value:
for v in values:
if v in column_value:
self._write_line_to_file(v, row)
else:
if column_value in values:
self._write_line_to_file(column_value, row)
self._close_files()
except Exception as err:
print(err)
return
if self.verbose:
print("Finished...")
print("Following files were created:")
for file in self._opened_files:
print(file.name)
def _write_line_to_file(self, value, row):
self._file_mapping[value].writerow(row)
def _folder_exists(self, folder):
if not os.path.exists(folder):
raise Exception("ERROR - folder " + folder + " doesn't exist!")
if not os.path.isdir(folder):
raise Exception("ERROR - " + folder + " is not a folder!")
if not os.access(folder, os.W_OK):
raise Exception("ERROR - folder " + folder + " is not writable!")
def _file_exists(self, file):
if not os.path.exists(file):
raise Exception("ERROR - file " + file + " doesn't exist!")
if not os.path.isfile(file):
raise Exception("ERROR - " + file + " is not a file!")
if not os.access(file, os.R_OK):
raise Exception("ERROR - file " + file + " is not readable!")
def _verify_column_names(self, fieldnames):
for column in self.columns:
if column not in fieldnames:
raise Exception(
"ERROR - Column " + column + " not found to be a in the CSV file. Maybe case sensitivity issue?")
    def _create_files(self, fieldnames, values):
        for value in values:
            file_name = os.path.join(self.output_folder, value.replace(".", "_") + ".csv")
            # newline='' keeps csv writers from emitting blank lines on Windows
            csvfile = open(file_name, 'w', newline='')
            writer = csv.DictWriter(csvfile, fieldnames)
            writer.writeheader()
            self._file_mapping[value] = writer
            self._opened_files.append(csvfile)
    def _values_to_lowercase(self, values):
        return [value.lower() for value in values]
def _close_files(self):
for file in self._opened_files:
file.close()
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("-v", "--value_list", help="List of values based on which should the report be splitted. " +
"Accepts list of comma separated values")
parser.add_argument("-c", "--column_list", help="List of columns that will be searched for indexing." +
"Accepts list of comma separated values")
parser.add_argument("file", help="File that should be splitted")
parser.add_argument("-o", "--output_folder", help="Folder where the output should be placed")
parser.add_argument("-p", "--verbose", help="Verbose mode", action='store_true')
parser.add_argument("-i", "--case_insensitive", help="Allows to enable case insensitivity.", action='store_true')
parser.add_argument("-x", "--contains_value",
help="If enabled, value needs to be only contained in the column. No need for the exact match.",
action='store_true')
args = parser.parse_args()
    report_splitter = ReportSplitter(args.value_list.split(","), args.column_list.split(","), args.file,
                                     args.output_folder, args.verbose, args.case_insensitive,
                                     args.contains_value)
report_splitter.split()
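
# Example invocation (hypothetical file and column names):
#   python report_splitter.py -i -v alice,bob -c user,owner -o out report.csv
# splits report.csv into out/alice.csv and out/bob.csv by matching the
# "user" and "owner" columns case-insensitively.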
| 41.229167 | 120 | 0.561732 | 666 | 5,937 | 4.828829 | 0.225225 | 0.041045 | 0.041356 | 0.013682 | 0.174129 | 0.094527 | 0.044776 | 0.031095 | 0.031095 | 0 | 0 | 0 | 0.343945 | 5,937 | 143 | 121 | 41.517483 | 0.825674 | 0.024255 | 0 | 0.144068 | 0 | 0 | 0.172223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.076271 | false | 0 | 0.025424 | 0 | 0.127119 | 0.110169 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
796dec29764e9116f7092158c4657486b2e11567 | 1,899 | py | Python | go/guru.py | x0rzkov/sublime-go | b77d78594caed017f040fe6c4168e525a563e28b | [
"MIT"
] | 51 | 2019-08-18T18:18:42.000Z | 2022-02-09T07:44:42.000Z | go/guru.py | x0rzkov/sublime-go | b77d78594caed017f040fe6c4168e525a563e28b | [
"MIT"
] | 28 | 2019-08-19T04:10:52.000Z | 2020-12-09T16:39:26.000Z | go/guru.py | localhots/sublime-go | 960e72dafdb6c69d78bb5cbd88052540342517b9 | [
"MIT"
] | 4 | 2019-11-12T20:39:54.000Z | 2021-07-30T09:57:32.000Z |
from . import decorators
from . import exec
from . import log
import os.path as path
import sublime
import time
import json
@decorators.thread
@decorators.trace
def source(view):
locate(view)
def call(mode, filename, region):
"""
Call calls guru(1) with the given `<mode>`
filename and point.
"""
file = "{}:#{},#{}".format(filename, region.begin(), region.end())
args = ["--json", mode, file]
cmd = exec.Command("guru", args=args)
res = cmd.run()
if res.code == 0:
return json.loads(res.stdout)
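
# For example, call("describe", view.file_name(), region) with a selection
# spanning byte offsets 100-110 in main.go builds the argument "main.go:#100,#110"
# and runs roughly `guru --json describe main.go:#100,#110` (offsets illustrative).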
def locate(view):
    """
    Locate describes the symbol at the cursor using
    guru(1) and opens its definition in the window;
    an error is logged if the response is not understood.
    """
file = view.file_name()
pos = view.sel()[0]
resp = call("describe", file, pos)
    if resp is None:
return
if resp["detail"] == "value":
if 'objpos' in resp['value']:
open_position(view, resp['value']['objpos'])
return
if resp["detail"] == "type":
if "namepos" in resp["type"]:
open_position(view, resp['type']['namepos'])
return
if 'built-in type' in resp['desc']:
symbol = resp['type']['type']
cwd = path.dirname(file)
goroot = exec.goenv(cwd)['GOROOT']
src = path.join(goroot, 'src', 'builtin', 'builtin.go')
open_symbol(view, src, symbol)
return
log.error("guru(1) - unknown response {}", resp)
return ""
def open_position(view, src):
win = view.window()
win.open_file(src, sublime.ENCODED_POSITION)
def open_symbol(view, src, symbol):
win = view.window()
new_view = win.open_file(src)
show(new_view, symbol)
sublime.set_timeout(lambda: show(new_view, symbol), 20)
def show(view, symbol):
if view.is_loading():
sublime.set_timeout(lambda: show(view, symbol), 30)
return
for sym in view.symbols():
if symbol in sym[1]:
sel = sublime.Selection(0)
sel.add(sym[0])
view.show(sel)
| 22.879518 | 68 | 0.636651 | 271 | 1,899 | 4.405904 | 0.365314 | 0.033501 | 0.040201 | 0.030151 | 0.083752 | 0 | 0 | 0 | 0 | 0 | 0 | 0.007266 | 0.202738 | 1,899 | 82 | 69 | 23.158537 | 0.781374 | 0.087941 | 0 | 0.135593 | 0 | 0 | 0.101765 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101695 | false | 0 | 0.118644 | 0 | 0.338983 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
796f8ea384a7f05b46370bc3b9473a2242391c4a | 357 | py | Python | Problems/String/1209. Remove All Adjacent Duplicates in String II.py | BYJRK/LeetCode-Solutions | 008467e1717309066a519acb8623d2f84071b64a | [
"MIT"
] | null | null | null | Problems/String/1209. Remove All Adjacent Duplicates in String II.py | BYJRK/LeetCode-Solutions | 008467e1717309066a519acb8623d2f84071b64a | [
"MIT"
] | null | null | null | Problems/String/1209. Remove All Adjacent Duplicates in String II.py | BYJRK/LeetCode-Solutions | 008467e1717309066a519acb8623d2f84071b64a | [
"MIT"
] | null | null | null | # https://leetcode.com/problems/remove-all-adjacent-duplicates-in-string-ii/
class Solution:
def removeDuplicates(self, s: str, k: int) -> str:
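        # Greedy stack-style pass: append each character to a running buffer
        # and drop the buffer's last k characters whenever they are all equal
        # to the character just added; roughly O(n*k) overall.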
res = ''
for c in s:
res += c
if res[-k:] == c * k:
res = res[:-k]
return res
s = Solution()
print(s.removeDuplicates('deeedbbcccbdaa', 3))
| 21 | 76 | 0.537815 | 45 | 357 | 4.266667 | 0.622222 | 0.041667 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004065 | 0.310924 | 357 | 16 | 77 | 22.3125 | 0.776423 | 0.207283 | 0 | 0 | 0 | 0 | 0.049822 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0 | 0 | 0.3 | 0.1 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
797130522e525a58e85e7b3f848947aed4b21310 | 2,150 | py | Python | detro/packages/circledet/network.py | Peiiii/detro | 26d74468d7554dc20b2a2daf7ec5009302c820f2 | [
"MIT"
] | null | null | null | detro/packages/circledet/network.py | Peiiii/detro | 26d74468d7554dc20b2a2daf7ec5009302c820f2 | [
"MIT"
] | null | null | null | detro/packages/circledet/network.py | Peiiii/detro | 26d74468d7554dc20b2a2daf7ec5009302c820f2 | [
"MIT"
] | null | null | null | from .resnet_backbone import resnet18
from torch import nn
import torch
import torch.nn.functional as F
from detro.networks.components import BiFPN, Center_layer, Offset_layer, Reg_layer, Heatmap_layer
from detro.networks.losslib import center_loss, distance_loss
class FeatureFusionNetwork(nn.Module):
def __init__(self):
super().__init__()
def forward(self, inputs):
resized = []
size = inputs[0].size()[-2:]
for x in inputs[1:]:
resized.append(F.upsample(x, size))
x = torch.cat(resized, dim=1)
return x
class CircleNet(nn.Module):
def __init__(self, num_classes=1):
super().__init__()
self.backbone = resnet18(pretrained=True)
self.neck = FeatureFusionNetwork()
self.conv1 = nn.Conv2d(896, 256, kernel_size=1, stride=1, padding=0)
self.bn1 = nn.BatchNorm2d(256)
self.relu = nn.ReLU(inplace=True)
# self.center_layer = Heatmap_layer(in_channels=256, out_channels=num_classes)
# self.reg_layer = Heatmap_layer(in_channels=256, out_channels=1)
self.hm_layer = Heatmap_layer(in_channels=256, out_channels=num_classes + 1)
def forward(self, inputs):
c1, c2, c3, c4, c5 = self.backbone(inputs)
features = [c2, c3, c4, c5]
features = self.neck(features)
x = features
x = self.conv1(x)
x = self.bn1(x)
x = self.relu(x)
# center_heatmap = self.center_layer(x)
# offsets = self.reg_layer(x)
        x = self.hm_layer(x)
        center_heatmap = x[:, :-1]
        offsets = x[:, -1:]
return dict(
center_heatmap=center_heatmap, offsets=offsets
)
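
# A minimal smoke test of the forward pass (hypothetical input size; assumes
# the bundled resnet18 backbone returns the five feature maps used above):
#   net = CircleNet(num_classes=1)
#   out = net(torch.randn(1, 3, 256, 256))
#   print(out['center_heatmap'].shape, out['offsets'].shape)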
def CircleDetCriterion(preds, labels):
loss_center = center_loss(preds['center_heatmap'], labels['center_heatmap'])
# loss_corner=center_loss(preds['corner_heatmap'],labels['corner_heatmap'])
loss_offsets = distance_loss(preds['offsets'], labels['offsets'], labels['offsets_mask'])
return dict(
loss=loss_center + loss_offsets,
loss_center=loss_center,
# loss_corner=loss_corner,
loss_offsets=loss_offsets,
)
| 33.59375 | 97 | 0.649767 | 278 | 2,150 | 4.791367 | 0.276978 | 0.045045 | 0.051051 | 0.042793 | 0.135886 | 0.107357 | 0.107357 | 0.107357 | 0.076577 | 0.076577 | 0 | 0.029679 | 0.232093 | 2,150 | 63 | 98 | 34.126984 | 0.777105 | 0.14186 | 0 | 0.12766 | 0 | 0 | 0.02938 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.12766 | 0 | 0.340426 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
79714648fe909d1ef23cf1429aeb6aaa8d22155b | 2,938 | py | Python | home/forms.py | kana-shimmichi/Weeet | 4e332107748cbf63b6c109d3e5ce968a42ed10c3 | [
"BSD-3-Clause"
] | null | null | null | home/forms.py | kana-shimmichi/Weeet | 4e332107748cbf63b6c109d3e5ce968a42ed10c3 | [
"BSD-3-Clause"
] | 9 | 2021-03-19T00:17:56.000Z | 2022-03-12T00:17:14.000Z | home/forms.py | kana-shimmichi/Weeet | 4e332107748cbf63b6c109d3e5ce968a42ed10c3 | [
"BSD-3-Clause"
] | null | null | null | from django import forms
from .models import MakerProfile, BuyerProfile, MstLang, MstSkill, Contact, Order, OrderMessage
from register.models import User
class UserForm(forms.ModelForm):
class Meta:
model = User
fields = ('last_name', 'first_name')
class MakerProfileForm(forms.ModelForm):
class Meta:
model = MakerProfile
fields = ('picture','lang','cost','skill')
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['lang'].widget = forms.CheckboxSelectMultiple()
self.fields['lang'].queryset = MstLang.objects
self.fields['skill'].widget = forms.CheckboxSelectMultiple()
self.fields['skill'].queryset = MstSkill.objects
class BuyerProfileForm(forms.ModelForm):
class Meta:
model = BuyerProfile
fields = ('picture',)
class ContactForm(forms.ModelForm):
class Meta:
model = Contact
fields = ('user','email','message','file',)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['user'].widget.attrs.update({
'class': 'form-control required',
'placeholder':'Your Name',
'data-placement':'top',
'data-trigger':'manual',
'data-content':'Must be at least 3 characters long, and must only contain letters.'})
self.fields['email'].widget.attrs.update({
'class':'form-control email',
'placeholder':'email@xxx.com',
'data-placement':'top',
'data-trigger':'manual',
'data-content':'Must be a valid e-mail address (user@gmail.com)',
})
self.fields['message'].widget.attrs.update({
'class':'form-control',
'placeholder':"Your message here..",
'data-placement':'top',
'data-trigger':'manual',
})
class OrderForm(forms.ModelForm):
class Meta:
model = Order
fields = ('title','body','order_type','order_finish_time','cost',)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.fields['title'].widget.attrs.update({
'class':'form-control',
'placeholder':"タイトルを入れてください",
'data-placement':'top',
'data-trigger':'manual',
"data-content" :"依頼の内容入力",
})
self.fields['order_type'].widget.attrs.update({
'class': 'form-control',
})
self.fields['body'].widget.attrs.update({
'class':'form-control',
})
self.fields['cost'].widget.attrs.update({
'class':'form-control',
})
self.fields['order_finish_time'].widget.attrs.update({
'class':'form-control',
})
class SearchForm(forms.Form):
title = forms.CharField(
initial='',
label='タイトル',
        required=False,  # not required
)
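
# A minimal usage sketch for SearchForm in a view (hypothetical view function;
# standard Django request handling is assumed):
#   def search(request):
#       form = SearchForm(request.GET or None)
#       if form.is_valid():
#           title = form.cleaned_data['title']
#           # ...filter a queryset on the title here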
| 29.676768 | 97 | 0.573179 | 292 | 2,938 | 5.657534 | 0.311644 | 0.072639 | 0.082324 | 0.106538 | 0.526029 | 0.389225 | 0.309322 | 0.256053 | 0.151332 | 0.151332 | 0 | 0.000465 | 0.267529 | 2,938 | 98 | 98 | 29.979592 | 0.767193 | 0.002042 | 0 | 0.415584 | 0 | 0 | 0.248464 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038961 | false | 0 | 0.038961 | 0 | 0.233766 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7975415464bdf9086363882be5e74bf46c4eaee1 | 5,362 | py | Python | src/simple_regression.py | haojunqiu/csc110-project | f379d66709c89e33a312fb054bc91619e0fe6a92 | [
"MIT"
] | null | null | null | src/simple_regression.py | haojunqiu/csc110-project | f379d66709c89e33a312fb054bc91619e0fe6a92 | [
"MIT"
] | null | null | null | src/simple_regression.py | haojunqiu/csc110-project | f379d66709c89e33a312fb054bc91619e0fe6a92 | [
"MIT"
] | 1 | 2022-01-11T04:26:48.000Z | 2022-01-11T04:26:48.000Z | """CSC110 final project, simple regression module
Descriptions
===============================
This module contains all the functions we used to implement the
simple linear regression model.
Copyright and Usage Information
===============================
All forms of distribution of this code, whether as given or with any changes, are
expressly prohibited. All rights reserved.
This file is Copyright (c) 2020 Runshi Yang, Chenxu Wang and Haojun Qiu
"""
from typing import List, Tuple
import plotly.graph_objects as go
def evaluate_line(a: float, b: float, x: float) -> float:
"""Evaluate the linear function y = a + bx for the given a, b.
>>> result = evaluate_line(5.0, 1.0, 10.0) # y = 5.0 + 1.0 * 10.0,
>>> result == 15
True
"""
return a + b * x
def convert_points(points: List[tuple]) -> tuple:
"""Return a tuple of two lists, containing the x- and y-coordinates of the given points.
>>> result = convert_points([(0.0, 1.1), (2.2, 3.3), (4.4, 5.5)])
>>> result[0] # The x-coordinates
[0.0, 2.2, 4.4]
>>> result[1] # The y-coordinates
[1.1, 3.3, 5.5]
"""
x_coordinates = [x[0] for x in points]
y_coordinates = [x[1] for x in points]
return (x_coordinates, y_coordinates)
def simple_linear_regression(points: List[tuple]) -> tuple:
"""Perform a linear regression on the given points.
This function returns a pair of floats (a, b) such that the line
y = a + bx is the approximation of this data.
Further reading: https://en.wikipedia.org/wiki/Simple_linear_regression
Preconditions:
- len(points) > 0
>>> simple_linear_regression([(1.0, 1.0), (2.0, 2.0), (3.0, 3.0)])
(0.0, 1.0)
"""
avg_x = sum(convert_points(points)[0]) / len(points)
avg_y = sum(convert_points(points)[1]) / len(points)
numerator = [(p[0] - avg_x) * (p[1] - avg_y) for p in points]
denominator = [(p[0] - avg_x) ** 2 for p in points]
b = sum(numerator) / sum(denominator)
a = avg_y - b * avg_x
return (a, b)
def calculate_r_squared(points: List[tuple], a: float, b: float) -> float:
"""Return the R squared value when the given points are modelled as the line y = a + bx.
points is a list of pairs of numbers: [(x_1, y_1), (x_2, y_2), ...]
Preconditions:
- len(points) > 0
"""
avg_y = sum(convert_points(points)[1]) / len(points)
tot = [(avg_y - p[1]) ** 2 for p in points]
res = [(p[1] - (a + b * p[0])) ** 2 for p in points]
return 1 - sum(res) / sum(tot)
def perform_regression(train_data: List[tuple], xlabel: str,
title: str) -> Tuple[float, float, float]:
"""Return (a, b, r_squared)
Plot all data points and regression line
"""
# Get data points.
points = train_data
# Converts the points into the format expected by plotly.
separated_coordinates = convert_points(points)
x_coords = separated_coordinates[0]
y_coords = separated_coordinates[1]
# Do a simple linear regression. Returns the (a, b) constants for
# the line y = a + b * x.
model = simple_linear_regression(points)
a = model[0]
b = model[1]
# Plot all the data points AND a line based on the regression
plot_points_and_regression(x_coords, y_coords, [a, b], xlabel, title)
# Calculate the r_squared value
r_squared = calculate_r_squared(points, a, b)
return (a, b, r_squared)
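
# Example (hypothetical data): perform_regression([(1.0, 2.1), (2.0, 3.9),
# (3.0, 6.2)], 'day', 'Cases over time') fits the line, opens the plot in a
# browser, and returns the (a, b, r_squared) triple.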
def plot_points_and_regression(x_coords: list, y_coords: list, coef: List[float],
xlabel: str, title: str) -> None:
"""Plot the given x- and y-coordinates and linear regression model using plotly.
"""
# Create a blank figure
layout = go.Layout(title=title,
xaxis={'title': xlabel},
yaxis={'title': 'number of cases'})
fig = go.Figure(layout=layout)
# Add the raw data
fig.add_trace(go.Scatter(x=x_coords, y=y_coords, mode='markers', name='Data'))
# Add the regression line
x_max = 1.1 * max(x_coords)
fig.add_trace(go.Scatter(x=[0, x_max], y=[evaluate_line(coef[0], coef[1], 0),
evaluate_line(coef[0], coef[1], x_max)],
mode='lines', name='Regression line'))
# Display the figure in a web browser
fig.show()
def predict(test_data: List[Tuple], model: Tuple[float, float, float],
xlabel: str, title: str) -> float:
"""Return r_squared for the prediction.
Plot all data points and regression line
"""
# Get data points.
points = test_data
a = model[0]
b = model[1]
# Converts the points into the format expected by plotly.
separated_coordinates = convert_points(points)
x_coords = separated_coordinates[0]
y_hat = separated_coordinates[1]
# Plot all the data points AND a line based on the regression
plot_points_and_regression(x_coords, y_hat, [a, b], xlabel, title)
# Calculate the r_squared value
r_squared = calculate_r_squared(points, a, b)
return r_squared
if __name__ == '__main__':
import doctest
doctest.testmod(verbose=True)
import python_ta
python_ta.check_all(config={
'extra-imports': ['plotly.graph_objects', 'python_ta'],
'allowed-io': [],
'max-line-length': 100,
'disable': ['R1705', 'C0200']
})
| 31.356725 | 92 | 0.619172 | 795 | 5,362 | 4.047799 | 0.228931 | 0.00808 | 0.041019 | 0.014916 | 0.313238 | 0.27253 | 0.223741 | 0.223741 | 0.223741 | 0.201367 | 0 | 0.02797 | 0.24655 | 5,362 | 170 | 93 | 31.541176 | 0.768564 | 0.397426 | 0 | 0.181818 | 0 | 0 | 0.048271 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106061 | false | 0 | 0.075758 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
797b83c4395d6b6acbe9c60dbd945372be2f9477 | 718 | py | Python | FaceRecogEngine/recognition/urls.py | thecodacus/FaceAuth | dca6d6438426df48cd7e9c9693fa450d817f7d61 | [
"Apache-2.0"
] | 2 | 2018-09-22T18:28:33.000Z | 2021-08-28T17:44:30.000Z | FaceRecogEngine/recognition/urls.py | thecodacus/FaceAuth | dca6d6438426df48cd7e9c9693fa450d817f7d61 | [
"Apache-2.0"
] | null | null | null | FaceRecogEngine/recognition/urls.py | thecodacus/FaceAuth | dca6d6438426df48cd7e9c9693fa450d817f7d61 | [
"Apache-2.0"
] | 1 | 2019-06-05T15:34:59.000Z | 2019-06-05T15:34:59.000Z | from django.urls import path
from . import views
app_name = 'recognition'
urlpatterns = [
path('', views.Home.as_view(), name='home'),
path('settings/', views.Home.as_view(), name='settings'),
path('login/', views.UserLoginView.as_view(), name='login'),
path('logout/', views.LogoutView.as_view(), name='logout'),
path('register/', views.UserRegistrationView.as_view(), name='register'),
path('settings/profile/', views.ProfileSettingsView.as_view(), name='edit-profile'),
path('settings/reg-face/', views.UserFaceRegView.as_view(), name='reg-face'),
path('apis/auth/', views.UserFaceLogInView.as_view(), name='api-auth')
]
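
# These routes are typically mounted from the project-level urls.py, e.g.
# (hypothetical project module):
#   path('', include('recognition.urls')),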
| 34.190476 | 86 | 0.71727 | 93 | 718 | 5.44086 | 0.365591 | 0.094862 | 0.158103 | 0.059289 | 0.075099 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.093315 | 718 | 20 | 87 | 35.9 | 0.777266 | 0 | 0 | 0 | 0 | 0 | 0.203626 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.266667 | 0 | 0.266667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
797d78dc8a7e7f2b8677fa417daf060e2f5479f3 | 2,026 | py | Python | .pre-commit/check_version.py | JPchico/aiida-lammps | 8f618541784bbd6360efc653350570cf76398e83 | [
"MIT"
] | 7 | 2021-02-26T06:12:28.000Z | 2022-03-27T17:06:41.000Z | .pre-commit/check_version.py | JPchico/aiida-lammps | 8f618541784bbd6360efc653350570cf76398e83 | [
"MIT"
] | 21 | 2020-09-18T14:03:16.000Z | 2022-02-14T10:48:40.000Z | .pre-commit/check_version.py | JPchico/aiida-lammps | 8f618541784bbd6360efc653350570cf76398e83 | [
"MIT"
] | 5 | 2018-03-02T23:49:41.000Z | 2020-04-17T05:35:19.000Z | """Validate consistency of versions and dependencies.
Validates consistency of setup.json and
* environment.yml
* version in aiida_lammps/__init__.py
"""
import json
import os
import sys
import click
FILENAME_SETUP_JSON = "setup.json"
SCRIPT_PATH = os.path.split(os.path.realpath(__file__))[0]
ROOT_DIR = os.path.join(SCRIPT_PATH, os.pardir)
FILEPATH_SETUP_JSON = os.path.join(ROOT_DIR, FILENAME_SETUP_JSON)
def get_setup_json():
"""Return the `setup.json` as a python dictionary."""
with open(FILEPATH_SETUP_JSON, "r") as handle:
        setup_json = json.load(handle)
return setup_json
@click.group()
def cli():
"""Command line interface for pre-commit checks."""
@cli.command("version")
def validate_version():
"""Check that version numbers match.
    Check version number in setup.json and aiida_lammps/__init__.py and make sure
they match.
"""
# Get version from python package
sys.path.insert(0, ROOT_DIR)
import aiida_lammps # pylint: disable=wrong-import-position
version = aiida_lammps.__version__
setup_content = get_setup_json()
if version != setup_content["version"]:
click.echo("Version number mismatch detected:")
click.echo(
"Version number in '{}': {}".format(
FILENAME_SETUP_JSON, setup_content["version"]
)
)
click.echo(
"Version number in '{}/__init__.py': {}".format("aiida_lammps", version)
)
click.echo(
"Updating version in '{}' to: {}".format(FILENAME_SETUP_JSON, version)
)
setup_content["version"] = version
with open(FILEPATH_SETUP_JSON, "w") as fil:
# Write with indentation of two spaces and explicitly define separators to not have spaces at end of lines
json.dump(setup_content, fil, indent=2, separators=(",", ": "))
sys.exit(1)
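
# Typical invocation from the repository root (mirrors how a pre-commit hook
# would call this script):
#   python .pre-commit/check_version.py version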
if __name__ == "__main__":
cli() # pylint: disable=no-value-for-parameter
| 28.138889 | 118 | 0.661895 | 258 | 2,026 | 4.94186 | 0.418605 | 0.105882 | 0.053333 | 0.051765 | 0.123922 | 0.064314 | 0.064314 | 0 | 0 | 0 | 0 | 0.002541 | 0.2231 | 2,026 | 71 | 119 | 28.535211 | 0.807497 | 0.304541 | 0 | 0.075 | 0 | 0 | 0.139416 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.075 | false | 0.025 | 0.125 | 0 | 0.225 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
797dc34e814424ff0892e6ac9838f4607837049a | 7,062 | py | Python | main.py | rorro/legacy-gauntlet | 82898408acee5ddd0c629c15521c7f5f7a8982fe | [
"MIT"
] | null | null | null | main.py | rorro/legacy-gauntlet | 82898408acee5ddd0c629c15521c7f5f7a8982fe | [
"MIT"
] | null | null | null | main.py | rorro/legacy-gauntlet | 82898408acee5ddd0c629c15521c7f5f7a8982fe | [
"MIT"
] | null | null | null | import asyncio
import json
import os
from configparser import ConfigParser
import discord
from discord.ext import tasks, commands
from dotenv import load_dotenv
from datetime import datetime
load_dotenv()
TOKEN = os.getenv('TOKEN')
CONFIG_FILE = 'config.ini'
# Config
config_parser = ConfigParser()
config_parser.read(CONFIG_FILE)
# In minutes
CHALLENGE_TIME = int(config_parser.get('CHALLENGE', 'frequency'))
BOUNTY_TIME = int(config_parser.get('BOUNTY', 'frequency'))
challenge_start = 0
bounty_start = 0
started = False
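
# Expected config.ini shape (values are illustrative; these are the keys read
# and written throughout this module):
#   [BOUNTY]
#   enabled = True
#   frequency = 1440
#   file = bounties.json
#   index = 0
#   channel = 0
#   message_id = 0
#   [CHALLENGE]
#   (same keys, with file pointing at the challenges JSON)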
def read_file(file):
with open(file) as f:
lst = []
for entry in json.load(f):
lst.append(entry)
return lst
bounties = read_file(config_parser.get('BOUNTY', 'file'))
challenges = read_file(config_parser.get('CHALLENGE', 'file'))
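
# Each JSON file holds a list of objects: bounty entries use the keys
# "bounty" and "keyword", challenge entries "challenge" and "keyword"
# (see the announcement loops below).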
# Create bot
client = commands.Bot(command_prefix='!')
# Startup information
@client.event
async def on_ready():
print(f'Connected to bot: {client.user.name}')
print(f'Bot ID: {client.user.id}')
@client.event
async def on_command_error(ctx, error):
if isinstance(error, commands.CommandNotFound):
return
elif isinstance(error, commands.MissingPermissions):
return
elif isinstance(error, commands.MissingRequiredArgument):
return
elif isinstance(error, commands.CommandInvokeError):
return
elif isinstance(error, commands.ChannelNotFound):
return
raise error
@commands.has_permissions(administrator=True)
@client.command(help='- Start the announcements')
async def start(ctx):
global started
if config_parser.get('CHALLENGE', 'enabled') == "True":
challenge_loop.start()
if config_parser.get('BOUNTY', 'enabled') == "True":
bounty_loop.start()
started = True
await ctx.send('Announcements have been started')
    await asyncio.sleep(3)  # non-blocking pause so the loops can post before the countdown edits
countdown.start()
@commands.has_permissions(administrator=True)
@client.command(help='- Stop the announcements')
async def stop(ctx):
global started
challenge_loop.cancel()
bounty_loop.cancel()
countdown.cancel()
started = False
await ctx.send('Announcements have been stopped')
@commands.has_permissions(administrator=True)
@client.command(help='- DO NOT USE THIS WHILE EVENT IS ONGOING!')
async def reset(ctx):
config_parser.set('BOUNTY', 'index', '0')
with open(CONFIG_FILE, 'w') as config_file:
config_parser.write(config_file)
config_parser.set('CHALLENGE', 'index', '0')
with open(CONFIG_FILE, 'w') as config_file:
config_parser.write(config_file)
await ctx.send('Indexes have been reset to 0')
@commands.has_permissions(administrator=True)
@client.command(help='- Give a message id to set message as ended. Run this in the same channel as the ended message.')
async def end(ctx, arg):
ended_message = await ctx.fetch_message(int(arg))
if ended_message.author == client.user:
new_embed = ended_message.embeds[0]
new_embed.set_footer(text='Time remaining: 0h 0min')
await ended_message.edit(embed=new_embed)
await ctx.message.delete()
@commands.has_permissions(administrator=True)
@client.command(help='- Set channels for bounties and challenges. Configure this before you start the event!')
async def set_channel(ctx, t, channel: discord.TextChannel):
if started:
await ctx.send("You can only configure this while the event is stopped.")
return
if t not in ["bounty", "challenge"]:
await ctx.send("Invalid type. Only valid types are 'bounty' and 'challenge'.")
return
config_parser.set(t.upper(), 'channel', str(channel.id))
with open(CONFIG_FILE, 'w') as config_file:
config_parser.write(config_file)
await ctx.send(f'Successfully set the {t} channel to {channel.mention}')
# Announcements for the bounty channel
@tasks.loop(minutes=BOUNTY_TIME)
async def bounty_loop():
global bounty_start
bounty_start = datetime.now()
bounty_channel = client.get_channel(int(config_parser.get('BOUNTY', 'channel')))
bounty_index = int(config_parser.get('BOUNTY', 'index'))
if bounty_index >= len(bounties):
bounty_loop.stop()
return
embed_message = discord.Embed(title=f'{BOUNTY_TIME//60} Hour Bounty', color=discord.Color.green())
embed_message.add_field(name="The current bounty is...", value=bounties[bounty_index]['bounty'], inline=False)
embed_message.add_field(name="Keyword", value=bounties[bounty_index]['keyword'])
embed_message.set_footer(text=f'Time remaining: {BOUNTY_TIME//60}h {BOUNTY_TIME%60}min')
msg = await bounty_channel.send(embed=embed_message)
config_parser.set('BOUNTY', 'index', str(bounty_index + 1))
config_parser.set('BOUNTY', 'message_id', str(msg.id))
with open(CONFIG_FILE, 'w') as config_file:
config_parser.write(config_file)
# Announcements for the challenges channel
@tasks.loop(minutes=CHALLENGE_TIME)
async def challenge_loop():
global challenge_start
challenge_start = datetime.now()
challenge_channel = client.get_channel(int(config_parser.get('CHALLENGE', 'channel')))
challenge_index = int(config_parser.get('CHALLENGE', 'index'))
if challenge_index >= len(challenges):
challenge_loop.stop()
return
embed_message = discord.Embed(title="Daily Challenge", color=discord.Color.green())
embed_message.add_field(name="The current challenge is...", value=challenges[challenge_index]['challenge'], inline=False)
embed_message.add_field(name="Keyword", value=challenges[challenge_index]['keyword'])
embed_message.set_footer(text=f'Time remaining: {CHALLENGE_TIME // 60}h {CHALLENGE_TIME % 60}min')
msg = await challenge_channel.send(embed=embed_message)
config_parser.set('CHALLENGE', 'index', str(challenge_index + 1))
config_parser.set('CHALLENGE', 'message_id', str(msg.id))
with open(CONFIG_FILE, 'w') as config_file:
config_parser.write(config_file)
def update_counter(message, t, start_time):
new_embed = message.embeds[0]
difference = datetime.now() - start_time
difference_min = difference.seconds//60
new_embed.set_footer(text=f'Time remaining: {(t - difference_min)//60}h {(t - difference_min)%60}min')
return new_embed
@tasks.loop(minutes=1)
async def countdown():
if config_parser.get('BOUNTY', 'enabled') == "True":
bounty_channel = await client.fetch_channel(config_parser.get('BOUNTY', 'channel'))
bounty_message = await bounty_channel.fetch_message(config_parser.get('BOUNTY', 'message_id'))
await bounty_message.edit(embed=update_counter(bounty_message, BOUNTY_TIME, bounty_start))
if config_parser.get('CHALLENGE', 'enabled') == "True":
challenge_channel = await client.fetch_channel(config_parser.get('CHALLENGE', 'channel'))
challenge_message = await challenge_channel.fetch_message(config_parser.get('CHALLENGE', 'message_id'))
await challenge_message.edit(embed=update_counter(challenge_message, CHALLENGE_TIME, challenge_start))
client.run(TOKEN)
| 33.15493 | 125 | 0.717927 | 932 | 7,062 | 5.26824 | 0.177039 | 0.07332 | 0.04888 | 0.039104 | 0.468024 | 0.349287 | 0.298574 | 0.298574 | 0.136456 | 0.117312 | 0 | 0.004889 | 0.160011 | 7,062 | 212 | 126 | 33.311321 | 0.822825 | 0.017842 | 0 | 0.226667 | 0 | 0.006667 | 0.190215 | 0.006206 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013333 | false | 0 | 0.053333 | 0 | 0.14 | 0.013333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
7981d5f5623d46312039f8e4c8cb2b8fbffad125 | 4,730 | py | Python | tests/test_rtpPayload_ttml.py | bbc/rd-apmm-python-lib-rtpPayload_ttml | 805d13242b44f26f38e5a9d940ee2ec4862528c3 | [
"Apache-1.1"
] | null | null | null | tests/test_rtpPayload_ttml.py | bbc/rd-apmm-python-lib-rtpPayload_ttml | 805d13242b44f26f38e5a9d940ee2ec4862528c3 | [
"Apache-1.1"
] | null | null | null | tests/test_rtpPayload_ttml.py | bbc/rd-apmm-python-lib-rtpPayload_ttml | 805d13242b44f26f38e5a9d940ee2ec4862528c3 | [
"Apache-1.1"
] | null | null | null | #!/usr/bin/python
#
# James Sandford, copyright BBC 2020
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from unittest import TestCase
from hypothesis import given, strategies as st # type: ignore
from rtpPayload_ttml import (RTPPayload_TTML, LengthError, SUPPORTED_ENCODINGS,
utfEncode)
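
# The payload length travels in a 16-bit field (see toBytearray), hence the
# recurring `len(utfEncode(...)) < 2**16` filters on generated documents.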
class TestExtension (TestCase):
def setUp(self):
self.thisP = RTPPayload_TTML()
@given(st.tuples(
st.text(),
st.sampled_from(SUPPORTED_ENCODINGS),
st.booleans()).filter(
lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
def test_init(self, data):
doc, encoding, bom = data
reservedBits = bytearray(b'\x00\x00')
newP = RTPPayload_TTML(reservedBits, doc, encoding, bom)
self.assertEqual(newP.reserved, reservedBits)
self.assertEqual(newP.userDataWords, doc)
self.assertEqual(newP._encoding, encoding)
self.assertEqual(newP._bom, bom)
@given(
st.text(),
st.text().filter(lambda x: x not in SUPPORTED_ENCODINGS),
st.booleans())
def test_init_invalidEnc(self, doc, enc, bom):
reservedBits = bytearray(b'\x00\x00')
with self.assertRaises(AttributeError):
RTPPayload_TTML(reservedBits, doc, enc, bom)
def test_reserved_default(self):
self.assertEqual(self.thisP.reserved, bytearray(b'\x00\x00'))
def test_reserved_notBytes(self):
with self.assertRaises(AttributeError):
self.thisP.reserved = ""
@given(st.binary().filter(lambda x: x != bytearray(b'\x00\x00')))
def test_reserved_invalid(self, value):
with self.assertRaises(ValueError):
self.thisP.reserved = bytearray(value)
def test_userDataWords_default(self):
self.assertEqual(self.thisP.userDataWords, "")
@given(st.text().filter(lambda x: len(utfEncode(x, "UTF-8")) < 2**16))
def test_userDataWords(self, doc):
self.thisP.userDataWords = doc
self.assertEqual(self.thisP.userDataWords, doc)
def test_userDataWords_invalidType(self):
with self.assertRaises(AttributeError):
self.thisP.userDataWords = 0
def test_userDataWords_tooLong(self):
doc = ""
for x in range(2**16):
doc += "a"
with self.assertRaises(LengthError):
self.thisP.userDataWords = doc
@given(st.tuples(
st.text(),
st.sampled_from(SUPPORTED_ENCODINGS),
st.booleans()).filter(
lambda x: len(utfEncode(x[0], x[1], x[2])) < 2**16))
def test_userDataWords_encodings(self, data):
doc, encoding, bom = data
payload = RTPPayload_TTML(
userDataWords=doc, encoding=encoding, bom=bom)
self.assertEqual(payload.userDataWords, doc)
self.assertEqual(payload._userDataWords, utfEncode(doc, encoding, bom))
def test_eq(self):
reservedBits = bytearray(b'\x00\x00')
newP = RTPPayload_TTML(reservedBits, "")
self.assertEqual(newP, self.thisP)
def test_bytearray_default(self):
expected = bytearray(4)
self.assertEqual(bytes(self.thisP), expected)
newP = RTPPayload_TTML().fromBytearray(expected)
self.assertEqual(newP, self.thisP)
@given(st.binary(min_size=2, max_size=2).filter(
lambda x: x != b'\x00\x00'))
def test_fromBytearray_invalidLen(self, length):
bArray = bytearray(4)
bArray[2:4] = length
with self.assertRaises(LengthError):
RTPPayload_TTML().fromBytearray(bArray)
@given(st.text())
def test_toBytearray(self, doc):
self.thisP.userDataWords = doc
bDoc = utfEncode(doc)
expected = bytearray(2)
expected += int(len(bDoc)).to_bytes(2, byteorder='big')
expected += bDoc
self.assertEqual(expected, self.thisP.toBytearray())
@given(st.text())
def test_fromBytearray(self, doc):
expected = RTPPayload_TTML(userDataWords=doc)
bDoc = utfEncode(doc)
bArray = bytearray(2)
bArray += int(len(bDoc)).to_bytes(2, byteorder='big')
bArray += bDoc
self.thisP.fromBytearray(bArray)
self.assertEqual(expected, self.thisP)
| 33.785714 | 79 | 0.65074 | 566 | 4,730 | 5.353357 | 0.256184 | 0.047525 | 0.025743 | 0.026403 | 0.364026 | 0.246205 | 0.176238 | 0.124752 | 0.104951 | 0.066667 | 0 | 0.017099 | 0.233404 | 4,730 | 139 | 80 | 34.028777 | 0.818533 | 0.12389 | 0 | 0.3125 | 0 | 0 | 0.014535 | 0 | 0 | 0 | 0 | 0 | 0.208333 | 1 | 0.166667 | false | 0 | 0.03125 | 0 | 0.208333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |