hexsha string | size int64 | ext string | lang string | max_stars_repo_path string | max_stars_repo_name string | max_stars_repo_head_hexsha string | max_stars_repo_licenses list | max_stars_count int64 | max_stars_repo_stars_event_min_datetime string | max_stars_repo_stars_event_max_datetime string | max_issues_repo_path string | max_issues_repo_name string | max_issues_repo_head_hexsha string | max_issues_repo_licenses list | max_issues_count int64 | max_issues_repo_issues_event_min_datetime string | max_issues_repo_issues_event_max_datetime string | max_forks_repo_path string | max_forks_repo_name string | max_forks_repo_head_hexsha string | max_forks_repo_licenses list | max_forks_count int64 | max_forks_repo_forks_event_min_datetime string | max_forks_repo_forks_event_max_datetime string | content string | avg_line_length float64 | max_line_length int64 | alphanum_fraction float64 | qsc_code_num_words_quality_signal int64 | qsc_code_num_chars_quality_signal float64 | qsc_code_mean_word_length_quality_signal float64 | qsc_code_frac_words_unique_quality_signal float64 | qsc_code_frac_chars_top_2grams_quality_signal float64 | qsc_code_frac_chars_top_3grams_quality_signal float64 | qsc_code_frac_chars_top_4grams_quality_signal float64 | qsc_code_frac_chars_dupe_5grams_quality_signal float64 | qsc_code_frac_chars_dupe_6grams_quality_signal float64 | qsc_code_frac_chars_dupe_7grams_quality_signal float64 | qsc_code_frac_chars_dupe_8grams_quality_signal float64 | qsc_code_frac_chars_dupe_9grams_quality_signal float64 | qsc_code_frac_chars_dupe_10grams_quality_signal float64 | qsc_code_frac_chars_replacement_symbols_quality_signal float64 | qsc_code_frac_chars_digital_quality_signal float64 | qsc_code_frac_chars_whitespace_quality_signal float64 | qsc_code_size_file_byte_quality_signal float64 | qsc_code_num_lines_quality_signal float64 | qsc_code_num_chars_line_max_quality_signal float64 | qsc_code_num_chars_line_mean_quality_signal float64 | qsc_code_frac_chars_alphabet_quality_signal float64 | qsc_code_frac_chars_comments_quality_signal float64 | qsc_code_cate_xml_start_quality_signal float64 | qsc_code_frac_lines_dupe_lines_quality_signal float64 | qsc_code_cate_autogen_quality_signal float64 | qsc_code_frac_lines_long_string_quality_signal float64 | qsc_code_frac_chars_string_length_quality_signal float64 | qsc_code_frac_chars_long_word_length_quality_signal float64 | qsc_code_frac_lines_string_concat_quality_signal float64 | qsc_code_cate_encoded_data_quality_signal float64 | qsc_code_frac_chars_hex_words_quality_signal float64 | qsc_code_frac_lines_prompt_comments_quality_signal float64 | qsc_code_frac_lines_assert_quality_signal float64 | qsc_codepython_cate_ast_quality_signal float64 | qsc_codepython_frac_lines_func_ratio_quality_signal float64 | qsc_codepython_cate_var_zero_quality_signal bool | qsc_codepython_frac_lines_pass_quality_signal float64 | qsc_codepython_frac_lines_import_quality_signal float64 | qsc_codepython_frac_lines_simplefunc_quality_signal float64 | qsc_codepython_score_lines_no_logic_quality_signal float64 | qsc_codepython_frac_lines_print_quality_signal float64 | qsc_code_num_words int64 | qsc_code_num_chars int64 | qsc_code_mean_word_length int64 | qsc_code_frac_words_unique null | qsc_code_frac_chars_top_2grams int64 | qsc_code_frac_chars_top_3grams int64 | qsc_code_frac_chars_top_4grams int64 | qsc_code_frac_chars_dupe_5grams int64 | qsc_code_frac_chars_dupe_6grams int64 | qsc_code_frac_chars_dupe_7grams int64 | qsc_code_frac_chars_dupe_8grams int64 | 
qsc_code_frac_chars_dupe_9grams int64 | qsc_code_frac_chars_dupe_10grams int64 | qsc_code_frac_chars_replacement_symbols int64 | qsc_code_frac_chars_digital int64 | qsc_code_frac_chars_whitespace int64 | qsc_code_size_file_byte int64 | qsc_code_num_lines int64 | qsc_code_num_chars_line_max int64 | qsc_code_num_chars_line_mean int64 | qsc_code_frac_chars_alphabet int64 | qsc_code_frac_chars_comments int64 | qsc_code_cate_xml_start int64 | qsc_code_frac_lines_dupe_lines int64 | qsc_code_cate_autogen int64 | qsc_code_frac_lines_long_string int64 | qsc_code_frac_chars_string_length int64 | qsc_code_frac_chars_long_word_length int64 | qsc_code_frac_lines_string_concat null | qsc_code_cate_encoded_data int64 | qsc_code_frac_chars_hex_words int64 | qsc_code_frac_lines_prompt_comments int64 | qsc_code_frac_lines_assert int64 | qsc_codepython_cate_ast int64 | qsc_codepython_frac_lines_func_ratio int64 | qsc_codepython_cate_var_zero int64 | qsc_codepython_frac_lines_pass int64 | qsc_codepython_frac_lines_import int64 | qsc_codepython_frac_lines_simplefunc int64 | qsc_codepython_score_lines_no_logic int64 | qsc_codepython_frac_lines_print int64 | effective string | hits int64 |
|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|---|
ce4a34a6473bf57a825b9073d93326bb16d83f0a | 13,670 | py | Python | Lotka/dem_train.py | plaveczlambert/deep_euler_tests | a3ceef98ba76bd7a00ccd3c773cd9850311b3b1a | [
"MIT"
] | 1 | 2021-10-19T02:50:46.000Z | 2021-10-19T02:50:46.000Z | Lotka/dem_train.py | plaveczlambert/deep_euler_tests | a3ceef98ba76bd7a00ccd3c773cd9850311b3b1a | [
"MIT"
] | 1 | 2021-11-12T01:37:11.000Z | 2021-11-16T02:02:40.000Z | Lotka/dem_train.py | plaveczlambert/deep_euler_tests | a3ceef98ba76bd7a00ccd3c773cd9850311b3b1a | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import argparse
import os
import h5py
from datetime import datetime
from copy import deepcopy
import numpy as np
from sklearn.preprocessing import StandardScaler, MinMaxScaler
from sklearn.model_selection import train_test_split
import matplotlib.pyplot as plt
import torch
import torch.optim
import torch.jit
import torch.nn as nn
from torch.utils.data import TensorDataset, DataLoader
from model import MLPs
from utils.plot_utils import plot_loghist
#from utils.scalers import writeStandardScaler
#from utils.scalers import writeMinMaxScaler
torch.set_default_dtype(torch.float64)
# ----- ----- ----- ----- ----- -----
# Command line arguments
# ----- ----- ----- ----- ----- -----
parser = argparse.ArgumentParser()
parser.add_argument(
'--batch',
default = '100',
type = int,
help = "Batch size. 0 means training set length. Default is 100."
)
parser.add_argument(
'--epoch',
default = '1',
type = int,
help = "Number of epochs to train. Default is 1."
)
parser.add_argument(
'--load_model',
default = '',
type = str,
help = "Path to model dict file to load."
)
parser.add_argument(
'--name',
default = '',
type = str,
help = "Optional name of the model."
)
parser.add_argument(
'--start_epoch',
default = '0',
type = int,
help = "Epochs of training of the loaded model. Deprecated"
)
parser.add_argument(
'--save_path',
default = 'training/',
type = str,
help = "Path to save model. Default is 'training'."
)
parser.add_argument(
'--monitor',
default = 0,
type = int,
help = "0: no monitoring, 1: show plots on end, 2: monitor all along"
)
parser.add_argument(
'--print_losses',
default=0,
type=int,
help = "Print every nth losses. Default is 0 meaning no print. Option monitor=2 overrides this."
)
parser.add_argument(
'--save_plots',
dest = 'save_plots',
action = 'store_true',
help = "If set, saves the plots generated after training."
)
parser.add_argument(
'--test',
dest='test',
action='store_true',
help = "If set, no saving takes place."
)
parser.add_argument(
'--print_epoch',
default = 0,
type = int,
help = "Print epoch number at every nth epoch. Default is zero, meaning no print."
)
parser.add_argument(
'--cpu',
dest='cpu',
action='store_true',
help= "If set, training is carried out on the cpu."
)
parser.add_argument(
'--early_stop',
dest='early_stop',
action='store_true',
help= "Enable early stop when the latest validation loss is larger than the average of the previos five validation losses."
)
parser.add_argument(
'--num_threads',
default = 0,
type = int,
help = "Number of cpu threads to be used by pytorch. Default is 0 meaning same as number of cores."
)
parser.add_argument(
'--data',
default = os.path.join('data', 'lotka_data.hdf5'),
type = str,
help = "Data to be loaded for training. Default is 'data/lotka_data.hdf5'."
)
parser.set_defaults(
feature=False,
monitor=False,
load_model=False,
test=False,
cpu=False,
early_stop=False
)
args = parser.parse_args()
if args.num_threads:
torch.set_num_threads(args.num_threads)
if not os.path.isdir(args.save_path):
os.mkdir(args.save_path)
#device selection logic
device=0
if args.cpu:
device = torch.device('cpu')
else:
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
begin_time = datetime.now()
time_str = begin_time.strftime("%y%m%d%H%M")
print("Begin: "+ str(time_str))
if not args.test:
    logfile = open(os.path.join(args.save_path, (args.name+'_' if args.name else '') + time_str + '.log'), 'w')
#check model availability
if args.load_model:
if not os.path.exists(args.load_model):
print("File: " +args.load_model+" does not exist. Abort")
exit()
# ----- ----- ----- ----- ----- -----
# Data loading
# ----- ----- ----- ----- ----- -----
data_path = args.data
f = h5py.File(data_path, 'r')
keys = list(f.keys())
print(keys)
X = np.empty(f['lotka_X'].shape)
f['lotka_X'].read_direct(X)
Y = np.empty(f['lotka_Y'].shape)
f['lotka_Y'].read_direct(Y)
f.close()
print(X[1,:])
input_names = ["x1", "x2"]
print("Train data from: '"+ data_path +"'")
if not args.test:
print("Train data from: " + data_path, file=logfile)
input_length = X.shape[1]
print("Input length: " +str(input_length))
x_trn, x_vld, y_trn, y_vld = train_test_split(
X, Y,
test_size = .25,
random_state= 410,
shuffle = True
)
x_vld, x_tst, y_vld, y_tst = train_test_split(
x_vld, y_vld,
test_size = .40,
shuffle = False
)
# ----- ----- ----- ----- ----- -----
# Data scaling
# ----- ----- ----- ----- ----- -----
'''in_scaler = StandardScaler(with_mean=True, with_std=True, copy=False)
out_scaler = MinMaxScaler(feature_range=(0, 1), copy=False)
in_scaler.fit(x_trn)
out_scaler.fit(y_trn)
x_trn = in_scaler.transform(x_trn)
x_vld = in_scaler.transform(x_vld)
x_tst = in_scaler.transform(x_tst)
y_trn = out_scaler.transform(y_trn)
y_vld = out_scaler.transform(y_vld)
y_tst_unnormed = np.array(y_tst,copy=True)
y_tst = out_scaler.transform(y_tst)'''
trn_set = TensorDataset(torch.tensor(x_trn, dtype=torch.float64), torch.tensor(y_trn, dtype=torch.float64))
vld_set = TensorDataset(torch.tensor(x_vld, dtype=torch.float64), torch.tensor(y_vld, dtype=torch.float64))
trn_ldr = DataLoader(
trn_set,
batch_size = len(trn_set) if args.batch==0 else args.batch,
shuffle = True
)
vld_batch = 100000
vld_ldr = DataLoader(
vld_set,
batch_size = vld_batch,
shuffle = False
)
start_epoch = 0
# ----- ----- ----- ----- ----- -----
# Model definition
# ----- ----- ----- ----- ----- -----
model = MLPs.SimpleMLP(x_trn.shape[-1], y_trn.shape[-1], 80)
model_checkpoint = 0
if args.load_model:
model_checkpoint = torch.load(args.load_model)
model.load_state_dict(model_checkpoint['model_state_dict'])
start_epoch = model_checkpoint['epoch']
if not args.test:
print("Loaded model state from: " + str(args.load_model),file=logfile)
print("Loaded model state from: " + str(args.load_model))
# ----- ----- ----- ----- ----- -----
# Training
# ----- ----- ----- ----- ----- -----
model = model.to(device)
loss = nn.MSELoss()
optim = torch.optim.Adam(model.parameters(), lr=3e-4, eps=1e-8)#, weight_decay=1e-7)
if args.load_model:
optim.load_state_dict(model_checkpoint['optimizer_state_dict'])
total_loss_arr = np.zeros(args.epoch)
vld_loss_arr = np.zeros(args.epoch)
epochs = np.linspace(start_epoch,start_epoch+args.epoch-1,args.epoch)
if args.monitor==2:
plt.ion()
plt.figure(num="Training and Validation Losses")
if not args.test:
print("Training...",file=logfile)
learned_epoch = 0
vld_loss_best = 1e100
best_model_state_dict = 0
best_optim_state_dict = 0
best_epoch = 0
for num_epoch in range(args.epoch):
if args.print_epoch and num_epoch % args.print_epoch == 0:
print(num_epoch+start_epoch)
model.train()
total_loss = 0
len_dataset = 0
for batch in trn_ldr:
x,y = batch
x = x.to(device)
y = y.to(device)
optim.zero_grad()
out = model(x)
        trn_loss = loss(out, y)
trn_loss.backward()
optim.step()
total_loss += trn_loss.item() * len(x)
total_loss /= len(trn_ldr.dataset)
total_loss_arr[num_epoch] = total_loss
learned_epoch += 1
model.eval()
vld_loss = 0
for batch in vld_ldr:
        x, y = batch
x = x.to(device)
y = y.to(device)
out = model(x)
vld_loss += loss(out, y).item() * len(x)
vld_loss /= len(vld_ldr.dataset)
vld_loss_arr[num_epoch] = vld_loss
if args.monitor==2 or (args.print_losses and num_epoch%args.print_losses==0):
print(total_loss)
print(vld_loss)
if not args.test:
print(total_loss, file=logfile)
print(vld_loss, file=logfile)
if args.monitor==2: #real-time plotting
plt.cla()
plt.plot(epochs, total_loss_arr)
plt.plot(epochs, vld_loss_arr)
plt.yscale('log')
        if num_epoch != 0: plt.xlim([start_epoch, start_epoch+num_epoch])
plt.pause(0.01)
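    # Patience-based early stopping: remember the best validation loss and
    # stop once it has not improved for 50 consecutive epochs.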
if args.early_stop and vld_loss < vld_loss_best:
vld_loss_best = vld_loss
best_model_state_dict = deepcopy(model.state_dict())
best_optim_state_dict = deepcopy(optim.state_dict())
best_epoch = num_epoch
else:
if num_epoch-best_epoch == 50:
if not args.test:
print("Early stopped", file=logfile)
print("Early stopped")
break
if not args.test:
print("Training ready, epochs: " + str(start_epoch) + "..." + str(start_epoch+learned_epoch),file=logfile)
end_time = datetime.now()
duration = end_time - begin_time
time_end_str = end_time.strftime("%y%m%d%H%M")
print("Ended at: "+ time_end_str)
print("Duration: " + str(duration))
if not args.test:
print("Training duration: " + str(duration),file=logfile)
# ----- ----- ----- ----- ----- -----
#Test
# ----- ----- ----- ----- ----- -----
tst_set = TensorDataset(torch.Tensor(x_tst), torch.Tensor(y_tst))
tst_batch = 100000
tst_ldr = DataLoader(
tst_set,
batch_size = tst_batch,
shuffle = False
)
test_loss = 0
for batch in tst_ldr:
    x, y = batch
x = x.to(device)
y = y.to(device)
out = model(x)
test_loss += loss(out, y).item() * len(x)
test_loss /= len(tst_ldr.dataset)
print('Test loss: ' + str(test_loss))
if not args.test:
print('Test loss: ' + str(test_loss),file=logfile)
out = model(torch.tensor(x_tst,dtype=torch.float64).to(device)).cpu().detach().numpy()
test_losses = np.abs(out - y_tst)
max_loss = np.max(test_losses)
mean_loss = np.mean(test_losses)
print('Max unnormed loss: ' + str(max_loss))
print('Mean unnormed loss: ' + str(mean_loss))
if not args.test:
print('Max unnormed loss: ' + str(max_loss),file=logfile)
print('Mean unnormed loss: ' + str(mean_loss),file=logfile)
# ----- ----- ----- ----- ----- -----
#Model Save
# ----- ----- ----- ----- ----- -----
traced_model = 0
if not args.test:
#save scalers
    '''f = open(args.save_path+'scaler_' + (args.name+'_' if args.name else '') + time_str + '.psca', 'w') # chosen this extension
if type(out_scaler) == StandardScaler:
writeStandardScaler(f, out_scaler)
else:
writeMinMaxScaler(f, out_scaler)
if type(in_scaler) == StandardScaler:
writeStandardScaler(f, in_scaler)
else:
writeMinMaxScaler(f, in_scaler)
f.close()
print("Saved scalers.",file=logfile)
print("Saved scalers.")'''
if args.early_stop:
torch.save({
'epoch': start_epoch+best_epoch,
'model_state_dict': best_model_state_dict,
#'scheduler_state_dict': best_scheduler_state_dict,
'optimizer_state_dict': best_optim_state_dict
},
args.save_path+'model_' + (args.name+'_' if args.name else '') + 'e' + str(start_epoch+learned_epoch) + '_' + time_str + '.pt')
else:
torch.save({
'epoch': start_epoch+learned_epoch,
'model_state_dict': model.state_dict(),
#'scheduler_state_dict': scheduler.state_dict(),
'optimizer_state_dict': optim.state_dict()
},
args.save_path+'model_' + (args.name+'_' if args.name else '') + 'e' + str(start_epoch+learned_epoch) + '_' + time_str + '.pt')
print("Saved model.",file=logfile)
print("Saved model.")
#trace model to be used by C/C++
if args.early_stop:
model.load_state_dict(best_model_state_dict)
model.eval()
traced_model = torch.jit.trace(model.cpu(), torch.randn((1,x_trn.shape[-1])))
traced_model.save(args.save_path+'traced_model_' + (args.name+'_' if args.name else '') + 'e'+str(start_epoch+learned_epoch) + '_' + time_str + '.pt')
print("Saved trace model.",file=logfile)
print("Saved trace model.")
# ----- ----- ----- ----- ----- -----
# Plotting
# ----- ----- ----- ----- ----- -----
if args.monitor>0:
plt.ion()
plt.show()
plt.plot(epochs[0:learned_epoch], total_loss_arr[0:learned_epoch], label='Total Loss')
plt.plot(epochs[0:learned_epoch], vld_loss_arr[0:learned_epoch], label='Validation Loss')
plt.yscale('log')
plt.title('Loss Diagram')
plt.xlabel('Epochs')
plt.ylabel('Loss')
plt.legend()
if args.monitor>0:
plt.show()
if not args.test and args.save_plots:
plt.savefig(args.save_path+"learning_curve_"+ (args.name+'_' if args.name else '') + time_str+".png", transparent=True)
plt.figure(num="Losses")
plt.title("Loss Distribution of Truncation Error")
for i in range(test_losses.shape[1]):
plot_loghist(test_losses[:,i], 500, label=input_names[i])
plt.legend()
if args.monitor>0:
plt.show()
if not args.test and args.save_plots:
plt.savefig(args.save_path+"Loss_distr_"+time_str+".png", transparent=True)
plt.figure(num="Losses (Full)")
plt.title("Loss Distribution of Truncation Error(Full)")
plot_loghist(test_losses.flat, 500)
#plt.hist(test_losses.flat, bins=50)
#plt.ylim([0,500])
plt.xscale('log')
if args.monitor>0:
plt.ioff()
plt.show()
if not args.test and args.save_plots:
plt.savefig(args.save_path+"Loss_distr_full_"+time_str+".png", transparent=True)
if not args.test:
logfile.close()
| 29.912473 | 154 | 0.625457 | 1,900 | 13,670 | 4.302105 | 0.154211 | 0.026425 | 0.031196 | 0.023856 | 0.310741 | 0.240152 | 0.139711 | 0.109616 | 0.094446 | 0.073893 | 0 | 0.011581 | 0.210388 | 13,670 | 456 | 155 | 29.97807 | 0.745692 | 0.077469 | 0 | 0.279202 | 0 | 0 | 0.170864 | 0.001977 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.045584 | 0 | 0.045584 | 0.105413 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce4b776c78d1493fba953c555d7092b9f1169ee4 | 20,110 | py | Python | predicators/src/planning.py | williamshen-nz/predicators | 6a6b3444108a6d2da3ec3c7d85bbe60ae3b113b9 | [
"MIT"
] | null | null | null | predicators/src/planning.py | williamshen-nz/predicators | 6a6b3444108a6d2da3ec3c7d85bbe60ae3b113b9 | [
"MIT"
] | null | null | null | predicators/src/planning.py | williamshen-nz/predicators | 6a6b3444108a6d2da3ec3c7d85bbe60ae3b113b9 | [
"MIT"
] | null | null | null | """Algorithms for task and motion planning.
Mainly, "SeSamE": SEarch-and-SAMple planning, then Execution.
"""
from __future__ import annotations
from collections import defaultdict
import heapq as hq
from itertools import islice
import time
from typing import List, Set, Optional, Tuple, Iterator, Sequence, Dict
from dataclasses import dataclass
import numpy as np
from predicators.src.approaches import ApproachFailure, ApproachTimeout
from predicators.src.structs import State, Task, NSRT, Predicate, \
GroundAtom, _GroundNSRT, DummyOption, DefaultState, _Option, \
Metrics, STRIPSOperator, OptionSpec, Object
from predicators.src import utils
from predicators.src.utils import _TaskPlanningHeuristic, ExceptionWithInfo, \
EnvironmentFailure
from predicators.src.option_model import _OptionModelBase
from predicators.src.settings import CFG
_NOT_CAUSES_FAILURE = "NotCausesFailure"
@dataclass(repr=False, eq=False)
class _Node:
"""A node for the search over skeletons."""
atoms: Set[GroundAtom]
skeleton: List[_GroundNSRT]
atoms_sequence: List[Set[GroundAtom]] # expected state sequence
parent: Optional[_Node]
def sesame_plan(
task: Task,
option_model: _OptionModelBase,
nsrts: Set[NSRT],
initial_predicates: Set[Predicate],
timeout: float,
seed: int,
task_planning_heuristic: str,
max_skeletons_optimized: int,
check_dr_reachable: bool = True,
allow_noops: bool = False,
) -> Tuple[List[_Option], Metrics]:
"""Run TAMP.
Return a sequence of options, and a dictionary of metrics for this
run of the planner. Uses the SeSamE strategy: SEarch-and-SAMple
planning, then Execution.
"""
nsrt_preds, _ = utils.extract_preds_and_types(nsrts)
# Ensure that initial predicates are always included.
predicates = initial_predicates | set(nsrt_preds.values())
init_atoms = utils.abstract(task.init, predicates)
objects = list(task.init)
start_time = time.time()
ground_nsrts = []
for nsrt in sorted(nsrts):
for ground_nsrt in utils.all_ground_nsrts(nsrt, objects):
ground_nsrts.append(ground_nsrt)
if time.time() - start_time > timeout:
raise ApproachTimeout("Planning timed out in grounding!")
# Keep restarting the A* search while we get new discovered failures.
metrics: Metrics = defaultdict(float)
# Keep track of partial refinements: skeletons and partial plans. This is
# for making videos of failed planning attempts.
partial_refinements = []
while True:
        # Optionally exclude NSRTs with empty effects, because they can slow
        # the search significantly. Note however that we need to do this
        # inside the while True here, because an NSRT that initially has
        # empty effects may later gain a _NOT_CAUSES_FAILURE add effect.
nonempty_ground_nsrts = [
nsrt for nsrt in ground_nsrts
if allow_noops or (nsrt.add_effects | nsrt.delete_effects)
]
all_reachable_atoms = utils.get_reachable_atoms(
nonempty_ground_nsrts, init_atoms)
if check_dr_reachable and not task.goal.issubset(all_reachable_atoms):
raise ApproachFailure(f"Goal {task.goal} not dr-reachable")
reachable_nsrts = [
nsrt for nsrt in nonempty_ground_nsrts
if nsrt.preconditions.issubset(all_reachable_atoms)
]
heuristic = utils.create_task_planning_heuristic(
task_planning_heuristic, init_atoms, task.goal, reachable_nsrts,
predicates, objects)
try:
new_seed = seed + int(metrics["num_failures_discovered"])
for skeleton, atoms_sequence in _skeleton_generator(
task, reachable_nsrts, init_atoms, heuristic, new_seed,
timeout - (time.time() - start_time), metrics,
max_skeletons_optimized):
plan, suc = _run_low_level_search(
task, option_model, skeleton, atoms_sequence, new_seed,
timeout - (time.time() - start_time))
if suc:
# Success! It's a complete plan.
print(
f"Planning succeeded! Found plan of length "
f"{len(plan)} after "
f"{int(metrics['num_skeletons_optimized'])} "
f"skeletons, discovering "
f"{int(metrics['num_failures_discovered'])} failures")
metrics["plan_length"] = len(plan)
return plan, metrics
partial_refinements.append((skeleton, plan))
if time.time() - start_time > timeout:
raise ApproachTimeout(
"Planning timed out in backtracking!",
info={"partial_refinements": partial_refinements})
except _DiscoveredFailureException as e:
metrics["num_failures_discovered"] += 1
new_predicates, ground_nsrts = _update_nsrts_with_failure(
e.discovered_failure, ground_nsrts)
predicates |= new_predicates
partial_refinements.append(
(skeleton, e.info["longest_failed_refinement"]))
except (_MaxSkeletonsFailure, _SkeletonSearchTimeout) as e:
e.info["partial_refinements"] = partial_refinements
raise e
def task_plan_grounding(
init_atoms: Set[GroundAtom],
objects: Set[Object],
strips_ops: Sequence[STRIPSOperator],
option_specs: Sequence[OptionSpec],
allow_noops: bool = False,
) -> Tuple[List[_GroundNSRT], Set[GroundAtom]]:
"""Ground all operators for task planning into dummy _GroundNSRTs,
filtering out ones that are unreachable or have empty effects.
Also return the set of reachable atoms, which is used by task
planning to quickly determine if a goal is unreachable.
See the task_plan docstring for usage instructions.
"""
nsrts = utils.ops_and_specs_to_dummy_nsrts(strips_ops, option_specs)
ground_nsrts = []
for nsrt in sorted(nsrts):
for ground_nsrt in utils.all_ground_nsrts(nsrt, objects):
if allow_noops or (ground_nsrt.add_effects
| ground_nsrt.delete_effects):
ground_nsrts.append(ground_nsrt)
reachable_atoms = utils.get_reachable_atoms(ground_nsrts, init_atoms)
reachable_nsrts = [
nsrt for nsrt in ground_nsrts
if nsrt.preconditions.issubset(reachable_atoms)
]
return reachable_nsrts, reachable_atoms
def task_plan(
init_atoms: Set[GroundAtom],
goal: Set[GroundAtom],
ground_nsrts: List[_GroundNSRT],
reachable_atoms: Set[GroundAtom],
heuristic: _TaskPlanningHeuristic,
seed: int,
timeout: float,
max_skeletons_optimized: int,
) -> Iterator[Tuple[List[_GroundNSRT], List[Set[GroundAtom]], Metrics]]:
"""Run only the task planning portion of SeSamE. A* search is run, and
skeletons that achieve the goal symbolically are yielded. Specifically,
yields a tuple of (skeleton, atoms sequence, metrics dictionary).
This method is NOT used by SeSamE, but is instead provided as a
convenient wrapper around _skeleton_generator below (which IS used
by SeSamE) that takes in only the minimal necessary arguments.
This method is tightly coupled with task_plan_grounding -- the reason they
are separate methods is that it is sometimes possible to ground only once
and then plan multiple times (e.g. from different initial states, or to
different goals). To run task planning once, call task_plan_grounding to
get ground_nsrts and reachable_atoms; then create a heuristic using
utils.create_task_planning_heuristic; then call this method. See the tests
in tests/test_planning for usage examples.
"""
if not goal.issubset(reachable_atoms):
raise ApproachFailure(f"Goal {goal} not dr-reachable")
dummy_task = Task(DefaultState, goal)
metrics: Metrics = defaultdict(float)
generator = _skeleton_generator(dummy_task, ground_nsrts, init_atoms,
heuristic, seed, timeout, metrics,
max_skeletons_optimized)
# Note that we use this pattern to avoid having to catch an ApproachFailure
# when _skeleton_generator runs out of skeletons to optimize.
for skeleton, atoms_sequence in islice(generator, max_skeletons_optimized):
yield skeleton, atoms_sequence, metrics.copy()
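# A minimal usage sketch of the grounding/planning split described in the
# docstrings above. The inputs (init_atoms, goal, objects, strips_ops,
# option_specs, predicates) and the heuristic name "hadd" are hypothetical
# placeholders, shown for illustration only:
#
#   ground_nsrts, reachable_atoms = task_plan_grounding(
#       init_atoms, objects, strips_ops, option_specs)
#   heuristic = utils.create_task_planning_heuristic(
#       "hadd", init_atoms, goal, ground_nsrts, predicates, objects)
#   for skeleton, atoms_seq, metrics in task_plan(
#           init_atoms, goal, ground_nsrts, reachable_atoms, heuristic,
#           seed=0, timeout=10, max_skeletons_optimized=1):
#       break  # take the first goal-achieving skeleton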
def _skeleton_generator(
task: Task, ground_nsrts: List[_GroundNSRT], init_atoms: Set[GroundAtom],
heuristic: _TaskPlanningHeuristic, seed: int, timeout: float,
metrics: Metrics, max_skeletons_optimized: int
) -> Iterator[Tuple[List[_GroundNSRT], List[Set[GroundAtom]]]]:
"""A* search over skeletons (sequences of ground NSRTs).
Iterates over pairs of (skeleton, atoms sequence).
"""
start_time = time.time()
queue: List[Tuple[float, float, _Node]] = []
root_node = _Node(atoms=init_atoms,
skeleton=[],
atoms_sequence=[init_atoms],
parent=None)
metrics["num_nodes_created"] += 1
rng_prio = np.random.default_rng(seed)
hq.heappush(queue,
(heuristic(root_node.atoms), rng_prio.uniform(), root_node))
# Start search.
while queue and (time.time() - start_time < timeout):
if int(metrics["num_skeletons_optimized"]) == max_skeletons_optimized:
raise _MaxSkeletonsFailure(
"Planning reached max_skeletons_optimized!")
_, _, node = hq.heappop(queue)
# Good debug point #1: print out the skeleton here to see what
# the high-level search is doing. You can accomplish this via:
# for act in node.skeleton:
# print(act.name, act.objects)
# print()
if task.goal.issubset(node.atoms):
# If this skeleton satisfies the goal, yield it.
metrics["num_skeletons_optimized"] += 1
yield node.skeleton, node.atoms_sequence
else:
# Generate successors.
metrics["num_nodes_expanded"] += 1
for nsrt in utils.get_applicable_operators(ground_nsrts,
node.atoms):
child_atoms = utils.apply_operator(nsrt, set(node.atoms))
child_node = _Node(atoms=child_atoms,
skeleton=node.skeleton + [nsrt],
atoms_sequence=node.atoms_sequence +
[child_atoms],
parent=node)
metrics["num_nodes_created"] += 1
# priority is g [plan length] plus h [heuristic]
priority = (len(child_node.skeleton) +
heuristic(child_node.atoms))
hq.heappush(queue, (priority, rng_prio.uniform(), child_node))
if not queue:
raise _MaxSkeletonsFailure("Planning ran out of skeletons!")
assert time.time() - start_time >= timeout
raise _SkeletonSearchTimeout
def _run_low_level_search(task: Task, option_model: _OptionModelBase,
skeleton: List[_GroundNSRT],
atoms_sequence: List[Set[GroundAtom]], seed: int,
timeout: float) -> Tuple[List[_Option], bool]:
"""Backtracking search over continuous values.
Returns a sequence of options and a boolean. If the boolean is True,
the option sequence is a complete low-level plan refining the given
skeleton. Otherwise, the option sequence is the longest partial
failed refinement, where the last step did not satisfy the skeleton,
but all previous steps did. Note that there are multiple low-level
plans in general; we return the first one found (arbitrarily).
"""
start_time = time.time()
rng_sampler = np.random.default_rng(seed)
assert CFG.sesame_propagate_failures in \
{"after_exhaust", "immediately", "never"}
cur_idx = 0
num_tries = [0 for _ in skeleton]
plan: List[_Option] = [DummyOption for _ in skeleton]
traj: List[State] = [task.init] + [DefaultState for _ in skeleton]
longest_failed_refinement: List[_Option] = []
# We'll use a maximum of one discovered failure per step, since
# resampling can render old discovered failures obsolete.
discovered_failures: List[Optional[_DiscoveredFailure]] = [
None for _ in skeleton
]
while cur_idx < len(skeleton):
if time.time() - start_time > timeout:
return longest_failed_refinement, False
assert num_tries[cur_idx] < CFG.sesame_max_samples_per_step
# Good debug point #2: if you have a skeleton that you think is
# reasonable, but sampling isn't working, print num_tries here to
# see at what step the backtracking search is getting stuck.
num_tries[cur_idx] += 1
state = traj[cur_idx]
nsrt = skeleton[cur_idx]
# Ground the NSRT's ParameterizedOption into an _Option.
# This invokes the NSRT's sampler.
option = nsrt.sample_option(state, task.goal, rng_sampler)
plan[cur_idx] = option
# Increment cur_idx. It will be decremented later on if we get stuck.
cur_idx += 1
if option.initiable(state):
try:
next_state = option_model.get_next_state(state, option)
except EnvironmentFailure as e:
can_continue_on = False
# Remember only the most recent failure.
discovered_failures[cur_idx - 1] = _DiscoveredFailure(e, nsrt)
else: # an EnvironmentFailure was not raised
discovered_failures[cur_idx - 1] = None
traj[cur_idx] = next_state
# Check atoms against expected atoms_sequence constraint.
assert len(traj) == len(atoms_sequence)
# The expected atoms are ones that we definitely expect to be
# true at this point in the plan. They are not *all* the atoms
# that could be true.
expected_atoms = {
atom
for atom in atoms_sequence[cur_idx]
if atom.predicate.name != _NOT_CAUSES_FAILURE
}
# This "if all" statement is equivalent to, but faster than,
# checking whether expected_atoms is a subset of
# utils.abstract(traj[cur_idx], predicates).
if all(atom.holds(traj[cur_idx]) for atom in expected_atoms):
can_continue_on = True
if cur_idx == len(skeleton):
return plan, True # success!
else:
can_continue_on = False
else:
# The option is not initiable.
can_continue_on = False
if not can_continue_on: # we got stuck, time to resample / backtrack!
# Update the longest_failed_refinement found so far.
if cur_idx > len(longest_failed_refinement):
longest_failed_refinement = list(plan[:cur_idx])
# If we're immediately propagating failures, and we got a failure,
# raise it now. We don't do this right after catching the
# EnvironmentFailure because we want to make sure to update
# the longest_failed_refinement first.
possible_failure = discovered_failures[cur_idx - 1]
if possible_failure is not None and \
CFG.sesame_propagate_failures == "immediately":
raise _DiscoveredFailureException(
"Discovered a failure", possible_failure,
{"longest_failed_refinement": longest_failed_refinement})
# Decrement cur_idx to re-do the step we just did. If num_tries
# is exhausted, backtrack.
cur_idx -= 1
assert cur_idx >= 0
while num_tries[cur_idx] == CFG.sesame_max_samples_per_step:
num_tries[cur_idx] = 0
plan[cur_idx] = DummyOption
traj[cur_idx + 1] = DefaultState
cur_idx -= 1
if cur_idx < 0:
# Backtracking exhausted. If we're only propagating failures
# after exhaustion, and if there are any failures,
# propagate up the EARLIEST one so that high-level search
# restarts. Otherwise, return a partial refinement so that
# high-level search continues.
for possible_failure in discovered_failures:
if possible_failure is not None and \
CFG.sesame_propagate_failures == "after_exhaust":
raise _DiscoveredFailureException(
"Discovered a failure", possible_failure, {
"longest_failed_refinement":
longest_failed_refinement
})
return longest_failed_refinement, False
# Should only get here if the skeleton was empty.
assert not skeleton
return [], True
def _update_nsrts_with_failure(
discovered_failure: _DiscoveredFailure, ground_nsrts: List[_GroundNSRT]
) -> Tuple[Set[Predicate], List[_GroundNSRT]]:
"""Update the given set of ground_nsrts based on the given
DiscoveredFailure.
Returns a new list of ground NSRTs to replace the input one, where
all ground NSRTs that need modification are replaced with new ones
(because _GroundNSRTs are frozen).
"""
new_predicates = set()
new_ground_nsrts = []
for obj in discovered_failure.env_failure.info["offending_objects"]:
pred = Predicate(_NOT_CAUSES_FAILURE, [obj.type],
_classifier=lambda s, o: False)
new_predicates.add(pred)
atom = GroundAtom(pred, [obj])
for ground_nsrt in ground_nsrts:
# Update the preconditions of the failing NSRT.
if ground_nsrt == discovered_failure.failing_nsrt:
new_ground_nsrt = ground_nsrt.copy_with(
preconditions=ground_nsrt.preconditions | {atom})
# Update the effects of all NSRTs that use this object.
# Note that this is an elif rather than an if, because it would
# never be possible to use the failing NSRT's effects to set
# the _NOT_CAUSES_FAILURE precondition.
elif obj in ground_nsrt.objects:
new_ground_nsrt = ground_nsrt.copy_with(
add_effects=ground_nsrt.add_effects | {atom})
else:
new_ground_nsrt = ground_nsrt
new_ground_nsrts.append(new_ground_nsrt)
return new_predicates, new_ground_nsrts
@dataclass(frozen=True, eq=False)
class _DiscoveredFailure:
"""Container class for holding information related to a low-level discovery
of a failure which must be propagated up to the main search function, in
order to restart A* search with new NSRTs."""
env_failure: EnvironmentFailure
failing_nsrt: _GroundNSRT
class _DiscoveredFailureException(ExceptionWithInfo):
"""Exception class for DiscoveredFailure propagation."""
def __init__(self,
message: str,
discovered_failure: _DiscoveredFailure,
info: Optional[Dict] = None):
super().__init__(message, info)
self.discovered_failure = discovered_failure
class _MaxSkeletonsFailure(ApproachFailure):
"""Raised when the maximum number of skeletons has been reached."""
class _SkeletonSearchTimeout(ApproachTimeout):
"""Raised when time out occurs in _run_low_level_search()."""
def __init__(self) -> None:
super().__init__("Planning timed out in skeleton search!")
| 46.443418 | 80 | 0.640776 | 2,332 | 20,110 | 5.32247 | 0.196398 | 0.024815 | 0.022237 | 0.009588 | 0.213664 | 0.147921 | 0.100709 | 0.090719 | 0.077345 | 0.066549 | 0 | 0.001399 | 0.28901 | 20,110 | 432 | 81 | 46.550926 | 0.866695 | 0.275435 | 0 | 0.178694 | 0 | 0 | 0.056329 | 0.019103 | 0 | 0 | 0 | 0 | 0.020619 | 1 | 0.027491 | false | 0 | 0.04811 | 0 | 0.137457 | 0.003436 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce4c6e3932728a90eb005c51b522bf57fa12312a | 1,362 | py | Python | src/attraction_factors.py | ChendaHimalaya/qdt | d44353c18cdaf39740062b7fa68bd37507d5686b | [
"MIT"
] | null | null | null | src/attraction_factors.py | ChendaHimalaya/qdt | d44353c18cdaf39740062b7fa68bd37507d5686b | [
"MIT"
] | null | null | null | src/attraction_factors.py | ChendaHimalaya/qdt | d44353c18cdaf39740062b7fa68bd37507d5686b | [
"MIT"
] | null | null | null | import numpy as np
from CPC18PF.get_PF_Features import get_PF_Features
import time
import pandas as pd
data=pd.read_csv("data/cpc18/PF_features.csv")
def dummy_attraction(distA, distB,Amb, Corr,util_score):
return 0
def ambiguity_aversion(c1, Amb):
    return -1 * Amb * c1
def QDT_attraction(c1, c2, distA, distB, Amb, Corr, util_score):
    # Attraction factors for the two options; only option B carries ambiguity here.
    attractionA = ambiguity_aversion(c1, 0)
    attractionB = ambiguity_aversion(c1, Amb)
    # QDT caps the attraction factor at min(f, 1 - f) of the utility score;
    # tanh squashes the scaled attraction difference into (-1, 1).
    temp = np.min([util_score, 1 - util_score])
    temp2 = np.tanh(c2 * (attractionB - attractionA))
    return temp * temp2
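# Worked example (hypothetical parameter values, for illustration only):
# with util_score = 0.7, Amb = 1, c1 = 0.5 and c2 = 1, attractionA = 0 and
# attractionB = -0.5, so the factor is min(0.7, 0.3) * tanh(-0.5) ~= -0.139,
# i.e. ambiguity on option B shifts choice probability toward option A.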
def QDT_attraction_PF_features(gameID):
features=data[data["GameID"]==gameID]
return features["pHa"]*features["Ha"]
if __name__=="__main__":
    Data = pd.read_csv("data/syn_data/5000.csv")
    prob = 10
Ha = Data['Ha'][prob]
pHa = Data['pHa'][prob]
La = Data['La'][prob]
LotShapeA = Data['LotShapeA'][prob]
LotNumA = Data['LotNumA'][prob]
Hb = Data['Hb'][prob]
pHb = Data['pHb'][prob]
Lb = Data['Lb'][prob]
LotShapeB = Data['LotShapeB'][prob]
LotNumB = Data['LotNumB'][prob]
Amb = Data['Amb'][prob]
Corr = Data['Corr'][prob]
    start = time.time()
    features = get_PF_Features(Ha, pHa, La, LotShapeA, LotNumA, Hb, pHb, Lb, LotShapeB, LotNumB, Amb, Corr)
    print("Calculation cost:{}".format(time.time() - start))
for i in range(5):
print(features.iloc[i])
| 21.967742 | 94 | 0.673275 | 199 | 1,362 | 4.457286 | 0.341709 | 0.05637 | 0.043968 | 0.042841 | 0.096956 | 0.058625 | 0 | 0 | 0 | 0 | 0 | 0.021145 | 0.166667 | 1,362 | 61 | 95 | 22.327869 | 0.760352 | 0 | 0 | 0 | 0 | 0 | 0.102507 | 0.035398 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.105263 | 0.052632 | 0.315789 | 0.052632 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce4c79240d3df41fe2a912ae870af3e015e28a47 | 1,833 | py | Python | algorithm/about_bloom_filter_another.py | dictxwang/python-fragments | 029820bfd290c60aeb172e876ddf3937a8704e91 | [
"Apache-2.0"
] | null | null | null | algorithm/about_bloom_filter_another.py | dictxwang/python-fragments | 029820bfd290c60aeb172e876ddf3937a8704e91 | [
"Apache-2.0"
] | null | null | null | algorithm/about_bloom_filter_another.py | dictxwang/python-fragments | 029820bfd290c60aeb172e876ddf3937a8704e91 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf8 -*-
__author__ = 'wangqiang'
"""
Bloom filter implemented with mmh3 + bitarray (more accurate)
pip install mmh3
pip install bitarray
"""
import mmh3
from bitarray import bitarray
import math
class BloomFilterAnother:
def __init__(self, n: int, p: float):
"""
构造函数
:param n: 数据规模
:param p: 允许误判率
"""
self._bit_size = int(-n * math.log(p) / math.pow(math.log(2), 2))
self._hash_count = int((self._bit_size / n) * math.log(2))
bit_array = bitarray(self._bit_size)
bit_array.setall(0)
self._bit_array = bit_array
def put(self, text):
positions = self._calculate_bit_positions(text)
for p in positions:
self._bit_array[p] = 1
def contains(self, text) -> bool:
positions = self._calculate_bit_positions(text)
for p in positions:
if self._bit_array[p] == 0:
return False
return True
def _calculate_bit_positions(self, text):
"""
计算bit位置
:param text:
:return:
"""
positions = []
for i in range(self._hash_count):
            # use i directly as the hash seed
positions.append(mmh3.hash(text, i) % self._bit_size)
return positions
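# Sanity check of the sizing formulas with the parameters used below
# (n = 5,000,000 expected items, p = 0.01 target false-positive rate):
# _bit_size = -n * ln(p) / ln(2)^2 ~= 4.79e7 bits (about 5.7 MiB), and
# _hash_count = (_bit_size / n) * ln(2) ~= 6.64, truncated by int() to 6 hashes.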
if __name__ == '__main__':
bloom = BloomFilterAnother(5000000, 0.01)
total_size = 100000
not_contains_count = 0
for i in range(0, total_size, 2):
bloom.put(str(i))
for i in range(total_size):
if not bloom.contains(str(i)):
not_contains_count += 1
    # true number of absent items
    real_not_contains = total_size / 2
    # number of misjudgments (false positives)
    fail_count = abs(not_contains_count - real_not_contains)
    # false-positive rate
    fail_rate = float(fail_count / total_size)
    # expected output: 50000 50000.0 0.0 0.0
print(not_contains_count, real_not_contains, fail_count, fail_rate)
| 25.458333 | 73 | 0.599564 | 240 | 1,833 | 4.2875 | 0.308333 | 0.047619 | 0.04276 | 0.03207 | 0.163265 | 0.163265 | 0.103013 | 0.103013 | 0.103013 | 0.103013 | 0 | 0.036378 | 0.295145 | 1,833 | 71 | 74 | 25.816901 | 0.760062 | 0.074741 | 0 | 0.102564 | 0 | 0 | 0.011097 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.102564 | false | 0 | 0.076923 | 0 | 0.282051 | 0.025641 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce4d1bed8a5aadb2d92384e9f7ca9cdcfa25e6bc | 4,382 | py | Python | HyperGuiModules/recreated_colour_data.py | MIC-Surgery-Heidelberg/HyperGUI_1.0 | 0ee8e0da85049076bb22a542d15d6c3adf6ea106 | [
"MIT"
] | null | null | null | HyperGuiModules/recreated_colour_data.py | MIC-Surgery-Heidelberg/HyperGUI_1.0 | 0ee8e0da85049076bb22a542d15d6c3adf6ea106 | [
"MIT"
] | null | null | null | HyperGuiModules/recreated_colour_data.py | MIC-Surgery-Heidelberg/HyperGUI_1.0 | 0ee8e0da85049076bb22a542d15d6c3adf6ea106 | [
"MIT"
] | null | null | null | from HyperGuiModules.utility import *
class RecreatedColourData:
def __init__(self, recreated_color_data_frame, listener):
self.root = recreated_color_data_frame
# Listener
self.listener = listener
self.data = None
self.stats_data = None
self.calc_button = None
self.mean_text = None
self.mean_value = ''
self.sd_text = None
self.sd_value = ''
self.median_text = None
self.median_value = ''
self.iqr_text = None
self.iqr_value = ''
self.min_text = None
self.min_value = ''
self.max_text = None
self.max_value = ''
self.info_label = None
self._init_widget()
# ----------------------------------------------- INITIALIZATION -------------------------------------------------
def update_calc(self):
data = self.listener.get_current_rec_data().flatten()
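        # Drop masked entries ('--') before computing statistics.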
self.stats_data = [i for i in data if i != '--']
self._calc_data()
self._build_data()
def _init_widget(self):
self._build_data()
self._build_calc_button()
self._build_info_label()
def empty_stats(self):
self.mean_value = ''
self.sd_value = ''
self.median_value = ''
self.iqr_value = ''
self.min_value = ''
self.max_value = ''
self._build_data()
# ------------------------------------------------- CALCULATOR ---------------------------------------------------
def _calc_data(self):
self.mean_value = np.round(np.ma.mean(self.stats_data), 4)
self.sd_value = np.round(np.ma.std(self.stats_data), 4)
self.median_value = np.round(np.ma.median(self.stats_data), 4)
        self.iqr_value = (np.round(np.quantile(self.stats_data, 0.25), 4),
                          np.round(np.quantile(self.stats_data, 0.75), 4))
self.min_value = np.round(np.ma.min(self.stats_data), 4)
self.max_value = np.round(np.ma.max(self.stats_data), 4)
# --------------------------------------------------- BUILDERS ---------------------------------------------------
def _build_info_label(self):
self.info_label = make_label_button(self.root, text='Recreated Data', command=self.__info, width=12)
def _build_data(self):
# mean
self.mean_text = make_text(self.root, content="Mean = " + str(self.mean_value),
bg=tkcolour_from_rgb(BACKGROUND), column=0, row=1, width=25, columnspan=2,
padx=(3, 15), state=NORMAL)
# standard deviation
self.sd_text = make_text(self.root, content="SD = " + str(self.sd_value), bg=tkcolour_from_rgb(BACKGROUND),
column=0, row=2, width=25, columnspan=2, padx=(3, 15), state=NORMAL)
# median
self.median_text = make_text(self.root, content="Median = " + str(self.median_value),
bg=tkcolour_from_rgb(BACKGROUND), column=0, row=3, width=25, columnspan=2,
padx=(3, 15), state=NORMAL)
# IQR
self.iqr_text = make_text(self.root, content="IQR = " + str(self.iqr_value), bg=tkcolour_from_rgb(BACKGROUND),
column=0, row=4, width=25, columnspan=2, padx=(3, 15), state=NORMAL)
# min
self.min_text = make_text(self.root, content="Min = " + str(self.min_value), bg=tkcolour_from_rgb(BACKGROUND),
column=0, row=5, width=25, columnspan=2, padx=(3, 15), state=NORMAL)
# max
self.max_text = make_text(self.root, content="Max = " + str(self.max_value), bg=tkcolour_from_rgb(BACKGROUND),
column=0, row=6, width=25, columnspan=2, padx=(3, 15), pady=(0, 15), state=NORMAL)
def _build_calc_button(self):
self.calc_button = make_button(self.root, text="CALC", row=0, column=1, columnspan=1, command=self.update_calc,
inner_padx=3, inner_pady=0, outer_padx=(10, 15), outer_pady=15, width=5)
# -------------------------------------------------- CALLBACKS ---------------------------------------------------
def __info(self):
info = self.listener.modules[INFO].recreated_data_info
title = "Recreated Data Information"
make_info(title=title, info=info)
| 42.960784 | 120 | 0.532862 | 523 | 4,382 | 4.225621 | 0.162524 | 0.048869 | 0.052941 | 0.038009 | 0.48371 | 0.338009 | 0.233032 | 0.195475 | 0.195475 | 0 | 0 | 0.024156 | 0.263122 | 4,382 | 101 | 121 | 43.386139 | 0.660266 | 0.114788 | 0 | 0.242857 | 0 | 0 | 0.021981 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.128571 | false | 0 | 0.014286 | 0 | 0.157143 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce4d2d11e5d76a0c282525a6df2f7def76e75d1a | 1,116 | py | Python | app.py | motttey/streamlit_app_example | 03a7ff9af534532ab9b43cee1e5ae9f1a5d4eb4e | [
"MIT"
] | null | null | null | app.py | motttey/streamlit_app_example | 03a7ff9af534532ab9b43cee1e5ae9f1a5d4eb4e | [
"MIT"
] | null | null | null | app.py | motttey/streamlit_app_example | 03a7ff9af534532ab9b43cee1e5ae9f1a5d4eb4e | [
"MIT"
] | null | null | null | import streamlit as st
import pandas as pd
import matplotlib.pyplot as plt
import seaborn as sns
import numpy as np
from sklearn import datasets
st.title('Dashboard')
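# Cache the iris DataFrame so it is loaded once, not on every script rerun.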
@st.cache
def load_data():
iris = datasets.load_iris()
df = pd.DataFrame(iris.data, columns=iris.feature_names)
df['target'] = iris.target_names[iris.target]
return df
df = load_data()
targets = list(df.target.unique())
selected_targets = st.multiselect('select targets', targets, default=targets)
df = df[df.target.isin(selected_targets)]
st.dataframe(df)
fig, ax = plt.subplots()
sns.boxplot(x='sepal width (cm)', y='target', data=df, whis=[0,100], width=.5, palette="vlag", ax=ax)
st.pyplot(fig)
# st.table(df)
# vega-lite
df = pd.DataFrame(
np.random.randn(200, 3),
columns=['a', 'b', 'c']
)
st.vega_lite_chart(df, {
'mark': {'type': 'circle', 'tooltip': True},
'encoding': {
'x': {'field': 'a', 'type': 'quantitative'},
'y': {'field': 'b', 'type': 'quantitative'},
'size': {'field': 'c', 'type': 'quantitative'},
'color': {'field': 'c', 'type': 'quantitative'},
},
})
| 24.8 | 101 | 0.633513 | 156 | 1,116 | 4.474359 | 0.474359 | 0.091691 | 0.037249 | 0.063037 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009719 | 0.170251 | 1,116 | 44 | 102 | 25.363636 | 0.74406 | 0.019713 | 0 | 0 | 0 | 0 | 0.170486 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.029412 | false | 0 | 0.176471 | 0 | 0.235294 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce509e93a740840d0f57544222b270afb2cf8be7 | 1,023 | py | Python | catkin_ws/src/f4-devel/kinematics/tests/test_inverse_kinematics.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 12 | 2016-04-14T12:21:46.000Z | 2021-06-18T07:51:40.000Z | catkin_ws/src/f4-devel/kinematics/tests/test_inverse_kinematics.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 14 | 2017-03-03T23:33:05.000Z | 2018-04-03T18:07:53.000Z | catkin_ws/src/f4-devel/kinematics/tests/test_inverse_kinematics.py | DiegoOrtegoP/Software | 4a07dd2dab29db910ca2e26848fa6b53b7ab00cd | [
"CC-BY-2.0"
] | 113 | 2016-05-03T06:11:42.000Z | 2019-06-01T14:37:38.000Z | #!/usr/bin/env python
import unittest
import numpy as np
from kinematics.Inverse_kinematics import *
class TestInverseKinematics(unittest.TestCase):
def test_with_linear_fi(self):
ik = Inverse_kinematics('Duty_fi_linear_no_constant', 'Duty_fi_linear_no_constant', np.matrix([-1, 1]), np.matrix([1,1]))
dL, dR = ik.evaluate(np.matrix([0]), np.matrix([0]))
self.assertAlmostEqual(dL, 0)
self.assertAlmostEqual(dR, 0)
dL, dR = ik.evaluate(np.matrix([0]), np.matrix([2]))
self.assertAlmostEqual(dL, 1)
self.assertAlmostEqual(dR, 1)
dL, dR = ik.evaluate(np.matrix([-2]), np.matrix([0]))
self.assertAlmostEqual(dL, 1)
self.assertAlmostEqual(dR, -1)
dL, dR = ik.evaluate(np.matrix([2]), np.matrix([0]))
self.assertAlmostEqual(dL, -1)
self.assertAlmostEqual(dR, 1)
if __name__ == '__main__':
import rosunit
rosunit.unitrun('kinematics', 'test_inverse_kinematics', TestInverseKinematics)
unittest.main()
| 31.96875 | 129 | 0.657869 | 132 | 1,023 | 4.924242 | 0.287879 | 0.123077 | 0.069231 | 0.086154 | 0.521538 | 0.453846 | 0.416923 | 0.415385 | 0.415385 | 0.32 | 0 | 0.024213 | 0.192571 | 1,023 | 31 | 130 | 33 | 0.762712 | 0.01955 | 0 | 0.181818 | 0 | 0 | 0.092814 | 0.07485 | 0 | 0 | 0 | 0 | 0.363636 | 1 | 0.045455 | false | 0 | 0.181818 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce5279c10b2894947da4676753051eb94a3729ea | 3,808 | py | Python | plover_combo/combo_colors.py | Kaoffie/plover_combo | 5dec46e2529492c4dae1e71d2329f1ed13f2b609 | [
"MIT"
] | 2 | 2021-08-29T18:41:54.000Z | 2022-02-06T21:17:19.000Z | plover_combo/combo_colors.py | Kaoffie/plover_combo | 5dec46e2529492c4dae1e71d2329f1ed13f2b609 | [
"MIT"
] | null | null | null | plover_combo/combo_colors.py | Kaoffie/plover_combo | 5dec46e2529492c4dae1e71d2329f1ed13f2b609 | [
"MIT"
] | null | null | null | from typing import Optional, List, Tuple, Dict
from PyQt5.QtWidgets import QLabel
from PyQt5.QtGui import QColor
BAR_ALPHA = 220
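# Keys are combo-count checkpoints; values are (primary, secondary) color pairs.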
COLORS = {
0: (QColor(62, 167, 237), QColor(106, 187, 241)),
10: (QColor(98, 221, 223), QColor(136, 229, 231)),
25: (QColor(62, 221, 160), QColor(99, 227, 178)),
50: (QColor(229, 189, 69), QColor(235, 204, 112)),
100: (QColor(217, 114, 110), QColor(225, 145, 142)),
250: (QColor(227, 120, 166), QColor(234, 154, 188)),
500: (QColor(217, 61, 194), QColor(225, 102, 206)),
1000: (QColor(147, 79, 219), QColor(172, 120, 227)),
2500: (QColor(85, 81, 211), QColor(128, 124, 222))
}
COLOR_STR = """0: #3EA7ED, #6ABBF1
10: #62DDDF, #88E5E7
25: #3EDDA0, #63E3B2
50: #E5BD45, #EBCC70
100: #D9726E, #E1918E
250: #E378A6, #EA9ABC
500: #D93DC2, #E166CE
1000: #934FDB, #AC78E3
2500: #5551D3, #807CDE"""
COLOR_FORMAT = """Format example:
0: #3EA7ED, #6ABBF1
10: #62DDDF, #88E5E7
Primary color affects the title shadow and counter,
while the secondary color affects the highscore and cooldown bar.
Colors for 0 must be added."""
COLOR_NUMS = list(sorted(COLORS.keys()))
# def validate_string_hex(string: str) -> bool:
# string = string.strip()
# if len(string) != 7:
# return False
# if not string.startswith("#"):
# return False
# color_hex = string[1:]
# color_int = int(color_hex, 16)
# return color_int <= 0xFFFFFF
def string_hex_to_color(string: str, default: Optional[QColor], alpha: Optional[int] = None) -> QColor:
    string = string.strip()
    if not string.startswith("#") or len(string) != 7:
        return default
    try:
        color_hex = string[1:]
        color_int = int(color_hex, 16)
        color = QColor(color_int)
        # Apply the optional alpha channel if one was requested
        # (the parameter was previously accepted but never used).
        if alpha is not None:
            color.setAlpha(alpha)
        return color
    except ValueError:
        return default
def to_int(string: str) -> Optional[int]:
try:
return int(string)
except ValueError:
return None
def convert_str_color_config(string: str) -> Tuple[Dict[int, Tuple[QColor, QColor]], List[int]]:
"""
Format:
0: #AAAAAA, #BBBBBB
1: #CCCCCC, #DDDDDD
"""
color_dict = dict()
color_list = []
for line in string.split("\n"):
if ":" not in line:
continue
num_str, colors_str = line.split(":", 1)
num_int = to_int(num_str)
if num_int is None or num_int < 0:
continue
if "," not in colors_str:
continue
pri_color_str, sec_color_str = colors_str.split(",", 1)
pri_color = string_hex_to_color(pri_color_str, None)
sec_color = string_hex_to_color(sec_color_str, None)
if pri_color is None or sec_color is None:
continue
color_list.append(num_int)
color_dict[num_int] = (pri_color, sec_color)
if 0 not in color_dict:
color_dict[0] = COLORS[0]
color_list.append(0)
return color_dict, sorted(color_list)
def round_to_checkpoint(num: int, color_nums: Optional[List[int]] = None) -> int:
    """Round num down to the largest checkpoint not exceeding it,
    e.g. 120 -> 100 with the default checkpoints."""
    if num <= 0:
        return 0
if color_nums is None:
color_nums = COLOR_NUMS
prev = 0
for color_num in color_nums:
if num < color_num:
return prev
prev = color_num
return prev
def set_label_color(label: QLabel, color: QColor) -> None:
values = "{r}, {g}, {b}, {a}".format(
r = color.red(),
g = color.green(),
b = color.blue(),
a = color.alpha()
)
label.setStyleSheet(f"color: rgba({values});")
if __name__ == "__main__":
conf, l = convert_str_color_config(COLOR_STR)
for num, (pri, sec) in conf.items():
print(f"{num}: {hex(pri.rgb())}, {hex(sec.rgb())}")
print(l) | 25.557047 | 103 | 0.587447 | 519 | 3,808 | 4.146435 | 0.33526 | 0.022305 | 0.015335 | 0.022305 | 0.083643 | 0.064126 | 0.039033 | 0.039033 | 0.039033 | 0.039033 | 0 | 0.107195 | 0.277311 | 3,808 | 149 | 104 | 25.557047 | 0.674782 | 0.083508 | 0 | 0.150538 | 0 | 0 | 0.143146 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.053763 | false | 0 | 0.032258 | 0 | 0.182796 | 0.021505 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce529a4f199d63bbad00ec3d0cb1179213edcd32 | 1,057 | py | Python | ProxyIP/utils.py | zMingGit/ProxyIP | 66cac0dfcb1346ee8bddd687900c01c5755cbd78 | [
"MIT"
] | 1 | 2018-09-15T09:40:08.000Z | 2018-09-15T09:40:08.000Z | ProxyIP/utils.py | zMingGit/ProxyIP | 66cac0dfcb1346ee8bddd687900c01c5755cbd78 | [
"MIT"
] | null | null | null | ProxyIP/utils.py | zMingGit/ProxyIP | 66cac0dfcb1346ee8bddd687900c01c5755cbd78 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# coding=utf-8
import asyncio
import aiohttp
from .config import HEADERS, REQUEST_TIMEOUT, REQUEST_DELAY
from .validator import validator
from .logger import logger
LOOP = asyncio.get_event_loop()
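# A single module-level event loop is reused for all blocking fetches below.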
async def _get_page(url, sleep):
"""
Gets and returns the page content
"""
async with aiohttp.ClientSession() as session:
try:
await asyncio.sleep(sleep)
async with session.get(
url, headers=HEADERS, timeout=REQUEST_TIMEOUT
) as resp:
return await resp.text()
except Exception:
return ""
def requests(url, sleep=REQUEST_DELAY):
    """
    Request method, used to fetch the page content
    :param url: the URL to fetch
    :param sleep: delay time before the request
    """
html = LOOP.run_until_complete(asyncio.gather(_get_page(url, sleep)))
if html:
return "".join(html)
def test_proxy(proxy):
    """
    Validate a single proxy and return the check result
    """
cocou = validator.test_one_proxy(proxy)
res = LOOP.run_until_complete(asyncio.gather(cocou))
return res[0]
| 21.571429 | 73 | 0.6386 | 131 | 1,057 | 5.022901 | 0.473282 | 0.036474 | 0.030395 | 0.045593 | 0.100304 | 0.100304 | 0 | 0 | 0 | 0 | 0 | 0.002574 | 0.264901 | 1,057 | 48 | 74 | 22.020833 | 0.844273 | 0.116367 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.208333 | 0 | 0.458333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce53e52029792080c786ba25ea761c1f90b9ef47 | 15,693 | py | Python | src/main/python/pybuilder/python_utils.py | klr8/pybuilder | 2812021c18ce850009ce5ec7f7c18195eff73b10 | [
"Apache-2.0"
] | null | null | null | src/main/python/pybuilder/python_utils.py | klr8/pybuilder | 2812021c18ce850009ce5ec7f7c18195eff73b10 | [
"Apache-2.0"
] | null | null | null | src/main/python/pybuilder/python_utils.py | klr8/pybuilder | 2812021c18ce850009ce5ec7f7c18195eff73b10 | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
#
# This file is part of PyBuilder
#
# Copyright 2011-2020 PyBuilder Team
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import fnmatch
import os
import platform
import re
import sys
import traceback
from collections import OrderedDict
try:
basestring = basestring
except NameError:
basestring = str
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
StringIO = StringIO
def is_windows(platform=sys.platform, win_platforms={"win32", "cygwin", "msys"}):
return platform in win_platforms
PY2 = sys.version_info[0] < 3
IS_PYPY = '__pypy__' in sys.builtin_module_names
IS_WIN = is_windows()
def _py2_makedirs(name, mode=0o777, exist_ok=False):
return os.makedirs(name, mode)
def _py2_which(cmd, mode=os.F_OK | os.X_OK, path=None):
"""Given a command, mode, and a PATH string, return the path which
conforms to the given mode on the PATH, or None if there is no such
file.
`mode` defaults to os.F_OK | os.X_OK. `path` defaults to the result
of os.environ.get("PATH"), or can be overridden with a custom search
path.
"""
# Check that a given file can be accessed with the correct mode.
# Additionally check that `file` is not a directory, as on Windows
# directories pass the os.access check.
def _access_check(fn, mode):
return (os.path.exists(fn) and os.access(fn, mode)
and not os.path.isdir(fn))
# If we're given a path with a directory part, look it up directly rather
# than referring to PATH directories. This includes checking relative to the
# current directory, e.g. ./script
if os.path.dirname(cmd):
if _access_check(cmd, mode):
return cmd
return None
if path is None:
path = os.environ.get("PATH", os.defpath)
if not path:
return None
path = path.split(os.pathsep)
if IS_WIN:
# The current directory takes precedence on Windows.
if os.curdir not in path:
path.insert(0, os.curdir)
# PATHEXT is necessary to check on Windows.
pathext = os.environ.get("PATHEXT", "").split(os.pathsep)
# See if the given file matches any of the expected path extensions.
# This will allow us to short circuit when given "python.exe".
# If it does match, only test that one, otherwise we have to try
# others.
if any(cmd.lower().endswith(ext.lower()) for ext in pathext):
files = [cmd]
else:
files = [cmd + ext for ext in pathext]
else:
# On other platforms you don't have things like PATHEXT to tell you
# what file suffixes are executable, so just pass on cmd as-is.
files = [cmd]
seen = set()
for dir in path:
normdir = os.path.normcase(dir)
if normdir not in seen:
seen.add(normdir)
for thefile in files:
name = os.path.join(dir, thefile)
if _access_check(name, mode):
return name
return None
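# Illustrative usage (added; hedged): once assigned below, `which` resolves
# a command name against PATH on both Python 2 and 3, e.g.
#
#     which("python")                       # -> "/usr/bin/python" or None
#     which("nano", path="/usr/bin:/bin")   # search an explicit PATH string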
if PY2: # if major is less than 3
from .excp_util_2 import raise_exception, is_string
def save_tb(ex):
tb = sys.exc_info()[2]
setattr(ex, "__traceback__", tb)
is_string = is_string
makedirs = _py2_makedirs
which = _py2_which
else:
from .excp_util_3 import raise_exception, is_string
from shutil import which
def save_tb(ex):
pass
is_string = is_string
makedirs = os.makedirs
which = which
odict = OrderedDict
def _mp_get_context_win32_py2(context_name):
if context_name != "spawn":
raise RuntimeError("only spawn is supported")
import multiprocessing
return multiprocessing
_mp_get_context = None # This will be patched at runtime
mp_ForkingPickler = None # This will be patched at runtime
mp_log_to_stderr = None # This will be patched at runtime
_mp_billiard_pyb_env = None # This will be patched at runtime
_old_billiard_spawn_passfds = None # This will be patched at runtime
_installed_tblib = False
# Billiard doesn't work on Win32
if PY2:
if IS_WIN:
# Python 2.7 on Windows only supports the spawn start method
from multiprocessing import log_to_stderr as mp_log_to_stderr
from multiprocessing.reduction import ForkingPickler as mp_ForkingPickler
_mp_get_context = _mp_get_context_win32_py2
# Python 2 on *nix uses Billiard, which is patched in later (see patch_mp())
else:
# On all Python 3 versions, use the stdlib multiprocessing
from multiprocessing import log_to_stderr as mp_log_to_stderr, get_context as _mp_get_context
from multiprocessing.reduction import ForkingPickler as mp_ForkingPickler
def patch_mp_pyb_env(pyb_env):
global _mp_billiard_pyb_env
if not _mp_billiard_pyb_env:
_mp_billiard_pyb_env = pyb_env
def install_tblib():
global _installed_tblib
if not _installed_tblib:
from pybuilder._vendor.tblib import pickling_support
pickling_support.install()
_installed_tblib = True
def _patched_billiard_spawnv_passfds(path, args, passfds):
global _mp_billiard_pyb_env, _old_billiard_spawn_passfds
try:
script_index = args.index("-c") + 1
script = args[script_index]
additional_path = []
add_env_to_path(_mp_billiard_pyb_env, additional_path)
args[script_index] = ";".join(("import sys", "sys.path.extend(%r)" % additional_path, script))
except ValueError:
# We were unable to find the "-c", which means we likely don't care
pass
return _old_billiard_spawn_passfds(path, args, passfds)
def patch_mp():
install_tblib()
global _mp_get_context
if not _mp_get_context:
if PY2 and not IS_WIN:
from billiard import get_context, log_to_stderr, compat, popen_spawn_posix as popen_spawn
from billiard.reduction import ForkingPickler
global mp_ForkingPickler, mp_log_to_stderr, _old_billiard_spawn_passfds
_mp_get_context = get_context
mp_ForkingPickler = ForkingPickler
mp_log_to_stderr = log_to_stderr
_old_billiard_spawn_passfds = compat.spawnv_passfds
compat.spawnv_passfds = _patched_billiard_spawnv_passfds
popen_spawn.spawnv_passfds = _patched_billiard_spawnv_passfds
def mp_get_context(context):
global _mp_get_context
return _mp_get_context(context)
mp_ForkingPickler = mp_ForkingPickler
mp_log_to_stderr = mp_log_to_stderr
_mp_get_context = _mp_get_context
def _instrumented_target(q, target, *args, **kwargs):
patch_mp()
ex = tb = None
try:
send_value = (target(*args, **kwargs), None, None)
except Exception:
_, ex, tb = sys.exc_info()
send_value = (None, ex, tb)
try:
q.put(send_value)
except Exception:
_, send_ex, send_tb = sys.exc_info()
e_out = Exception(str(send_ex), send_tb, None if ex is None else str(ex), tb)
q.put(e_out)
def spawn_process(target=None, args=(), kwargs={}, group=None, name=None):
"""
Forks a child, making sure that all exceptions from the child are safely sent to the parent
If a target raises an exception, the exception is re-raised in the parent process
@return tuple consisting of process exit code and target's return value
"""
ctx = mp_get_context("spawn")
q = ctx.SimpleQueue()
p = ctx.Process(group=group, target=_instrumented_target, name=name, args=[q, target] + list(args), kwargs=kwargs)
p.start()
result = q.get()
p.join()
if isinstance(result, tuple):
if result[1]:
raise_exception(result[1], result[2])
return p.exitcode, result[0]
else:
msg = "Fatal error occurred in the forked process %s: %s" % (p, result.args[0])
if result.args[2]:
chained_message = "This error masked the send error '%s':\n%s" % (
result.args[2], "".join(traceback.format_tb(result.args[3])))
msg += "\n" + chained_message
ex = Exception(msg)
raise_exception(ex, result.args[1])
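# Illustrative usage sketch (added; not part of the original module).
# `_work` below is a hypothetical target; an exception raised in the child
# would be re-raised here in the parent via raise_exception().
#
#     def _work(x, y):
#         return x + y
#
#     exit_code, result = spawn_process(target=_work, args=(1, 2))
#     assert (exit_code, result) == (0, 3)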
def prepend_env_to_path(python_env, sys_path):
"""type: (PythonEnv, List(str)) -> None
Prepend venv directories to sys.path-like collection
"""
for path in reversed(python_env.site_paths):
if path not in sys_path:
sys_path.insert(0, path)
def add_env_to_path(python_env, sys_path):
"""type: (PythonEnv, List(str)) -> None
Adds venv directories to sys.path-like collection
"""
for path in python_env.site_paths:
if path not in sys_path:
sys_path.append(path)
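# Illustrative usage (added; hedged): `env` is a hypothetical PythonEnv
# whose site_paths is ["/venv/lib/site-packages"].
#
#     search_path = ["/usr/lib/python"]
#     prepend_env_to_path(env, search_path)
#     # -> ["/venv/lib/site-packages", "/usr/lib/python"]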
if PY2:
def _py2_glob(pathname, recursive=False):
"""Return a list of paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
return list(_py2_iglob(pathname, recursive=recursive))
def _py2_iglob(pathname, recursive=False):
"""Return an iterator which yields the paths matching a pathname pattern.
The pattern may contain simple shell-style wildcards a la
fnmatch. However, unlike fnmatch, filenames starting with a
dot are special cases that are not matched by '*' and '?'
patterns.
If recursive is true, the pattern '**' will match any files and
zero or more directories and subdirectories.
"""
it = _iglob(pathname, recursive, False)
if recursive and _isrecursive(pathname):
s = next(it) # skip empty string
assert not s
return it
def _iglob(pathname, recursive, dironly):
dirname, basename = os.path.split(pathname)
if not has_magic(pathname):
assert not dironly
if basename:
if os.path.lexists(pathname):
yield pathname
else:
# Patterns ending with a slash should match only directories
if os.path.isdir(dirname):
yield pathname
return
if not dirname:
if recursive and _isrecursive(basename):
for v in _glob2(dirname, basename, dironly):
yield v
else:
for v in _glob1(dirname, basename, dironly):
yield v
return
# `os.path.split()` returns the argument itself as a dirname if it is a
# drive or UNC path. Prevent an infinite recursion if a drive or UNC path
# contains magic characters (i.e. r'\\?\C:').
if dirname != pathname and has_magic(dirname):
dirs = _iglob(dirname, recursive, True)
else:
dirs = [dirname]
if has_magic(basename):
if recursive and _isrecursive(basename):
glob_in_dir = _glob2
else:
glob_in_dir = _glob1
else:
glob_in_dir = _glob0
for dirname in dirs:
for name in glob_in_dir(dirname, basename, dironly):
yield os.path.join(dirname, name)
def _glob1(dirname, pattern, dironly):
names = list(_iterdir(dirname, dironly))
if not _ishidden(pattern):
names = (x for x in names if not _ishidden(x))
return fnmatch.filter(names, pattern)
def _glob0(dirname, basename, dironly):
if not basename:
# `os.path.split()` returns an empty basename for paths ending with a
# directory separator. 'q*x/' should match only directories.
if os.path.isdir(dirname):
return [basename]
else:
if os.path.lexists(os.path.join(dirname, basename)):
return [basename]
return []
def glob0(dirname, pattern):
return _glob0(dirname, pattern, False)
def glob1(dirname, pattern):
return _glob1(dirname, pattern, False)
def _glob2(dirname, pattern, dironly):
assert _isrecursive(pattern)
yield pattern[:0]
for v in _rlistdir(dirname, dironly):
yield v
def _iterdir(dirname, dironly):
if not dirname:
if isinstance(dirname, bytes):
dirname = os.curdir.decode('ASCII')
else:
dirname = os.curdir
try:
for entry in os.listdir(dirname):
try:
if not dironly or os.path.isdir(os.path.join(dirname, entry)):
yield entry
except OSError:
pass
except OSError:
return
def _rlistdir(dirname, dironly):
names = list(_iterdir(dirname, dironly))
for x in names:
if not _ishidden(x):
yield x
path = os.path.join(dirname, x) if dirname else x
for y in _rlistdir(path, dironly):
yield os.path.join(x, y)
magic_check = re.compile('([*?[])')
magic_check_bytes = re.compile(b'([*?[])')
def has_magic(s):
if isinstance(s, bytes):
match = magic_check_bytes.search(s)
else:
match = magic_check.search(s)
return match is not None
def _ishidden(path):
return path[0] in ('.', b'.'[0])
def _isrecursive(pattern):
if isinstance(pattern, bytes):
return pattern == b'**'
else:
return pattern == '**'
def _py2_escape(pathname):
"""Escape all special characters.
"""
# Escaping is done by wrapping any of "*?[" between square brackets.
# Metacharacters do not work in the drive part and shouldn't be escaped.
drive, pathname = os.path.splitdrive(pathname)
if isinstance(pathname, bytes):
pathname = magic_check_bytes.sub(br'[\1]', pathname)
else:
pathname = magic_check.sub(r'[\1]', pathname)
return drive + pathname
glob = _py2_glob
iglob = _py2_iglob
escape = _py2_escape
else:
from glob import glob, iglob, escape
try:
from os import symlink
except ImportError:
import ctypes
csl = ctypes.windll.kernel32.CreateSymbolicLinkW
csl.argtypes = (ctypes.c_wchar_p, ctypes.c_wchar_p, ctypes.c_uint32)
csl.restype = ctypes.c_ubyte
def symlink(source, link_name, target_is_directory=False):
flags = 1 if target_is_directory else 0
flags += 2
if csl(link_name, source, flags) == 0:
raise ctypes.WinError()
sys_executable_suffix = sys.executable[len(sys.exec_prefix) + 1:]
python_specific_dir_name = "%s-%s" % (platform.python_implementation().lower(),
".".join(str(f) for f in sys.version_info))
_, _venv_python_exename = os.path.split(os.path.abspath(getattr(sys, "_base_executable", sys.executable)))
__all__ = ["glob", "iglob", "escape"]
| 30.952663 | 118 | 0.635506 | 2,082 | 15,693 | 4.614793 | 0.210855 | 0.013114 | 0.017485 | 0.009471 | 0.222211 | 0.177769 | 0.14821 | 0.131973 | 0.103039 | 0.093464 | 0 | 0.008077 | 0.282037 | 15,693 | 506 | 119 | 31.013834 | 0.844679 | 0.243166 | 0 | 0.232787 | 0 | 0 | 0.023676 | 0 | 0 | 0 | 0 | 0 | 0.009836 | 1 | 0.101639 | false | 0.036066 | 0.085246 | 0.019672 | 0.278689 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce55949acd7bce5994cb3909e8fce72f50cb74ba | 3,216 | py | Python | framelistener.py | oflisback/leaphue | 5d9dc916f906d79457f28b5aaa8c618a5cfe6ed6 | [
"MIT"
] | 11 | 2015-09-21T16:28:41.000Z | 2021-07-11T11:03:01.000Z | framelistener.py | oflisback/leaphue | 5d9dc916f906d79457f28b5aaa8c618a5cfe6ed6 | [
"MIT"
] | null | null | null | framelistener.py | oflisback/leaphue | 5d9dc916f906d79457f28b5aaa8c618a5cfe6ed6 | [
"MIT"
] | 1 | 2017-10-16T03:39:37.000Z | 2017-10-16T03:39:37.000Z | import Leap
import math
import vmath
from collections import deque
from datetime import datetime
class FrameListener(Leap.Listener):
def on_frame(self, controller):
frame = controller.frame()
self.confidence = frame.hands[0].confidence
angle = 4*[None]
if self.confidence < 0.1:
self.avg_a = None
return
hd = frame.hands[0].direction
self.hand_direction = hd
self.hand_angle = vmath.angle_between((-1, 0, 0), (hd.x, hd.y, hd.z))
for i, a in enumerate(self.angle_data):
d = frame.hands[0].fingers[i + 1].bone(2).direction
angle[i] = math.pi/2 - vmath.angle_between((0, 1, 0), (d.x, d.y, d.z))
a.appendleft(angle[i])
# Find the finger pointing most downwards, and also the second-most
# downwards finger. If the angular difference between them is large
# enough, we conclude that one finger points downwards while the
# others do not.
down_fingers = []
down_fingers.append({'angle' : 0.0, 'finger_index' : 0})
down_fingers.append({'angle' : 0.0, 'finger_index' : 0})
for i in range(3):
if angle[i] > down_fingers[0]['angle']:
down_fingers[1] = down_fingers[0]
down_fingers[0] = {'angle' : angle[i], 'finger_index' : i}
elif angle[i] > down_fingers[1]['angle']:
down_fingers[1] = {'angle' : angle[i], 'finger_index' : i}
angle_diff = down_fingers[0]['angle'] - down_fingers[1]['angle']
if down_fingers[0]['finger_index'] != -1 and angle_diff > 0.5:
if self.finger_down != down_fingers[0]['finger_index']:
self.finger_down = self.new_finger_down = down_fingers[0]['finger_index']
# print("Finger down: " + str(down_fingers[0]['finger_index']) + " angle diff: " + str(angle_diff))
elif self.finger_down != 3:
# Sentinel value: 3 means no finger is pointing down.
self.finger_down = self.new_finger_down = 3
# Compute the average angle, excluding the finger pointing most downwards.
fingers_for_average = range(4)
fingers_for_average.remove(down_fingers[0]['finger_index'])
angle_sum = 0
for i in fingers_for_average:
angle_sum += angle[i]
self.avg_a = angle_sum / 3.0
def __init__(self):
super(self.__class__, self).__init__()
self.angle_data = []
self.hand_angle = None
self.hand_direction = None
# four fingers to keep track of
for i in range(4):
self.angle_data.append(deque([0] * 1000, 1000))
self.confidence = 0
self.avg_a = 0
self.new_finger_down = 3
self.finger_down = None
def pop_new_finger_down_if_any(self):
finger = self.new_finger_down
self.new_finger_down = None
return finger
def get_hand_direction(self):
return self.hand_direction
def get_confidence(self):
return self.confidence
# Hand angle relative to the "left" vector, (-1, 0, 0).
def get_hand_angle(self):
return self.hand_angle
def get_average_angle(self):
return self.avg_a
def get_angle_data(self):
return self.angle_data
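# --- Illustrative wiring sketch (added; not part of the original file) ---
# Assumes the Leap SDK is installed; this mirrors the usual listener pattern:
#
#     controller = Leap.Controller()
#     listener = FrameListener()
#     controller.add_listener(listener)
#     # ... poll listener.get_average_angle() / listener.get_hand_angle() ...
#     controller.remove_listener(listener)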
| 34.580645 | 110 | 0.601679 | 445 | 3,216 | 4.139326 | 0.229213 | 0.095548 | 0.058632 | 0.04886 | 0.239414 | 0.204669 | 0.134636 | 0.039088 | 0.039088 | 0 | 0 | 0.026453 | 0.28296 | 3,216 | 92 | 111 | 34.956522 | 0.772333 | 0.157027 | 0 | 0.03125 | 0 | 0 | 0.050352 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.125 | false | 0 | 0.078125 | 0.078125 | 0.328125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce559b51bc538001a5df0ebeaa65551e009ddf58 | 11,974 | py | Python | tds/views/rest/packages_by_id.py | minddrive/tds | 573836434c76603fdd3dd9e07545b48f86e5f70f | [
"Apache-2.0"
] | 1 | 2020-01-02T13:44:23.000Z | 2020-01-02T13:44:23.000Z | tds/views/rest/packages_by_id.py | ifwe/tds | 573836434c76603fdd3dd9e07545b48f86e5f70f | [
"Apache-2.0"
] | 1 | 2017-02-22T22:25:23.000Z | 2017-02-23T17:10:00.000Z | tds/views/rest/packages_by_id.py | minddrive/tds | 573836434c76603fdd3dd9e07545b48f86e5f70f | [
"Apache-2.0"
] | 1 | 2016-08-02T06:06:35.000Z | 2016-08-02T06:06:35.000Z | # Copyright 2016 Ifwe Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
REST API view for packages retrieved by ID.
"""
from cornice.resource import resource, view
import jenkinsapi.jenkins
try:
from jenkinsapi.custom_exceptions import JenkinsAPIException, NotFound
except ImportError:
from jenkinsapi.exceptions import JenkinsAPIException, NotFound
import tds.model
from .base import BaseView, init_view
from . import obj_types, descriptions
from .urls import ALL_URLS
from .permissions import PACKAGE_BY_ID_PERMISSIONS
@resource(collection_path=ALL_URLS['package_by_id_collection'],
path=ALL_URLS['package_by_id'])
@init_view(name='package-by-id', model=tds.model.Package, set_params=False)
class PackageByIDView(BaseView):
"""
View for packages retrieved by ID.
"""
types = {
'id': 'integer',
'version': 'integer',
'revision': 'integer',
'status': 'choice',
'builder': 'choice',
'job': 'string',
'name': 'string',
'commit_hash': 'string',
}
param_routes = {
'name': 'pkg_name',
'application_id': 'pkg_def_id',
'user': 'creator',
}
full_types = obj_types.PACKAGE_TYPES
param_descriptions = {
'id': 'Unique integer identifier',
'version': 'Version number',
'revision': 'Revision number',
'status': 'Current status',
'builder': 'Entity that built the package',
'job': 'Name of Jenkins job',
'name': "Name of the package's application",
'commit_hash': 'The commit hash of the build',
}
full_descriptions = descriptions.PACKAGE_DESCRIPTIONS
defaults = {
'status': 'pending',
}
permissions = PACKAGE_BY_ID_PERMISSIONS
individual_allowed_methods = dict(
GET=dict(description="Get package matching ID."),
HEAD=dict(description="Do a GET query without a body returned."),
PUT=dict(description="Update package matching ID."),
)
collection_allowed_methods = dict(
GET=dict(description="Get a list of packages, optionally by limit and/"
"or start."),
HEAD=dict(description="Do a GET query without a body returned."),
POST=dict(description="Add a new package."),
)
required_post_fields = ('version', 'revision', 'name')
def validate_individual_package_by_id(self, request):
"""
Validate that the package being retrieved by ID actually exists.
"""
self.get_obj_by_name_or_id(obj_type='Package', model=self.model,
param_name='id', can_be_name=False,
dict_name=self.name)
def validate_package_by_id_put(self):
"""
Validate a PUT request to a package retrieved by ID.
"""
if self.name not in self.request.validated:
return
if any(x in self.request.validated_params for x in
('version', 'revision', 'name')):
found_pkg = self.query(self.model).get(
application=self.query(tds.model.Application).get(
pkg_name=self.request.validated_params['name']
) if 'name' in self.request.validated_params else
self.request.validated[self.name].application,
version=self.request.validated_params['version'] if 'version'
in self.request.validated_params else
self.request.validated[self.name].version,
revision=self.request.validated_params['revision'] if
'revision' in self.request.validated_params else
self.request.validated[self.name].revision,
)
if found_pkg and found_pkg != self.request.validated[self.name]:
self.request.errors.add(
'query',
'name' if 'name' in self.request.validated_params
else 'version' if 'version' in
self.request.validated_params else 'revision',
"Unique constraint violated. Another package for this"
" application with this version and revision already"
" exists."
)
self.request.errors.status = 409
if any(x in self.request.validated_params for x in
('version', 'revision', 'job')):
commit_hash = self._validate_jenkins_build()
if commit_hash is not None:
self.request.validated['commit_hash'] = commit_hash
if self.name not in self.request.validated:
return
if 'status' in self.request.validated_params and \
self.request.validated_params['status'] != \
self.request.validated[self.name].status:
if not (self.request.validated[self.name].status == 'failed' and
self.request.validated_params['status'] == 'pending'):
self.request.errors.add(
'query', 'status',
"Cannot change status to {new} from {current}.".format(
new=self.request.validated_params['status'],
current=self.request.validated[self.name].status,
)
)
self.request.errors.status = 403
def validate_package_by_id_post(self):
"""
Validate a POST for a new package.
"""
if 'name' not in self.request.validated_params:
return
found_app = self.query(tds.model.Application).get(
pkg_name=self.request.validated_params['name']
)
ver_check = 'version' in self.request.validated_params
rev_check = 'revision' in self.request.validated_params
if not found_app:
self.request.errors.add(
'query', 'name',
"Application with name {name} does not exist.".format(
name=self.request.validated_params['name']
)
)
self.request.status = 400
return
elif not (ver_check and rev_check):
return
else:
self.request.validated_params['application'] = found_app
found_pkg = self.query(self.model).get(
application=found_app,
version=self.request.validated_params['version'],
revision=self.request.validated_params['revision'],
)
if found_pkg:
self.request.errors.add(
'query', 'version',
"Unique constraint violated. A package for this application"
" with this version and revision already exists."
)
self.request.errors.status = 409
if 'status' in self.request.validated_params and \
self.request.validated_params['status'] != 'pending':
self.request.errors.add(
'query', 'status',
"Status must be pending for new packages."
)
self.request.errors.status = 403
commit_hash = self._validate_jenkins_build()
if commit_hash is not None:
self.request.validated['commit_hash'] = commit_hash
def _add_jenkins_error(self, message):
"""
Add a Jenkins error at 'job', 'version', 'name', or 'id' (checked in that
order) with the given description message.
"""
if 'job' in self.request.validated_params:
self.request.errors.add('query', 'job', message)
elif 'version' in self.request.validated_params:
self.request.errors.add('query', 'version', message)
elif 'name' in self.request.validated_params:
self.request.errors.add('query', 'name', message)
elif self.name in self.request.validated:
self.request.errors.add('path', 'id', message)
def _validate_jenkins_build(self):
"""
Validate that a Jenkins build exists for a package being added or
updated.
"""
try:
jenkins = jenkinsapi.jenkins.Jenkins(self.jenkins_url)
except KeyError:
raise tds.exceptions.ConfigurationError(
'Could not find jenkins_url in settings file.'
)
except Exception:
self._add_jenkins_error(
"Unable to connect to Jenkins server at {addr} to check for "
"package.".format(addr=self.jenkins_url)
)
self.request.errors.status = 500
return
application = None
if 'name' in self.request.validated_params:
app = self.query(tds.model.Application).get(
pkg_name=self.request.validated_params['name']
)
if app is None:
return
application = app
if 'job' in self.request.validated_params:
job_name = self.request.validated_params['job']
elif self.name in self.request.validated and getattr(
self.request.validated[self.name], 'job', None
):
job_name = self.request.validated[self.name].job
elif application is not None:
job_name = application.path
else:
return
if 'version' in self.request.validated_params:
version = self.request.validated_params['version']
elif self.name in self.request.validated:
version = self.request.validated[self.name].version
else:
return
matrix_name = None
if '/' in job_name:
job_name, matrix_name = job_name.split('/', 1)
try:
job = jenkins[job_name]
except KeyError:
self._add_jenkins_error("Jenkins job {job} does not exist.".format(
job=job_name,
))
self.request.errors.status = 400
return
try:
int(version)
except ValueError:
return
try:
build = job.get_build(int(version))
except (KeyError, JenkinsAPIException, NotFound):
self._add_jenkins_error(
"Build with version {vers} for job {job} does not exist on "
"Jenkins server.".format(vers=version, job=job_name)
)
self.request.errors.status = 400
if matrix_name is not None:
for run in build.get_matrix_runs():
if matrix_name in run.baseurl:
build = run
break
else:
self._add_jenkins_error(
"No matrix run matching {matrix} for job {job} found."
.format(matrix=matrix_name, job=job_name)
)
self.request.errors.status = 400
if self.request.errors.status == 400:
return None
try:
return build.get_revision()
except Exception:  # Unknown exception type --KN
return None
@view(validators=('validate_put_post', 'validate_post_required',
'validate_obj_post', 'validate_cookie'))
def collection_post(self):
"""
Handle a collection POST after all validation has been passed.
"""
self.request.validated_params['creator'] = self.request.validated[
'user'
]
return self._handle_collection_post()
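# Illustrative POST body (added; hedged) for the collection endpoint
# registered under ALL_URLS['package_by_id_collection']. Field names follow
# required_post_fields and the types mapping above; values are hypothetical:
#
#     {"name": "myapp", "version": 42, "revision": 1, "job": "myapp-build"}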
| 37.071207 | 79 | 0.582178 | 1,320 | 11,974 | 5.147727 | 0.171212 | 0.116556 | 0.153054 | 0.130096 | 0.458425 | 0.422958 | 0.327447 | 0.275497 | 0.243414 | 0.216041 | 0 | 0.004805 | 0.322115 | 11,974 | 322 | 80 | 37.186335 | 0.832327 | 0.086938 | 0 | 0.263158 | 0 | 0 | 0.160849 | 0.004282 | 0 | 0 | 0 | 0 | 0 | 1 | 0.024292 | false | 0 | 0.040486 | 0 | 0.17004 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce5705f95ebcc6624f6d46e38721c5b03f0cdcb8 | 1,460 | py | Python | picotui/defs.py | hiveeyes/picotui | 85f35bb1d85318be24a910ac7e08b589c165195d | [
"MIT"
] | null | null | null | picotui/defs.py | hiveeyes/picotui | 85f35bb1d85318be24a910ac7e08b589c165195d | [
"MIT"
] | null | null | null | picotui/defs.py | hiveeyes/picotui | 85f35bb1d85318be24a910ac7e08b589c165195d | [
"MIT"
] | null | null | null | # Colors
C_BLACK = 0
C_RED = 1
C_GREEN = 2
C_YELLOW = 3
C_BLUE = 4
C_MAGENTA = 5
C_CYAN = 6
C_WHITE = 7
ATTR_INTENSITY = 8
C_GRAY = C_BLACK | ATTR_INTENSITY
C_B_RED = C_RED | ATTR_INTENSITY
C_B_GREEN = C_GREEN | ATTR_INTENSITY
C_B_YELLOW = C_YELLOW | ATTR_INTENSITY
C_B_BLUE = C_BLUE | ATTR_INTENSITY
C_B_MAGENTA = C_MAGENTA | ATTR_INTENSITY
C_B_CYAN = C_CYAN | ATTR_INTENSITY
C_B_WHITE = C_WHITE | ATTR_INTENSITY
def C_PAIR(fg, bg):
return (bg << 4) + fg
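# Illustrative example (added; hedged): C_PAIR packs the background into the
# high nibble and the foreground into the low nibble, e.g.
#
#     C_PAIR(C_WHITE, C_BLUE)  # -> (4 << 4) + 7 == 0x47,
#                              #    i.e. white text on a blue background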
# Keys
KEY_UP = 1
KEY_DOWN = 2
KEY_LEFT = 3
KEY_RIGHT = 4
KEY_HOME = 5
KEY_END = 6
KEY_PGUP = 7
KEY_PGDN = 8
KEY_QUIT = 9
KEY_ENTER = 10
KEY_BACKSPACE = 11
KEY_DELETE = 12
KEY_TAB = b"\t"
KEY_SHIFT_TAB = b"\x1b[Z"
KEY_ESC = 20
KEY_F1 = 30
KEY_F2 = 31
KEY_F3 = 32
KEY_F4 = 33
KEY_F5 = b'\x1b[15~'
KEY_F6 = b'\x1b[17~'
KEY_F7 = b'\x1b[18~'
KEY_F8 = b'\x1b[19~'
KEY_F9 = b'\x1b[20~'
KEY_F10 = b'\x1b[21~'
KEYMAP = {
b"\x1b[A": KEY_UP,
b"\x1b[B": KEY_DOWN,
b"\x1b[D": KEY_LEFT,
b"\x1b[C": KEY_RIGHT,
b"\x1bOH": KEY_HOME,
b"\x1bOF": KEY_END,
b"\x1b[1~": KEY_HOME,
b"\x1b[4~": KEY_END,
b"\x1b[5~": KEY_PGUP,
b"\x1b[6~": KEY_PGDN,
b"\x03": KEY_QUIT,
b"\r": KEY_ENTER,
b"\x7f": KEY_BACKSPACE,
b"\x1b[3~": KEY_DELETE,
b"\x1b": KEY_ESC,
b"\x1bOP": KEY_F1,
b"\x1bOQ": KEY_F2,
b"\x1bOR": KEY_F3,
b"\x1bOS": KEY_F4,
}
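# Illustrative lookup (added; hedged): translate a raw escape sequence read
# from the terminal into one of the KEY_* codes above.
#
#     raw = b"\x1b[A"
#     key = KEYMAP.get(raw, raw)  # -> KEY_UP; unknown bytes pass through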
# Unicode symbols in UTF-8
# DOWNWARDS ARROW
DOWN_ARROW = b"\xe2\x86\x93"
# BLACK DOWN-POINTING TRIANGLE
DOWN_TRIANGLE = b"\xe2\x96\xbc"
| 18.25 | 41 | 0.654795 | 293 | 1,460 | 2.962457 | 0.331058 | 0.078341 | 0.112903 | 0.120968 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.086221 | 0.189726 | 1,460 | 79 | 42 | 18.481013 | 0.647506 | 0.055479 | 0 | 0 | 0 | 0 | 0.137655 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.014925 | false | 0 | 0 | 0.014925 | 0.029851 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce57f15c554e7fac776806f3468f7f4c7d29d0d4 | 424 | py | Python | catalog/bindings/gmd/spherical_cs_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/spherical_cs_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | catalog/bindings/gmd/spherical_cs_2.py | NIVANorge/s-enda-playground | 56ae0a8978f0ba8a5546330786c882c31e17757a | [
"Apache-2.0"
] | null | null | null | from dataclasses import dataclass
from bindings.gmd.spherical_csproperty_type import SphericalCspropertyType
__NAMESPACE__ = "http://www.opengis.net/gml"
@dataclass
class SphericalCs2(SphericalCspropertyType):
"""
gml:sphericalCS is an association role to the spherical coordinate system
used by this CRS.
"""
class Meta:
name = "sphericalCS"
namespace = "http://www.opengis.net/gml"
| 24.941176 | 77 | 0.731132 | 47 | 424 | 6.468085 | 0.702128 | 0.085526 | 0.105263 | 0.151316 | 0.190789 | 0.190789 | 0 | 0 | 0 | 0 | 0 | 0.00289 | 0.183962 | 424 | 16 | 78 | 26.5 | 0.875723 | 0.214623 | 0 | 0 | 0 | 0 | 0.201278 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.5 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce584a9f58d3d4bd201cf4aa964fe856a4fc739a | 7,334 | py | Python | ansys/dpf/core/operators/mesh/mesh_provider.py | TheGoldfish01/pydpf-core | 75ca8a180454f94cedafbc68c1d6f20dcfc4c795 | [
"MIT"
] | 11 | 2021-01-31T15:50:02.000Z | 2021-10-01T23:15:38.000Z | ansys/dpf/core/operators/mesh/mesh_provider.py | TheGoldfish01/pydpf-core | 75ca8a180454f94cedafbc68c1d6f20dcfc4c795 | [
"MIT"
] | 46 | 2021-01-14T05:00:50.000Z | 2021-10-06T18:30:37.000Z | ansys/dpf/core/operators/mesh/mesh_provider.py | TheGoldfish01/pydpf-core | 75ca8a180454f94cedafbc68c1d6f20dcfc4c795 | [
"MIT"
] | 3 | 2021-06-30T07:18:30.000Z | 2021-09-15T08:43:11.000Z | """
mesh_provider
=============
"""
from ansys.dpf.core.dpf_operator import Operator
from ansys.dpf.core.inputs import Input, _Inputs
from ansys.dpf.core.outputs import Output, _Outputs, _modify_output_spec_with_one_type
from ansys.dpf.core.operators.specification import PinSpecification, Specification
"""Operators from Ans.Dpf.Native plugin, from "mesh" category
"""
class mesh_provider(Operator):
"""Read a mesh from result files and cure degenerated elements
available inputs:
- streams_container (StreamsContainer) (optional)
- data_sources (DataSources)
- read_cyclic (int) (optional)
available outputs:
- mesh (MeshedRegion)
Examples
--------
>>> from ansys.dpf import core as dpf
>>> # Instantiate operator
>>> op = dpf.operators.mesh.mesh_provider()
>>> # Make input connections
>>> my_streams_container = dpf.StreamsContainer()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> my_data_sources = dpf.DataSources()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> my_read_cyclic = int()
>>> op.inputs.read_cyclic.connect(my_read_cyclic)
>>> # Instantiate operator and connect inputs in one line
>>> op = dpf.operators.mesh.mesh_provider(streams_container=my_streams_container,data_sources=my_data_sources)
>>> # Get output data
>>> result_mesh = op.outputs.mesh()"""
def __init__(self, streams_container=None, data_sources=None, config=None, server=None):
super().__init__(name="MeshProvider", config = config, server = server)
self._inputs = InputsMeshProvider(self)
self._outputs = OutputsMeshProvider(self)
if streams_container is not None:
self.inputs.streams_container.connect(streams_container)
if data_sources is not None:
self.inputs.data_sources.connect(data_sources)
@staticmethod
def _spec():
spec = Specification(description="""Read a mesh from result files and cure degenerated elements""",
map_input_pin_spec={
3 : PinSpecification(name = "streams_container", type_names=["streams_container"], optional=True, document="""result file container allowed to be kept open to cache data"""),
4 : PinSpecification(name = "data_sources", type_names=["data_sources"], optional=False, document="""result file path container, used if no streams are set"""),
14 : PinSpecification(name = "read_cyclic", type_names=["int32"], optional=True, document="""if 1 cyclic symmetry is ignored, if 2 cyclic expansion is done (default is 1)""")},
map_output_pin_spec={
0 : PinSpecification(name = "mesh", type_names=["abstract_meshed_region"], optional=False, document="""""")})
return spec
@staticmethod
def default_config():
return Operator.default_config(name = "MeshProvider")
@property
def inputs(self):
"""Enables to connect inputs to the operator
Returns
--------
inputs : InputsMeshProvider
"""
return super().inputs
@property
def outputs(self):
"""Enables to get outputs of the operator by evaluationg it
Returns
--------
outputs : OutputsMeshProvider
"""
return super().outputs
#internal name: MeshProvider
#scripting name: mesh_provider
class InputsMeshProvider(_Inputs):
"""Intermediate class used to connect user inputs to mesh_provider operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mesh.mesh_provider()
>>> my_streams_container = dpf.StreamsContainer()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> my_data_sources = dpf.DataSources()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> my_read_cyclic = int()
>>> op.inputs.read_cyclic.connect(my_read_cyclic)
"""
def __init__(self, op: Operator):
super().__init__(mesh_provider._spec().inputs, op)
self._streams_container = Input(mesh_provider._spec().input_pin(3), 3, op, -1)
self._inputs.append(self._streams_container)
self._data_sources = Input(mesh_provider._spec().input_pin(4), 4, op, -1)
self._inputs.append(self._data_sources)
self._read_cyclic = Input(mesh_provider._spec().input_pin(14), 14, op, -1)
self._inputs.append(self._read_cyclic)
@property
def streams_container(self):
"""Allows to connect streams_container input to the operator
- pindoc: result file container allowed to be kept open to cache data
Parameters
----------
my_streams_container : StreamsContainer,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mesh.mesh_provider()
>>> op.inputs.streams_container.connect(my_streams_container)
>>> #or
>>> op.inputs.streams_container(my_streams_container)
"""
return self._streams_container
@property
def data_sources(self):
"""Allows to connect data_sources input to the operator
- pindoc: result file path container, used if no streams are set
Parameters
----------
my_data_sources : DataSources,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mesh.mesh_provider()
>>> op.inputs.data_sources.connect(my_data_sources)
>>> #or
>>> op.inputs.data_sources(my_data_sources)
"""
return self._data_sources
@property
def read_cyclic(self):
"""Allows to connect read_cyclic input to the operator
- pindoc: if 1 cyclic symmetry is ignored, if 2 cyclic expansion is done (default is 1)
Parameters
----------
my_read_cyclic : int,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mesh.mesh_provider()
>>> op.inputs.read_cyclic.connect(my_read_cyclic)
>>> #or
>>> op.inputs.read_cyclic(my_read_cyclic)
"""
return self._read_cyclic
class OutputsMeshProvider(_Outputs):
"""Intermediate class used to get outputs from mesh_provider operator
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mesh.mesh_provider()
>>> # Connect inputs : op.inputs. ...
>>> result_mesh = op.outputs.mesh()
"""
def __init__(self, op: Operator):
super().__init__(mesh_provider._spec().outputs, op)
self._mesh = Output(mesh_provider._spec().output_pin(0), 0, op)
self._outputs.append(self._mesh)
@property
def mesh(self):
"""Allows to get mesh output of the operator
Returns
----------
my_mesh : MeshedRegion,
Examples
--------
>>> from ansys.dpf import core as dpf
>>> op = dpf.operators.mesh.mesh_provider()
>>> # Connect inputs : op.inputs. ...
>>> result_mesh = op.outputs.mesh()
"""
return self._mesh
| 33.797235 | 209 | 0.622716 | 823 | 7,334 | 5.315917 | 0.147023 | 0.091429 | 0.030171 | 0.032914 | 0.469486 | 0.437486 | 0.388114 | 0.377143 | 0.346286 | 0.346286 | 0 | 0.004771 | 0.256886 | 7,334 | 216 | 210 | 33.953704 | 0.797982 | 0.455277 | 0 | 0.172414 | 0 | 0 | 0.116708 | 0.006884 | 0 | 0 | 0 | 0 | 0 | 1 | 0.189655 | false | 0 | 0.068966 | 0.017241 | 0.448276 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce5d4da9e7149ff5ba0e8cca1bf4fadf22f75af7 | 2,593 | py | Python | test/functional/rpc_scantxoutset.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | [
"MIT"
] | 2 | 2021-09-11T22:50:58.000Z | 2021-09-30T19:55:30.000Z | test/functional/rpc_scantxoutset.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | [
"MIT"
] | 3 | 2021-07-19T10:25:36.000Z | 2021-07-21T10:47:31.000Z | test/functional/rpc_scantxoutset.py | lihuanghai/bitcoin | 624da15f8c55219f4ca3e0877a17799990299504 | [
"MIT"
] | 8 | 2021-03-23T13:25:08.000Z | 2022-03-09T10:45:53.000Z | #!/usr/bin/env python3
# Copyright (c) 2018 The Bitcoin Core developers
# Distributed under the MIT software license, see the accompanying
# file COPYING or http://www.opensource.org/licenses/mit-license.php.
"""Test the scantxoutset rpc call."""
from test_framework.test_framework import BitcoinTestFramework
from test_framework.util import *
import shutil
import os
class ScantxoutsetTest(BitcoinTestFramework):
def set_test_params(self):
self.num_nodes = 1
self.setup_clean_chain = True
def run_test(self):
self.log.info("Mining blocks...")
self.nodes[0].generate(110)
addr_P2SH_SEGWIT = self.nodes[0].getnewaddress("", "p2sh-segwit")
pubk1 = self.nodes[0].getaddressinfo(addr_P2SH_SEGWIT)['pubkey']
addr_LEGACY = self.nodes[0].getnewaddress("", "legacy")
pubk2 = self.nodes[0].getaddressinfo(addr_LEGACY)['pubkey']
addr_BECH32 = self.nodes[0].getnewaddress("", "bech32")
pubk3 = self.nodes[0].getaddressinfo(addr_BECH32)['pubkey']
self.nodes[0].sendtoaddress(addr_P2SH_SEGWIT, 1)
self.nodes[0].sendtoaddress(addr_LEGACY, 2)
self.nodes[0].sendtoaddress(addr_BECH32, 3)
self.nodes[0].generate(1)
self.log.info("Stop node, remove wallet, mine again some blocks...")
self.stop_node(0)
shutil.rmtree(os.path.join(self.nodes[0].datadir, "regtest", 'wallets'))
self.start_node(0)
self.nodes[0].generate(110)
self.restart_node(0, ['-nowallet'])
self.log.info("Test if we have found the non HD unspent outputs.")
assert_equal(self.nodes[0].scantxoutset("start", [ {"pubkey": {"pubkey": pubk1}}, {"pubkey": {"pubkey": pubk2}}, {"pubkey": {"pubkey": pubk3}}])['total_amount'], 6)
assert_equal(self.nodes[0].scantxoutset("start", [ {"address": addr_P2SH_SEGWIT}, {"address": addr_LEGACY}, {"address": addr_BECH32}])['total_amount'], 6)
assert_equal(self.nodes[0].scantxoutset("start", [ {"address": addr_P2SH_SEGWIT}, {"address": addr_LEGACY}, {"pubkey": {"pubkey": pubk3}} ])['total_amount'], 6)
self.log.info("Test invalid parameters.")
assert_raises_rpc_error(-8, 'Scanobject "pubkey" must contain an object as value', self.nodes[0].scantxoutset, "start", [ {"pubkey": pubk1}]) #missing pubkey object
assert_raises_rpc_error(-8, 'Scanobject "address" must contain a single string as value', self.nodes[0].scantxoutset, "start", [ {"address": {"address": addr_P2SH_SEGWIT}}]) #invalid object for address object
if __name__ == '__main__':
ScantxoutsetTest().main()
| 52.918367 | 216 | 0.67875 | 333 | 2,593 | 5.126126 | 0.366366 | 0.094903 | 0.105448 | 0.064441 | 0.360867 | 0.239602 | 0.165202 | 0.103105 | 0.103105 | 0.103105 | 0 | 0.03089 | 0.163517 | 2,593 | 48 | 217 | 54.020833 | 0.756109 | 0.110683 | 0 | 0.055556 | 0 | 0 | 0.211329 | 0 | 0 | 0 | 0 | 0 | 0.138889 | 1 | 0.055556 | false | 0 | 0.111111 | 0 | 0.194444 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce5dde3609dfe4c59032c4008d18a6d52d6a22fa | 5,985 | py | Python | homeassistant/components/automation/__init__.py | fearoffish/home-assistant | 8ef542927f8c795ed1206d1e0cde41ce822af147 | [
"MIT"
] | null | null | null | homeassistant/components/automation/__init__.py | fearoffish/home-assistant | 8ef542927f8c795ed1206d1e0cde41ce822af147 | [
"MIT"
] | null | null | null | homeassistant/components/automation/__init__.py | fearoffish/home-assistant | 8ef542927f8c795ed1206d1e0cde41ce822af147 | [
"MIT"
] | null | null | null | """
Allow to setup simple automation rules via the config file.
For more details about this component, please refer to the documentation at
https://home-assistant.io/components/automation/
"""
import logging
import voluptuous as vol
from homeassistant.bootstrap import prepare_setup_platform
from homeassistant.const import CONF_PLATFORM
from homeassistant.components import logbook
from homeassistant.helpers import extract_domain_configs
from homeassistant.helpers.service import call_from_config
from homeassistant.loader import get_platform
import homeassistant.helpers.config_validation as cv
DOMAIN = 'automation'
DEPENDENCIES = ['group']
CONF_ALIAS = 'alias'
CONF_CONDITION = 'condition'
CONF_ACTION = 'action'
CONF_TRIGGER = 'trigger'
CONF_CONDITION_TYPE = 'condition_type'
CONDITION_USE_TRIGGER_VALUES = 'use_trigger_values'
CONDITION_TYPE_AND = 'and'
CONDITION_TYPE_OR = 'or'
DEFAULT_CONDITION_TYPE = CONDITION_TYPE_AND
METHOD_TRIGGER = 'trigger'
METHOD_IF_ACTION = 'if_action'
_LOGGER = logging.getLogger(__name__)
def _platform_validator(method, schema):
"""Generate platform validator for different steps."""
def validator(config):
"""Validate it is a valid platform."""
platform = get_platform(DOMAIN, config[CONF_PLATFORM])
if not hasattr(platform, method):
raise vol.Invalid('invalid method platform')
if not hasattr(platform, schema):
return config
return getattr(platform, schema)(config)
return validator
_TRIGGER_SCHEMA = vol.All(
cv.ensure_list,
[
vol.All(
vol.Schema({
vol.Required(CONF_PLATFORM): cv.platform_validator(DOMAIN)
}, extra=vol.ALLOW_EXTRA),
_platform_validator(METHOD_TRIGGER, 'TRIGGER_SCHEMA')
),
]
)
_CONDITION_SCHEMA = vol.Any(
CONDITION_USE_TRIGGER_VALUES,
vol.All(
cv.ensure_list,
[
vol.All(
vol.Schema({
vol.Required(CONF_PLATFORM): cv.platform_validator(DOMAIN),
}, extra=vol.ALLOW_EXTRA),
_platform_validator(METHOD_IF_ACTION, 'IF_ACTION_SCHEMA'),
)
]
)
)
PLATFORM_SCHEMA = vol.Schema({
CONF_ALIAS: cv.string,
vol.Required(CONF_TRIGGER): _TRIGGER_SCHEMA,
vol.Required(CONF_CONDITION_TYPE, default=DEFAULT_CONDITION_TYPE):
vol.All(vol.Lower, vol.Any(CONDITION_TYPE_AND, CONDITION_TYPE_OR)),
CONF_CONDITION: _CONDITION_SCHEMA,
vol.Required(CONF_ACTION): cv.SERVICE_SCHEMA,
})
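# Illustrative sketch (added; hedged): the vol.All(vol.Lower, vol.Any(...))
# pattern above normalizes case before matching the literal values, e.g.
#
#     checker = vol.All(vol.Lower,
#                       vol.Any(CONDITION_TYPE_AND, CONDITION_TYPE_OR))
#     checker("AND")  # -> "and"
#     checker("xor")  # raises a voluptuous Invalid error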
def setup(hass, config):
"""Setup the automation."""
for config_key in extract_domain_configs(config, DOMAIN):
conf = config[config_key]
for list_no, config_block in enumerate(conf):
name = config_block.get(CONF_ALIAS, "{}, {}".format(config_key,
list_no))
_setup_automation(hass, config_block, name, config)
return True
def _setup_automation(hass, config_block, name, config):
"""Setup one instance of automation."""
action = _get_action(hass, config_block.get(CONF_ACTION, {}), name)
if CONF_CONDITION in config_block:
action = _process_if(hass, config, config_block, action)
if action is None:
return False
_process_trigger(hass, config, config_block.get(CONF_TRIGGER, []), name,
action)
return True
def _get_action(hass, config, name):
"""Return an action based on a configuration."""
def action():
"""Action to be executed."""
_LOGGER.info('Executing %s', name)
logbook.log_entry(hass, name, 'has been triggered', DOMAIN)
call_from_config(hass, config)
return action
def _process_if(hass, config, p_config, action):
"""Process if checks."""
cond_type = p_config.get(CONF_CONDITION_TYPE,
DEFAULT_CONDITION_TYPE).lower()
if_configs = p_config.get(CONF_CONDITION)
use_trigger = if_configs == CONDITION_USE_TRIGGER_VALUES
if use_trigger:
if_configs = p_config[CONF_TRIGGER]
checks = []
for if_config in if_configs:
platform = _resolve_platform(METHOD_IF_ACTION, hass, config,
if_config.get(CONF_PLATFORM))
if platform is None:
continue
check = platform.if_action(hass, if_config)
# Invalid conditions are allowed if we base them on trigger values
if check is None and not use_trigger:
return None
checks.append(check)
if cond_type == CONDITION_TYPE_AND:
def if_action():
"""AND all conditions."""
if all(check() for check in checks):
action()
else:
def if_action():
"""OR all conditions."""
if any(check() for check in checks):
action()
return if_action
def _process_trigger(hass, config, trigger_configs, name, action):
"""Setup the triggers."""
if isinstance(trigger_configs, dict):
trigger_configs = [trigger_configs]
for conf in trigger_configs:
platform = _resolve_platform(METHOD_TRIGGER, hass, config,
conf.get(CONF_PLATFORM))
if platform is None:
continue
if platform.trigger(hass, conf, action):
_LOGGER.info("Initialized rule %s", name)
else:
_LOGGER.error("Error setting up rule %s", name)
def _resolve_platform(method, hass, config, platform):
"""Find the automation platform."""
if platform is None:
return None
platform = prepare_setup_platform(hass, config, DOMAIN, platform)
if platform is None or not hasattr(platform, method):
_LOGGER.error("Unknown automation platform specified for %s: %s",
method, platform)
return None
return platform
| 29.053398 | 79 | 0.649958 | 699 | 5,985 | 5.313305 | 0.206009 | 0.037695 | 0.020194 | 0.022617 | 0.192784 | 0.122779 | 0.108239 | 0.086699 | 0.065697 | 0.065697 | 0 | 0 | 0.260485 | 5,985 | 205 | 80 | 29.195122 | 0.839132 | 0.092732 | 0 | 0.183824 | 0 | 0 | 0.054407 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080882 | false | 0 | 0.066176 | 0 | 0.235294 | 0.007353 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce5e490c62eedb50a49f434dc444a6c69ee30a08 | 1,061 | py | Python | python/katana/bug/__main__.py | origandrew/katana | 456d64cf48a9d474dc35fb17e4d841bfa7a2f383 | [
"BSD-3-Clause"
] | 64 | 2020-05-22T23:32:00.000Z | 2022-03-18T10:42:45.000Z | python/katana/bug/__main__.py | origandrew/katana | 456d64cf48a9d474dc35fb17e4d841bfa7a2f383 | [
"BSD-3-Clause"
] | 705 | 2020-02-17T20:50:38.000Z | 2022-03-31T16:28:09.000Z | python/katana/bug/__main__.py | origandrew/katana | 456d64cf48a9d474dc35fb17e4d841bfa7a2f383 | [
"BSD-3-Clause"
] | 93 | 2020-03-18T17:34:07.000Z | 2022-03-29T02:11:09.000Z | import argparse
import sys
from pathlib import Path
from katana.bug import capture_environment
if __name__ == "__main__":
parser = argparse.ArgumentParser(
prog=f"{Path(sys.executable).name} -m katana.bug",
description="""
Capture environment information for bug reporting.
""",
)
parser.add_argument(
"destination",
help="Output the binary to the given path, if this is not provided a temporary file name is chosen.",
type=str,
nargs="?",
default=None,
)
parser.add_argument("--stdout", help="Send output to stdout instead of a file.", action="store_true")
args = parser.parse_args()
if args.stdout:
if args.destination:
print("WARNING: Ignoring filename since --stdout was given.", file=sys.stderr)
destination = sys.stdout.buffer
else:
destination = args.destination
filename = capture_environment(destination)
if isinstance(filename, (str, Path)):
print(f"Environment captured to: {filename}")
| 28.675676 | 109 | 0.65033 | 124 | 1,061 | 5.451613 | 0.524194 | 0.079882 | 0.050296 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.245994 | 1,061 | 36 | 110 | 29.472222 | 0.845 | 0 | 0 | 0 | 0 | 0 | 0.3459 | 0.025448 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.137931 | 0 | 0.137931 | 0.068966 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce5e9120b846a47259e8901ee8cb98871a949460 | 1,594 | py | Python | 1d/test-convergence.py | hertzsprung/high-order-transport | 50d9633642dcd4d8fba54b9b408e69dc0f12d9e7 | [
"MIT"
] | null | null | null | 1d/test-convergence.py | hertzsprung/high-order-transport | 50d9633642dcd4d8fba54b9b408e69dc0f12d9e7 | [
"MIT"
] | null | null | null | 1d/test-convergence.py | hertzsprung/high-order-transport | 50d9633642dcd4d8fba54b9b408e69dc0f12d9e7 | [
"MIT"
] | 1 | 2020-02-13T09:16:36.000Z | 2020-02-13T09:16:36.000Z | #!/usr/bin/python3
import numpy as np
import os
from convergence import *
from ddt import *
from div import *
from initial import *
from interpolate import *
from mesh import *
from simulation import *
from spacing import *
from stencil import *
from weighting import *
class Initialiser:
def __init__(self, stencil, order):
self.stencil = stencil
self.order = order
def __call__(self, nx):
mesh = Mesh(nx, Uniform())
return Simulation(
mesh=mesh,
Co=0.5,
u=1,
tracer=SineWave().tracer,
ddt=RungeKutta4(),
interpolation=PointwisePolynomial(
mesh,
self.stencil,
InverseDistanceWeighting(mesh, self.stencil),
self.order)
)
initialisers = [
Initialiser(Stencil([-2, -1, 0]), 2),
Initialiser(Stencil([-2, -1, 0]), 3),
Initialiser(Stencil([-3, -2, -1, 0]), 2),
Initialiser(Stencil([-3, -2, -1, 0]), 3),
Initialiser(Stencil([-3, -2, -1, 0]), 4)
]
for initialiser in initialisers:
convergence = Convergence(
[2**n for n in range(4,9)],
initialiser)
print(
'order', initialiser.order,
'stencil', initialiser.stencil,
'convergence', convergence.order()
)
convergence.dumpTo(
os.path.join('build/convergence.order{order}.stencil{stencil}.dat'.format(
order=initialiser.order,
stencil=len(initialiser.stencil))))
| 26.566667 | 86 | 0.55207 | 164 | 1,594 | 5.317073 | 0.347561 | 0.103211 | 0.017202 | 0.068807 | 0.12844 | 0.107798 | 0.059633 | 0.059633 | 0.059633 | 0.059633 | 0 | 0.028972 | 0.328733 | 1,594 | 59 | 87 | 27.016949 | 0.785981 | 0.010665 | 0 | 0 | 0 | 0 | 0.046984 | 0.032381 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.24 | 0 | 0.32 | 0.02 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce6255297dad75133fd90c004c59c49a8c28ac4e | 658 | py | Python | csv2json.py | shahwahed/CiscoConfigMaker | 07e82489a70603f2df2494b14fe443849bc1318b | [
"MIT"
] | 1 | 2021-01-04T08:54:05.000Z | 2021-01-04T08:54:05.000Z | csv2json.py | shahwahed/SwitchConfigMaker | 07e82489a70603f2df2494b14fe443849bc1318b | [
"MIT"
] | null | null | null | csv2json.py | shahwahed/SwitchConfigMaker | 07e82489a70603f2df2494b14fe443849bc1318b | [
"MIT"
] | null | null | null | #!/usr/bin/python
#quick & dirty csv to json with python
from collections import OrderedDict
import csv
import json
#csv_file = 'vlan.csv'
csv_file = 'csv/mapping_port_sw01.csv'
json_file = 'json/mapping_port_sw01.json'
#Open csv
with open(csv_file, newline='') as f_csv:
reader = csv.reader(f_csv)
headerlist = next(reader)
csvlist = []
for row in reader:
data = OrderedDict()
for i, x in enumerate(row):
data[headerlist[i]] = x
csvlist.append(data)
json_out = json.dumps(csvlist, indent=4)
#save json
with open(json_file,'w') as f_json:
f_json.write(json_out)
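#Illustrative sketch (added; hedged): given a CSV like
#    port,vlan
#    Gi1/0/1,10
#the script writes
#    [
#        {
#            "port": "Gi1/0/1",
#            "vlan": "10"
#        }
#    ]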
| 21.225806 | 44 | 0.664134 | 102 | 658 | 4.117647 | 0.411765 | 0.05 | 0.071429 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.009728 | 0.218845 | 658 | 30 | 45 | 21.933333 | 0.807393 | 0.138298 | 0 | 0 | 0 | 0 | 0.097865 | 0.092527 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.157895 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce632d2510561403eed6367d5618124de6c9918f | 2,200 | py | Python | tests/test_protocols.py | witlox/dcron | ec2391e7a8ca61ecbd65b0d86aa6ed80bc095196 | [
"MIT"
] | null | null | null | tests/test_protocols.py | witlox/dcron | ec2391e7a8ca61ecbd65b0d86aa6ed80bc095196 | [
"MIT"
] | null | null | null | tests/test_protocols.py | witlox/dcron | ec2391e7a8ca61ecbd65b0d86aa6ed80bc095196 | [
"MIT"
] | null | null | null | #!/usr/bin/env python
# -*- coding: utf-8 -*-#
# MIT License
#
# Copyright (c) 2019 Pim Witlox
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
import random
import string
from uuid import uuid4
from dcron.cron.cronitem import CronItem
from dcron.protocols import Packet
from dcron.protocols.messages import Status
from dcron.protocols.udpserializer import UdpSerializer
def test_packet_encoding_and_decoding():
data = b'hello world'
p = Packet(str(uuid4()), 1, 1, data)
encoded = p.encode()
assert p == Packet.decode(encoded)
def test_status_message_dumps_loads():
sm = Status('127.0.0.1', 0)
packets = list(UdpSerializer.dump(sm))
assert len(packets) == 1
assert sm == UdpSerializer.load(packets)
def test_cron_job_message_dumps_loads():
cj = CronItem(command="echo 'hello world'")
packets = list(UdpSerializer.dump(cj))
assert cj == UdpSerializer.load(packets)
def test_cron_with_message_larger_then_max():
cj = CronItem(command=''.join(random.choice(string.ascii_uppercase + string.digits) for _ in range(6000)))
packets = list(UdpSerializer.dump(cj))
assert len(packets) > 1
assert cj == UdpSerializer.load(packets)
| 36.065574 | 110 | 0.75 | 320 | 2,200 | 5.090625 | 0.490625 | 0.054021 | 0.033149 | 0.051565 | 0.128913 | 0.08717 | 0 | 0 | 0 | 0 | 0 | 0.011983 | 0.165455 | 2,200 | 60 | 111 | 36.666667 | 0.875272 | 0.502273 | 0 | 0.153846 | 0 | 0 | 0.035481 | 0 | 0 | 0 | 0 | 0 | 0.230769 | 1 | 0.153846 | false | 0 | 0.269231 | 0 | 0.423077 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce656280a8ebf30e437267a27c79b8e06ea4f3f0 | 1,074 | py | Python | jsontest/jsontestValidateGET.py | nebiutadele/2022-02-28-Alta3-Python | 9c065540bfdf432103bfffac6eae4972c9f9061a | [
"MIT"
] | 1 | 2022-01-05T16:07:46.000Z | 2022-01-05T16:07:46.000Z | jsontest/jsontestValidateGET.py | nebiutadele/2022-02-28-Alta3-Python | 9c065540bfdf432103bfffac6eae4972c9f9061a | [
"MIT"
] | null | null | null | jsontest/jsontestValidateGET.py | nebiutadele/2022-02-28-Alta3-Python | 9c065540bfdf432103bfffac6eae4972c9f9061a | [
"MIT"
] | null | null | null | #!/usr/bin/python3
import requests
import json
# define the URL we want to use
GETURL = "http://validate.jsontest.com/"
def main():
# test data to validate as legal json
mydata = {"fruit": ["apple", "pear"], "vegetable": ["carrot"]}
## the next two lines do the same thing
## we take the Python object, convert it to a string, then strip out whitespace
#jsonToValidate = "json=" + str(mydata).replace(" ", "")
#jsonToValidate = f"json={ str(mydata).replace(' ', '') }"
## slightly different thinking
## use the json library to convert to legal JSON, then strip out whitespace
jsonToValidate = f"json={ json.dumps(mydata).replace(' ', '') }"
# use requests library to send an HTTP GET
resp = requests.get(f"{GETURL}?{jsonToValidate}")
# strip off JSON response
# and convert to PYTHONIC LIST / DICT
respjson = resp.json()
# display our PYTHONIC data (LIST / DICT)
print(respjson)
# JUST display the value of "validate"
print(f"Is your JSON valid? {respjson['validate']}")
if __name__ == "__main__":
main()
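# Note (added; hedged): requests can also build the query string itself,
# which avoids stripping whitespace by hand:
#
#     payload = {"json": json.dumps(mydata, separators=(",", ":"))}
#     resp = requests.get(GETURL, params=payload)
#
# json.dumps(..., separators=(",", ":")) emits compact JSON with no spaces.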
| 29.027027 | 76 | 0.647114 | 139 | 1,074 | 4.942446 | 0.553957 | 0.039301 | 0.034935 | 0.064047 | 0.104803 | 0 | 0 | 0 | 0 | 0 | 0 | 0.001188 | 0.216015 | 1,074 | 36 | 77 | 29.833333 | 0.814727 | 0.531657 | 0 | 0 | 0 | 0 | 0.365702 | 0.154959 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.166667 | 0 | 0.25 | 0.166667 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce69580401bad665369ebc3320443e245d39e660 | 573 | py | Python | ex09_shooter/minion/_enemy.py | ryohji/ex09_shooter | c7de42f1aaa4b1b4d6592f7aec6ede0acc426daf | [
"MIT"
] | null | null | null | ex09_shooter/minion/_enemy.py | ryohji/ex09_shooter | c7de42f1aaa4b1b4d6592f7aec6ede0acc426daf | [
"MIT"
] | null | null | null | ex09_shooter/minion/_enemy.py | ryohji/ex09_shooter | c7de42f1aaa4b1b4d6592f7aec6ede0acc426daf | [
"MIT"
] | null | null | null | """Alien."""
import pyxel
import random
WIDTH = 8
HEIGHT = 8
SPEED = 1.5
class Minion:
"""Alien!"""
def __init__(self, x, y):
self.x = x
self.y = y
self.w = WIDTH
self.h = HEIGHT
self.dir = 1
self.alive = True
self.offset = random.randint(0, 60)
def update(self):
self.dir = 1 if (pyxel.frame_count + self.offset) % 60 < 30 else -1
self.x += SPEED * self.dir
self.y += SPEED
if self.y > pyxel.height - 1:
self.alive = False
def draw(self):
pyxel.blt(self.x, self.y, 0, 8, 0, WIDTH * self.dir, HEIGHT, 0)
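# Example wiring (hypothetical; assumes pyxel has been initialised and image
# bank 0 holds the 8x8 alien sprite at (8, 0)):
#   minions = [Minion(16 * i, 0) for i in range(5)]
# ...then call m.update() and m.draw() for each minion from the game's
# update/draw callbacks, pruning those whose m.alive is False.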
| 17.90625 | 71 | 0.575916 | 94 | 573 | 3.457447 | 0.37234 | 0.061538 | 0.036923 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.045564 | 0.272251 | 573 | 31 | 72 | 18.483871 | 0.733813 | 0.022688 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.090909 | 0 | 0.272727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce6b9d6b2ee710d77c316c53f3210a2e355a5f51 | 6,283 | py | Python | scripts/nlvr/get_nlvr_logical_forms.py | tianjianjiang/allennlp | 0839f5c263911ec5ff04a2ebe575493c7e0436ef | [
"Apache-2.0"
] | 105 | 2019-08-28T14:16:31.000Z | 2022-03-26T20:51:22.000Z | scripts/nlvr/get_nlvr_logical_forms.py | dasguptar/allennlp | 35b285585e0677b1025eac1c19b5eefe7e2a70db | [
"Apache-2.0"
] | 44 | 2019-09-09T20:52:40.000Z | 2022-03-28T03:04:38.000Z | scripts/nlvr/get_nlvr_logical_forms.py | dasguptar/allennlp | 35b285585e0677b1025eac1c19b5eefe7e2a70db | [
"Apache-2.0"
] | 19 | 2019-09-09T17:34:27.000Z | 2021-09-08T08:22:08.000Z | #! /usr/bin/env python
import json
import argparse
from typing import Tuple, List
import os
import sys
sys.path.insert(
0, os.path.dirname(os.path.dirname(os.path.abspath(os.path.join(__file__, os.pardir))))
)
from allennlp.common.util import JsonDict
from allennlp.semparse.domain_languages import NlvrLanguage
from allennlp.semparse.domain_languages.nlvr_language import Box
from allennlp.semparse import ActionSpaceWalker
def read_json_line(line: str) -> Tuple[str, str, List[JsonDict], List[str]]:
data = json.loads(line)
instance_id = data["identifier"]
sentence = data["sentence"]
if "worlds" in data:
structured_reps = data["worlds"]
label_strings = [label_str.lower() for label_str in data["labels"]]
else:
# We're reading ungrouped data.
structured_reps = [data["structured_rep"]]
label_strings = [data["label"].lower()]
return instance_id, sentence, structured_reps, label_strings
def process_data(
input_file: str,
output_file: str,
max_path_length: int,
max_num_logical_forms: int,
ignore_agenda: bool,
write_sequences: bool,
) -> None:
"""
Reads an NLVR dataset and returns a JSON representation containing sentences, labels, correct and
incorrect logical forms. The output will contain at most `max_num_logical_forms` logical forms
each in both correct and incorrect lists. The output format is:
``[{"id": str, "label": str, "sentence": str, "correct": List[str], "incorrect": List[str]}]``
"""
    processed_data: List[JsonDict] = []
# We can instantiate the ``ActionSpaceWalker`` with any world because the action space is the
# same for all the ``NlvrLanguage`` objects. It is just the execution that differs.
walker = ActionSpaceWalker(NlvrLanguage({}), max_path_length=max_path_length)
for line in open(input_file):
instance_id, sentence, structured_reps, label_strings = read_json_line(line)
worlds = []
for structured_representation in structured_reps:
boxes = {
Box(object_list, box_id)
for box_id, object_list in enumerate(structured_representation)
}
worlds.append(NlvrLanguage(boxes))
labels = [label_string == "true" for label_string in label_strings]
correct_logical_forms = []
incorrect_logical_forms = []
if ignore_agenda:
# Get 1000 shortest logical forms.
logical_forms = walker.get_all_logical_forms(max_num_logical_forms=1000)
else:
# TODO (pradeep): Assuming all worlds give the same agenda.
sentence_agenda = worlds[0].get_agenda_for_sentence(sentence)
logical_forms = walker.get_logical_forms_with_agenda(
sentence_agenda, max_num_logical_forms * 10
)
for logical_form in logical_forms:
if all([world.execute(logical_form) == label for world, label in zip(worlds, labels)]):
if len(correct_logical_forms) <= max_num_logical_forms:
correct_logical_forms.append(logical_form)
else:
if len(incorrect_logical_forms) <= max_num_logical_forms:
incorrect_logical_forms.append(logical_form)
if (
len(correct_logical_forms) >= max_num_logical_forms
and len(incorrect_logical_forms) >= max_num_logical_forms
):
break
if write_sequences:
correct_sequences = [
worlds[0].logical_form_to_action_sequence(logical_form)
for logical_form in correct_logical_forms
]
incorrect_sequences = [
worlds[0].logical_form_to_action_sequence(logical_form)
for logical_form in incorrect_logical_forms
]
processed_data.append(
{
"id": instance_id,
"sentence": sentence,
"correct_sequences": correct_sequences,
"incorrect_sequences": incorrect_sequences,
"worlds": structured_reps,
"labels": label_strings,
}
)
else:
processed_data.append(
{
"id": instance_id,
"sentence": sentence,
"correct_logical_forms": correct_logical_forms,
"incorrect_logical_forms": incorrect_logical_forms,
"worlds": structured_reps,
"labels": label_strings,
}
)
with open(output_file, "w") as outfile:
for instance_processed_data in processed_data:
json.dump(instance_processed_data, outfile)
outfile.write("\n")
outfile.close()
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument("input", type=str, help="NLVR data file")
parser.add_argument("output", type=str, help="Processed output")
parser.add_argument(
"--max-path-length",
type=int,
dest="max_path_length",
help="Maximum path length for logical forms",
default=12,
)
parser.add_argument(
"--max-num-logical-forms",
type=int,
dest="max_num_logical_forms",
help="Maximum number of logical forms per denotation, per question",
default=20,
)
parser.add_argument(
"--ignore-agenda",
dest="ignore_agenda",
help="Should we ignore the "
"agenda and use consistency as the only signal to get logical forms?",
action="store_true",
)
parser.add_argument(
"--write-action-sequences",
dest="write_sequences",
help="If this "
"flag is set, action sequences instead of logical forms will be written "
"to the json file. This will avoid having to parse the logical forms again "
"in the NlvrDatasetReader.",
action="store_true",
)
args = parser.parse_args()
process_data(
args.input,
args.output,
args.max_path_length,
args.max_num_logical_forms,
args.ignore_agenda,
args.write_sequences,
)
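# Example invocation (paths are hypothetical):
#   python get_nlvr_logical_forms.py nlvr_train.json processed_train.json \
#       --max-path-length 12 --max-num-logical-forms 20 --write-action-sequences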
| 38.546012 | 102 | 0.621359 | 711 | 6,283 | 5.230661 | 0.250352 | 0.122614 | 0.038451 | 0.05324 | 0.241732 | 0.185534 | 0.13552 | 0.111858 | 0.089271 | 0.037645 | 0 | 0.004042 | 0.291262 | 6,283 | 162 | 103 | 38.783951 | 0.831125 | 0.106955 | 0 | 0.169014 | 0 | 0 | 0.136429 | 0.020079 | 0 | 0 | 0 | 0.006173 | 0 | 1 | 0.014085 | false | 0 | 0.06338 | 0 | 0.084507 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce6cc16e90219542b48ee1d64072fca3e874127b | 1,987 | py | Python | scripts/h36m_to_json.py | xqterry/lightweight-human-pose-estimation.pytorch | e5ec9452c9bd9683451d3b2f97c6fe9e075b2d48 | [
"Apache-2.0"
] | null | null | null | scripts/h36m_to_json.py | xqterry/lightweight-human-pose-estimation.pytorch | e5ec9452c9bd9683451d3b2f97c6fe9e075b2d48 | [
"Apache-2.0"
] | null | null | null | scripts/h36m_to_json.py | xqterry/lightweight-human-pose-estimation.pytorch | e5ec9452c9bd9683451d3b2f97c6fe9e075b2d48 | [
"Apache-2.0"
] | null | null | null | import h5py
import numpy as np
import json
from os.path import join as os_join
from posixpath import join
if __name__ == '__main__':
# camera
dn = "D:/datasets/human3.6m/annotations/h36m"
cameras_fn = os_join(dn, "cameras.h5")
sub_ids = [1, 5, 6, 7, 8, 9, 11]
cam_ids = [1, 2, 3, 4]
print("load cameras from ", cameras_fn)
cameras = dict()
f_flag = True
with h5py.File(cameras_fn, 'r') as f:
for s_id in sub_ids:
k_sub = f'subject{s_id}'
cameras[k_sub] = dict()
for c_id in cam_ids:
k_cam = f'camera{c_id}'
cam_R = f[join(k_sub, k_cam, "R")][()]
cam_T = f[join(k_sub, k_cam, "T")][()]
cam_c = f[join(k_sub, k_cam, "c")][()]
cam_f = f[join(k_sub, k_cam, "f")][()]
cam_k = f[join(k_sub, k_cam, "k")][()]
cam_p = f[join(k_sub, k_cam, "p")][()]
if f_flag:
print("R", cam_R.shape)
print("T", cam_T.shape)
print("c", cam_c.shape)
print("f", cam_f.shape)
print("k", cam_k.shape)
print("p", cam_p.shape)
f_flag = False
cameras[k_sub][k_cam] = dict(
R=cam_R.tolist(),
T=cam_T.tolist(),
c=cam_c.tolist(),
f=cam_f.tolist(),
k=cam_k.tolist(),
p=cam_p.tolist(),
)
# output_fn = os_join(dn, "cameras.json")
# with open(output_fn, "w") as f:
# json.dump(cameras, f)
# f.close()
# print(json.dumps(cameras))
kp_fn = "D:/datasets/human3.6m/annotations/h36m/S1/MyPoses/3D_positions/Directions_1.h5"
keys = []
with h5py.File(kp_fn, 'r') as f:
f.visit(keys.append)
print(f['3D_positions'])
print(keys) | 30.569231 | 92 | 0.471062 | 282 | 1,987 | 3.08156 | 0.262411 | 0.050633 | 0.040276 | 0.064442 | 0.202532 | 0.163406 | 0 | 0 | 0 | 0 | 0 | 0.023539 | 0.37997 | 1,987 | 65 | 93 | 30.569231 | 0.681818 | 0.072974 | 0 | 0.040816 | 0 | 0.020408 | 0.110566 | 0.063181 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.102041 | 0 | 0.102041 | 0.183673 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce6d74509969f88f865e4bf411094d3399ae6eeb | 3,621 | py | Python | pm4pymdl/objects/xoc/importer/versions/classic.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | 5 | 2021-01-31T22:45:29.000Z | 2022-02-22T14:26:06.000Z | pm4pymdl/objects/xoc/importer/versions/classic.py | Javert899/pm4py-mdl | 4cc875999100f3f1ad60b925a20e40cf52337757 | [
"MIT"
] | 3 | 2021-07-07T15:32:55.000Z | 2021-07-07T16:15:36.000Z | pm4pymdl/objects/xoc/importer/versions/classic.py | dorian1000/pm4py-mdl | 71e0c2425abb183da293a58d31e25e50137c774f | [
"MIT"
] | 9 | 2020-09-23T15:34:11.000Z | 2022-03-17T09:15:40.000Z | from copy import copy
from datetime import datetime
from dateutil import parser
import pandas as pd
import random
def apply(file_path, parameters=None):
"""
Apply the importing of a XOC file
Parameters
------------
file_path
Path to the XOC file
parameters
Import parameters
Returns
------------
dataframe
Dataframe
"""
if parameters is None:
parameters = {}
import_timestamp = parameters["import_timestamp"] if "import_timestamp" in parameters else True
sample_probability = parameters["sample_probability"] if "sample_probability" in parameters else None
classes_to_reference = set(parameters["classes_to_reference"]) if "classes_to_reference" in parameters else None
if classes_to_reference is None:
classes_to_reference = set()
F = open(file_path, "r")
content = F.read()
F.close()
events = content.split("<event>")
stream = []
stream_strings = []
i = 1
considered_events = 0
considered_objects = set()
considered_activities = set()
considered_classes = set()
while i < len(events) - 1:
if sample_probability is not None:
r = random.random()
if r > sample_probability:
i = i + 1
continue
considered_events = considered_events + 1
event_id = events[i].split("\"id\" value=\"")[1].split("\"")[0]
event_activity = events[i].split("\"activity\" value=\"")[1].split("\"")[0]
considered_activities.add(event_activity)
event_timestamp0 = events[i].split("\"timestamp\" value=\"")[1].split("\"")[0].replace(" CET", "")
event_timestamp = None
if import_timestamp:
try:
event_timestamp = parser.parse(event_timestamp0)
            except (ValueError, OverflowError):  # dateutil could not parse the timestamp
pass
if event_timestamp is not None:
event_dictio = {"event_id": event_id, "event_activity": event_activity, "event_timestamp": event_timestamp}
else:
event_dictio = {"event_id": event_id, "event_activity": event_activity}
references = events[i].split("<references>")[1].split("</references>")[0].split("<object>")
referenced_classes = set()
j = 1
while j < len(references):
object_class = references[j].split("\"class\" value=\"")[1].split("\"")[0]
referenced_classes.add(object_class)
j = j + 1
if referenced_classes.issuperset(classes_to_reference):
j = 1
while j < len(references):
this_event_dictio = copy(event_dictio)
object_id = references[j].split("\"id\" value=\"")[1].split("\"")[0]
considered_objects.add(object_id)
object_class = references[j].split("\"class\" value=\"")[1].split("\"")[0]
considered_classes.add(object_class)
this_event_dictio[object_class] = object_id
this_event_dictio_stri = str(this_event_dictio)
if this_event_dictio_stri not in stream_strings:
stream.append(this_event_dictio)
stream_strings.append(this_event_dictio_stri)
j = j + 1
i = i + 1
dataframe = pd.DataFrame.from_dict(stream)
if import_timestamp:
dataframe = dataframe.sort_values(["event_timestamp", "event_id"])
dataframe.type = "exploded"
print("events: ",considered_events,"objects: ",len(considered_objects),"activities: ",len(considered_activities),"classes: ",len(considered_classes))
return dataframe
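# Example usage (hypothetical log path; keeps roughly half of the events):
#   df = apply("event_log.xoc", parameters={"sample_probability": 0.5})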
| 37.329897 | 153 | 0.607843 | 408 | 3,621 | 5.164216 | 0.208333 | 0.052207 | 0.049834 | 0.034172 | 0.171808 | 0.128144 | 0.090176 | 0.090176 | 0.090176 | 0.090176 | 0 | 0.009822 | 0.268986 | 3,621 | 96 | 154 | 37.71875 | 0.786173 | 0.047501 | 0 | 0.164384 | 0 | 0 | 0.118185 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.013699 | false | 0.013699 | 0.109589 | 0 | 0.136986 | 0.013699 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce6e614cd830f867eeb9b99262ef3641a5b9bfd2 | 2,689 | py | Python | DecoratedDecisionTree.py | tjcombs/DecoratedDecisionTree | 55fdb1b17f2bb642f6ba985b512e03eaf67e1b63 | [
"MIT"
] | null | null | null | DecoratedDecisionTree.py | tjcombs/DecoratedDecisionTree | 55fdb1b17f2bb642f6ba985b512e03eaf67e1b63 | [
"MIT"
] | 1 | 2020-10-19T08:38:21.000Z | 2020-10-19T09:00:53.000Z | DecoratedDecisionTree.py | tjcombs/DecoratedDecisionTree | 55fdb1b17f2bb642f6ba985b512e03eaf67e1b63 | [
"MIT"
] | null | null | null | import pandas as pd
from sklearn.base import clone
import warnings
warnings.filterwarnings('ignore')
class DecoratedDecisionTreeRegressor:
def __init__(self, dtr, decorator):
'''
Creates a decorated decision tree regressor. A decision tree is fit
according to the supplied DecisionTreeRegressor. The data on the
leaves of the tree are fit according to a supplied decorator
which is a regression algorithm.
Parameters
----------
dtr : sklearn.tree.DecisionTreeRegressor
Decision tree regressor
decorator : Regressor
Regression algorithm used to fit the data at the leaves of the tree.
'''
self.dtr = dtr
self.decorator = decorator
self.leaf_models = dict()
def fit(self, df_X, y):
'''
Fits the decorated decision tree regressor.
Parameters
----------
df : DataFrame
DataFrame containing the features we want to use for prediction.
y : Series
Values we are trying to predict.
Returns
-------
None.
'''
df_X_copy = df_X.copy()
self.dtr.fit(df_X_copy, y)
leaves = self.dtr.apply(df_X_copy)
# Loop over the leaves and fit the decoration regression algorithm
# and save the result to a dictionary
for leaf in set(leaves):
df_X_leaf = df_X_copy[leaves==leaf]
y_leaf = y[leaves==leaf]
leaf_model = clone(self.decorator)
leaf_model.fit(df_X_leaf, y_leaf)
self.leaf_models[leaf] = leaf_model
def predict(self, df_X):
'''
Parameters
----------
df_X : DataFrame
DataFrame containing the features used to train the model
Returns
-------
Series
A series containing the prediction.
'''
df_X_copy = df_X.copy()
leaves = self.dtr.apply(df_X_copy)
# Say what the ordering is so that we can get the same order back
# when we are done predicting
columns = list(df_X_copy)
df_X_copy['__ordering'] = range(len(df_X_copy))
df_out = pd.DataFrame({})
# Go through the leaves and predict using the model associated to
# the leaf
for leaf in set(leaves):
            df_X_leaf = df_X_copy[leaves==leaf].copy()  # copy so the 'y' assignment below does not write into a view
model = self.leaf_models[leaf]
df_X_leaf['y'] = model.predict(df_X_leaf[columns])
df_out = pd.concat((df_out, df_X_leaf))
df_out = df_out.sort_values('__ordering', ascending=True)
return df_out['y']
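# Example usage (a sketch with hypothetical training data; assumes scikit-learn):
#   from sklearn.tree import DecisionTreeRegressor
#   from sklearn.linear_model import LinearRegression
#   model = DecoratedDecisionTreeRegressor(DecisionTreeRegressor(max_depth=3),
#                                          LinearRegression())
#   model.fit(df_X_train, y_train)
#   y_pred = model.predict(df_X_test)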
| 30.556818 | 80 | 0.58386 | 337 | 2,689 | 4.480712 | 0.305638 | 0.041722 | 0.055629 | 0.023841 | 0.192053 | 0.116556 | 0.088742 | 0.055629 | 0.055629 | 0.055629 | 0 | 0 | 0.334325 | 2,689 | 87 | 81 | 30.908046 | 0.843575 | 0.397917 | 0 | 0.25 | 0 | 0 | 0.020911 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.09375 | false | 0 | 0.09375 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce6f9905b1d28b0ef7c24def1bb8d101f50368d5 | 4,321 | py | Python | test_std.py | aemsenhuber/math-expression-parser | 4c9fe08270e6776494d4e468889065169d63b734 | [
"Apache-2.0"
] | 1 | 2020-10-10T19:02:54.000Z | 2020-10-10T19:02:54.000Z | test_std.py | aemsenhuber/math-expression-parser | 4c9fe08270e6776494d4e468889065169d63b734 | [
"Apache-2.0"
] | null | null | null | test_std.py | aemsenhuber/math-expression-parser | 4c9fe08270e6776494d4e468889065169d63b734 | [
"Apache-2.0"
] | null | null | null | # Test suite for "std" callbacks in MaExPa.
#
# Copyright 2020 Alexandre Emsenhuber
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import math
import unittest
import maexpa
class StdTestCase( unittest.TestCase ):
def setUp( self ):
maexpa.lib( "std" )
def test_var( self ):
tests = [
( "e", math.e ),
( "pi", math.pi ),
( "tau", math.tau ),
]
for expr, comp in tests:
with self.subTest( "Constants", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), float )
self.assertAlmostEqual( res, comp )
def test_no_var( self ):
for text in [ "xi", "lambda", "a" ]:
with self.subTest( "Undefined constants", text = text ):
with self.assertRaises( maexpa.exception.NoVarException ):
maexpa.Expression( text )()
def test_func_builtin_int( self ):
tests = [
( "min(1,2)", 1 ),
( "max(1,2)", 2 ),
( "min(-1,1)", -1 ),
( "max(-1,1)", 1 ),
( "pow(1,2)", 1 ),
( "pow(2,3)", 8 ),
]
for expr, comp in tests:
with self.subTest( "Builting functions on integers", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), int )
self.assertEqual( res, comp )
def test_func_builtin_float( self ):
tests = [
( "min(0.9,1.1)", 0.9 ),
( "max(0.9,1.1)", 1.1 ),
( "pow(2.0,4)", 16. ),
]
for expr, comp in tests:
with self.subTest( "Builting functions on floats", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), float )
self.assertAlmostEqual( res, comp )
def test_func_type_float( self ):
tests = [
( "floor(1.7648)", 1 ),
( "ceil(1.7648)", 2 ),
( "floor(-1.7648)", -2 ),
( "ceil(-1.7648)", -1 ),
]
for expr, comp in tests:
with self.subTest( "Conversion from float to integer", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), int )
self.assertEqual( res, comp )
def test_func_abs_float( self ):
tests = [
( "abs(1.7648)", 1.7648 ),
( "abs(-1.7648)", 1.7648 ),
]
for expr, comp in tests:
with self.subTest( "Absoltue value on float", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), float )
self.assertAlmostEqual( res, comp )
def test_func_explog_float( self ):
tests = [
( "exp(1)", math.e ),
( "log(e)", 1. ),
( "log2(2)", 1. ),
( "log10(10)", 1. ),
]
for expr, comp in tests:
with self.subTest( "Exponential and logarithms on float", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), float )
self.assertAlmostEqual( res, comp )
def test_func_sqrt( self ):
tests = [
( "sqrt(2)", math.sqrt( 2. ) ),
( "sqrt(2.)", math.sqrt( 2. ) ),
( "sqrt(45**2)", 45. ),
]
for expr, comp in tests:
with self.subTest( "Square root", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), float )
self.assertAlmostEqual( res, comp )
def test_func_cbrt( self ):
tests = [
( "cbrt(8.)", 2. ),
( "cbrt(-7)", -7**( 1. / 3. ) ),
( "cbrt(45**3)", 45. ),
]
for expr, comp in tests:
with self.subTest( "Cube root", expr = expr ):
res = maexpa.Expression( expr )()
self.assertIs( type( res ), float )
self.assertAlmostEqual( res, comp )
def test_func_args_num( self ):
for text in [ "min(1)", "ceil(1,2)", "sqrt(9,16)" ]:
with self.subTest( "Passing incorrect number of arguments", text = text ):
with self.assertRaises( maexpa.exception.FuncArgsNumException ):
maexpa.Expression( text )()
def test_func_no_var( self ):
for text in [ "e(1)", "pi(1)", "tau(1)" ]:
with self.subTest( "Using variables as functions", text = text ):
with self.assertRaises( maexpa.exception.NoFuncException ):
maexpa.Expression( text )()
if __name__ == '__main__':
unittest.main()
| 27.877419 | 77 | 0.610507 | 594 | 4,321 | 4.380471 | 0.260943 | 0.043044 | 0.063413 | 0.039969 | 0.517679 | 0.507686 | 0.457725 | 0.408148 | 0.382782 | 0.329746 | 0 | 0.037578 | 0.224022 | 4,321 | 154 | 78 | 28.058442 | 0.738443 | 0.138625 | 0 | 0.373913 | 0 | 0 | 0.152995 | 0 | 0 | 0 | 0 | 0 | 0.165217 | 1 | 0.104348 | false | 0.008696 | 0.026087 | 0 | 0.13913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce7131aee8a8753e9b15cb2b84ad03c227be2d8b | 799 | py | Python | 2021/day6/day6.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | 3 | 2020-12-03T23:20:27.000Z | 2020-12-03T23:20:53.000Z | 2021/day6/day6.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | null | null | null | 2021/day6/day6.py | ChrisCh7/advent-of-code | d6f1dda4a67aae18ac1e15b9eccb3e6e94d705c1 | [
"Unlicense"
] | null | null | null | def part1(days: list[int]):
for _ in range(80):
days = simulate_a_day(days)
print('Part 1:', sum(days))
def part2(days: list[int]):
for _ in range(256):
days = simulate_a_day(days)
print('Part 2:', sum(days))
def simulate_a_day(days: list[int]):
    # days[t] holds the number of fish whose internal timer equals t.
    # Each day every timer decreases by one; fish at 0 reset to 6 and
    # each spawns a new fish with a timer of 8.
    new_days = [0] * 9
    new_days[0] = days[1]
    new_days[1] = days[2]
    new_days[2] = days[3]
    new_days[3] = days[4]
    new_days[4] = days[5]
    new_days[5] = days[6]
    new_days[6] = days[7] + days[0]  # resets join the fish moving down from 7
    new_days[7] = days[8]
    new_days[8] = days[0]  # newborns
    return new_days
if __name__ == '__main__':
with open('in.txt') as file:
timers = [int(n) for n in file.readline().split(',')]
days = [0] * 9
for timer in timers:
days[timer] += 1
part1(days)
part2(days)
| 19.975 | 61 | 0.558198 | 131 | 799 | 3.198473 | 0.320611 | 0.183771 | 0.078759 | 0.114558 | 0.238663 | 0.238663 | 0.138425 | 0 | 0 | 0 | 0 | 0.060137 | 0.271589 | 799 | 39 | 62 | 20.487179 | 0.659794 | 0 | 0 | 0.071429 | 0 | 0 | 0.036295 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.107143 | false | 0 | 0 | 0 | 0.142857 | 0.071429 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce71e894a42c77dc391c169ddbcf393c5afa8cb5 | 2,131 | py | Python | monty_hall.py | evancolvin/Small_Scripts | 701d13ba8902a89daffe2284607d4423eb1af2c5 | [
"MIT"
] | null | null | null | monty_hall.py | evancolvin/Small_Scripts | 701d13ba8902a89daffe2284607d4423eb1af2c5 | [
"MIT"
] | null | null | null | monty_hall.py | evancolvin/Small_Scripts | 701d13ba8902a89daffe2284607d4423eb1af2c5 | [
"MIT"
] | null | null | null | import random
from __future__ import division
import matplotlib.pyplot as plt
def monty_hall(switch = True):
doors = [1, 2, 3]
guess = random.choice(doors)
car = random.choice(doors)
doors.remove(car)
    # reveal a door that is neither the car nor the player's guess
if guess == car: # won't matter which to reveal first
reveal = random.choice(doors)
else:
doors.remove(guess)
reveal = doors[0]
# figuring out if they won the car
if switch == False:
if car == guess:
win = 'car'
else:
win = 'goat'
else: # switch == True
doors = [1, 2, 3]
doors.remove(guess)
doors.remove(reveal)
if doors[0] == car:
win = 'car'
else:
win = 'goat'
return win
# Running the simulation
def monty_simulation(iterations, switch = True):
cars, goats = 0, 0
for i in range(iterations):
prize = monty_hall(switch = switch)
if prize == 'car':
cars += 1
else:
goats += 1
return cars/iterations, goats/iterations
def plot_monty(iterations):
# Plots the proportion of success for each iteration
outcomes_with_switch = [0]
outcomes_no_switch = [0]
cars = 0
# Plotting outcome with switch
for i in range(1, iterations+1):
if monty_hall() == 'car':
cars += 1
outcomes_with_switch.append(cars/i)
else:
# Need to add something to make the proportion go down
# when you don't get a car
outcomes_with_switch.append(outcomes_with_switch[i-1]*(i-1)/i)
cars = 0
# Plotting outcomes without switch
for i in range(1, iterations+1):
if monty_hall(switch = False) == 'car':
cars += 1
outcomes_no_switch.append(cars/i)
else:
outcomes_no_switch.append(outcomes_no_switch[i-1]*(i-1)/i)
plt.plot(outcomes_with_switch, color = 'c')
plt.plot(outcomes_no_switch, color = 'm')
plt.legend(['When You Switch', "When You Don't"])
plt.show()
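if __name__ == '__main__':
    # Minimal demo (iteration counts are arbitrary): print the (car, goat)
    # win proportions with and without switching, then plot their convergence.
    print('Switching:', monty_simulation(10000, switch=True))
    print('Staying:  ', monty_simulation(10000, switch=False))
    plot_monty(1000)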
| 28.413333 | 74 | 0.582356 | 286 | 2,131 | 4.234266 | 0.300699 | 0.049546 | 0.074319 | 0.02725 | 0.176713 | 0.113955 | 0.066061 | 0.066061 | 0.066061 | 0.066061 | 0 | 0.020506 | 0.313468 | 2,131 | 74 | 75 | 28.797297 | 0.807245 | 0.161427 | 0 | 0.37931 | 0 | 0 | 0.030405 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.051724 | false | 0 | 0.051724 | 0 | 0.137931 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce7333eb89ff938bcaff3dfa59cddb712a11222f | 1,038 | py | Python | UNO_Microservice/uno_dealer.py | narayanansriram/SE1-Microservices | 13c54acfba60eaa17ff4101a4f5c4088cb6c4c49 | [
"MIT"
] | null | null | null | UNO_Microservice/uno_dealer.py | narayanansriram/SE1-Microservices | 13c54acfba60eaa17ff4101a4f5c4088cb6c4c49 | [
"MIT"
] | null | null | null | UNO_Microservice/uno_dealer.py | narayanansriram/SE1-Microservices | 13c54acfba60eaa17ff4101a4f5c4088cb6c4c49 | [
"MIT"
] | null | null | null | # Microservice for UNO card dealing
# Author: Sriram Narayanan
import random
import json
# readme.txt supplies the configuration: line 1 is the number of players,
# line 2 is the number of cards dealt to each player
with open('readme.txt', 'r') as f:
    lines = f.readlines()
uno_deck = []
players = int(lines[0].rstrip('\n'))
num_cards = int(lines[1].rstrip('\n'))
# num_cards = 7
# print(players,num_cards)
uno_deck_colors = ['Red','Yellow','Green','Blue']
uno_deck_types = [0,1,1,2,2,3,3,4,4,5,5,6,6,7,7,8,8,9,9,"Skip","Skip","Draw Two","Draw Two","Reverse","Reverse"]
uno_wild_cards = [(None,"Wild")]*4 + [(None,"Wild Draw Four")]*4
for color in uno_deck_colors:
    for card_type in uno_deck_types:  # 'card_type' avoids shadowing the built-in 'type'
        card = (color, card_type)
        uno_deck.append(card)
uno_deck += uno_wild_cards
# print(len(uno_deck))
random.shuffle(uno_deck)
dealing = {}
deck_pointer = 0
for i in range(players):
dealing[i+1] = []
for _ in range(num_cards):
dealing[i+1].append(uno_deck[deck_pointer])
deck_pointer+=1
dealing["rest"] = uno_deck[deck_pointer:]
output = json.dumps(dealing)
jsonWrite = open("output.json","w")
jsonWrite.write(output)
jsonWrite.close()
| 28.833333 | 112 | 0.675337 | 172 | 1,038 | 3.912791 | 0.389535 | 0.114413 | 0.029718 | 0.044577 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031425 | 0.141619 | 1,038 | 35 | 113 | 29.657143 | 0.723906 | 0.11368 | 0 | 0 | 0 | 0 | 0.11488 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.071429 | 0 | 0.071429 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce73cef03d811e2a4d671f4170d4576417bddc40 | 7,406 | py | Python | src/zipline/pipeline/loaders/equity_pricing_loader.py | daground/zipline-reloaded | 0aaf5410f58cf950fb95e06e406fda76fde963de | [
"Apache-2.0"
] | null | null | null | src/zipline/pipeline/loaders/equity_pricing_loader.py | daground/zipline-reloaded | 0aaf5410f58cf950fb95e06e406fda76fde963de | [
"Apache-2.0"
] | null | null | null | src/zipline/pipeline/loaders/equity_pricing_loader.py | daground/zipline-reloaded | 0aaf5410f58cf950fb95e06e406fda76fde963de | [
"Apache-2.0"
] | null | null | null | # Copyright 2015 Quantopian, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import defaultdict
from interface import implements
from numpy import iinfo, uint32, multiply
from zipline.data.fx import ExplodingFXRateReader
from zipline.lib.adjusted_array import AdjustedArray
from zipline.utils.numpy_utils import repeat_first_axis
from .base import PipelineLoader
from .utils import shift_dates
from ..data.equity_pricing import EquityPricing
UINT32_MAX = iinfo(uint32).max
class EquityPricingLoader(implements(PipelineLoader)):
"""A PipelineLoader for loading daily OHLCV data.
Parameters
----------
raw_price_reader : zipline.data.session_bars.SessionBarReader
Reader providing raw prices.
adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader
Reader providing price/volume adjustments.
fx_reader : zipline.data.fx.FXRateReader
Reader providing currency conversions.
"""
def __init__(self, raw_price_reader, adjustments_reader, fx_reader):
self.raw_price_reader = raw_price_reader
self.adjustments_reader = adjustments_reader
self.fx_reader = fx_reader
@classmethod
def without_fx(cls, raw_price_reader, adjustments_reader):
"""
Construct an EquityPricingLoader without support for fx rates.
The returned loader will raise an error if requested to load
currency-converted columns.
Parameters
----------
raw_price_reader : zipline.data.session_bars.SessionBarReader
Reader providing raw prices.
adjustments_reader : zipline.data.adjustments.SQLiteAdjustmentReader
Reader providing price/volume adjustments.
Returns
-------
loader : EquityPricingLoader
A loader that can only provide currency-naive data.
"""
return cls(
raw_price_reader=raw_price_reader,
adjustments_reader=adjustments_reader,
fx_reader=ExplodingFXRateReader(),
)
def load_adjusted_array(self, domain, columns, dates, sids, mask):
# load_adjusted_array is called with dates on which the user's algo
# will be shown data, which means we need to return the data that would
# be known at the **start** of each date. We assume that the latest
# data known on day N is the data from day (N - 1), so we shift all
# query dates back by a trading session.
sessions = domain.all_sessions()
shifted_dates = shift_dates(sessions, dates[0], dates[-1], shift=1)
ohlcv_cols, currency_cols = self._split_column_types(columns)
del columns # From here on we should use ohlcv_cols or currency_cols.
ohlcv_colnames = [c.name for c in ohlcv_cols]
raw_ohlcv_arrays = self.raw_price_reader.load_raw_arrays(
ohlcv_colnames,
shifted_dates[0],
shifted_dates[-1],
sids,
)
# Currency convert raw_arrays in place if necessary. We use shifted
# dates to load currency conversion rates to make them line up with
# dates used to fetch prices.
self._inplace_currency_convert(
ohlcv_cols,
raw_ohlcv_arrays,
shifted_dates,
sids,
)
adjustments = self.adjustments_reader.load_pricing_adjustments(
ohlcv_colnames,
dates,
sids,
)
out = {}
for c, c_raw, c_adjs in zip(ohlcv_cols, raw_ohlcv_arrays, adjustments):
out[c] = AdjustedArray(
c_raw.astype(c.dtype),
c_adjs,
c.missing_value,
)
for c in currency_cols:
codes_1d = self.raw_price_reader.currency_codes(sids)
codes = repeat_first_axis(codes_1d, len(dates))
out[c] = AdjustedArray(
codes,
adjustments={},
missing_value=None,
)
return out
@property
def currency_aware(self):
# Tell the pipeline engine that this loader supports currency
# conversion if we have a non-dummy fx rates reader.
return not isinstance(self.fx_reader, ExplodingFXRateReader)
def _inplace_currency_convert(self, columns, arrays, dates, sids):
"""
Currency convert raw data loaded for ``column``.
Parameters
----------
columns : list[zipline.pipeline.data.BoundColumn]
List of columns whose raw data has been loaded.
arrays : list[np.array]
List of arrays, parallel to ``columns`` containing data for the
column.
dates : pd.DatetimeIndex
Labels for rows of ``arrays``. These are the dates that should
be used to fetch fx rates for conversion.
sids : np.array[int64]
Labels for columns of ``arrays``.
Returns
-------
None
Side Effects
------------
Modifies ``arrays`` in place by applying currency conversions.
"""
# Group columns by currency conversion spec.
by_spec = defaultdict(list)
for column, array in zip(columns, arrays):
by_spec[column.currency_conversion].append(array)
# Nothing to do for terms with no currency conversion.
by_spec.pop(None, None)
if not by_spec:
return
fx_reader = self.fx_reader
base_currencies = self.raw_price_reader.currency_codes(sids)
# Columns with the same conversion spec will use the same multipliers.
for spec, arrays in by_spec.items():
rates = fx_reader.get_rates(
rate=spec.field,
quote=spec.currency.code,
bases=base_currencies,
dts=dates,
)
for arr in arrays:
multiply(arr, rates, out=arr)
def _split_column_types(self, columns):
"""Split out currency columns from OHLCV columns.
Parameters
----------
columns : list[zipline.pipeline.data.BoundColumn]
Columns to be loaded by ``load_adjusted_array``.
Returns
-------
ohlcv_columns : list[zipline.pipeline.data.BoundColumn]
Price and volume columns from ``columns``.
currency_columns : list[zipline.pipeline.data.BoundColumn]
Currency code column from ``columns``, if present.
"""
currency_name = EquityPricing.currency.name
ohlcv = []
currency = []
for c in columns:
if c.name == currency_name:
currency.append(c)
else:
ohlcv.append(c)
return ohlcv, currency
# Backwards compat alias.
USEquityPricingLoader = EquityPricingLoader
CryptoPricingLoader = EquityPricingLoader
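# Example construction without FX support (a sketch; the readers are assumed
# to be concrete zipline.data implementations, e.g. a daily session bar reader
# and a SQLiteAdjustmentReader):
#   loader = EquityPricingLoader.without_fx(raw_price_reader, adjustments_reader)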
| 34.607477 | 79 | 0.636241 | 861 | 7,406 | 5.326365 | 0.299652 | 0.019189 | 0.03358 | 0.019625 | 0.187309 | 0.147841 | 0.11775 | 0.080244 | 0.080244 | 0.080244 | 0 | 0.004571 | 0.29098 | 7,406 | 213 | 80 | 34.769953 | 0.868787 | 0.435593 | 0 | 0.074468 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.06383 | false | 0 | 0.095745 | 0.010638 | 0.223404 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce758c5c0f67f99eef4740c1f94e3f94263e752f | 3,693 | py | Python | spot-ingest/worker.py | yellingviv/incubator-spot | b97128edc645be45a09fccdc449cae2fd2225681 | [
"Apache-2.0"
] | 365 | 2016-09-27T22:51:20.000Z | 2022-03-16T07:23:00.000Z | spot-ingest/worker.py | yellingviv/incubator-spot | b97128edc645be45a09fccdc449cae2fd2225681 | [
"Apache-2.0"
] | 155 | 2016-12-13T16:13:27.000Z | 2020-07-13T03:33:29.000Z | spot-ingest/worker.py | yellingviv/incubator-spot | b97128edc645be45a09fccdc449cae2fd2225681 | [
"Apache-2.0"
] | 241 | 2016-11-07T06:07:05.000Z | 2022-01-07T07:40:10.000Z | #!/bin/env python
#
# Licensed to the Apache Software Foundation (ASF) under one or more
# contributor license agreements. See the NOTICE file distributed with
# this work for additional information regarding copyright ownership.
# The ASF licenses this file to You under the Apache License, Version 2.0
# (the "License"); you may not use this file except in compliance with
# the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import argparse
import os
import json
import sys
from common.utils import Util
from common.kerberos import Kerberos
from common.kafka_client import KafkaConsumer
import common.configurator as Config
SCRIPT_PATH = os.path.dirname(os.path.abspath(__file__))
CONF_FILE = "{0}/ingest_conf.json".format(SCRIPT_PATH)
WORKER_CONF = json.loads(open(CONF_FILE).read())
def main():
# input parameters
parser = argparse.ArgumentParser(description="Worker Ingest Framework")
parser.add_argument('-t', '--type', dest='type', required=True,
help='Type of data that will be ingested (Pipeline Configuration)',
metavar='')
parser.add_argument('-i', '--id', dest='id', required=True,
help='Worker Id, this is needed to sync Kafka and Ingest framework (Partition Number)',
metavar='')
parser.add_argument('-top', '--topic', dest='topic', required=True,
help='Topic to read from.', metavar="")
parser.add_argument('-p', '--processingParallelism', dest='processes',
required=False, help='Processing Parallelism', metavar="")
args = parser.parse_args()
# start worker based on the type.
start_worker(args.type, args.topic, args.id, args.processes)
def start_worker(type, topic, id, processes=None):
logger = Util.get_logger("SPOT.INGEST.WORKER")
# validate the given configuration exists in ingest_conf.json.
    if type not in WORKER_CONF["pipelines"]:
logger.error("'{0}' type is not a valid configuration.".format(type))
sys.exit(1)
# validate the type is a valid module.
if not Util.validate_data_source(WORKER_CONF["pipelines"][type]["type"]):
logger.error("The provided data source {0} is not valid".format(type))
sys.exit(1)
# validate if kerberos authentication is required.
if Config.kerberos_enabled():
kb = Kerberos()
kb.authenticate()
# create a worker instance based on the data source type.
module = __import__("pipelines.{0}.worker".format(WORKER_CONF["pipelines"][type]["type"]),
fromlist=['Worker'])
# kafka server info.
logger.info("Initializing kafka instance")
k_server = WORKER_CONF["kafka"]['kafka_server']
k_port = WORKER_CONF["kafka"]['kafka_port']
# required zookeeper info.
zk_server = WORKER_CONF["kafka"]['zookeper_server']
zk_port = WORKER_CONF["kafka"]['zookeper_port']
topic = topic
# create kafka consumer.
kafka_consumer = KafkaConsumer(topic, k_server, k_port, zk_server, zk_port, id)
# start worker.
db_name = WORKER_CONF['dbname']
app_path = WORKER_CONF['hdfs_app_path']
ingest_worker = module.Worker(db_name, app_path, kafka_consumer, type, processes)
ingest_worker.start()
if __name__ == '__main__':
main()
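# Example invocation (topic and worker id are hypothetical; valid --type values
# are the pipeline names defined in ingest_conf.json):
#   python worker.py --type flow --topic SPOT-INGEST-TEST-TOPIC --id 0 -p 4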
| 37.683673 | 111 | 0.686975 | 486 | 3,693 | 5.084362 | 0.368313 | 0.040469 | 0.027519 | 0.029138 | 0.042898 | 0.021044 | 0 | 0 | 0 | 0 | 0 | 0.003396 | 0.202545 | 3,693 | 97 | 112 | 38.072165 | 0.835654 | 0.298132 | 0 | 0.078431 | 0 | 0 | 0.224561 | 0.008967 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039216 | false | 0 | 0.176471 | 0 | 0.215686 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce761a0faee4e9021f4b0a20f04705b9c54ec6fa | 4,498 | py | Python | irispreppy/psf/IRIS_SG_deconvolve.py | OfAaron3/irispreppy | a826c6cffa4d7ac76f28208dc71befc8601424d2 | [
"MIT"
] | 3 | 2021-12-16T17:27:42.000Z | 2021-12-22T23:47:25.000Z | irispreppy/psf/IRIS_SG_deconvolve.py | OfAaron3/irispreppy | a826c6cffa4d7ac76f28208dc71befc8601424d2 | [
"MIT"
] | null | null | null | irispreppy/psf/IRIS_SG_deconvolve.py | OfAaron3/irispreppy | a826c6cffa4d7ac76f28208dc71befc8601424d2 | [
"MIT"
] | null | null | null | import numpy as np
import scipy.fft as fft
def IRIS_SG_deconvolve(data_in, psf,
iterations=10,
fft_div=False):
    '''
    Graham S. Kerr
    July 2020

    NAME:     IRIS_SG_Deconvolve.py

    PURPOSE:  Deconvolves IRIS SG data using the PSFs from Courrier et al. 2018.

    INPUTS:   data_in -- A 2D IRIS SG array [ypos, wavelength].
              psf -- The appropriate PSF.
                     These are not currently in iris_lmsalpy, so I have just
                     saved the IDL versions and restore them in the notebook
                     before I call this function.
              iterations -- The number of Richardson-Lucy iterations to run through.
                            Default = 10
              fft_div -- Set to skip the iterations and instead deconvolve by
                         division in Fourier space.

    NOTES:    Based on iris_sg_deconvolve.pro by Hans Courrier, but not all the
              functionality is included here yet.
              There are probably more clever ways to code this -- I'm fairly new
              to Python.

    To Do:    Add error statements
    '''
# Remove negative values
dcvim = data_in.copy()
dcvim[np.where(dcvim<0)] = 0
data_in_zr = dcvim
if fft_div == True:
dcvim = FFT_conv_1D(data_in,psf,div=True)
else:
for ind in range(1,iterations+1):
#print('iteration = %3d' %(ind))
step1 = data_in_zr/(FFT_conv_1D(dcvim,psf,rev_psf=False,div=False))
#print(np.nanmax(step1[265,:]))
step2 = FFT_conv_1D(step1,psf, rev_psf=True)
dcvim = dcvim * step2
return dcvim
def FFT_conv_1D(datain, psfin, div = False, rev_psf=False):
    '''
    Graham Kerr
    July 2020

    NAME:     FFT_conv_1D

    PURPOSE:  Function to do FFT convolution in the y-direction of the input
              data (first dimension). This way we can pass the 1D PSF.

    INPUTS:   datain -- a 2D data array [nominally, slit pos vs wavelength].
              psfin -- the PSF to be applied in the y-direction.
              (imsize, the dimensions of the input data, and psflen, the length
              of the PSF, are computed internally.)

    KEYWORDS: div -- Set to True to divide in Fourier space.
                     Default is False, so multiply in Fourier space.
              rev_psf -- Set to reverse the 1D input PSF.

    OUTPUTS:  dataout -- the input data convolved with the PSF.

    NOTES:    Pretty much copied exactly from Hans Courrier's IDL version in the
              SSW IRIS software tree, as part of iris_sg_deconvolve.pro.
              Can probably be written in a much better way more suitable for
              Python.
    '''
# length of input psf
psflen = len(psfin)
# dimensions of input data
imsize = datain.shape
# Get difference of image size and psf length
ydiff = imsize[0]-psflen
# Cut the PSF if it is too long
if ydiff <= 0:
rs = int(np.abs(ydiff)/2)
if np.abs(ydiff) % 2 == 1:
pin = psfin[rs+1:psflen-rs] # odd ydiff
else:
pin = psfin[rs:psflen-rs] # even ydiff
# renormalize PSF
pin = pin/np.sum(pin)
# Pad the PSF if it is too short
if ydiff > 0:
rs = int(ydiff/2)
padl = np.zeros(rs,dtype=float)
if ydiff % 2 == 1:
padr = np.zeros(rs+1,dtype=float)
else:
padr = np.zeros(rs,dtype=float)
pin = np.concatenate((padl,psfin,padr))
# Replicate the PSF over wavelength array also
pin_full = np.tile(pin,[imsize[1],1])
pin_full = np.transpose(pin_full)
# Shift PSF center to zero to center output
pin_full = np.roll(pin_full, (int(-imsize[0]/2),0),axis=0)
# Reverse the PSF if needed
if rev_psf == True:
pin_full = np.flip(pin_full,axis=0)
if psflen % 2 == 0:
pin_full = np.roll(pin_full,(1,0),axis=0)
# Perform the FFT
fpsf = fft.fft(pin_full,axis=0)
datain=datain.astype(np.float64)
fdatain = fft.fft(datain,axis=0)
    # Multiply (or divide) the data by the PSF in Fourier space, then transform back to the spatial domain
if div == False:
dataout = fft.ifft((fdatain*fpsf),axis=0).real
else:
dataout = fft.ifft((fdatain/fpsf),axis=0).real
return dataout
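# Example usage (a minimal sketch with synthetic data; a real PSF would come
# from the Courrier et al. 2018 IDL save files mentioned above):
#   import numpy as np
#   sg_data = np.random.rand(512, 400)            # [slit position, wavelength]
#   psf = np.exp(-0.5 * np.linspace(-5, 5, 51)**2)
#   psf /= psf.sum()                              # the PSF should sum to 1
#   deconv = IRIS_SG_deconvolve(sg_data, psf, iterations=10)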
| 29.019355 | 92 | 0.565585 | 622 | 4,498 | 4.017685 | 0.360129 | 0.028011 | 0.018007 | 0.012805 | 0.080832 | 0.055222 | 0.027211 | 0.027211 | 0 | 0 | 0 | 0.023135 | 0.356158 | 4,498 | 154 | 93 | 29.207792 | 0.839779 | 0.509782 | 0 | 0.08 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.04 | false | 0 | 0.04 | 0 | 0.12 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce775e76a5d3b8c616795f47295be062fc4cf134 | 936 | py | Python | setup.py | tcoulvert/qamlz | 2e3c4b4fd3a5c7665ad99b19c995d0da50000f8a | [
"MIT"
] | null | null | null | setup.py | tcoulvert/qamlz | 2e3c4b4fd3a5c7665ad99b19c995d0da50000f8a | [
"MIT"
] | null | null | null | setup.py | tcoulvert/qamlz | 2e3c4b4fd3a5c7665ad99b19c995d0da50000f8a | [
"MIT"
] | null | null | null | from setuptools import setup, find_packages
with open("README.md", "r") as fh:
long_description = fh.read()
setup(
name="qamlz",
version="1.2.0",
description="Binary Classifier trained with D-Wave's Quantum Annealers.",
packages=find_packages(include=["qamlz"]),
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: MIT License",
"Operating System :: OS Independent",
],
long_description=long_description,
long_description_content_type="text/markdown",
install_requires=[
"numpy >= 1.20.3",
"scikit_learn >= 1.0.1",
"scipy >= 1.7.1",
"dwave-ocean-sdk >= 4.2.0",
],
extras_require={
"dev": [
"pytest >= 3.7",
"check-manifest >= 0.47",
],
},
url="https://github.com/tcoulvert/qaml-z",
author="Thomas Sievert",
author_email="tcsievert@gmail.com",
)
| 27.529412 | 77 | 0.58547 | 109 | 936 | 4.917431 | 0.733945 | 0.11194 | 0.070896 | 0.11194 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.031519 | 0.254274 | 936 | 33 | 78 | 28.363636 | 0.73639 | 0 | 0 | 0.096774 | 0 | 0 | 0.409188 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.032258 | 0 | 0.032258 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce80d78bfda2c8329cb88d7393a4292a731adff7 | 5,948 | py | Python | polygon/rest/aggs.py | Polygon-io/client-python | beddb8cbf9e77effa52c40878ab5aefa5f8bef85 | [
"MIT"
] | 1 | 2019-11-19T20:56:27.000Z | 2019-11-19T20:56:27.000Z | polygon/rest/aggs.py | Polygon-io/client-python | beddb8cbf9e77effa52c40878ab5aefa5f8bef85 | [
"MIT"
] | null | null | null | polygon/rest/aggs.py | Polygon-io/client-python | beddb8cbf9e77effa52c40878ab5aefa5f8bef85 | [
"MIT"
] | null | null | null | from .base import BaseClient
from typing import Optional, Any, Dict, List, Union
from .models import Agg, GroupedDailyAgg, DailyOpenCloseAgg, PreviousCloseAgg, Sort
from urllib3 import HTTPResponse
from datetime import datetime, date
class AggsClient(BaseClient):
def get_aggs(
self,
ticker: str,
multiplier: int,
timespan: str,
# "from" is a keyword in python https://www.w3schools.com/python/python_ref_keywords.asp
from_: Union[str, int, datetime, date],
to: Union[str, int, datetime, date],
adjusted: Optional[bool] = None,
sort: Optional[Union[str, Sort]] = None,
limit: Optional[int] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[List[Agg], HTTPResponse]:
"""
Get aggregate bars for a ticker over a given date range in custom time window sizes.
:param ticker: The ticker symbol.
:param multiplier: The size of the timespan multiplier.
:param timespan: The size of the time window.
        :param from_: The start of the aggregate time window as YYYY-MM-DD, a date, Unix MS Timestamp, or a datetime.
:param to: The end of the aggregate time window as YYYY-MM-DD, a date, Unix MS Timestamp, or a datetime.
:param adjusted: Whether or not the results are adjusted for splits. By default, results are adjusted. Set this to false to get results that are NOT adjusted for splits.
:param sort: Sort the results by timestamp. asc will return results in ascending order (oldest at the top), desc will return results in descending order (newest at the top).The end of the aggregate time window.
:param limit: Limits the number of base aggregates queried to create the aggregate results. Max 50000 and Default 5000. Read more about how limit is used to calculate aggregate results in our article on Aggregate Data API Improvements.
:param params: Any additional query params
:param raw: Return raw object instead of results object
:return: List of aggregates
"""
if isinstance(from_, datetime):
from_ = int(from_.timestamp() * self.time_mult("millis"))
if isinstance(to, datetime):
to = int(to.timestamp() * self.time_mult("millis"))
url = f"/v2/aggs/ticker/{ticker}/range/{multiplier}/{timespan}/{from_}/{to}"
return self._get(
path=url,
params=self._get_params(self.get_aggs, locals()),
result_key="results",
deserializer=Agg.from_dict,
raw=raw,
)
# TODO: next breaking change release move "market_type" to be 2nd mandatory
# param
def get_grouped_daily_aggs(
self,
date: str,
adjusted: Optional[bool] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
market_type: str = "stocks",
) -> Union[GroupedDailyAgg, HTTPResponse]:
"""
Get the daily open, high, low, and close (OHLC) for the entire market.
:param date: The beginning date for the aggregate window.
:param adjusted: Whether or not the results are adjusted for splits. By default, results are adjusted. Set this to false to get results that are NOT adjusted for splits.
:param params: Any additional query params
:param raw: Return raw object instead of results object
:return: List of grouped daily aggregates
"""
url = f"/v2/aggs/grouped/locale/us/market/{market_type}/{date}"
return self._get(
path=url,
params=self._get_params(self.get_grouped_daily_aggs, locals()),
result_key="results",
deserializer=GroupedDailyAgg.from_dict,
raw=raw,
)
def get_daily_open_close_agg(
self,
ticker: str,
date: str,
adjusted: Optional[bool] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[DailyOpenCloseAgg, HTTPResponse]:
"""
Get the open, close and afterhours prices of a stock symbol on a certain date.
:param ticker: The exchange symbol that this item is traded under.
:param date: The beginning date for the aggregate window.
:param adjusted: Whether or not the results are adjusted for splits. By default, results are adjusted. Set this to false to get results that are NOT adjusted for splits.
:param params: Any additional query params
:param raw: Return raw object instead of results object
:return: Daily open close aggregate
"""
url = f"/v1/open-close/{ticker}/{date}"
return self._get(
path=url,
params=self._get_params(self.get_daily_open_close_agg, locals()),
deserializer=DailyOpenCloseAgg.from_dict,
raw=raw,
)
def get_previous_close_agg(
self,
ticker: str,
adjusted: Optional[bool] = None,
params: Optional[Dict[str, Any]] = None,
raw: bool = False,
) -> Union[PreviousCloseAgg, HTTPResponse]:
"""
Get the previous day's open, high, low, and close (OHLC) for the specified stock ticker.
:param ticker: The ticker symbol of the stock/equity.
:param adjusted: Whether or not the results are adjusted for splits. By default, results are adjusted. Set this to false to get results that are NOT adjusted for splits.
:param params: Any additional query params
:param raw: Return raw object instead of results object
:return: Previous close aggregate
"""
url = f"/v2/aggs/ticker/{ticker}/prev"
return self._get(
path=url,
params=self._get_params(self.get_previous_close_agg, locals()),
result_key="results",
deserializer=PreviousCloseAgg.from_dict,
raw=raw,
)
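# Example usage (API key hypothetical; AggsClient is normally reached through
# polygon.RESTClient, which mixes these methods in):
#   from polygon import RESTClient
#   client = RESTClient("YOUR_API_KEY")
#   aggs = client.get_aggs("AAPL", 1, "day", "2023-01-09", "2023-02-10")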
| 44.38806 | 243 | 0.642905 | 770 | 5,948 | 4.896104 | 0.223377 | 0.022281 | 0.038196 | 0.025464 | 0.555172 | 0.488594 | 0.446154 | 0.436605 | 0.42122 | 0.42122 | 0 | 0.003702 | 0.273369 | 5,948 | 133 | 244 | 44.721805 | 0.868579 | 0.461668 | 0 | 0.467532 | 0 | 0 | 0.075752 | 0.062262 | 0 | 0 | 0 | 0.007519 | 0 | 1 | 0.051948 | false | 0 | 0.064935 | 0 | 0.181818 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce8313674d364b186455a826acc4a230d1804441 | 588 | py | Python | euporie_binder/__init__.py | joouha/euporie-binder | 137945d91d0c00524a41db3cd5b6929aaf22fb83 | [
"MIT"
] | null | null | null | euporie_binder/__init__.py | joouha/euporie-binder | 137945d91d0c00524a41db3cd5b6929aaf22fb83 | [
"MIT"
] | null | null | null | euporie_binder/__init__.py | joouha/euporie-binder | 137945d91d0c00524a41db3cd5b6929aaf22fb83 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
__version__ = "0.1.0"
from .app import EuporieBinderApp
# This is needed for jupyter server to know how to load the extension
def _jupyter_server_extension_points():
return [{"module": __name__, "app": EuporieBinderApp}]
# This is required for classic notebook compatibility
def load_jupyter_server_extension(serverapp):
extension = EuporieBinderApp()
extension.serverapp = serverapp
extension.load_config_file()
extension.update_config(serverapp.config)
extension.parse_command_line(serverapp.extra_args)
extension.initialize()
| 26.727273 | 69 | 0.760204 | 70 | 588 | 6.085714 | 0.585714 | 0.091549 | 0.103286 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.008 | 0.14966 | 588 | 21 | 70 | 28 | 0.844 | 0.239796 | 0 | 0 | 0 | 0 | 0.031603 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.181818 | false | 0 | 0.090909 | 0.090909 | 0.363636 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce84e09068565bb6e282c271c834ef1191dab684 | 2,080 | py | Python | Main.py | unnati914/summer-of-bitcoin-challenge | a400f98db654bdbf80c4d357e43f09f2235e7605 | [
"Apache-2.0"
] | null | null | null | Main.py | unnati914/summer-of-bitcoin-challenge | a400f98db654bdbf80c4d357e43f09f2235e7605 | [
"Apache-2.0"
] | null | null | null | Main.py | unnati914/summer-of-bitcoin-challenge | a400f98db654bdbf80c4d357e43f09f2235e7605 | [
"Apache-2.0"
] | 1 | 2022-03-30T18:16:06.000Z | 2022-03-30T18:16:06.000Z | # Importing pandas module [pd alias]
import pandas as pd
# Importing mempool.csv file into a Dataframe using pandas
df = pd.read_csv("mempool.csv")
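# Columns are accessed positionally below: x[0] = tx_id, x[1] = fee,
# x[2] = weight, and x[3] = a ';'-separated list of parent tx_ids
# (NaN when a transaction has no parents)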
print(df.head())
# number of rows and columns
print(df.shape)
# check the data types
print(df.dtypes)
# concise summary of your DataFrame
df.info()  # info() must be called with parentheses; it prints the summary itself
#The describe() method is used for computing some statistical calculations
print(df.describe())
# Takes 3 parameters: the dataframe and the two feature columns to sort on.
# Sorts the dataframe on both columns at once to maximise the fee and minimise the weight.
def sort_tra(df, maxfee, minwght):
df = df.sort_values([maxfee, minwght], ascending=[False, True]).reset_index(drop=True)
return df
def check_weight(x):
    # True while the candidate transaction still fits under the block weight limit
    return min_weight + x['weight'] <= highest_weight
def check_list(x):
    # True if this tx_id is already included in the block
    return str(x) in final_set_of_txid
def check_parent(x):
if str(x[3]) != "nan":
parent_list = str(x[3]).split(";")
for i in parent_list:
if(check_list(i)):
continue
else:
txnindex = df[df['tx_id'] == i].index.item()
k = df.loc[txnindex]
check_add_txn(k)
def add_to_block(x):
global min_weight
txniD = x[0]
weight = x[2]
min_weight += weight
    final_set_of_txid.append(txniD)
def check_add_txn(x):
if(check_weight(x)):
        if(not check_list(x[0])):  # x[0] holds the tx_id
check_parent(x)
if(check_weight(x)):
add_to_block(x)
def main(df):
sorted_transactions = sort_tra(df, "fee", "weight")
for i in range(len(sorted_transactions)):
txnVar = sorted_transactions.loc[i]
check_add_txn(txnVar)
def write_to_file(fin_list):
    with open("block.txt", "a") as file:
        for i in fin_list:
            file.write(str(i) + '\n')
highest_weight = 4000000
min_weight = 0
final_set_of_txid = []
data = df
main(data)
write_to_file(final_set_of_txid)
| 20.8 | 121 | 0.636058 | 307 | 2,080 | 4.13355 | 0.374593 | 0.014184 | 0.031521 | 0.033097 | 0.07565 | 0.052009 | 0.052009 | 0 | 0 | 0 | 0 | 0.008382 | 0.254327 | 2,080 | 99 | 122 | 21.010101 | 0.8098 | 0.212019 | 0 | 0.155172 | 0 | 0 | 0.029012 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137931 | false | 0 | 0.017241 | 0 | 0.241379 | 0.086207 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce850f80aa33d04e110aec056ec3562e482d1b84 | 3,347 | py | Python | util/corpus2csv.py | frserras/verbert-categorization | e0638c1128d04b26014f5f2bf73768dbaa7e4f8f | [
"Apache-2.0"
] | 1 | 2021-11-10T03:34:28.000Z | 2021-11-10T03:34:28.000Z | util/corpus2csv.py | frserras/verbert-categorization | e0638c1128d04b26014f5f2bf73768dbaa7e4f8f | [
"Apache-2.0"
] | null | null | null | util/corpus2csv.py | frserras/verbert-categorization | e0638c1128d04b26014f5f2bf73768dbaa7e4f8f | [
"Apache-2.0"
] | null | null | null | # -*- coding: utf-8 -*-
# This program was made by Felipe Serras as part of his Master's degree,
# under the guidance of Prof. Marcelo Finger. All rights reserved.
# We tried to make explicit all our references and all the works on which ours is based.
# Please contact us if you encounter any problems in this regard.
# If not stated otherwise, this software is licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
'''Corpus2CSV
Auxiliary script that converts the corpus into a .csv file, with the appropriate format
to be processed by verBERT.
Receives a string identifying the corpus as a mandatory argument and, as optional arguments,
it receives 'split', to indicate whether the corpus should be segmented, and
'tree_structure', to indicate whether the conversion should take into account the hierarchical
structure of the corpus.
'''
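# Example invocations (illustrative; the corpus identifier 'v1' is a placeholder):
#   python corpus2csv.py v1 split
#   python corpus2csv.py v1 split seed=7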
import sys
import pickle
import random
import pandas as pd
from tqdm import tqdm
from getpass import getpass
from namedlist import namedlist
from naive_criptography import *
from sklearn.model_selection import train_test_split
SEED = 42
corpus_identifier = sys.argv[1]
tree_structure = 'tree_structure' in sys.argv
split = 'split' in sys.argv
for arg in sys.argv:
if 'seed' in arg:
SEED = int(arg.split('=')[1])
random.seed(SEED)
if tree_structure:
print('ERROR: tree-structure friendly treatment for corpus conversion '
'not implemented.')
else:
corpus_file_name = 'koll_corpus_' + corpus_identifier + '.pkl'
fields_file_name = 'koll_fields_' + corpus_identifier + '.pkl'
with open(fields_file_name, 'rb') as f:
fields = pickle.load(f)
Processo = namedlist('Processo', fields)
with open(corpus_file_name, 'rb') as f:
corpus = pickle.load(f)
df_corpus = pd.DataFrame(columns=['Ementa', 'Verbetes'])
for i in tqdm(range(len(corpus))):
processo = corpus[i]
verbet_string = ''
for verb in processo.verbetacao:
if verb != 'clust_26': # Removes the 'Others' super-class
verbet_string = verbet_string + verb + '/'
        if verbet_string != '':  # strip the trailing '/'
            verbet_string = verbet_string[:-1]
df_corpus.loc[i] = [processo.ementa, verbet_string]
df_corpus = df_corpus.sample(frac=1, random_state=random.randint(0, 10000000))
df_corpus.to_csv('koll' + corpus_identifier +
'_all'+str(SEED)+'.csv', index=False)
# Splitting test data:
if split:
df_execution_data, df_test_data = train_test_split(df_corpus, test_size=0.2,
random_state=random.randint(0, 10000000))
df_execution_data.to_csv(
'koll' + corpus_identifier + '_exec'+str(SEED)+'.csv', index=False)
df_test_data.to_csv('koll' + corpus_identifier +
'_test'+str(SEED)+'.csv', index=False)
| 38.918605 | 107 | 0.709292 | 482 | 3,347 | 4.802905 | 0.417012 | 0.041469 | 0.011663 | 0.019438 | 0.146436 | 0.098488 | 0.073434 | 0.043197 | 0.043197 | 0 | 0 | 0.012682 | 0.198984 | 3,347 | 85 | 108 | 39.376471 | 0.850802 | 0.412907 | 0 | 0 | 0 | 0 | 0.107216 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.020833 | 0.1875 | 0 | 0.1875 | 0.020833 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce872ad9201f8eeaecaebbea2c9e3cba2d0ab4a9 | 2,914 | py | Python | 00 - Azure ML Setup Script.py | ezwiefel/azureml-image-auto-classification | bd3b0a8ae26e53613f814782eed85e9cab4cf3b9 | [
"MIT"
] | 1 | 2019-08-05T14:14:21.000Z | 2019-08-05T14:14:21.000Z | 00 - Azure ML Setup Script.py | ezwiefel/azureml-image-auto-classification | bd3b0a8ae26e53613f814782eed85e9cab4cf3b9 | [
"MIT"
] | null | null | null | 00 - Azure ML Setup Script.py | ezwiefel/azureml-image-auto-classification | bd3b0a8ae26e53613f814782eed85e9cab4cf3b9 | [
"MIT"
] | null | null | null | # Copyright (c) 2019 Microsoft
#
# This software is released under the MIT License.
# https://opensource.org/licenses/MIT
#%% [markdown]
## Create Azure Machine Learning workspace
#### _One Time Only Notebook_
# The AML Workspace stores information useful for building our Machine Learning and Data Science models - such as experiment tracking, model management, data stores and other useful artifacts.
#
# 
#%%
from azureml.core import Workspace, Datastore
from azureml.core.compute import ComputeTarget, AmlCompute
from azureml.core.authentication import InteractiveLoginAuthentication
#%%
# Give details of where the service should be created and what the name should be. This only needs to be done once - all projects can exist within the same workspace.
STORAGE_ACCT_KEY = None
STORAGE_ACCT_NAME = None
SUB_ID = None
RESOURCE_GROUP = None
WORKSPACE_NAME = None
WORKSPACE_REGION = None
# This script will load an already existing workspace
ws = Workspace(workspace_name = WORKSPACE_NAME,
subscription_id = SUB_ID,
resource_group = RESOURCE_GROUP
)
# This code would create a new workspace
# ws = Workspace.create(workspace_name = workspace_name,
# subscription_id = subscription_id,
# resource_group = resource_group,
# location=workspace_region)
# Save the configuration file for the workspace to DBFS
ws.write_config()
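#%%
# Quick sanity check (illustrative addition, not in the original notebook):
# confirm the workspace handle is usable before registering resources.
print(ws.name, ws.resource_group, ws.location, sep=' | ')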
#%% [markdown]
### Create Datastore in Workspace
# First, we'll register the datastore in the workspace. This is a one-time only event.
#%%
ds = Datastore.register_azure_blob_container(workspace=ws, datastore_name="images",
container_name="images", account_name=STORAGE_ACCT_NAME,
account_key=STORAGE_ACCT_KEY)
#%% [markdown]
### Create AML Compute Cluster
# Next, we'll create an autoscaling AML Compute cluster - with a 0-node minimum and a 10-node maximum. We'll create it in WestUS2 - which is the same region that our data is stored in.
#%%
aml_cluster_name = 'gpu-cluster'
provisioning_config = AmlCompute.provisioning_configuration(vm_size = "STANDARD_NC6", # NC6 is GPU-enabled
autoscale_enabled = True,
min_nodes = 0,
max_nodes = 10,
description="A GPU enabled cluster.")
# create the cluster
compute_target = ComputeTarget.create(ws, aml_cluster_name, provisioning_config) | 39.378378 | 191 | 0.641043 | 332 | 2,914 | 5.481928 | 0.463855 | 0.035714 | 0.032967 | 0.028571 | 0.074725 | 0.043956 | 0 | 0 | 0 | 0 | 0 | 0.006314 | 0.293411 | 2,914 | 74 | 192 | 39.378378 | 0.87761 | 0.497941 | 0 | 0 | 0 | 0 | 0.04 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.125 | 0 | 0.125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce89611bae3affd6941f6f02c2054b0f8e4c2278 | 1,303 | py | Python | examples/AnalysePlayBin.py | pierretallotte/python-dds | 653dbd51fa86e2b5fbd1aba241f5a27e3e3776ff | [
"Apache-2.0"
] | 9 | 2015-11-26T07:12:15.000Z | 2022-01-26T04:10:03.000Z | examples/AnalysePlayBin.py | pierretallotte/python-dds | 653dbd51fa86e2b5fbd1aba241f5a27e3e3776ff | [
"Apache-2.0"
] | 1 | 2019-01-05T12:41:48.000Z | 2019-01-05T12:41:48.000Z | examples/AnalysePlayBin.py | pierretallotte/python-dds | 653dbd51fa86e2b5fbd1aba241f5a27e3e3776ff | [
"Apache-2.0"
] | 7 | 2018-07-30T12:07:18.000Z | 2021-07-20T09:24:38.000Z | import dds
import ctypes
import hands
import functions
dl = dds.deal()
DDplay = dds.playTraceBin()
solved = dds.solvedPlay()
line = ctypes.create_string_buffer(80)
threadIndex = 0
for handno in range(3):
dl.trump = hands.trump[handno]
dl.first = hands.first[handno]
dl.currentTrickSuit[0] = 0
dl.currentTrickSuit[1] = 0
dl.currentTrickSuit[2] = 0
dl.currentTrickRank[0] = 0
dl.currentTrickRank[1] = 0
dl.currentTrickRank[2] = 0
for h in range(dds.DDS_HANDS):
for s in range(dds.DDS_SUITS):
dl.remainCards[h][s] = hands.holdings[handno][s][h]
DDplay.number = hands.playNo[handno]
for i in range(hands.playNo[handno]):
DDplay.suit[i] = hands.playSuit[handno][i]
DDplay.rank[i] = hands.playRank[handno][i]
res = dds.AnalysePlayBin(dl, DDplay, ctypes.pointer(solved), threadIndex)
if res != dds.RETURN_NO_FAULT:
dds.ErrorMessage(res, line)
print("DDS error: {}\n".format(line.value.decode("utf-8")))
match = functions.ComparePlay(ctypes.pointer(solved), handno)
line = "AnalysePlayPBNBin, hand {}: {}".format(handno + 1, \
"OK" if match else "ERROR")
functions.PrintHand(line, dl.remainCards)
functions.PrintBinPlay(ctypes.pointer(DDplay), ctypes.pointer(solved))
| 26.06 | 77 | 0.66462 | 173 | 1,303 | 4.971098 | 0.375723 | 0.017442 | 0.066279 | 0.030233 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.017176 | 0.195702 | 1,303 | 49 | 78 | 26.591837 | 0.803435 | 0 | 0 | 0 | 0 | 0 | 0.043745 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.117647 | 0 | 0.117647 | 0.029412 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce8a7a05ebd423d6d52c1c27885c39d4ed9043b2 | 1,194 | py | Python | tests/test_sedmixture.py | JospehCeh/Delight | a363e9b44587b158aa5f54a7cf294044e2989ecf | [
"MIT"
] | 7 | 2016-06-12T10:56:31.000Z | 2022-01-20T17:24:26.000Z | tests/test_sedmixture.py | sylvielsstfr/Delight | 67202a27061dee33cb162ca382d11e4994189644 | [
"MIT"
] | 9 | 2016-06-04T13:36:29.000Z | 2022-01-24T09:04:49.000Z | tests/test_sedmixture.py | sylvielsstfr/Delight | 67202a27061dee33cb162ca382d11e4994189644 | [
"MIT"
] | 4 | 2017-10-24T18:03:36.000Z | 2021-08-24T14:53:52.000Z | # -*- coding: utf-8 -*-
import numpy as np
from delight.sedmixture import *
from scipy.misc import derivative
relative_accuracy = 0.01
def test_PhotometricFilter():
def f(x):
return np.exp(-0.5*((x-3e3)/1e2)**2)
x = np.linspace(2e3, 4e3, 1000)
y = f(x)
aFilter = PhotometricFilter('I', x, y)
xb = np.random.uniform(low=2e3, high=4e3, size=10)
res1 = f(xb)
res2 = aFilter(xb)
assert np.allclose(res2, res1, rtol=relative_accuracy)
def test_PhotometricFluxPolynomialInterpolation():
def f(x):
return np.exp(-0.5*((x-3e3)/1e2)**2)
x = np.linspace(2e3, 4e3, 1000)
y = f(x)
bandName = 'I'
photometricBands = [PhotometricFilter(bandName, x, y)]
x = np.linspace(2e1, 4e5, 1000)
y = f(x)
aTemplate = SpectralTemplate_z(x, y, photometricBands,
redshiftGrid=np.linspace(1e-2, 1.0, 10))
redshifts = np.random.uniform(1e-2, 1.0, 10)
    f1 = aTemplate.photometricFlux(redshifts, bandName)
    f2 = aTemplate.photometricFlux_bis(redshifts, bandName)
    assert np.allclose(f1, f2, rtol=relative_accuracy)
    g1 = aTemplate.photometricFlux_gradz(redshifts, bandName)
    g2 = aTemplate.photometricFlux_gradz_bis(redshifts, bandName)
    assert np.allclose(g1, g2, rtol=relative_accuracy)
| 28.428571 | 75 | 0.647404 | 160 | 1,194 | 4.775 | 0.4 | 0.013089 | 0.043194 | 0.027487 | 0.26178 | 0.13089 | 0.13089 | 0.13089 | 0.13089 | 0.13089 | 0 | 0.07265 | 0.21608 | 1,194 | 41 | 76 | 29.121951 | 0.74359 | 0.017588 | 0 | 0.3 | 0 | 0 | 0.001708 | 0 | 0 | 0 | 0 | 0 | 0.033333 | 1 | 0.133333 | false | 0 | 0.1 | 0.066667 | 0.3 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce8c3526b50f026ed21fb2597aed5f38c57d68be | 1,733 | py | Python | python_web_exam/web_app/models.py | NikolaKolew/softuni-python-web-exam | dfa8bf561a75fdae0083798f953a75afb8e820b9 | [
"MIT"
] | null | null | null | python_web_exam/web_app/models.py | NikolaKolew/softuni-python-web-exam | dfa8bf561a75fdae0083798f953a75afb8e820b9 | [
"MIT"
] | null | null | null | python_web_exam/web_app/models.py | NikolaKolew/softuni-python-web-exam | dfa8bf561a75fdae0083798f953a75afb8e820b9 | [
"MIT"
] | null | null | null | from django.core.validators import MinLengthValidator, MinValueValidator
from django.db import models
class Profile(models.Model):
USER_NAME_MAX_CHARS = 15
USER_NAME_MIN_CHARS = 2
MIN_AGE = 0
user_name = models.CharField(
max_length=USER_NAME_MAX_CHARS,
validators=(
MinLengthValidator(USER_NAME_MIN_CHARS),
)
)
email = models.EmailField()
age = models.IntegerField(
null=True,
blank=True,
validators=(
MinValueValidator(MIN_AGE),
)
)
class Album(models.Model):
ALBUM_NAME_MAX_CHARS = 30
ARTIST_NAME_MAX_CHARS = 30
GENRE_MAX_CHARS = 30
MIN_PRICE = 0.0
POPMUSIC = 'Pop Music'
JAZZMUSIC = 'Jazz Music'
RBMUSIC = 'R&B Music'
ROCKMUSIC = 'Rock Music'
COUNTRYMUSIC = 'Country Music'
DANCEMUSIC = 'Dance Music'
HIPHOPMUSIC = 'Hip Hop Music'
OTHER = 'Other'
GENRE_CHOICES = [
(POPMUSIC, 'Pop Music'),
(JAZZMUSIC, 'Jazz Music'),
(RBMUSIC, 'R&B Music'),
(ROCKMUSIC, 'Rock Music'),
(COUNTRYMUSIC, 'Country Music'),
(DANCEMUSIC, 'Dance Music'),
(HIPHOPMUSIC, 'Hip Hop Music'),
(OTHER, 'Other'),
]
name = models.CharField(
max_length=ALBUM_NAME_MAX_CHARS,
unique=True,
)
artist = models.CharField(
max_length=ARTIST_NAME_MAX_CHARS,
)
genre = models.CharField(
max_length=GENRE_MAX_CHARS,
choices=GENRE_CHOICES,
)
description = models.TextField(
null=True,
blank=True,
)
image = models.URLField()
price = models.FloatField(
validators=(
MinValueValidator(MIN_PRICE),
)
)
| 21.6625 | 72 | 0.600115 | 181 | 1,733 | 5.530387 | 0.325967 | 0.063936 | 0.071928 | 0.095904 | 0.33966 | 0.283716 | 0.283716 | 0.283716 | 0.283716 | 0.283716 | 0 | 0.009909 | 0.301212 | 1,733 | 79 | 73 | 21.936709 | 0.81668 | 0 | 0 | 0.109375 | 0 | 0 | 0.092325 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.03125 | 0 | 0.453125 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce8f519eb63da2ec8901ffd5cbd94d614b1f080f | 7,007 | py | Python | scratchpads/qt_experiments/bridge_mediaplaylist_to_domain_model_experiment.py | devdave/pysongman | e4cb62d780918426322c41dedec8950150f934d5 | [
"MIT"
] | 1 | 2021-04-05T18:45:21.000Z | 2021-04-05T18:45:21.000Z | scratchpads/qt_experiments/bridge_mediaplaylist_to_domain_model_experiment.py | devdave/pysongman | e4cb62d780918426322c41dedec8950150f934d5 | [
"MIT"
] | null | null | null | scratchpads/qt_experiments/bridge_mediaplaylist_to_domain_model_experiment.py | devdave/pysongman | e4cb62d780918426322c41dedec8950150f934d5 | [
"MIT"
] | null | null | null | """
Experiment doesn't work :(
QT rips off the extra attributes in CustomContent and returns just a QMediaContent object
"""
import sys
import argparse
import typing
import pathlib
import pprint
import mutagen
import PySide2
from PySide2 import QtCore
from PySide2.QtCore import Qt
from PySide2 import QtWidgets
from PySide2 import QtMultimedia
from ffprobe_analyzer import FFProbe
class MockRecord:
def __init__(self, meta, path):
self.meta = meta
self.path = path
@property
def title(self):
if getattr(self.meta, "title", None) is None:
# assume everything but duration is missing
if "-" in self.path.name:
# assume artist - title
_, title = self.path.name.split("-", 1)
return title
else:
return self.path.name
else:
return self.meta.title
@property
def filename(self):
return self.path
@property
def duration_str(self):
time = self.meta.info.length
minutes = int(time / 60)
seconds = int(time % 60)
return f"{minutes}:{seconds:02}"
class MockDomain:
data = []
@classmethod
def Generate(cls, song_dir):
song_dir = pathlib.Path(song_dir)
files = (file for file in song_dir.iterdir() if file.is_file() and file.name.endswith((".ogg", ".mp3")))
for file in files:
url = QtCore.QUrl(file.as_posix())
media = QtMultimedia.QMediaContent(url)
meta = mutagen.File(file.as_posix())
# probe = FFProbe(file)
cls.data.append(MockRecord(meta, file))
@classmethod
def GetByPath(cls, path):
search_path = pathlib.Path(path)
for record in cls.data: # type: FFProbe
if pathlib.Path(record.filename) == search_path:
return record
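# Illustrative lookup (assumes MockDomain.Generate() has populated cls.data;
# the path below is a placeholder):
#   record = MockDomain.GetByPath('/music/artist - title.mp3')
#   print(record.title, record.duration_str)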
class Playlist2Table(QtCore.QAbstractTableModel):
playlist: QtMultimedia.QMediaPlaylist
def __init__(self, playlist, headers_fetchers):
super(Playlist2Table, self).__init__()
self.playlist = playlist
self.headers = list(headers_fetchers.keys())
self.fetchers = list(headers_fetchers.values())
def rowCount(self, parent: PySide2.QtCore.QModelIndex = ...) -> int:
# print(f"rc: {self.playlist.mediaCount()=}")
return self.playlist.mediaCount()
def columnCount(self, parent: PySide2.QtCore.QModelIndex = ...) -> int:
"""
Row ID, header[0], header[...]
Args:
parent:
Returns:
"""
return len(self.headers) + 1
def headerData(self, section:int, orientation:PySide2.QtCore.Qt.Orientation, role:int=...) -> typing.Any:
if role == Qt.DisplayRole:
if section == 0:
return "RID"
else:
return self.headers[section-1]
def data(self, index:PySide2.QtCore.QModelIndex, role:int=...) -> typing.Any:
if role == Qt.DisplayRole:
if index.column() == 0:
return index.row()
else:
fetcher = self.fetchers[index.column() - 1]
media = self.playlist.media(index.row()) # type: QtMultimedia.QMediaContent
path = media.canonicalUrl().toString()
record = MockDomain.GetByPath(path)
if record is None:
# ruh uh
print("Failed to lookup path: ", path)
return fetcher(record)
class BasicPlayer(QtWidgets.QWidget):
def __init__(self, song_dir):
super(BasicPlayer, self).__init__()
self.playlist = QtMultimedia.QMediaPlaylist()
self.playlist.currentIndexChanged.connect(self.on_index_changed)
self.player = QtMultimedia.QMediaPlayer()
self.player.error.connect(self.on_media_error)
self.player.setPlaylist(self.playlist)
self.load_directory(song_dir)
self.body = QtWidgets.QVBoxLayout()
def fetch_title(record):
return record.title
def fetch_dur(record):
return record.duration_str
self.play2table = Playlist2Table(self.playlist, {"Title": fetch_title, "Duration": fetch_dur})
self.playtable = QtWidgets.QTableView()
self.playtable.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.ResizeToContents)
self.playtable.verticalHeader().hide()
self.playtable.horizontalHeader().hide()
self.playtable.horizontalHeader().setSectionResizeMode(QtWidgets.QHeaderView.Stretch)
self.playtable.setSelectionBehavior(QtWidgets.QAbstractItemView.SelectRows)
self.playtable.setSizePolicy(QtWidgets.QSizePolicy.Expanding, QtWidgets.QSizePolicy.Expanding)
self.playtable.resizeColumnsToContents()
self.playtable.resizeRowsToContents()
self.playtable.setModel(self.play2table)
self.body.addWidget(self.playtable, 1)
self.body.setStretch(0, 1)
# basic player controls
self.controls = QtWidgets.QHBoxLayout()
self.playBtn = QtWidgets.QPushButton("Play")
self.playBtn.clicked.connect(self.on_play_click)
self.stopBtn = QtWidgets.QPushButton("Stop")
self.stopBtn.clicked.connect(self.on_stop_click)
self.controls.addWidget(self.playBtn)
self.controls.addWidget(self.stopBtn)
self.body.addLayout(self.controls)
self.setLayout(self.body)
self.playtable.doubleClicked.connect(self.on_doubleclick)
def on_index_changed(self, position):
self.playtable.selectRow(position)
def on_media_error(self, error):
print(error)
media = self.player.currentMedia()
print(media, media.canonicalUrl())
probe = FFProbe(media.canonicalUrl().toString())
pprint.pprint(probe.info)
# for now no controller
def on_doubleclick(self, index: QtCore.QModelIndex):
row = index.row()
self.playlist.setCurrentIndex(row)
self.player.play()
debug = 1
def on_play_click(self):
self.player.play()
def on_stop_click(self):
self.player.stop()
def load_directory(self, song_dir):
home = pathlib.Path(song_dir)
files = (element for element in home.iterdir() if element.is_file() and element.name.endswith(".mp3"))
for fake_id, file in enumerate(files):
url = QtCore.QUrl(file.as_posix())
media = QtMultimedia.QMediaContent(url)
self.playlist.addMedia(media)
print(f"{self.playlist.mediaCount()}")
MockDomain.Generate(song_dir)
def main(song_dir):
app = QtWidgets.QApplication(sys.argv)
view = BasicPlayer(song_dir)
view.show()
sys.exit(app.exec_())
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument("song_dir")
args = parser.parse_args()
main(args.song_dir) | 27.586614 | 112 | 0.62723 | 758 | 7,007 | 5.688654 | 0.275726 | 0.021104 | 0.015074 | 0.008349 | 0.105751 | 0.095083 | 0.045918 | 0.045918 | 0.045918 | 0.028757 | 0 | 0.006418 | 0.266162 | 7,007 | 254 | 113 | 27.586614 | 0.832166 | 0.057799 | 0 | 0.11039 | 0 | 0 | 0.020224 | 0.00766 | 0 | 0 | 0 | 0 | 0 | 1 | 0.136364 | false | 0 | 0.077922 | 0.025974 | 0.344156 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce8fc84f32251354743ef3abaef05028483ffd18 | 4,778 | py | Python | examples/asyncio/asyncio-server.py | LaudateCorpus1/hyper-h2 | 7dfab8f8e0e8605c4a2a90706b217d0a0a0c45b7 | [
"MIT"
] | 2 | 2020-07-01T20:46:51.000Z | 2021-04-28T21:28:48.000Z | examples/asyncio/asyncio-server.py | LaudateCorpus1/hyper-h2 | 7dfab8f8e0e8605c4a2a90706b217d0a0a0c45b7 | [
"MIT"
] | null | null | null | examples/asyncio/asyncio-server.py | LaudateCorpus1/hyper-h2 | 7dfab8f8e0e8605c4a2a90706b217d0a0a0c45b7 | [
"MIT"
] | 3 | 2021-06-03T10:10:16.000Z | 2022-03-17T19:57:00.000Z | # -*- coding: utf-8 -*-
"""
asyncio-server.py
~~~~~~~~~~~~~~~~~
A fully-functional HTTP/2 server using asyncio. Requires Python 3.5+.
This example demonstrates handling requests with bodies, as well as handling
those without. In particular, it demonstrates the fact that DataReceived may
be called multiple times, and that applications must handle that possibility.
Please note that this example does not handle flow control, and so only works
properly for relatively small requests. Please see other examples to understand
how flow control should work.
"""
import asyncio
import io
import json
import ssl
import collections
from typing import List, Tuple
from h2.connection import H2Connection
from h2.events import DataReceived, RequestReceived, StreamEnded
from h2.errors import ErrorCodes
RequestData = collections.namedtuple('RequestData', ['headers', 'data'])
class H2Protocol(asyncio.Protocol):
def __init__(self):
self.conn = H2Connection(client_side=False)
self.transport = None
self.stream_data = {}
def connection_made(self, transport: asyncio.Transport):
self.transport = transport
self.conn.initiate_connection()
self.transport.write(self.conn.data_to_send())
def data_received(self, data: bytes):
events = self.conn.receive_data(data)
self.transport.write(self.conn.data_to_send())
for event in events:
if isinstance(event, RequestReceived):
self.request_received(event.headers, event.stream_id)
elif isinstance(event, DataReceived):
self.receive_data(event.data, event.stream_id)
elif isinstance(event, StreamEnded):
self.stream_complete(event.stream_id)
self.transport.write(self.conn.data_to_send())
def request_received(self, headers: List[Tuple[str, str]], stream_id: int):
headers = collections.OrderedDict(headers)
method = headers[':method']
# We only support GET and POST.
if method not in ('GET', 'POST'):
self.return_405(headers, stream_id)
return
# Store off the request data.
request_data = RequestData(headers, io.BytesIO())
self.stream_data[stream_id] = request_data
def stream_complete(self, stream_id: int):
"""
When a stream is complete, we can send our response.
"""
try:
request_data = self.stream_data[stream_id]
except KeyError:
# Just return, we probably 405'd this already
return
headers = request_data.headers
body = request_data.data.getvalue().decode('utf-8')
data = json.dumps(
{"headers": headers, "body": body}, indent=4
).encode("utf8")
response_headers = (
(':status', '200'),
('content-type', 'application/json'),
            ('content-length', str(len(data))),
('server', 'asyncio-h2'),
)
self.conn.send_headers(stream_id, response_headers)
self.conn.send_data(stream_id, data, end_stream=True)
def return_405(self, headers: List[Tuple[str, str]], stream_id: int):
"""
We don't support the given method, so we want to return a 405 response.
"""
response_headers = (
(':status', '405'),
('content-length', '0'),
('server', 'asyncio-h2'),
)
self.conn.send_headers(stream_id, response_headers, end_stream=True)
def receive_data(self, data: bytes, stream_id: int):
"""
We've received some data on a stream. If that stream is one we're
expecting data on, save it off. Otherwise, reset the stream.
"""
try:
stream_data = self.stream_data[stream_id]
except KeyError:
self.conn.reset_stream(
stream_id, error_code=ErrorCodes.PROTOCOL_ERROR
)
else:
stream_data.data.write(data)
ssl_context = ssl.create_default_context(ssl.Purpose.CLIENT_AUTH)
ssl_context.options |= (
ssl.OP_NO_TLSv1 | ssl.OP_NO_TLSv1_1 | ssl.OP_NO_COMPRESSION
)
ssl_context.set_ciphers("ECDHE+AESGCM")
ssl_context.load_cert_chain(certfile="cert.crt", keyfile="cert.key")
ssl_context.set_alpn_protocols(["h2"])
loop = asyncio.get_event_loop()
# Each client connection will create a new protocol instance
coro = loop.create_server(H2Protocol, '127.0.0.1', 8443, ssl=ssl_context)
server = loop.run_until_complete(coro)
# Serve requests until Ctrl+C is pressed
print('Serving on {}'.format(server.sockets[0].getsockname()))
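# Illustrative client check (assumes a curl build with HTTP/2 support):
#   curl -k --http2 https://127.0.0.1:8443/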
try:
loop.run_forever()
except KeyboardInterrupt:
pass
# Close the server
server.close()
loop.run_until_complete(server.wait_closed())
loop.close()
| 32.726027 | 79 | 0.657179 | 601 | 4,778 | 5.073211 | 0.36772 | 0.039357 | 0.018367 | 0.021646 | 0.153493 | 0.146277 | 0.125287 | 0.125287 | 0.087242 | 0.037389 | 0 | 0.013695 | 0.235873 | 4,778 | 145 | 80 | 32.951724 | 0.821419 | 0.213478 | 0 | 0.155556 | 0 | 0 | 0.059176 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.077778 | false | 0.011111 | 0.1 | 0 | 0.211111 | 0.011111 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce928b2bb1579f9b60ce58db2c0e28899791c471 | 3,803 | py | Python | example/dogs_cats_keras.py | aspratyush/dl_utils | c067831f3c72aba88223c231c7fbc249d997e222 | [
"Apache-2.0"
] | null | null | null | example/dogs_cats_keras.py | aspratyush/dl_utils | c067831f3c72aba88223c231c7fbc249d997e222 | [
"Apache-2.0"
] | null | null | null | example/dogs_cats_keras.py | aspratyush/dl_utils | c067831f3c72aba88223c231c7fbc249d997e222 | [
"Apache-2.0"
] | null | null | null | '''
Inspired from:
1) Keras blog : simple Conv-net : https://gist.github.com/fchollet/0830affa1f7f19fd47b06d4cf89ed44d
2) Keras blog : fine-tuning VGG : https://gist.github.com/fchollet/7eb39b44eb9e16e59632d25fb3119975
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Imports
import keras
from keras.preprocessing.image import ImageDataGenerator
from keras.models import Model, Sequential
from keras.layers import Conv2D, Activation, MaxPooling2D, Flatten, Dense, Dropout
# dimensions of our images.
img_width, img_height = 224, 224
train_data_dir = '/home/ctg_pratyush/workspace/data/dogscats/sample/train'
validation_data_dir = '/home/ctg_pratyush/workspace/data/dogscats/sample/valid'
nb_train_samples = 2000
nb_validation_samples = 800
epochs = 20
batch_size = 16
def main():
finetune = True
    if finetune:
        print('Downloading VGG16...')
# model
#prev_model = keras.applications.resnet50.ResNet50(include_top=False, weights='imagenet', input_shape=(img_width, img_height, 3))
prev_model = keras.applications.VGG16(include_top=False, weights='imagenet', input_shape=(img_width, img_height, 3))
# top model
top_model = Sequential()
top_model.add(Flatten(input_shape=prev_model.output_shape[1:]))
top_model.add(Dense(256, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(84, activation='relu'))
top_model.add(Dropout(0.5))
top_model.add(Dense(1, activation='sigmoid'))
# model summary
top_model.summary()
# set prev_model to be non-trainable
for layer in prev_model.layers:
layer.trainable = False
# append the models
model = Model(inputs=prev_model.input, outputs=top_model(prev_model.output))
else:
# Model
model = Sequential()
model.add(Conv2D(32, (3, 3), input_shape=(img_width, img_height, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(32, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Conv2D(64, (3, 3)))
model.add(Activation('relu'))
model.add(MaxPooling2D(pool_size=(2, 2)))
model.add(Flatten())
model.add(Dense(64))
model.add(Activation('relu'))
model.add(Dropout(0.5))
model.add(Dense(1))
model.add(Activation('sigmoid'))
# model summary
model.summary()
model.compile(loss='binary_crossentropy',
optimizer='rmsprop',
metrics=['accuracy'])
for layer in model.layers:
print(layer, layer.trainable)
# this is the augmentation configuration we will use for training
train_datagen = ImageDataGenerator(
rescale=1. / 255,
shear_range=0.2,
zoom_range=0.2,
horizontal_flip=True)
# this is the augmentation configuration we will use for testing:
# only rescaling
test_datagen = ImageDataGenerator(rescale=1. / 255)
train_generator = train_datagen.flow_from_directory(
train_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
validation_generator = test_datagen.flow_from_directory(
validation_data_dir,
target_size=(img_width, img_height),
batch_size=batch_size,
class_mode='binary')
model.fit_generator(
train_generator,
steps_per_epoch=nb_train_samples // batch_size,
epochs=epochs,
validation_data=validation_generator,
validation_steps=nb_validation_samples // batch_size)
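    # Illustrative follow-up (not in the referenced gists): persist the trained
    # weights for later reuse. The filename is a placeholder.
    model.save_weights('dogs_cats_weights.h5')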
if __name__ == '__main__':
main()
| 30.669355 | 137 | 0.672364 | 470 | 3,803 | 5.206383 | 0.319149 | 0.068656 | 0.026972 | 0.041684 | 0.387005 | 0.332652 | 0.312219 | 0.301185 | 0.301185 | 0.223539 | 0 | 0.040686 | 0.217986 | 3,803 | 123 | 138 | 30.918699 | 0.782112 | 0.165922 | 0 | 0.194805 | 0 | 0 | 0.073287 | 0.034898 | 0 | 0 | 0 | 0 | 0 | 1 | 0.012987 | false | 0 | 0.090909 | 0 | 0.103896 | 0.038961 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce92b22b080b3c4818a4a0e1c30e53e69f243377 | 4,399 | py | Python | inference.py | uos-bhkim/CHALK | 0eca76ab100781ffc14e5eb1098a78a5f386f8f9 | [
"MIT"
] | null | null | null | inference.py | uos-bhkim/CHALK | 0eca76ab100781ffc14e5eb1098a78a5f386f8f9 | [
"MIT"
] | null | null | null | inference.py | uos-bhkim/CHALK | 0eca76ab100781ffc14e5eb1098a78a5f386f8f9 | [
"MIT"
] | null | null | null | #%%
from mmseg.apis import init_segmentor, inference_segmentor, show_result_pyplot
from mmseg.core.evaluation import get_palette
import os
import numpy as np
import cv2
import slidingwindow as sw
import tqdm
import glob
config_file = '/home/user/UOS-SSaS Dropbox/05. Data/03. Checkpoints/2021.07.22_deeplabv3plus_r50-d8_769x769_40k_concrete_crack_cs_xt/deeplabv3_r101-d8_769x769_40k_cityscapes.py'
checkpoint_file = '/home/user/UOS-SSaS Dropbox/05. Data/03. Checkpoints/2021.07.22_deeplabv3plus_r50-d8_769x769_40k_concrete_crack_cs_xt/iter_40000.pth'
# build the model from a config file and a checkpoint file
model = init_segmentor(config_file, checkpoint_file, device='cuda:1')
def imread(filename, flags=cv2.IMREAD_COLOR, dtype=np.uint8):
try:
n = np.fromfile(filename, dtype)
imageBGR = cv2.imdecode(n, flags)
return cv2.cvtColor(imageBGR, cv2.COLOR_BGR2RGB)
except Exception as e:
print(e)
return None
def imwrite(filename, imageRGB, params=None):
try:
ext = os.path.splitext(filename)[1]
imageBGR = cv2.cvtColor(imageRGB, cv2.COLOR_RGB2BGR)
result, n = cv2.imencode(ext, imageBGR, params)
if result:
with open(filename, mode='w+b') as f:
n.tofile(f)
return True
else:
return False
except Exception as e:
print(e)
return False
def inference_segmentor_sliding_window(model, input_img, color_mask, num_classes,
score_thr = 0.1, window_size = 1024, overlap_ratio = 0.1,):
'''
:param model: is a mmdetection model object
:param input_img : str or numpy array
if str, run imread from input_img
    :param score_thr: is a float number between 0 and 1.
        Kept for API compatibility with the detection version of this helper;
        it is not used by this segmentation routine.
:param window_size: is a subset size to be detected at a time.
default = 1024, integer number
:param overlap_ratio: is a overlap size.
If you overlap sliding windows by 50%, overlap_ratio is 0.5.
:return: img_result
:return: mask_output
'''
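    # Illustrative call (the filename is a placeholder):
    #   img_res, mask = inference_segmentor_sliding_window(
    #       model, 'crack.png', get_palette('concrete_crack_as_cityscapes')[1:], 1)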
# color mask has to be updated for multiple-class object detection
if isinstance(input_img, str) :
img = imread(input_img)
else :
img = input_img
    # Generate the set of windows using the requested window size and overlap ratio
    windows = sw.generate(img, sw.DimOrder.HeightWidthChannel, window_size, overlap_ratio)
mask_output = np.zeros((img.shape[0], img.shape[1], num_classes), dtype=np.uint8)
# if isinstance(input_img, str) :
# tqdm_window = tqdm(windows, ascii=True, desc='inference by sliding window on ' + os.path.basename(input_img))
# else :
# tqdm_window = tqdm(windows, ascii=True, desc='inference by sliding window ')
for window in windows :
# Add print option for sliding window detection
img_subset = img[window.indices()]
results = inference_segmentor(model, img_subset)[0]
results_onehot = (np.arange(num_classes) == results[...,None]-1).astype(int)
mask_output[window.indices()] = mask_output[window.indices()] + results_onehot
mask_output[mask_output > 1] = 1
mask_output_bool = mask_output.astype(np.bool)
# Add colors to detection result on img
img_result = img
    for num in range(num_classes):
        # index channels directly instead of relying on num-1 wraparound
        img_result[mask_output_bool[:, :, num], :] = img_result[mask_output_bool[:, :, num], :] * 0.01 + np.asarray(color_mask[num], dtype=float) * 0.99
return img_result, mask_output
def run_model():
img_folder = '/home/user/ssi_proj/static/images/cropped'
img_temp_folder = '/home/user/ssi_proj/static/images/temp'
img_list = glob.glob(os.path.join(img_folder, '*.png'))
for img_path in img_list :
img_subset = imread(img_path)
img_filename = img_path.split('/')[-1]
img_save_path = os.path.join(img_temp_folder, img_filename)
_, mask_output = inference_segmentor_sliding_window(model, img_subset, get_palette('concrete_crack_as_cityscapes')[1:], 1)
cv2.imwrite(img_save_path, mask_output)
if __name__ == "__main__" :
while True :
run_model()
| 36.966387 | 177 | 0.668788 | 616 | 4,399 | 4.563312 | 0.321429 | 0.049804 | 0.012807 | 0.020277 | 0.217005 | 0.175027 | 0.175027 | 0.110993 | 0.110993 | 0.110993 | 0 | 0.036477 | 0.233462 | 4,399 | 118 | 178 | 37.279661 | 0.797153 | 0.258241 | 0 | 0.149254 | 0 | 0.029851 | 0.132477 | 0.099593 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.119403 | 0 | 0.268657 | 0.059701 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce94aa4acbb24837f0165028a6e2ae7b6d2bde07 | 3,366 | py | Python | pyinsights/config.py | 1v1a3x/pyinsights | 7ccff571378e3dbf88e72b0b59036ddd76faa8e6 | [
"MIT"
] | null | null | null | pyinsights/config.py | 1v1a3x/pyinsights | 7ccff571378e3dbf88e72b0b59036ddd76faa8e6 | [
"MIT"
] | null | null | null | pyinsights/config.py | 1v1a3x/pyinsights | 7ccff571378e3dbf88e72b0b59036ddd76faa8e6 | [
"MIT"
] | null | null | null | import json
from dataclasses import dataclass, asdict
from functools import cached_property
from pathlib import Path
from typing import Any, Dict, List, NamedTuple, Union
from jsonschema import Draft7Validator
from jsonschema.exceptions import ValidationError
from yaml import safe_load
from pyinsights.exceptions import (
ConfigInvalidSyntaxError,
ConfigNotFoundError,
ConfigVersionUnknownError,
InvalidVersionError
)
from pyinsights.helper import (
convert_to_epoch,
convert_string_duration_to_datetime,
DatetimeType
)
ConfigType = Dict[str, Any]
SchemaType = Dict[str, Any]
class ConfigFile(NamedTuple):
filename: str
content: ConfigType
@classmethod
def from_filename(cls, filename) -> 'ConfigFile':
return cls(filename, load_yaml(filename))
@property
def version(self) -> str:
try:
return self.content['version']
except KeyError:
raise ConfigVersionUnknownError(
'Please Specify configuration version'
)
def convert_duration(self) -> Dict[str, int]:
duration = self.content['duration']
if isinstance(duration, str):
duration = convert_string_duration_to_datetime(duration)
duration_epoch = {
key: convert_to_epoch(value)
for key, value in duration.items()
}
return duration_epoch
def get_query_params(self) -> ConfigType:
params = self.content.copy()
new_duration = self.convert_duration()
del params['version']
del params['duration']
params.update(new_duration)
return params
def load_config(filepath: str) -> ConfigType:
"""Load configuration
Arguments:
filepath {str}
Returns:
{ConfigType} -- query parameters
"""
config = ConfigFile.from_filename(filepath)
validate(config.content, config.version)
return config
def load_yaml(filepath: str) -> ConfigType:
"""Load YAML configuration file
Arguments:
filepath {str}
Raises:
ConfigNotFoundError
Returns:
config {ConfigType}
"""
try:
with open(filepath) as fd:
return safe_load(fd)
except FileNotFoundError:
raise ConfigNotFoundError('Could not find the configuration')
def load_schema(version: str) -> SchemaType:
"""Load the schema json file
Arguments:
version {str}
Raises:
InvalidVersionError
Returns:
schema {SchemaType}
"""
basepath = Path(__file__).parent.resolve()
filename = f'version_{version}.json'
schema_filpath = f'{basepath}/schema/{filename}'
try:
with open(schema_filpath) as fd:
return json.load(fd)
except FileNotFoundError:
raise InvalidVersionError(f'The version {repr(version)} is invalid')
def validate(config: ConfigType, version: str) -> bool:
"""Validate the configuration
Arguments:
config {ConfigType}
version {str}
Raises:
ConfigInvalidSyntaxError
Returns:
bool
"""
try:
schema = load_schema(version)
Draft7Validator(schema).validate(config)
except ValidationError as err:
raise ConfigInvalidSyntaxError(err)
except Exception as err:
raise err
else:
return True
| 22.743243 | 76 | 0.654486 | 335 | 3,366 | 6.465672 | 0.307463 | 0.020314 | 0.012927 | 0.021237 | 0.060018 | 0 | 0 | 0 | 0 | 0 | 0 | 0.000809 | 0.265894 | 3,366 | 147 | 77 | 22.897959 | 0.875759 | 0.143791 | 0 | 0.075 | 0 | 0 | 0.071299 | 0.018188 | 0 | 0 | 0 | 0 | 0 | 1 | 0.1 | false | 0 | 0.125 | 0.0125 | 0.3625 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce964a0e23058e08b9ab551c3d7c96dc52bc488d | 571 | py | Python | gbe/models/show_vote.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 1 | 2021-03-14T11:56:47.000Z | 2021-03-14T11:56:47.000Z | gbe/models/show_vote.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | 180 | 2019-09-15T19:52:46.000Z | 2021-11-06T23:48:01.000Z | gbe/models/show_vote.py | bethlakshmi/gbe-divio-djangocms-python2.7 | 6e9b2c894162524bbbaaf73dcbe927988707231d | [
"Apache-2.0"
] | null | null | null | from django.db.models import (
CASCADE,
ForeignKey,
Model,
IntegerField,
)
from gbe.models import (
Act,
Show,
)
from gbetext import vote_options
class ShowVote(Model):
show = ForeignKey(Show,
on_delete=CASCADE,
blank=True,
null=True)
vote = IntegerField(choices=vote_options,
blank=True,
null=True)
class Meta:
verbose_name = "show vote"
verbose_name_plural = "show votes"
app_label = "gbe"
| 19.689655 | 45 | 0.530648 | 56 | 571 | 5.285714 | 0.535714 | 0.081081 | 0.087838 | 0.114865 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.394046 | 571 | 28 | 46 | 20.392857 | 0.855491 | 0 | 0 | 0.173913 | 0 | 0 | 0.038529 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.130435 | 0 | 0.304348 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce965def9015a3c7d76b18628678bbd0c3ec216e | 392 | py | Python | console/audiobc.py | ihydrogen/hydrogen-chat-bot-py | b21ece5cf2532c0f0d31b5db75fe6b91229f5d59 | [
"Apache-2.0"
] | 9 | 2017-02-19T16:09:53.000Z | 2021-01-05T12:18:22.000Z | console/audiobc.py | ihydrogen/hydrogen-chat-bot-py | b21ece5cf2532c0f0d31b5db75fe6b91229f5d59 | [
"Apache-2.0"
] | 1 | 2017-11-28T04:37:33.000Z | 2017-11-28T04:37:33.000Z | console/audiobc.py | ihydrogen/hydrogen-chat-bot-py | b21ece5cf2532c0f0d31b5db75fe6b91229f5d59 | [
"Apache-2.0"
] | null | null | null | import bot_header
from vk_api.api import get_api
from vk_api.api import api_request
CNAME = "audiobc"
def main(command):
args = command.replace(CNAME, "").strip()
msg = args
if not msg:
raise Exception("Usage: audiobc 'OID_AID'")
api = get_api(account=bot_header.CURRENT_ACCOUNT)
r = api_request(api, "audio.setBroadcast", "audio=\"%s\"" % (msg))
return r
| 23.058824 | 70 | 0.673469 | 57 | 392 | 4.45614 | 0.54386 | 0.070866 | 0.070866 | 0.094488 | 0.141732 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.196429 | 392 | 16 | 71 | 24.5 | 0.806349 | 0 | 0 | 0 | 0 | 0 | 0.143223 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.083333 | false | 0 | 0.25 | 0 | 0.416667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce9b98f62ef5cb97427d1594d583d064d67aed9e | 988 | py | Python | sebastian/core/__init__.py | aisipos/sebastian | 4e460c3aeab332b45c74fe78e65e76ec87d5cfa8 | [
"MIT"
] | 47 | 2015-01-07T16:25:27.000Z | 2022-03-07T07:21:27.000Z | sebastian/core/__init__.py | EQ4/sebastian | 4e460c3aeab332b45c74fe78e65e76ec87d5cfa8 | [
"MIT"
] | 1 | 2015-02-02T20:25:15.000Z | 2015-02-02T20:25:15.000Z | sebastian/core/__init__.py | EQ4/sebastian | 4e460c3aeab332b45c74fe78e65e76ec87d5cfa8 | [
"MIT"
] | 10 | 2015-02-02T19:48:57.000Z | 2021-03-19T17:45:17.000Z | # this is just an initial sketch of the data structures so don't read too
# much into them at this stage.
# basically, a Sequence is just a collection of Points and a Point is just a
# dict giving values to certain Attributes.
#
# there are three types of Sequences: OSequences, HSequences and VSequences
# only OSequences are currently implemented
#
# OSequence assumes the Points have OFFSET_64 attribute values and
# will also make use of the DURATION_64 attribute.
#
# see datastructure_notes.txt for some of the thinking behind this whole
# approach and a bit of roadmap as to where things are headed.
OFFSET_64 = "offset_64"
MIDI_PITCH = "midi_pitch"
DURATION_64 = "duration_64"
DEGREE = 'degree'
from sebastian.core.elements import OSeq, Point, VSeq, HSeq # noqa
OSequence = OSeq(OFFSET_64, DURATION_64)
#
#
# def shift(offset):
# def _(point):
# point[OFFSET_64] = point[OFFSET_64] + offset
# return point
# return lambda seq: seq.map_points(_)
#
#
| 28.228571 | 76 | 0.739879 | 150 | 988 | 4.766667 | 0.593333 | 0.067133 | 0.01958 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.025 | 0.190283 | 988 | 34 | 77 | 29.058824 | 0.86875 | 0.745951 | 0 | 0 | 0 | 0 | 0.15859 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce9c12731a8771102b8453503e864bec0aa11330 | 5,219 | py | Python | google_photos_uploader.py | aikige/google-photos-uploader | db86190465dd41ea0822d3b35e7660c676be9742 | [
"MIT"
] | null | null | null | google_photos_uploader.py | aikige/google-photos-uploader | db86190465dd41ea0822d3b35e7660c676be9742 | [
"MIT"
] | null | null | null | google_photos_uploader.py | aikige/google-photos-uploader | db86190465dd41ea0822d3b35e7660c676be9742 | [
"MIT"
] | null | null | null | import os.path
import pickle
import json
import mimetypes
from google_auth_oauthlib.flow import InstalledAppFlow
from google.auth.transport.requests import AuthorizedSession
from google.auth.transport.requests import Request
def get_authorized_session_oob(opt):
# Reference: https://github.com/ido-ran/google-photos-api-python-quickstart
SCOPES = [ 'https://www.googleapis.com/auth/photoslibrary.appendonly',
'https://www.googleapis.com/auth/photoslibrary.readonly.appcreateddata' ]
creds_file_name = opt.get('creds')
client_secret_file = opt.get('client_secret')
creds = None
try:
with open(creds_file_name, 'rb') as creds_file:
creds = pickle.load(creds_file)
print('credential loaded: ' + creds_file_name)
except:
print('failed to read:' + creds_file_name)
if not creds or not creds.valid:
if (creds and creds.expired and creds.refresh_token):
print('credential refreshed')
creds.refresh(Request())
else:
flow = InstalledAppFlow.from_client_secrets_file(
client_secret_file, SCOPES,
redirect_uri='urn:ietf:wg:oauth:2.0:oob')
url, user_code = flow.authorization_url()
print('User code: ' + user_code)
print('Please connect to following URL to authorize this application, and input authorization code.')
print(url)
code = input('Enter authorization code: ').strip()
flow.fetch_token(code=code)
creds = flow.credentials
with open(creds_file_name, 'wb') as creds_file:
pickle.dump(creds, creds_file)
return AuthorizedSession(creds)
def create_album(session, title):
url = 'https://photoslibrary.googleapis.com/v1/albums'
session.headers['Content-type'] = 'application/json'
msg = {'album':{ 'title': title }}
resp = session.post(url, json.dumps(msg)).json()
del(session.headers['Content-type'])
if resp.ok:
resp_json = resp.json()
print(json.dumps(resp_json, indent=2))
return resp_json.get('id')
else:
return None
def list_albums(session):
url = 'https://photoslibrary.googleapis.com/v1/albums'
resp = session.get(url)
if resp.ok:
resp_json = resp.json()
print(json.dumps(resp_json, indent=2))
return resp_json
else:
return {}
def upload(session, file, album_id=None):
# Step 1: upload media body
try:
with open(file, 'rb') as photo_file:
photo_bytes = photo_file.read()
    except OSError as err:
        print('failed to read: %s (%s)' % (file, err))
return
mime_type, encoding = mimetypes.guess_type(file)
session.headers['Content-type'] = 'application/octet-stream'
session.headers['X-Goog-Upload-Content-Type'] = mime_type
session.headers['X-Goog-Upload-Protocol'] = 'raw'
session.headers['X-Goog-Upload-File-Name'] = os.path.basename(file)
url = 'https://photoslibrary.googleapis.com/v1/uploads'
resp = session.post(url, photo_bytes)
if resp.ok:
print('uploaded: %d bytes' % len(photo_bytes))
print('token: ' + resp.text)
upload_token = resp.text
else:
print('failed: %d' % resp.status_code)
return
del(session.headers['X-Goog-Upload-Content-Type'])
del(session.headers['X-Goog-Upload-Protocol'])
del(session.headers['X-Goog-Upload-File-Name'])
# Step 2: create media-item based on upload data.
session.headers['Content-type'] = 'application/json'
msg = {'newMediaItems':[{'description':'','simpleMediaItem':{'uploadToken':upload_token}}]}
if album_id:
msg['albumId'] = album_id
url = 'https://photoslibrary.googleapis.com/v1/mediaItems:batchCreate'
resp = session.post(url, json.dumps(msg))
if resp.ok:
print('done')
else:
print('failed: %d: %s' % (resp.status_code, resp.text))
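# Illustrative one-off runs (file and album names are placeholders):
#   python google_photos_uploader.py -l
#   python google_photos_uploader.py -n "Holiday 2020" IMG_0001.jpg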
if __name__ == '__main__':
import argparse
parser = argparse.ArgumentParser(description='Script to upload an image to Google Photos.')
parser.add_argument('filename', nargs='?', help='filename of image to upload', default=None)
parser.add_argument('-c', '--creds', default='credentials.pickle',
help='specify credential file. default: credentials.pickle')
parser.add_argument('-s', '--client-secret', default='client_secret.json',
help='specify Client-Secret file, used to set Client ID etc. defalt: client_secret.json')
parser.add_argument('-l', '--list-albums', action='store_true',
help='list albums created by this application.')
parser.add_argument('-a', '--album-id', default=None,
help='set album id to upload the image (optional)')
parser.add_argument('-n', '--new-album', default=None,
help='crate album with specified title and append image to the album.')
opt = vars(parser.parse_args())
session = get_authorized_session_oob(opt)
if opt['new_album']:
opt['album_id'] = create_album(session, opt['new_album'])
if opt['list_albums']:
list_albums(session)
if opt['filename']:
upload(session, opt['filename'], opt['album_id'])
| 42.430894 | 113 | 0.653382 | 659 | 5,219 | 5.047041 | 0.283763 | 0.042093 | 0.02706 | 0.034275 | 0.279314 | 0.240229 | 0.150932 | 0.040289 | 0.040289 | 0.040289 | 0 | 0.002436 | 0.213451 | 5,219 | 122 | 114 | 42.778689 | 0.807795 | 0.028166 | 0 | 0.184211 | 0 | 0 | 0.292818 | 0.037687 | 0 | 0 | 0 | 0 | 0 | 1 | 0.035088 | false | 0 | 0.070175 | 0 | 0.166667 | 0.122807 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce9c997546f7d87d6cf6cb0fb27e8369d631fee2 | 4,310 | py | Python | corankco/partitioning/parfront.py | pierreandrieu/corankco | 769b18ef349de3a0305f878724f6e6ae41f9f38f | [
"MIT"
] | null | null | null | corankco/partitioning/parfront.py | pierreandrieu/corankco | 769b18ef349de3a0305f878724f6e6ae41f9f38f | [
"MIT"
] | null | null | null | corankco/partitioning/parfront.py | pierreandrieu/corankco | 769b18ef349de3a0305f878724f6e6ae41f9f38f | [
"MIT"
] | null | null | null | from corankco.dataset import Dataset
from corankco.scoringscheme import ScoringScheme
from typing import Tuple, List, Set
from numpy import vdot, ndarray, count_nonzero, shape, array, zeros, asarray
from igraph import Graph
class ParFront:
def __init__(self):
pass
def compute_frontiers(
self,
dataset: Dataset,
scoring_scheme: ScoringScheme
) -> List[Set]:
"""
:param dataset: A dataset containing the rankings to aggregate
:type dataset: Dataset (class Dataset in package 'datasets')
:param scoring_scheme: The penalty vectors to consider
:type scoring_scheme: ScoringScheme (class ScoringScheme in package 'distances')
:return a list of sets of elements such that any exact consensus respects this partitioning
"""
sc = asarray(scoring_scheme.penalty_vectors)
rankings = dataset.rankings
elem_id = {}
id_elements = {}
id_elem = 0
for ranking in rankings:
for bucket in ranking:
for element in bucket:
if element not in elem_id:
elem_id[element] = id_elem
id_elements[id_elem] = element
id_elem += 1
positions = dataset.get_positions(elem_id)
gr1, mat_score, robust_arcs = self.__graph_of_elements(positions, sc)
sccs = gr1.components()
partition = []
for scc in sccs:
partition.append(set(scc))
i = 0
while i < len(partition) - 1:
set1 = partition[i]
set2 = partition[i+1]
fusion = False
for x in set1:
for y in set2:
if (x, y) not in robust_arcs:
fusion = True
break
if fusion:
break
if fusion:
for x in set2:
set1.add(x)
partition.pop(i+1)
i = max(i-1, 1)
else:
i += 1
res = []
for group in partition:
g = set()
res.append(g)
for elem in group:
g.add(id_elements[elem])
return res
@staticmethod
def __graph_of_elements(positions: ndarray, matrix_scoring_scheme: ndarray) -> Tuple[Graph, ndarray, Set[Tuple]]:
graph_of_elements = Graph(directed=True)
robust_arcs = set()
cost_before = matrix_scoring_scheme[0]
cost_tied = matrix_scoring_scheme[1]
cost_after = array([cost_before[1], cost_before[0], cost_before[2], cost_before[4], cost_before[3],
cost_before[5]])
n = shape(positions)[0]
m = shape(positions)[1]
for i in range(n):
graph_of_elements.add_vertex(name=str(i))
matrix = zeros((n, n, 3))
edges = []
for e1 in range(n):
mem = positions[e1]
d = count_nonzero(mem == -1)
for e2 in range(e1 + 1, n):
a = count_nonzero(mem + positions[e2] == -2)
b = count_nonzero(mem == positions[e2])
c = count_nonzero(positions[e2] == -1)
e = count_nonzero(mem < positions[e2])
relative_positions = array([e - d + a, m - e - b - c + a, b - a, c - a, d - a, a])
put_before = vdot(relative_positions, cost_before)
put_after = vdot(relative_positions, cost_after)
put_tied = vdot(relative_positions, cost_tied)
if put_before > put_after or put_before > put_tied:
edges.append((e2, e1))
if put_after > put_before or put_after > put_tied:
edges.append((e1, e2))
if put_before < put_after and put_before < put_tied:
robust_arcs.add((e1, e2))
if put_after < put_before and put_after < put_tied:
robust_arcs.add((e2, e1))
matrix[e1][e2] = [put_before, put_after, put_tied]
matrix[e2][e1] = [put_after, put_before, put_tied]
graph_of_elements.add_edges(edges)
return graph_of_elements, matrix, robust_arcs
| 38.482143 | 117 | 0.538051 | 511 | 4,310 | 4.342466 | 0.248532 | 0.036503 | 0.040559 | 0.032447 | 0.087427 | 0 | 0 | 0 | 0 | 0 | 0 | 0.019331 | 0.37587 | 4,310 | 111 | 118 | 38.828829 | 0.805576 | 0.081439 | 0 | 0.0625 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.03125 | false | 0.010417 | 0.052083 | 0 | 0.114583 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce9cf064736e848701f860e188bae0eaadcebdab | 2,514 | py | Python | generate_pdf.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | 1 | 2020-11-25T04:07:37.000Z | 2020-11-25T04:07:37.000Z | generate_pdf.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | 52 | 2018-10-25T05:49:30.000Z | 2022-03-16T22:31:57.000Z | generate_pdf.py | unfoldingWord-dev/tx-job-handler | 5364ed079bbd5b6528eeb6d12f2ca5c696e84f4f | [
"MIT"
] | null | null | null | #!/usr/bin/env python3
#
# Copyright (c) 2021 unfoldingWord
# http://creativecommons.org/licenses/MIT/
# See LICENSE file for details.
#
# Contributors:
# Richard Mahn <rich.mahn@unfoldingword.org>
import argparse
import sys
from webhook import process_tx_job
from door43_tools.subjects import SUBJECT_ALIASES
if __name__ == '__main__':
parser = argparse.ArgumentParser(description=__doc__, formatter_class=argparse.RawDescriptionHelpFormatter)
parser.add_argument('-o', '--output_file', dest='output_file', required=False,
help='Path to output file, including the zip file name')
parser.add_argument('--owner', dest='owner', default="unfoldingWord", required=False,
help=f'Owner of the resource repo on GitHub. Default: unfoldingWord')
parser.add_argument('--repo', dest='repo_name', required=True, help=f'Repo name')
parser.add_argument('--ref', dest='ref', default='master', help='Branch or tag name. Default: master')
parser.add_argument('-p', '--project_id', metavar='PROJECT ID', dest='project_ids', required=False, action='append',
help='Project ID for resources with projects, as listed in the manfiest.yaml file, such as a Bible book '+
'(-p gen). Can specify multiple projects. Default: None (different converters will handle no or multiple '+
'projects differently, such as compiling all into one PDF, or generating a PDF for each project)')
args = parser.parse_args(sys.argv[1:])
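    # Illustrative invocation (owner/repo/book values are placeholders):
    #   python generate_pdf.py --owner unfoldingWord --repo en_tn -p gen -p exo -o /tmp/en_tn.zip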
lang, resource = args.repo_name.split('_')
subject = None
for s, r in SUBJECT_ALIASES.items():
if resource == r[0]:
subject = s.replace(' ', '_')
break
input_format = "md"
if subject.startswith('TSV'):
input_format = "tsv"
data = {
"output": args.output_file,
"job_id": f"Door43--{args.owner}--{args.repo_name}",
"identifier": f"{args.owner}--{args.repo_name}",
"resource_type": subject,
"input_format": input_format,
"output_format": "pdf",
"source": f"https://git.door43.org/{args.owner}/{args.repo_name}/archive/{args.ref}.zip",
"repo_name": args.repo_name,
"repo_owner": args.owner,
"repo_ref": args.ref,
"repo_data_url": f"https://git.door43.org/{args.owner}/{args.repo_name}/archive/{args.ref}.zip",
"dcs_domain": "https://git.door43.org",
"project_ids": args.project_ids,
}
print(data)
process_tx_job("dev", data)
| 43.344828 | 131 | 0.654733 | 324 | 2,514 | 4.910494 | 0.425926 | 0.045255 | 0.045255 | 0.04274 | 0.096794 | 0.070396 | 0.070396 | 0.070396 | 0.070396 | 0.070396 | 0 | 0.00847 | 0.201671 | 2,514 | 57 | 132 | 44.105263 | 0.784255 | 0.074383 | 0 | 0 | 0 | 0.069767 | 0.414834 | 0.029323 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.093023 | 0 | 0.093023 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce9d2c4f11a7d337e50290dd3651fc0fa82572d6 | 1,259 | py | Python | tests/hwsim/test_p2p_device.py | PleXone2019/hostap | a06b1070d8902460a9c61a3e13af577327fce6b3 | [
"Unlicense"
] | 5 | 2017-01-08T17:30:55.000Z | 2018-04-30T19:33:29.000Z | tests/hwsim/test_p2p_device.py | PleXone2019/hostap | a06b1070d8902460a9c61a3e13af577327fce6b3 | [
"Unlicense"
] | null | null | null | tests/hwsim/test_p2p_device.py | PleXone2019/hostap | a06b1070d8902460a9c61a3e13af577327fce6b3 | [
"Unlicense"
] | 8 | 2017-03-12T20:16:07.000Z | 2021-11-13T15:24:39.000Z | #!/usr/bin/python
#
# cfg80211 P2P Device
# Copyright (c) 2013, Jouni Malinen <j@w1.fi>
#
# This software may be distributed under the terms of the BSD license.
# See README for more details.
import logging
logger = logging.getLogger()
import time
from wpasupplicant import WpaSupplicant
from test_p2p_grpform import go_neg_pin_authorized
from test_p2p_grpform import check_grpform_results
from test_p2p_grpform import remove_group
def test_p2p_device_grpform(dev, apdev):
"""P2P group formation with driver using cfg80211 P2P Device"""
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
[i_res, r_res] = go_neg_pin_authorized(i_dev=dev[0], i_intent=15,
r_dev=wpas, r_intent=0)
check_grpform_results(i_res, r_res)
remove_group(dev[0], wpas)
def test_p2p_device_grpform2(dev, apdev):
"""P2P group formation with driver using cfg80211 P2P Device (reverse)"""
wpas = WpaSupplicant(global_iface='/tmp/wpas-wlan5')
wpas.interface_add("wlan5")
[i_res, r_res] = go_neg_pin_authorized(i_dev=wpas, i_intent=15,
r_dev=dev[0], r_intent=0)
check_grpform_results(i_res, r_res)
remove_group(wpas, dev[0])
| 35.971429 | 77 | 0.70691 | 187 | 1,259 | 4.491979 | 0.368984 | 0.053571 | 0.02381 | 0.038095 | 0.578571 | 0.461905 | 0.461905 | 0.461905 | 0.461905 | 0.461905 | 0 | 0.044554 | 0.197776 | 1,259 | 34 | 78 | 37.029412 | 0.787129 | 0.241461 | 0 | 0.285714 | 0 | 0 | 0.042644 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.285714 | 0 | 0.380952 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce9e329766772654bbb7e049085514c0d94dbeae | 489 | py | Python | keyence2021/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | 2 | 2022-01-22T07:56:58.000Z | 2022-01-24T00:29:37.000Z | keyence2021/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | keyence2021/a/main.py | tonko2/AtCoder | 5d617072517881d226d7c8af09cb88684d41af7e | [
"Xnet",
"X11",
"CECILL-B"
] | null | null | null | import sys
import math
from collections import defaultdict, deque
sys.setrecursionlimit(10 ** 6)
stdin = sys.stdin
INF = float('inf')
ni = lambda: int(ns())
na = lambda: list(map(int, stdin.readline().split()))
ns = lambda: stdin.readline().strip()
N = ni()
A = na()
B = na()
A_max = [0] * (N + 1)
B_max = [0] * (N + 1)
ans = 0
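# Sketch of the idea (assuming the task asks, for each index i, for the
# maximum of A[j] * B[k] over j <= k <= i): A_max[i + 1] holds the prefix
# maximum of A up to index i, so A_max[i + 1] * B[i] is the best product
# whose B-factor sits at index i, and "ans" keeps the running maximum.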
for i in range(N):
A_max[i + 1] = max(A_max[i], A[i])
for i in range(N):
print(max(ans, A_max[i + 1] * B[i]))
ans = max(ans, A_max[i + 1] * B[i]) | 20.375 | 53 | 0.586912 | 92 | 489 | 3.054348 | 0.380435 | 0.071174 | 0.071174 | 0.064057 | 0.185053 | 0.099644 | 0.099644 | 0.099644 | 0 | 0 | 0 | 0.028278 | 0.204499 | 489 | 24 | 54 | 20.375 | 0.694087 | 0 | 0 | 0.1 | 0 | 0 | 0.006122 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.15 | 0 | 0.15 | 0.05 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ce9ee6f1a89e66c53e60830dbcf625d22c6d8a03 | 6,593 | py | Python | memsource_cli/models/async_response_dto.py | unofficial-memsource/memsource-cli-client | a6639506b74e95476da87f4375953448b76ea90c | [
"Apache-2.0"
] | 16 | 2019-09-25T00:20:38.000Z | 2021-05-04T05:56:10.000Z | memsource_cli/models/async_response_dto.py | zerodayz/memsource-cli-client | c2574f1467539a49e6637c874e88d75c7ef789b3 | [
"Apache-2.0"
] | 26 | 2019-09-30T14:00:03.000Z | 2021-05-12T11:15:18.000Z | memsource_cli/models/async_response_dto.py | zerodayz/memsource-cli-client | c2574f1467539a49e6637c874e88d75c7ef789b3 | [
"Apache-2.0"
] | 1 | 2021-05-24T16:19:14.000Z | 2021-05-24T16:19:14.000Z | # coding: utf-8
"""
Memsource REST API
Welcome to Memsource's API documentation. To view our legacy APIs please [visit our documentation](https://wiki.memsource.com/wiki/Memsource_API) and for more information about our new APIs, [visit our blog](https://www.memsource.com/blog/2017/10/24/introducing-rest-apis-qa-with-the-memsource-api-team/). If you have any questions, please contact [Memsource Support](<mailto:support@memsource.com>). # noqa: E501
OpenAPI spec version: Latest
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
import pprint
import re # noqa: F401
import six
from memsource_cli.models.error_detail_dto import ErrorDetailDto # noqa: F401,E501
class AsyncResponseDto(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
"""
"""
Attributes:
swagger_types (dict): The key is attribute name
and the value is attribute type.
attribute_map (dict): The key is attribute name
and the value is json key in definition.
"""
swagger_types = {
'date_created': 'datetime',
'error_code': 'str',
'error_desc': 'str',
'error_details': 'list[ErrorDetailDto]',
'warnings': 'list[ErrorDetailDto]'
}
attribute_map = {
'date_created': 'dateCreated',
'error_code': 'errorCode',
'error_desc': 'errorDesc',
'error_details': 'errorDetails',
'warnings': 'warnings'
}
def __init__(self, date_created=None, error_code=None, error_desc=None, error_details=None, warnings=None): # noqa: E501
"""AsyncResponseDto - a model defined in Swagger""" # noqa: E501
self._date_created = None
self._error_code = None
self._error_desc = None
self._error_details = None
self._warnings = None
self.discriminator = None
if date_created is not None:
self.date_created = date_created
if error_code is not None:
self.error_code = error_code
if error_desc is not None:
self.error_desc = error_desc
if error_details is not None:
self.error_details = error_details
if warnings is not None:
self.warnings = warnings
@property
def date_created(self):
"""Gets the date_created of this AsyncResponseDto. # noqa: E501
:return: The date_created of this AsyncResponseDto. # noqa: E501
:rtype: datetime
"""
return self._date_created
@date_created.setter
def date_created(self, date_created):
"""Sets the date_created of this AsyncResponseDto.
:param date_created: The date_created of this AsyncResponseDto. # noqa: E501
:type: datetime
"""
self._date_created = date_created
@property
def error_code(self):
"""Gets the error_code of this AsyncResponseDto. # noqa: E501
:return: The error_code of this AsyncResponseDto. # noqa: E501
:rtype: str
"""
return self._error_code
@error_code.setter
def error_code(self, error_code):
"""Sets the error_code of this AsyncResponseDto.
:param error_code: The error_code of this AsyncResponseDto. # noqa: E501
:type: str
"""
self._error_code = error_code
@property
def error_desc(self):
"""Gets the error_desc of this AsyncResponseDto. # noqa: E501
:return: The error_desc of this AsyncResponseDto. # noqa: E501
:rtype: str
"""
return self._error_desc
@error_desc.setter
def error_desc(self, error_desc):
"""Sets the error_desc of this AsyncResponseDto.
:param error_desc: The error_desc of this AsyncResponseDto. # noqa: E501
:type: str
"""
self._error_desc = error_desc
@property
def error_details(self):
"""Gets the error_details of this AsyncResponseDto. # noqa: E501
:return: The error_details of this AsyncResponseDto. # noqa: E501
:rtype: list[ErrorDetailDto]
"""
return self._error_details
@error_details.setter
def error_details(self, error_details):
"""Sets the error_details of this AsyncResponseDto.
:param error_details: The error_details of this AsyncResponseDto. # noqa: E501
:type: list[ErrorDetailDto]
"""
self._error_details = error_details
@property
def warnings(self):
"""Gets the warnings of this AsyncResponseDto. # noqa: E501
:return: The warnings of this AsyncResponseDto. # noqa: E501
:rtype: list[ErrorDetailDto]
"""
return self._warnings
@warnings.setter
def warnings(self, warnings):
"""Sets the warnings of this AsyncResponseDto.
:param warnings: The warnings of this AsyncResponseDto. # noqa: E501
:type: list[ErrorDetailDto]
"""
self._warnings = warnings
def to_dict(self):
"""Returns the model properties as a dict"""
result = {}
for attr, _ in six.iteritems(self.swagger_types):
value = getattr(self, attr)
if isinstance(value, list):
result[attr] = list(map(
lambda x: x.to_dict() if hasattr(x, "to_dict") else x,
value
))
elif hasattr(value, "to_dict"):
result[attr] = value.to_dict()
elif isinstance(value, dict):
result[attr] = dict(map(
lambda item: (item[0], item[1].to_dict())
if hasattr(item[1], "to_dict") else item,
value.items()
))
else:
result[attr] = value
if issubclass(AsyncResponseDto, dict):
for key, value in self.items():
result[key] = value
return result
def to_str(self):
"""Returns the string representation of the model"""
return pprint.pformat(self.to_dict())
def __repr__(self):
"""For `print` and `pprint`"""
return self.to_str()
def __eq__(self, other):
"""Returns true if both objects are equal"""
if not isinstance(other, AsyncResponseDto):
return False
return self.__dict__ == other.__dict__
def __ne__(self, other):
"""Returns true if both objects are not equal"""
return not self == other
| 29.698198 | 421 | 0.607462 | 766 | 6,593 | 5.052219 | 0.199739 | 0.031008 | 0.113695 | 0.100775 | 0.389147 | 0.301292 | 0.262532 | 0.243152 | 0.1323 | 0.079587 | 0 | 0.016244 | 0.299712 | 6,593 | 221 | 422 | 29.832579 | 0.821962 | 0.362657 | 0 | 0.070707 | 0 | 0 | 0.064589 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.161616 | false | 0 | 0.040404 | 0 | 0.343434 | 0.020202 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea0cd1cccab37a74914fd280436875867492fc4 | 4,082 | py | Python | serial.py | jacoboisaza/iot-TBM | 79504cb182675f97c3496b8f87b679696e55fc49 | [
"MIT"
] | null | null | null | serial.py | jacoboisaza/iot-TBM | 79504cb182675f97c3496b8f87b679696e55fc49 | [
"MIT"
] | null | null | null | serial.py | jacoboisaza/iot-TBM | 79504cb182675f97c3496b8f87b679696e55fc49 | [
"MIT"
] | null | null | null | import serial
import smtplib, ssl
# Serial port configuration
com_serial = serial.Serial('/dev/ttyUSB0', timeout=1, baudrate=115200, bytesize=8, parity='N', stopbits=1, xonxoff=False, rtscts=False, dsrdtr=False)
# List declarations
status = []
statusraw = []
statisticsraw = []
statistics = []
statdata=[]
# Command declarations
CMD_PC_CTRL_START = [0x02,0x02,0x01,0x01,0x03,0xFB]
CMD_PC_CTRL_STOP = [0x02,0x01,0x02,0x03,0xFC]
CMD_PC_CTRL_STATISTICS = [0x02,0x02,0x20,0x00,0x03,0xDD]
# Lookup lists used to interpret the data
BosilloR = ['Libre','Ocupado']
Puerta = ['Cerrada','Abierta']
Modos = ['Mezcla','Denominacion','Cuentanotas','Cara','Orientacion','Issue','Serial','Separate','Barcode','Barcode + Efectivo','','Dissue']
Lote = ['100','50','25','20','10','Batch Apagado','Batch por numero personalizado','Batch por monto']
#Send CMD_PC_CTRL_STOP
com_serial.write(serial.to_bytes(CMD_PC_CTRL_STOP))
#Send CMD_PC_CTRL_START
com_serial.write(serial.to_bytes(CMD_PC_CTRL_START))
# Read the received DEVICE STATUS and SENSOR STATUS data
read_byte = com_serial.read()
statusraw.append(read_byte)
status.append(int.from_bytes(read_byte, "big"))
while read_byte is not None:
read_byte = com_serial.read()
if read_byte == b'':
read_byte = None
break
statusraw.append(read_byte)
status.append(int.from_bytes(read_byte, "big"))
# Split the read results into DEVICE and SENSOR
l = len(statusraw)
length1 = status[1]
device = status[0:length1+4]
sensor = status[length1+4:l]
# Send the STATISTICS request
com_serial.write(serial.to_bytes(CMD_PC_CTRL_STATISTICS))
# Read the received STATISTICS data
read_byte = com_serial.read()
statistics.append(int.from_bytes(read_byte, "big"))
statisticsraw.append(read_byte)
while read_byte is not None:
read_byte = com_serial.read()
if read_byte == b'':
read_byte = None
break
statisticsraw.append(read_byte)
statistics.append(int.from_bytes(read_byte, "big"))
# Send the CMD_PC_CTRL_STOP request
com_serial.write(serial.to_bytes(CMD_PC_CTRL_STOP))
# Process the DEVICE STATUS data
ndata = device[1]-1
actualcurr = device[3]
dataindex = device[4]
modeindex = device[5]
batchindex = device[6]
batchn1 = device[7]
batchn2 = device[8]
currn = device[9]
currencies = []
i = 10
for j in range(currn):
currencies.append('')
for k in range(3):
currencies[j] = currencies[j]+chr(device[10+j*3+k])
i = i + 1
ndenom = []
for j in range(currn-1):
ndenom.append(device[i])
i = i + 1
cant_batch = []
for j in range(ndata-i+3):
cant_batch.append(device[i])
i = i + 1
# Process the SENSOR STATUS data
sensores = [0,0,0,0,0,0,0,0]
sensorstatus = sensor[3]
i=0
while sensorstatus // 2 != 0:
sensores[i]=sensorstatus % 2
sensorstatus = sensorstatus // 2
i = i + 1
if sensorstatus == 1:
sensores[i]=sensorstatus % 2
# Process the STATISTICS data
statdata = statistics[3:statistics[1]+2]
totalcount = (statdata[0]<<24 | statdata[1]<<16 | statdata[2]<<8 | statdata[3])
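# Sketch of the 32-bit big-endian assembly above: if statdata[0..3] were,
# say, [0x00, 0x01, 0x00, 0x00], then totalcount would be
# 0x00<<24 | 0x01<<16 | 0x00<<8 | 0x00 = 65536 counted notes.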
# Send the email notification
port = 465 # For SSL
smtp_server = "smtp.gmail.com"
sender_email = "" # Enter your address
receiver_email = "" # Enter receiver address
password = ""
message = """\
Subject: "Notificacion CBM"
Total de billetes contados: """+str(totalcount)
context = ssl.create_default_context()
with smtplib.SMTP_SSL(smtp_server, port, context=context) as server:
server.login(sender_email, password)
server.sendmail(sender_email, receiver_email, message)
print ('Moneda Actual:' + currencies[actualcurr])
print ('Modo de conteo:' + Modos[modeindex])
print ('Modo de batch:' + Lote[batchindex])
# print (totalcount)
# print ('Cantidad de monedas instaladas: ' + str(currn-2))
# print ('Estado de bolsillo de rechazo: ' + BosilloR[sensores[7]])
# print ('Estado de bandeja de entrada: ' + BosilloR[sensores[5]])
# print ('Estado de apilador: ' + BosilloR[sensores[6]])
# print ('Estado de puerta superior: ' + Puerta[sensores[0]])
# print ('Estado de inferior: ' + Puerta[sensores[2]]) | 30.691729 | 149 | 0.712396 | 585 | 4,082 | 4.839316 | 0.317949 | 0.050865 | 0.031791 | 0.02296 | 0.22183 | 0.195691 | 0.184387 | 0.181561 | 0.154009 | 0.128576 | 0 | 0.03824 | 0.147967 | 4,082 | 133 | 150 | 30.691729 | 0.775733 | 0.228319 | 0 | 0.304348 | 0 | 0 | 0.1056 | 0 | 0 | 0 | 0.02176 | 0 | 0 | 1 | 0 | false | 0.021739 | 0.021739 | 0 | 0.021739 | 0.032609 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea190f3a40fb43827a59211c70391501a4af9a4 | 1,219 | py | Python | src/commands/logout.py | OdatNurd/OdatNurdTestPackage | 0aa771e6a2d64224604d42d04ab2f6334320daf8 | [
"MIT"
] | 1 | 2021-07-17T04:01:11.000Z | 2021-07-17T04:01:11.000Z | src/commands/logout.py | OdatNurd/OdatNurdTestPackage | 0aa771e6a2d64224604d42d04ab2f6334320daf8 | [
"MIT"
] | 1 | 2020-12-22T11:52:54.000Z | 2020-12-23T07:57:29.000Z | src/commands/logout.py | OdatNurd/YouTubeEditor | 0aa771e6a2d64224604d42d04ab2f6334320daf8 | [
"MIT"
] | null | null | null | import sublime
import sublime_plugin
from ...lib import log
from ..core import YoutubeRequest
###----------------------------------------------------------------------------
class YoutubeEditorLogoutCommand(YoutubeRequest, sublime_plugin.ApplicationCommand):
"""
Remove the stored credentials that have been saved (i.e. "Log Out"). Once
this is done, in order to make any further requests the user will have to
re-authorize in order to re-establish the connection.
"""
def run(self, force=False):
if not force:
msg = "If you proceed, you will need to re-authenticate. Continue?"
if sublime.yes_no_cancel_dialog(msg) == sublime.DIALOG_YES:
sublime.run_command("youtube_editor_logout", {"force": True})
return
self.request("deauthorize", reason="Logging out")
def _deauthorize(self, request, result):
log("""
Logged out of YouTube.
Your stored credentials have been cleared; further
access to YouTube will require you to re-authorize
YouTuberizer.
""", dialog=True)
###----------------------------------------------------------------------------
| 32.078947 | 84 | 0.567678 | 129 | 1,219 | 5.286822 | 0.573643 | 0.02346 | 0.026393 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.232158 | 1,219 | 37 | 85 | 32.945946 | 0.728632 | 0.290402 | 0 | 0 | 0 | 0 | 0.368421 | 0.02512 | 0 | 0 | 0 | 0 | 0 | 1 | 0.105263 | false | 0 | 0.210526 | 0 | 0.421053 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea1cfbf1c2c67f77d1b7d47f60cb904dc0d9a2c | 3,049 | py | Python | Agave/python/CFDexec/Slice2D.py | pmackenz/CWE-Simulation-Tool | a77200e68050038574249bf4c8330e90aebafb43 | [
"BSD-3-Clause"
] | 5 | 2019-08-22T13:39:06.000Z | 2021-08-22T15:44:51.000Z | Agave/python/CFDexec/Slice2D.py | pmackenz/CWE-Simulation-Tool | a77200e68050038574249bf4c8330e90aebafb43 | [
"BSD-3-Clause"
] | null | null | null | Agave/python/CFDexec/Slice2D.py | pmackenz/CWE-Simulation-Tool | a77200e68050038574249bf4c8330e90aebafb43 | [
"BSD-3-Clause"
] | 11 | 2019-05-07T05:07:07.000Z | 2021-08-22T15:44:53.000Z | """
Copyright (c) 2018 The University of Notre Dame
Copyright (c) 2018 The Regents of the University of California
Redistribution and use in source and binary forms, with or without modification,
are permitted provided that the following conditions are met:
1. Redistributions of source code must retain the above copyright notice, this
list of conditions and the following disclaimer.
2. Redistributions in binary form must reproduce the above copyright notice, this
list of conditions and the following disclaimer in the documentation and/or other
materials provided with the distribution.
3. Neither the name of the copyright holder nor the names of its contributors may
be used to endorse or promote products derived from this software without specific
prior written permission.
THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY
EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT
SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED
TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR
BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
SUCH DAMAGE.
Contributors:
Written by Peter Sempolinski, for the Natural Hazard Modeling Laboratory, director: Ahsan Kareem, at Notre Dame
Executable to slice 3D SGF into 2D SGF"""
from __future__ import print_function
import sys
from CFDmods.ParseParts import SgfObjs
def give_help_message():
"""Prints help message and exits"""
print("""Usage: slice2D [input geometry] [output geometry]\
[sliceHeight] [slicePlane] [sliceAngle]""")
sys.exit(0)
def main(args):
"""Main function of geometry slicer"""
if len(args) < 4:
give_help_message()
try:
slice_height = float(sys.argv[3])
except ValueError:
print("Slice height should be a number.")
sys.exit(-1)
slice_angle = 0.0
slice_plane = 'z'
if len(args) >= 5:
if args[4] == 'z':
slice_plane = 'z'
elif args[4] == 'y':
slice_plane = 'y'
elif args[4] == 'x':
slice_plane = 'x'
else:
print("Slice plane should be x,y or z.")
sys.exit(-1)
if len(sys.argv) >= 6:
try:
slice_angle = float(sys.argv[5])
except ValueError:
print("Slice angle should be a number.")
sys.exit(-1)
in_geo = SgfObjs.SGFFileData()
in_geo.load_data_from_json(sys.argv[1])
in_geo.perform_slice(slice_plane, slice_height, slice_angle)
in_geo.emit_json_geometry(args[2])
print("Success: Object Sliced")
if __name__ == '__main__':
main(sys.argv)
| 33.505495 | 111 | 0.712693 | 437 | 3,049 | 4.887872 | 0.450801 | 0.02809 | 0.011236 | 0.015918 | 0.107678 | 0.085206 | 0.085206 | 0.06367 | 0.06367 | 0.06367 | 0 | 0.01251 | 0.213513 | 3,049 | 90 | 112 | 33.877778 | 0.878232 | 0.58019 | 0 | 0.225 | 0 | 0 | 0.17498 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.075 | 0 | 0.125 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea441772f79c383d029fad446da67a80e63c4d1 | 4,547 | py | Python | src/support/roadnet.py | TauferLab/UrbanTrafficFramework_20 | fab1e9e77c6560d79d95f48ca8bdf05a356c07af | [
"BSD-3-Clause"
] | null | null | null | src/support/roadnet.py | TauferLab/UrbanTrafficFramework_20 | fab1e9e77c6560d79d95f48ca8bdf05a356c07af | [
"BSD-3-Clause"
] | null | null | null | src/support/roadnet.py | TauferLab/UrbanTrafficFramework_20 | fab1e9e77c6560d79d95f48ca8bdf05a356c07af | [
"BSD-3-Clause"
] | null | null | null | import json
import numpy as np
from typing import Tuple, List, Dict, IO, Iterator
from .utm import convert_to_utm, S_MAJ
CENT_LON = -87
def haversine_np(lon1, lat1, lon2, lat2):
"""
Calculate the great circle distance in meters between two points
on the earth (specified in decimal degrees)
All args must be of equal length.
"""
lon1, lat1, lon2, lat2 = map(np.radians, [lon1, lat1, lon2, lat2])
dlon = lon2 - lon1
dlat = lat2 - lat1
a = np.sin(dlat / 2.0) ** 2 + np.cos(lat1) * np.cos(lat2) * np.sin(dlon / 2.0) ** 2
c = 2 * np.arcsin(np.sqrt(a))
m = S_MAJ * c
return m
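# A rough sanity check (values approximate; assumes S_MAJ, imported from
# .utm, is the Earth's semi-major axis in metres):
#   haversine_np(0.0, 0.0, 1.0, 0.0)  # one degree of longitude at the equator
#   -> about 1.11e5 metres (~111 km)
# All four arguments may also be equal-length numpy arrays, in which case an
# array of per-pair distances is returned.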
class Link:
def __init__(self, prevl, nextl, link_id, direct, coords, link_type):
self.prev: int = prevl
self.next: int = nextl
self.direct: int = direct
self.coords: np.ndarray = np.array(coords)
self.pts_ = np.column_stack(
convert_to_utm(self.coords[:, 1], self.coords[:, 0], CENT_LON)
)
self.points: List[np.ndarray] = [
a.squeeze() for a in np.vsplit(self.pts_, self.pts_.shape[0])
]
self.id: int = link_id
self.type: str = link_type
# lon/lat 1: start from 0, but don't include the end
lon1 = self.coords[:-1, 0]
lat1 = self.coords[:-1, 1]
# lon/lat 2: include the end, but start from 1
lon2 = self.coords[1:, 0]
lat2 = self.coords[1:, 1]
# all 4 of the above array views have length n-1 (where n = # of coords)
# however, lon2/lat2 are offset by 1 compared to lon1/lat1
# (i.e. lon1[0] = coords[0, 0], lon2[0] = coords[1, 0], and so on)
segment_lengths = haversine_np(lon1, lat1, lon2, lat2)
self.seg_lengths: np.ndarray = segment_lengths
self.cum_lengths: np.ndarray = np.cumsum(segment_lengths)
self.rev_lengths: np.ndarray = np.cumsum(np.flip(segment_lengths))
self.length: float = float(self.cum_lengths[-1])
def offset_to_point(self, offset: float, direct: int) -> Tuple[float, float]:
if direct == self.direct:
cum_lengths = self.cum_lengths
seg_lengths = self.seg_lengths
coords = self.coords
else:
cum_lengths = self.rev_lengths
seg_lengths = np.flip(self.seg_lengths)
coords = np.flip(self.coords, axis=0)
# Get the index of the first element in cum_lengths that is >= offset.
# np.nonzero returns a tuple of ndarrays (one ndarray for each dimension).
#
# Additionally, cum_lengths is 1-D, so np.nonzero only ever returns a
# length-1 tuple.
try:
seg_idx = np.nonzero(cum_lengths >= offset)[0][0]
except IndexError:
raise ValueError(
"Offset {} out of bounds for link with length {}".format(
offset, self.length
)
)
# self.cum_lengths and self.rev_lengths always have one less element
# than self.coords, so this indexing should always work:
p1 = coords[seg_idx]
p2 = coords[seg_idx + 1]
t = (cum_lengths[seg_idx] - offset) / seg_lengths[seg_idx]
interpolated = (t * p1) + ((1 - t) * p2)
return convert_to_utm(interpolated[1], interpolated[0], CENT_LON)
def total_length(self) -> float:
return self.length
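# A hedged usage sketch (coordinates and the 'A41' type code are made up):
#   link = Link(prevl=1, nextl=2, link_id=10, direct=1,
#               coords=[(-87.65, 41.85), (-87.64, 41.85)], link_type='A41')
#   x, y = link.offset_to_point(link.total_length() / 2, direct=1)
# offset_to_point() interpolates along the polyline and converts the result
# with convert_to_utm(); passing the opposite "direct" value walks the link
# from the other end.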
class RoadNetwork:
def __init__(self, fp: IO):
self.links: Dict[int, Link] = {}
obj = json.load(fp)
features = obj["features"]
for feature in features:
prop, coords = feature["properties"], feature["geometry"]["coordinates"]
linkid, prevl, nextl, direct = (
int(prop["LINKID"]),
int(prop["FROM"]),
int(prop["TO"]),
int(prop["DIRECT"]),
)
self.links[linkid] = Link(prevl, nextl, linkid, direct, coords, prop["FCC"])
def __iter__(self) -> Iterator[Link]:
return self.links.values().__iter__()
def __len__(self) -> int:
return len(self.links)
def enumerate(self) -> Iterator[Tuple[int, Link]]:
return self.links.items().__iter__()
def bbox(self) -> Tuple[np.ndarray, np.ndarray]:
mins = []
maxes = []
for road in self:
mins.append(np.amin(road.pts_, axis=0))
maxes.append(np.amax(road.pts_, axis=0))
bbox_ne = np.amax(maxes, axis=0)
bbox_sw = np.amin(mins, axis=0)
return bbox_ne, bbox_sw
| 32.71223 | 88 | 0.578403 | 623 | 4,547 | 4.089888 | 0.295345 | 0.039246 | 0.021586 | 0.025118 | 0.040031 | 0.021193 | 0 | 0 | 0 | 0 | 0 | 0.026117 | 0.301078 | 4,547 | 138 | 89 | 32.949275 | 0.775645 | 0.172421 | 0 | 0 | 0 | 0 | 0.02818 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.101124 | false | 0 | 0.044944 | 0.044944 | 0.247191 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea54a0af680cf3ce28ef46b63953f42d79d0ab9 | 2,926 | py | Python | eventregistry/tests/TestERTopicPage.py | vishalbelsare/event-registry-python | 232f0c73caca94590621eb6eea083f268c9abdf4 | [
"MIT"
] | 160 | 2017-02-07T19:51:40.000Z | 2022-03-28T04:58:02.000Z | eventregistry/tests/TestERTopicPage.py | vishalbelsare/event-registry-python | 232f0c73caca94590621eb6eea083f268c9abdf4 | [
"MIT"
] | 33 | 2017-02-04T22:43:58.000Z | 2020-12-09T07:44:39.000Z | eventregistry/tests/TestERTopicPage.py | vishalbelsare/event-registry-python | 232f0c73caca94590621eb6eea083f268c9abdf4 | [
"MIT"
] | 46 | 2017-02-14T02:42:52.000Z | 2021-12-17T11:57:58.000Z | import unittest, math, sys
from eventregistry import *
from eventregistry.tests.DataValidator import DataValidator
class TestTopicPage(DataValidator):
def createTopicPage(self):
q = TopicPage(self.er)
q.loadTopicPageFromER("5aa6837b-d23d-4a71-bc80-7aad676e1905")
return q
def testGetArticlesForTopicPage(self):
q = self.createTopicPage()
uriSet = set()
for page in range(1, 20):
res = q.getArticles(page=page, dataType=["news", "blog"], sortBy="rel")
rel = sys.maxsize
for art in res.get("articles", {}).get("results", []):
assert art.get("wgt") <= rel
rel = art.get("wgt")
assert art.get("uri") not in uriSet
uriSet.add(art.get("uri"))
def testGetEventsForTopicPage(self):
q = self.createTopicPage()
uriSet = set()
for page in range(1, 20):
res = q.getEvents(page=page, sortBy="rel")
rel = sys.maxsize
for event in res.get("events", {}).get("results", []):
assert event.get("wgt") <= rel
rel = event.get("wgt")
assert event.get("uri") not in uriSet
uriSet.add(event.get("uri"))
def testCreateTopicPage(self):
topic = TopicPage(self.er)
appleUri = self.er.getConceptUri("apple")
msoftUri = self.er.getConceptUri("microsoft")
iphoneUri = self.er.getConceptUri("iphone")
businessUri = self.er.getCategoryUri("business")
topic.addConcept(appleUri, 50, required = False)
topic.addConcept(msoftUri, 50, required = True)
topic.addConcept(iphoneUri, 50, excluded = True)
topic.addCategory(businessUri, 50, required=True)
for page in range(1, 10):
res = topic.getArticles(page = page, returnInfo = ReturnInfo(articleInfo=ArticleInfoFlags(concepts=True, categories=True, maxConceptsPerType=100)))
for art in res.get("articles").get("results"):
foundConcept = False
foundCategory = False
for conceptObj in art.get("concepts", []):
assert iphoneUri != conceptObj["uri"], "Found iphone in the article"
if msoftUri == conceptObj["uri"]:
foundConcept = True
for categoryObj in art.get("categories", []):
if categoryObj["uri"].startswith(businessUri):
foundCategory = True
assert foundConcept, "Article did not have a required concept"
assert foundCategory, "Article did not have a required category"
if __name__ == "__main__":
suite = unittest.TestLoader().loadTestsFromTestCase(TestTopicPage)
# suite = unittest.TestSuite()
# suite.addTest(TestQueryArticles("testQuery2"))
unittest.TextTestRunner(verbosity=3).run(suite)
| 41.211268 | 159 | 0.593301 | 297 | 2,926 | 5.818182 | 0.360269 | 0.020833 | 0.015625 | 0.024306 | 0.195602 | 0.186921 | 0.12963 | 0.099537 | 0.0625 | 0.0625 | 0 | 0.020202 | 0.289474 | 2,926 | 70 | 160 | 41.8 | 0.810967 | 0.025632 | 0 | 0.140351 | 0 | 0 | 0.100421 | 0.01264 | 0 | 0 | 0 | 0 | 0.122807 | 1 | 0.070175 | false | 0 | 0.052632 | 0 | 0.157895 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea72639dfe8a3f7064ccc990d55d4baecb906b3 | 3,120 | py | Python | work/util/image_scraper.py | nishipy/obama_smalling_predictor | 21379876cd9eb99108e619e130303f5f5b22f642 | [
"MIT"
] | null | null | null | work/util/image_scraper.py | nishipy/obama_smalling_predictor | 21379876cd9eb99108e619e130303f5f5b22f642 | [
"MIT"
] | null | null | null | work/util/image_scraper.py | nishipy/obama_smalling_predictor | 21379876cd9eb99108e619e130303f5f5b22f642 | [
"MIT"
] | null | null | null | #################################################################
# REFERENCE https://qiita.com/skcvim/items/efc296ae1bf0e62f6704 #
#################################################################
import json
import os
import sys
import urllib.parse
import urllib.request
from bs4 import BeautifulSoup
import requests
class Google:
def __init__(self):
self.GOOGLE_SEARCH_URL = 'https://www.google.co.jp/search'
self.session = requests.session()
self.session.headers.update(
{'User-Agent': 'Mozilla/5.0 (X11; Linux x86_64; rv:10.0) Gecko/20100101 Firefox/10.0'})
def search(self, keyword, maximum):
print('begin searching', keyword)
query = self.query_gen(keyword)
return self.image_search(query, maximum)
def query_gen(self, keyword):
# search query generator
page = 0
while True:
params = urllib.parse.urlencode({
'q': keyword,
'tbm': 'isch',
'ijn': str(page)})
yield self.GOOGLE_SEARCH_URL + '?' + params
page += 1
def image_search(self, query_gen, maximum):
# search image
result = []
total = 0
while True:
# search
html = self.session.get(next(query_gen)).text
soup = BeautifulSoup(html, 'lxml')
elements = soup.select('.rg_meta.notranslate')
jsons = [json.loads(e.get_text()) for e in elements]
imageURLs = [js['ou'] for js in jsons]
# add search result
if not len(imageURLs):
print('-> no more images')
break
elif len(imageURLs) > maximum - total:
result += imageURLs[:maximum - total]
break
else:
result += imageURLs
total += len(imageURLs)
print('-> found', str(len(result)), 'images')
return result
def main():
google = Google()
if len(sys.argv) != 3:
print('invalid argument')
print('> ./image_collector_cui.py [target name] [download number]')
sys.exit()
else:
# save location
name = sys.argv[1]
data_dir = 'data/'
os.makedirs(data_dir, exist_ok=True)
os.makedirs('data/' + name, exist_ok=True)
# search image
result = google.search(
name, maximum=int(sys.argv[2]))
# download
download_error = []
for i in range(len(result)):
print('-> downloading image', str(i + 1).zfill(4))
try:
urllib.request.urlretrieve(
result[i], data_dir + name + '/' + str(i + 1).zfill(4) + '.jpg')
except Exception:
print('--> could not download image', str(i + 1).zfill(4))
download_error.append(i + 1)
continue
print('complete download')
print('├─ download', len(result)-len(download_error), 'images')
print('└─ could not download', len(
download_error), 'images', download_error)
if __name__ == '__main__':
main()
| 30.588235 | 99 | 0.513782 | 332 | 3,120 | 4.728916 | 0.412651 | 0.041401 | 0.009554 | 0.019108 | 0.027389 | 0.020382 | 0 | 0 | 0 | 0 | 0 | 0.022338 | 0.325641 | 3,120 | 101 | 100 | 30.891089 | 0.721958 | 0.050962 | 0 | 0.081081 | 0 | 0.013514 | 0.141085 | 0.008508 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0 | 0.081081 | 0 | 0.189189 | 0.135135 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea7a82f7fbed3c408a162c384c1f27a3fee1e84 | 2,137 | py | Python | oejskit/unittest_support.py | TauPan/oejskit | 52c4bb555871ce711ffa5f8dbcff4a2a1b717665 | [
"MIT"
] | null | null | null | oejskit/unittest_support.py | TauPan/oejskit | 52c4bb555871ce711ffa5f8dbcff4a2a1b717665 | [
"MIT"
] | null | null | null | oejskit/unittest_support.py | TauPan/oejskit | 52c4bb555871ce711ffa5f8dbcff4a2a1b717665 | [
"MIT"
] | null | null | null | import unittest, sys, os
from oejskit import util
from oejskit.testing import giveBrowser, cleanupBrowsers, checkBrowser
NOT_THERE = object()
class JSTestSuite(unittest.TestSuite):
jstests_browser_specs = None
def getglobal(self, name):
return getattr(self, name)
def _expand_browser(self, kind):
try:
kinds = self.jstests_browser_specs[kind]
except KeyError:
kinds = [kind]
return [kind for kind in kinds if checkBrowser(kind)]
def __init__(self, js, root=None, browser_kind=None):
self.testdir = os.path.dirname(sys._getframe(1).f_globals['__file__'])
self.testname = js
if root is None:
root = lambda: None
if self.jstests_browser_specs is None:
self.jstests_browser_specs = {}
if 'any' not in self.jstests_browser_specs:
self.jstests_browser_specs['any'] = util.any_browser()
tests = []
if browser_kind is None:
purebasename = os.path.splitext(os.path.basename(js))[0]
browser_kind = purebasename.split('_')[-1]
for kind in self._expand_browser(browser_kind):
browser, setupBag = giveBrowser(self, self.__class__,
kind, attach=False)
names, runner = browser._gatherTests(js, setupBag)
runner._the_root = NOT_THERE
def makeRunTest(runner, jstest):
def runTest():
if runner._the_root is NOT_THERE:
runner._the_root = root()
runner._runTest(jstest, runner._the_root, None)
return runTest
for jstest in names:
runTest = makeRunTest(runner, jstest)
descr = '%s[=%s][%s]' % (self.testname, kind, jstest)
tests.append(unittest.FunctionTestCase(runTest,
description=descr))
unittest.TestSuite.__init__(self, tests)
def run(self, result):
unittest.TestSuite.run(self, result)
cleanupBrowsers(self)
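# A hedged usage sketch (the browser spec and test file name are illustrative):
#   class MySuite(JSTestSuite):
#       jstests_browser_specs = {'supported': ['firefox']}
#   suite = MySuite('test_foo_any.js')
#   unittest.TextTestRunner().run(suite)
# The suite expands one JS file into a unittest FunctionTestCase per
# in-browser test, one run per matching browser kind.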
| 32.876923 | 78 | 0.582124 | 229 | 2,137 | 5.200873 | 0.323144 | 0.070529 | 0.095718 | 0.096558 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.002088 | 0.327562 | 2,137 | 64 | 79 | 33.390625 | 0.826722 | 0 | 0 | 0 | 0 | 0 | 0.012167 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.12766 | false | 0 | 0.06383 | 0.021277 | 0.297872 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cea973823778baab62f649c73855197261a32c0c | 2,154 | py | Python | Toolkits/VCS/repology__repology-api/repology/fetcher/file.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | Toolkits/VCS/repology__repology-api/repology/fetcher/file.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | Toolkits/VCS/repology__repology-api/repology/fetcher/file.py | roscopecoltran/SniperKit-Core | 4600dffe1cddff438b948b6c22f586d052971e04 | [
"MIT"
] | null | null | null | # Copyright (C) 2016-2017 Dmitry Marakasov <amdmi3@amdmi3.ru>
#
# This file is part of repology
#
# repology is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# repology is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with repology. If not, see <http://www.gnu.org/licenses/>.
import bz2
import gzip
import lzma
import os
from repology.logger import NoopLogger
from repology.www import Get
class FileFetcher():
def __init__(self, url, compression=None):
self.url = url
self.compression = compression
def Fetch(self, statepath, update=True, logger=NoopLogger()):
tmppath = statepath + '.tmp'
if os.path.isfile(statepath) and not update:
logger.Log('no update requested, skipping')
return
with open(tmppath, 'wb') as statefile:
logger.Log('fetching ' + self.url)
data = Get(self.url).content
logger.GetIndented().Log('size is {} byte(s)'.format(len(data)))
if self.compression == 'gz':
logger.GetIndented().Log('decompressing with gzip')
data = gzip.decompress(data)
elif self.compression == 'bz2':
logger.GetIndented().Log('decompressing with bz2')
data = bz2.decompress(data)
elif self.compression == 'xz':
logger.GetIndented().Log('decompressing with xz')
data = lzma.LZMADecompressor().decompress(data)
if self.compression:
logger.GetIndented().Log('size after decompression is {} byte(s)'.format(len(data)))
logger.GetIndented().Log('saving')
statefile.write(data)
os.replace(tmppath, statepath)
| 34.741935 | 100 | 0.648561 | 269 | 2,154 | 5.178439 | 0.475836 | 0.073223 | 0.086145 | 0.040919 | 0.214645 | 0.068916 | 0 | 0 | 0 | 0 | 0 | 0.009317 | 0.252553 | 2,154 | 61 | 101 | 35.311475 | 0.855901 | 0.318942 | 0 | 0 | 0 | 0 | 0.123448 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.060606 | false | 0 | 0.181818 | 0 | 0.30303 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceaad7bdfd44d86ec3ab8cb6d1b32bf99ef5150d | 1,029 | py | Python | pysnptools/distreader/_distmergesids.py | fastlmm/PySnpTools | ce2ecaa5548e82b64c8ed6a205dbf419701b66b6 | [
"Apache-2.0"
] | 13 | 2019-12-23T06:51:08.000Z | 2022-01-07T18:14:55.000Z | pysnptools/distreader/_distmergesids.py | fastlmm/PySnpTools | ce2ecaa5548e82b64c8ed6a205dbf419701b66b6 | [
"Apache-2.0"
] | 3 | 2020-07-30T16:07:43.000Z | 2021-07-14T09:00:42.000Z | pysnptools/distreader/_distmergesids.py | fastlmm/PySnpTools | ce2ecaa5548e82b64c8ed6a205dbf419701b66b6 | [
"Apache-2.0"
] | 3 | 2020-05-22T09:46:16.000Z | 2021-01-26T13:27:36.000Z | import numpy as np
from pysnptools.distreader import DistReader
from pysnptools.pstreader import _MergeCols
class _DistMergeSIDs(_MergeCols,DistReader):
def __init__(self, *args, **kwargs):
super(_DistMergeSIDs, self).__init__(*args, **kwargs)
def _savez(self, cache_file):
np.savez(cache_file,
_row=np.array(self._row,dtype='S'), _row_property=self._row_property,
_col=np.array(self._col,dtype='S'), _col_property=self._col_property,
sid_count_list=self.col_count_list)
def _load(self,cache_file):
with np.load(cache_file,allow_pickle=True) as data:
self._col = np.array(data['_col'],dtype='str')
self._col_property = data['_col_property']
self.col_count_list = data['sid_count_list']
assert ('_row' in data) == ('_row_property' in data)
self._row = np.array(data['_row'],dtype='str')
self._row_property = data['_row_property']
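# Cache round-trip note: _savez() stores the rows/cols as byte strings plus
# the per-source sid counts in a single .npz file, and _load() restores
# them, converting the arrays back to str dtype.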
| 44.73913 | 89 | 0.627794 | 128 | 1,029 | 4.625 | 0.296875 | 0.070946 | 0.043919 | 0.060811 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.251701 | 1,029 | 22 | 90 | 46.772727 | 0.768831 | 0 | 0 | 0 | 0 | 0 | 0.070943 | 0 | 0 | 0 | 0 | 0 | 0.052632 | 1 | 0.157895 | false | 0 | 0.157895 | 0 | 0.368421 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceab0efadf8519abf33781ac63133948563815fb | 7,980 | py | Python | conventions/forms.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | null | null | null | conventions/forms.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | null | null | null | conventions/forms.py | MTES-MCT/appel | 3b840ccea600ef31cfea57721fe5e6edbdbc2c79 | [
"MIT"
] | null | null | null | import datetime
from django import forms
from django.forms import BaseFormSet, formset_factory
from django.forms.fields import FileField
from django.core.exceptions import ValidationError
from programmes.models import Financement, TypeOperation
from .models import Preteur
class ConventionCommentForm(forms.Form):
uuid = forms.UUIDField(required=False)
comments = forms.CharField(
required=False,
max_length=5000,
error_messages={
"max_length": "Le message ne doit pas excéder 5000 caractères",
},
)
comments_files = forms.CharField(
required=False,
)
class ConventionFinancementForm(forms.Form):
prets = []
convention = None
uuid = forms.UUIDField(required=False)
annee_fin_conventionnement = forms.IntegerField(
required=True,
error_messages={
"required": "La date de fin de conventionnement est obligatoire",
},
help_text=(
"Année de signature de la convention + au moins la durée du prêt"
+ " le plus long. Elle ne peut être inférieure à 9 ans. Spécificité"
+ " pour le PLS: comprise entre 15 et 40 ans.Si la convention est"
+ " signée après le 30 juin, la durée de la convention à prendre en"
+ " compte débute à l’année N+1."
),
)
fond_propre = forms.FloatField(required=False)
def clean(self):
cleaned_data = super().clean()
annee_fin_conventionnement = cleaned_data.get("annee_fin_conventionnement")
today = datetime.date.today()
if (
self.prets != []
and self.convention is not None
and annee_fin_conventionnement is not None
):
if self.convention.financement == Financement.PLS:
min_years = today.year + 15
max_years = today.year + 40
if today.month > 6:
min_years = min_years + 1
max_years = max_years + 1
if annee_fin_conventionnement < min_years:
self.add_error(
"annee_fin_conventionnement",
(
"L'année de fin de conventionnement ne peut être inférieur à "
+ f"{min_years}"
),
)
if annee_fin_conventionnement > max_years:
self.add_error(
"annee_fin_conventionnement",
(
"L'année de fin de conventionnement ne peut être supérieur à "
+ f"{max_years}"
),
)
else:
max_duree = 0
for pret in self.prets:
if pret.cleaned_data["preteur"] in ["CDCF", "CDCL"]:
if max_duree is None:
max_duree = int(pret.cleaned_data["duree"])
elif max_duree < pret.cleaned_data["duree"]:
max_duree = int(pret.cleaned_data["duree"])
max_duree = max(max_duree, 9)
if today.month > 6:
max_duree = max_duree + 1
max_duree = max_duree + today.year
if annee_fin_conventionnement < max_duree:
self.add_error(
"annee_fin_conventionnement",
(
"L'année de fin de conventionnement ne peut être inférieur à "
+ f"{max_duree}"
),
)
class PretForm(forms.Form):
uuid = forms.UUIDField(required=False)
numero = forms.CharField(
required=False,
max_length=255,
error_messages={
"max_length": "Le numero ne doit pas excéder 255 caractères",
},
)
preteur = forms.TypedChoiceField(required=False, choices=Preteur.choices)
autre = forms.CharField(
required=False,
max_length=255,
error_messages={
"max_length": "Le prêteur ne doit pas excéder 255 caractères",
},
)
date_octroi = forms.DateField(required=False)
duree = forms.IntegerField(required=False)
montant = forms.DecimalField(
max_digits=12,
decimal_places=2,
error_messages={
"required": "Le montant du prêt est obligatoire",
"max_digits": "Le montant du prêt doit-être inférieur à 10 000 000 000 €",
},
)
def clean(self):
cleaned_data = super().clean()
preteur = cleaned_data.get("preteur")
if preteur in ["CDCF", "CDCL"]:
if not cleaned_data.get("date_octroi"):
self.add_error(
"date_octroi",
"La date d'octroi est obligatoire pour un prêt de la "
+ "Caisse de dépôts et consignations",
)
if not cleaned_data.get("duree"):
self.add_error(
"duree",
"La durée est obligatoire pour un prêt de la Caisse de dépôts et consignations",
)
if not cleaned_data.get("numero"):
self.add_error(
"numero",
(
"Le numéro est obligatoire pour un prêt"
+ " de la Caisse de dépôts et consignations"
),
)
if preteur in ["AUTRE"]:
if not cleaned_data.get("autre"):
self.add_error("autre", "Merci de préciser le prêteur")
class BasePretFormSet(BaseFormSet):
convention = None
def clean(self):
self.manage_cdc_validation()
def manage_cdc_validation(self):
if (
self.convention is not None
and self.convention.financement != Financement.PLS
and self.convention.programme.type_operation != TypeOperation.SANSTRAVAUX
):
for form in self.forms:
# if self.can_delete() and self._should_delete_form(form):
# continue
if form.cleaned_data.get("preteur") in ["CDCF", "CDCL"]:
return
error = ValidationError(
"Au moins un prêt à la Caisse des dépôts et consignations doit-être déclaré "
+ "(CDC foncière, CDC locative)"
)
self._non_form_errors.append(error)
PretFormSet = formset_factory(PretForm, formset=BasePretFormSet, extra=0)
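# A hedged usage sketch (field values and the convention object are
# illustrative assumptions):
#   formset = PretFormSet(data={
#       'form-TOTAL_FORMS': '1', 'form-INITIAL_FORMS': '0',
#       'form-0-preteur': 'CDCF', 'form-0-numero': '123',
#       'form-0-date_octroi': '2021-01-01', 'form-0-duree': '30',
#       'form-0-montant': '100000.00'})
#   formset.convention = some_convention  # set before is_valid() so the CDC check runs
#   formset.is_valid()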
class UploadForm(forms.Form):
file = FileField(
error_messages={
"required": (
"Vous devez séléctionner un fichier avant "
+ "de cliquer sur le bouton 'Téléverser'"
),
}
)
class NotificationForm(forms.Form):
send_copy = forms.BooleanField(required=False)
from_instructeur = forms.BooleanField(required=False)
comment = forms.CharField(
required=False,
max_length=5000,
error_messages={
"max_length": "Le commentaire ne doit pas excéder 5000 caractères",
},
)
class ConventionNumberForm(forms.Form):
prefixe_numero = forms.CharField(
max_length=250,
error_messages={
"max_length": (
"La longueur totale du numéro de convention ne peut pas excéder"
+ " 250 caractères"
),
"required": "Le préfixe du numéro de convention en obligatoire",
},
help_text="département/zone/mois.année/decret/daei/",
)
suffixe_numero = forms.CharField(
max_length=10,
error_messages={
"max_length": "La longueur du numéro de convention ne peut pas excéder 10 caractères",
"required": "Le numéro de convention en obligatoire",
},
)
| 34.248927 | 100 | 0.541729 | 815 | 7,980 | 5.168098 | 0.252761 | 0.04321 | 0.05698 | 0.031339 | 0.385802 | 0.300095 | 0.237892 | 0.188509 | 0.171415 | 0.171415 | 0 | 0.014507 | 0.37807 | 7,980 | 232 | 101 | 34.396552 | 0.833971 | 0.010025 | 0 | 0.272727 | 0 | 0 | 0.23789 | 0.01826 | 0 | 0 | 0 | 0 | 0 | 1 | 0.020202 | false | 0 | 0.035354 | 0 | 0.207071 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceac473a1d30583fc2b6ec775414fa4d6a4df3a0 | 3,011 | py | Python | src/demos/python/demo_irrlicht.py | kishor8dm/chrono | 0ecc2d9ab39cbc068b730bc794fbdf7f22d158cf | [
"BSD-3-Clause"
] | 1 | 2020-04-19T20:34:15.000Z | 2020-04-19T20:34:15.000Z | src/demos/python/demo_irrlicht.py | kishor8dm/chrono | 0ecc2d9ab39cbc068b730bc794fbdf7f22d158cf | [
"BSD-3-Clause"
] | null | null | null | src/demos/python/demo_irrlicht.py | kishor8dm/chrono | 0ecc2d9ab39cbc068b730bc794fbdf7f22d158cf | [
"BSD-3-Clause"
] | 1 | 2018-10-25T07:05:40.000Z | 2018-10-25T07:05:40.000Z | #-------------------------------------------------------------------------------
# Name: modulo1
# Purpose:
#
# Author: tasora
#
# Created: 14/02/2012
# Copyright: (c) tasora 2012
# Licence: <your licence>
#-------------------------------------------------------------------------------
#!/usr/bin/env python
def main():
pass
if __name__ == '__main__':
main()
import os
import math
import ChronoEngine_python_core as chrono
import ChronoEngine_python_postprocess as postprocess
import ChronoEngine_python_irrlicht as chronoirr
print ("Example: create a system and visualize it in realtime 3D");
# ---------------------------------------------------------------------
#
# Create the simulation system and add items
#
mysystem = chrono.ChSystemNSC()
# Create a fixed rigid body
mbody1 = chrono.ChBody()
mbody1.SetBodyFixed(True)
mbody1.SetPos( chrono.ChVectorD(0,0,-0.2))
mysystem.Add(mbody1)
mboxasset = chrono.ChBoxShape()
mboxasset.GetBoxGeometry().Size = chrono.ChVectorD(0.2,0.5,0.1)
mbody1.AddAsset(mboxasset)
# Create a swinging rigid body
mbody2 = chrono.ChBody()
mbody2.SetBodyFixed(False)
mysystem.Add(mbody2)
mboxasset = chrono.ChBoxShape()
mboxasset.GetBoxGeometry().Size = chrono.ChVectorD(0.2,0.5,0.1)
mbody2.AddAsset(mboxasset)
mboxtexture = chrono.ChTexture()
mboxtexture.SetTextureFilename('../../../data/concrete.jpg')
mbody2.GetAssets().push_back(mboxtexture)
# Create a revolute constraint
mlink = chrono.ChLinkRevolute()
# the coordinate system of the constraint reference in abs. space:
mframe = chrono.ChFrameD(chrono.ChVectorD(0.1,0.5,0))
# initialize the constraint telling which part must be connected, and where:
mlink.Initialize(mbody1,mbody2, mframe)
mysystem.Add(mlink)
# ---------------------------------------------------------------------
#
# Create an Irrlicht application to visualize the system
#
myapplication = chronoirr.ChIrrApp(mysystem, 'Test', chronoirr.dimension2du(1024,768))
myapplication.AddTypicalSky('../../../data/skybox/')
myapplication.AddTypicalCamera(chronoirr.vector3df(0.6,0.6,0.8))
myapplication.AddTypicalLights()
# ==IMPORTANT!== Use this function for adding a ChIrrNodeAsset to all items
# in the system. These ChIrrNodeAsset assets are 'proxies' to the Irrlicht meshes.
# If you need a finer control on which item really needs a visualization proxy in
# Irrlicht, just use application.AssetBind(myitem); on a per-item basis.
myapplication.AssetBindAll();
# ==IMPORTANT!== Use this function for 'converting' into Irrlicht meshes the assets
# that you added to the bodies into 3D shapes, they can be visualized by Irrlicht!
myapplication.AssetUpdateAll();
# ---------------------------------------------------------------------
#
# Run the simulation
#
myapplication.SetTimestep(0.001)
while(myapplication.GetDevice().run()):
myapplication.BeginScene()
myapplication.DrawAll()
myapplication.DoStep()
myapplication.EndScene()
| 25.302521 | 87 | 0.64829 | 330 | 3,011 | 5.869697 | 0.490909 | 0.014455 | 0.033041 | 0.035106 | 0.103252 | 0.075374 | 0.075374 | 0.075374 | 0.075374 | 0.075374 | 0 | 0.025671 | 0.133178 | 3,011 | 118 | 88 | 25.516949 | 0.716475 | 0.442378 | 0 | 0.093023 | 0 | 0 | 0.070122 | 0.028659 | 0 | 0 | 0 | 0 | 0 | 1 | 0.023256 | false | 0.023256 | 0.116279 | 0 | 0.139535 | 0.023256 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceb0db22f2438ef7f74d0856be290241a00a19f3 | 2,718 | py | Python | botasky/utils/MyLOG.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | [
"MIT"
] | null | null | null | botasky/utils/MyLOG.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | [
"MIT"
] | null | null | null | botasky/utils/MyLOG.py | 5atouristspot/sql_audit | 54c6d5ac9f8178ab1a17b7ff2d04ff738f14e0b7 | [
"MIT"
] | null | null | null | #! /usr/bin/python2.7
# -*- coding: utf-8 -*-
"""
Created on 2017-3-15
@module: MyLOG
@used: print log to console or file
"""
from logging.handlers import RotatingFileHandler
import time
import logging
import threading
import ConfigParser
import sys
reload(sys)
__all__ = ['MyLog']
__author__ = 'zhihao'
class MyLog:
file_handler = ''
def __init__(self, log_config, name):
"""
used : read the log configuration and initialise the logger
:param log_config : path of the log config file
:param name : logger name
"""
self.name = name
self.logger = logging.getLogger(self.name)
config = ConfigParser.ConfigParser()
config.read(log_config)
mythread = threading.Lock()
mythread.acquire() # thread lock
self.log_file_path = config.get('LOGGING', 'log_file_path')
self.maxBytes = config.get('LOGGING', 'maxBytes')
self.backupCount = int(config.get('LOGGING', 'backupCount'))
self.outputConsole_level = int(config.get('LOGGING', 'outputConsole_level'))
self.outputFile_level = int(config.get('LOGGING', 'outputFile_level'))
self.outputConsole = int(config.get('LOGGING', 'outputConsole'))
self.outputFile = int(config.get('LOGGING', 'outputFile'))
self.formatter = logging.Formatter('%(asctime)s %(filename)s[line:%(lineno)d] %(levelname)s %(message)s')
self.console_handler = ''
self.file_handler = ''
mythread.release() # release the thread lock
def outputLog(self):
"""
used : output log to console and file
"""
if self.outputConsole == 1:
# if true, output the log to the console
self.console_handler = logging.StreamHandler()
self.console_handler.setFormatter(self.formatter)
self.logger.setLevel(self.outputConsole_level)
self.logger.addHandler(self.console_handler)
else:
pass
if self.outputFile == 1:
self.file_handler = RotatingFileHandler(self.log_file_path, maxBytes=int(self.maxBytes), backupCount=self.backupCount)
# RotatingFileHandler: the output path, per-file max bytes and backup count all come from the config file
self.file_handler.setFormatter(self.formatter)
self.logger.setLevel(self.outputFile_level)
self.logger.addHandler(self.file_handler)
else:
pass
return self.logger
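# A sketch of the expected config file (section and option names are taken
# from the reads above; the values are illustrative):
#   [LOGGING]
#   log_file_path = ./app.log
#   maxBytes = 10485760
#   backupCount = 10
#   outputConsole_level = 20
#   outputFile_level = 10
#   outputConsole = 1
#   outputFile = 1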
if __name__ == '__main__':
'''
mylog = MyLog('logConfig.ini','jjjjj')
logger = mylog.outputLog()
logger.error("jjjjjjjjjjjjjjj")
'''
import MyLOG
help(MyLOG)
| 29.225806 | 114 | 0.611111 | 294 | 2,718 | 5.5 | 0.346939 | 0.038961 | 0.069264 | 0.058751 | 0.184292 | 0.06679 | 0.06679 | 0.06679 | 0 | 0 | 0 | 0.012226 | 0.277778 | 2,718 | 92 | 115 | 29.543478 | 0.811513 | 0.15379 | 0 | 0.083333 | 0 | 0.020833 | 0.11194 | 0.014428 | 0 | 0 | 0 | 0 | 0 | 1 | 0.041667 | false | 0.041667 | 0.145833 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceb48030a33aa811b53f8e680c6133287b77d36d | 6,474 | py | Python | rest_framework/tests/throttling.py | sunscrapers/django-rest-framework | 45086fd308e2f433a8c7f1f53d8a27c4aaf210f0 | [
"Unlicense"
] | null | null | null | rest_framework/tests/throttling.py | sunscrapers/django-rest-framework | 45086fd308e2f433a8c7f1f53d8a27c4aaf210f0 | [
"Unlicense"
] | null | null | null | rest_framework/tests/throttling.py | sunscrapers/django-rest-framework | 45086fd308e2f433a8c7f1f53d8a27c4aaf210f0 | [
"Unlicense"
] | null | null | null | """
Tests for the throttling implementations in the throttling module.
"""
from __future__ import unicode_literals
from django.test import TestCase
from django.contrib.auth.models import User
from django.core.cache import cache
from django.test.client import RequestFactory
from rest_framework.authentication import BasicAuthentication, \
SessionAuthentication, TokenAuthentication
from rest_framework.views import APIView
from rest_framework.throttling import UserRateThrottle, AnonRateThrottle
from rest_framework.response import Response
from django.conf.urls import patterns
class User3SecRateThrottle(UserRateThrottle):
rate = '3/sec'
scope = 'seconds'
class User3MinRateThrottle(UserRateThrottle):
rate = '3/min'
scope = 'minutes'
class MockView(APIView):
throttle_classes = (User3SecRateThrottle,)
def get(self, request):
return Response('foo')
class MockView_MinuteThrottling(APIView):
throttle_classes = (User3MinRateThrottle,)
def get(self, request):
return Response('foo')
class ThrottlingTests(TestCase):
urls = 'rest_framework.tests.throttling'
def setUp(self):
"""
Reset the cache so that no throttles will be active
"""
cache.clear()
self.factory = RequestFactory()
def test_requests_are_throttled(self):
"""
Ensure request rate is limited
"""
request = self.factory.get('/')
for dummy in range(4):
response = MockView.as_view()(request)
self.assertEqual(429, response.status_code)
def set_throttle_timer(self, view, value):
"""
Explicitly set the timer, overriding time.time()
"""
view.throttle_classes[0].timer = lambda self: value
def test_request_throttling_expires(self):
"""
Ensure request rate is limited for a limited duration only
"""
self.set_throttle_timer(MockView, 0)
request = self.factory.get('/')
for dummy in range(4):
response = MockView.as_view()(request)
self.assertEqual(429, response.status_code)
# Advance the timer by one second
self.set_throttle_timer(MockView, 1)
response = MockView.as_view()(request)
self.assertEqual(200, response.status_code)
def ensure_is_throttled(self, view, expect):
request = self.factory.get('/')
request.user = User.objects.create(username='a')
for dummy in range(3):
view.as_view()(request)
request.user = User.objects.create(username='b')
response = view.as_view()(request)
self.assertEqual(expect, response.status_code)
def test_request_throttling_is_per_user(self):
"""
Ensure request rate is only limited per user, not globally for
PerUserThrottles
"""
self.ensure_is_throttled(MockView, 200)
def ensure_response_header_contains_proper_throttle_field(self, view, expected_headers):
"""
Ensure the response returns an X-Throttle field with status and next attributes
set properly.
"""
request = self.factory.get('/')
for timer, expect in expected_headers:
self.set_throttle_timer(view, timer)
response = view.as_view()(request)
if expect is not None:
self.assertEqual(response['X-Throttle-Wait-Seconds'], expect)
else:
self.assertFalse('X-Throttle-Wait-Seconds' in response)
def test_seconds_fields(self):
"""
Ensure for second based throttles.
"""
self.ensure_response_header_contains_proper_throttle_field(MockView,
((0, None),
(0, None),
(0, None),
(0, '1')
))
def test_minutes_fields(self):
"""
Ensure for minute based throttles.
"""
self.ensure_response_header_contains_proper_throttle_field(MockView_MinuteThrottling,
((0, None),
(0, None),
(0, None),
(0, '60')
))
def test_next_rate_remains_constant_if_followed(self):
"""
If a client follows the recommended next request rate,
the throttling rate should stay constant.
"""
self.ensure_response_header_contains_proper_throttle_field(MockView_MinuteThrottling,
((0, None),
(20, None),
(40, None),
(60, None),
(80, None)
))
class Anon3SecRateThrottle(AnonRateThrottle):
rate = '3/sec'
scope = 'seconds'
class NextMockView(APIView):
throttle_classes = (Anon3SecRateThrottle,)
def get(self, request):
return Response('foo')
urlpatterns = patterns('',
(r'^basic/$', NextMockView.as_view(authentication_classes=[BasicAuthentication])),
(r'^session/$', NextMockView.as_view(authentication_classes=[SessionAuthentication])),
(r'^token/$', NextMockView.as_view(authentication_classes=[TokenAuthentication])),
(r'^combined/$', NextMockView.as_view(authentication_classes=[SessionAuthentication, BasicAuthentication])),
    (r'^combined/reverse/$', NextMockView.as_view(authentication_classes=[BasicAuthentication, SessionAuthentication])),
)
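# Note: each route above pairs the same throttled view with a different
# authentication class. The tests below send deliberately malformed
# credentials; in this DRF version those fall through to an anonymous
# request, so AnonRateThrottle still applies and the views return 200.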
class ThrottlingWithAuthenticationTest(TestCase):
urls = 'rest_framework.tests.throttling'
def setUp(self):
self.username = 'john'
self.email = 'lennon@thebeatles.com'
self.password = 'password'
self.user = User.objects.create_user(self.username, self.email, self.password)
def test_basic_auth(self):
auth = 'Basic wrongcreds'
response = self.client.get('/basic/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_session_auth(self):
response = self.client.get('/session/')
self.assertEqual(response.status_code, 200)
def test_token_auth(self):
auth = 'Token wrongone'
response = self.client.get('/token/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_combined_auth(self):
auth = 'Basic wrongcreds'
response = self.client.get('/combined/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
def test_combined_reverse_auth(self):
auth = 'Basic wrongcreds'
        response = self.client.get('/combined/reverse/', HTTP_AUTHORIZATION=auth)
self.assertEqual(response.status_code, 200)
| 32.049505 | 120 | 0.660179 | 703 | 6,474 | 5.911807 | 0.234708 | 0.018527 | 0.03898 | 0.038499 | 0.45693 | 0.397738 | 0.339509 | 0.264918 | 0.235804 | 0.19923 | 0 | 0.012535 | 0.236021 | 6,474 | 201 | 121 | 32.208955 | 0.82774 | 0.097621 | 0 | 0.385827 | 0 | 0 | 0.063537 | 0.023023 | 0 | 0 | 0 | 0 | 0.086614 | 1 | 0.149606 | false | 0.015748 | 0.07874 | 0.023622 | 0.401575 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceb6e599d5a28196b7bfb7f9d6be39918774b25b | 5,125 | py | Python | 09-generative-adversarial-network/cov_gan.py | mingweihe/pytorch-practice | 4d5770b7b6e58161e36decb33a07eebffec1f1a3 | [
"MIT"
] | 4 | 2019-11-02T21:47:33.000Z | 2020-02-13T19:25:38.000Z | 09-generative-adversarial-network/cov_gan.py | mingweihe/pytorch-practice | 4d5770b7b6e58161e36decb33a07eebffec1f1a3 | [
"MIT"
] | null | null | null | 09-generative-adversarial-network/cov_gan.py | mingweihe/pytorch-practice | 4d5770b7b6e58161e36decb33a07eebffec1f1a3 | [
"MIT"
] | null | null | null | import torch
from torch import nn
from torch.autograd import Variable
from torch.utils.data import DataLoader
from torchvision import transforms
from torchvision import datasets
from torchvision.utils import save_image
import os
from torchsummary import summary
img_folder = './dc_img'
if not os.path.exists(img_folder):
os.mkdir(img_folder)
def to_img(x):
    x = .5 * (x+1)  # map tanh output from [-1, 1] back to [0, 1]
    x = x.clamp(0, 1)
    x = x.view(-1, 1, 28, 28)  # reshape flat batch into 1-channel 28x28 images
    return x
batch_size = 128
num_epoch = 100
z_dimension = 100 # noise dimension
learning_rate = 3e-4
img_transform = transforms.Compose([
transforms.ToTensor(),
transforms.Normalize((.5,), (.5,))
])
dataset = datasets.MNIST('../data', transform=img_transform, download=True)
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True) # num_workers=4
class discriminator(nn.Module):
def __init__(self):
super(discriminator, self).__init__()
self.conv1 = nn.Sequential(
nn.Conv2d(1, 32, 5, padding=2), # b, 32, 28, 28
nn.LeakyReLU(.2, True),
            nn.AvgPool2d(2, stride=2) # b, 32, 14, 14
)
self.conv2 = nn.Sequential(
nn.Conv2d(32, 64, 5, padding=2), # b, 64, 14, 14
nn.LeakyReLU(.2, True),
nn.AvgPool2d(2, stride=2) # batch, 64, 7, 7
)
self.fc = nn.Sequential(
nn.Linear(64*7*7, 1024),
nn.LeakyReLU(.2, True),
nn.Linear(1024, 1),
nn.Sigmoid()
)
def forward(self, x):
x = self.conv1(x)
x = self.conv2(x)
x = x.view(x.size(0), -1)
x = self.fc(x)
return x
class generator(nn.Module):
def __init__(self, input_size, num_feature):
super(generator, self).__init__()
self.fc = nn.Linear(input_size, num_feature) # b, 3136=1*56*56
self.br = nn.Sequential(
nn.BatchNorm2d(1),
nn.ReLU(True)
)
self.downsample1 = nn.Sequential(
nn.Conv2d(1, 50, 3, stride=1, padding=1), # b, 50, 56, 56
nn.BatchNorm2d(50),
nn.ReLU(True)
)
self.downsample2 = nn.Sequential(
nn.Conv2d(50, 25, 3, stride=1, padding=1), # b, 25, 56, 56
nn.BatchNorm2d(25),
nn.ReLU(True)
)
self.downsample3 = nn.Sequential(
nn.Conv2d(25, 1, 2, stride=2), # b, 1, 28, 28
nn.Tanh()
)
def forward(self, x):
x = self.fc(x)
x = x.view(x.size(0), 1, 56, 56)
x = self.br(x)
x = self.downsample1(x)
x = self.downsample2(x)
x = self.downsample3(x)
return x
D, G = discriminator(), generator(z_dimension, 3136)
use_gpu = torch.cuda.is_available()
if use_gpu: D, G = D.cuda(), G.cuda()
summary(D, (1, 28, 28))
summary(G, (1, 100))
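# Quick shape sanity check (illustrative only, assuming a CPU run;
# not part of the training flow):
# z = torch.randn(4, z_dimension)
# assert G(z).shape == (4, 1, 28, 28)  # generator: noise -> 28x28 images
# assert D(G(z)).shape == (4, 1)       # discriminator: one score per image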
criterion = nn.BCELoss() # binary cross entropy loss
d_optimizer = torch.optim.Adam(D.parameters(), lr=learning_rate)
g_optimizer = torch.optim.Adam(G.parameters(), lr=learning_rate)
# start training
for epoch in range(1, num_epoch+1):
for i, (img, _) in enumerate(dataloader):
num_img = img.size(0)
# ------- train discriminator -------
real_img = Variable(img)
real_label = Variable(torch.ones(num_img).reshape(num_img, -1))
fake_label = Variable(torch.zeros(num_img).reshape(num_img, -1))
if use_gpu:
real_img = real_img.cuda()
real_label = real_label.cuda()
fake_label = fake_label.cuda()
# compute loss of real images
real_out = D(real_img)
d_loss_real = criterion(real_out, real_label)
real_scores = real_out # closer to 1 means better
# compute loss of fake_img
z = Variable(torch.randn(num_img, z_dimension))
if use_gpu: z = z.cuda()
fake_img = G(z)
fake_out = D(fake_img)
d_loss_fake = criterion(fake_out, fake_label)
fake_scores = fake_out # closer to 0 means better
# bp and optimization
d_loss = d_loss_real + d_loss_fake
d_optimizer.zero_grad()
d_loss.backward()
d_optimizer.step()
# ------- train generator ------
# compute loss of fake images
z = Variable(torch.randn(num_img, z_dimension))
if use_gpu: z = z.cuda()
fake_img = G(z)
output = D(fake_img)
g_loss = criterion(output, real_label)
# bp and optimization
g_optimizer.zero_grad()
g_loss.backward()
g_optimizer.step()
if (i+1) % 100 == 0:
print(f'Epoch [{epoch}/{num_epoch}], d_loss: {d_loss.item():.6f}, g_loss: {g_loss.item():.6f}, '
f'D_real_scores: {real_scores.data.mean():.6f}, D_fake_scores: {fake_scores.data.mean():.6f}')
if epoch == 1:
real_images = to_img(real_img.cpu().data)
save_image(real_images, f'{img_folder}/real_images.png')
fake_images = to_img(fake_img.cpu().data)
save_image(fake_images, f'{img_folder}/fake_images-{epoch}.png')
torch.save(G.state_dict(), './conv_generator.pth')
torch.save(D.state_dict(), './conv_discriminator.pth')
| 31.832298 | 110 | 0.590829 | 730 | 5,125 | 3.969863 | 0.217808 | 0.008972 | 0.033816 | 0.034507 | 0.16011 | 0.113182 | 0.073844 | 0.073844 | 0.064182 | 0.040028 | 0 | 0.049277 | 0.271415 | 5,125 | 160 | 111 | 32.03125 | 0.726834 | 0.082537 | 0 | 0.160305 | 0 | 0.015267 | 0.064116 | 0.036119 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038168 | false | 0 | 0.068702 | 0 | 0.145038 | 0.007634 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceb7570c1b7117cd5e3c9683f6fa41c8d8d6582e | 873 | py | Python | Python/flask/day2/telegram/movie.py | statKim/TIL | 3297d09023d97653773b35160794d3324b95c111 | [
"MIT"
] | null | null | null | Python/flask/day2/telegram/movie.py | statKim/TIL | 3297d09023d97653773b35160794d3324b95c111 | [
"MIT"
] | null | null | null | Python/flask/day2/telegram/movie.py | statKim/TIL | 3297d09023d97653773b35160794d3324b95c111 | [
"MIT"
] | null | null | null | import requests
import json
#url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json?key=1ce2fa0cc74c89d0a0bc48a61a2d989f&targetDt=20180827"
url = "http://www.kobis.or.kr/kobisopenapi/webservice/rest/boxoffice/searchDailyBoxOfficeList.json"
key = "1ce2fa0cc74c89d0a0bc48a61a2d989f"
date = "20180827"
res = requests.get(url + "?key={}&targetDt={}".format(key,date))
data = json.loads(res.text)
#print(data)
movies = {} # keys are movie titles, values are ranks
#print(data["boxOfficeResult"]["dailyBoxOfficeList"][0]["movieNm"])
movie_list = data["boxOfficeResult"]["dailyBoxOfficeList"] # keep only the list portion so it is easier to work with
for i in movie_list:
movies[i["movieNm"]] = i["rank"]
#movies[int(i["rank"])] = i["movieNm"]
#print(sorted(movies))
print(movies)
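# Example: print titles ordered by rank (illustrative, not part of the original flow):
# for title, rank in sorted(movies.items(), key=lambda kv: int(kv[1])):
#     print(rank, title)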
# boxOfficeResult
# dailyBoxOfficeList
# len(dailyBoxOfficeList)
# rank & movieNm
| 33.576923 | 155 | 0.745704 | 105 | 873 | 6.180952 | 0.495238 | 0.152542 | 0.030817 | 0.046225 | 0.360555 | 0.360555 | 0.360555 | 0.360555 | 0.360555 | 0.360555 | 0 | 0.064475 | 0.093929 | 873 | 25 | 156 | 34.92 | 0.756005 | 0.4811 | 0 | 0 | 0 | 0.083333 | 0.438914 | 0.072398 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.166667 | 0 | 0.166667 | 0.083333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceb8d42174b0a780db99dfa6b4d999e24b18a8aa | 10,133 | py | Python | main.py | dgaiero/Resistor-Band-Picture-Creator | e2960f4260d1e80347bc7497c8b0bab29b28f218 | [
"MIT"
] | null | null | null | main.py | dgaiero/Resistor-Band-Picture-Creator | e2960f4260d1e80347bc7497c8b0bab29b28f218 | [
"MIT"
] | 5 | 2017-06-30T23:25:06.000Z | 2021-09-08T01:30:41.000Z | main.py | dgaiero/Resistor-Band-Picture-Creator | e2960f4260d1e80347bc7497c8b0bab29b28f218 | [
"MIT"
] | 1 | 2017-08-12T18:42:23.000Z | 2017-08-12T18:42:23.000Z | # Project: Resistor-Band-Picture-Creator
# Author: Dominic Gaiero
# File: main.py
# This program outputs an image of a resistor's color bands and was written
# for IEEE's Cal Poly Student Branch and their online parts website.
from tkinter import filedialog
from tkinter.filedialog import askdirectory
from tkinter import messagebox
from tkinter import scrolledtext as ScrolledText
import tkinter
import threading
import logging
import calendar
import time
import logging.handlers
import copy
import traceback
from resistorPicture import *
from shutil import copyfile
import os
class TextHandler(logging.Handler):
"""This class allows you to log to a Tkinter Text or ScrolledText widget"""
def __init__(self, text):
# run the regular Handler __init__
logging.Handler.__init__(self)
# Store a reference to the Text it will log to
self.text = text
def emit(self, record):
msg = self.format(record)
def append():
self.text.configure(state='normal')
self.text.insert(tkinter.END, msg + '\n')
self.text.configure(state='disabled')
# Autoscroll to the bottom
self.text.yview(tkinter.END)
# This is necessary because we can't modify the Text from other threads
self.text.after(0, append)
class RedirectText(object):
def __init__(self, text_ctrl):
"""Constructor"""
self.output = text_ctrl
def write(self, string):
self.output(string)
def flush(self):
pass
class configForm(tkinter.Tk):
def __init__(self):
'''
Configuration
Setup form
Set form to non-resizable
Set form title
'''
tkinter.Tk.__init__(self)
self.resizable(0, 0)
self.wm_title('Resistor Config')
self.report_callback_exception = self.show_error
# self.call('tk', 'scaling', 1.75)
cwd = os.getcwd()
iconLocation = "{}\\icon.ico".format(cwd)
self.iconbitmap(r'{}'.format(iconLocation))
self.frame1 = tkinter.Frame(self)
# self.frame1.grid(row = 0, column = 0, rowspan = 3, columnspan = 2, sticky = "WS")
self.frame1.pack(side="left", anchor="nw")
self.frame2 = tkinter.Frame(self)
self.frame2.pack(side="left")
# self.frame2.grid(row = 3, column = 0, rowspan = 3, columnspan = 2, sticky = "ES")
self.filePrefixLbl = tkinter.Label(self.frame1, text="File Prefix:")
self.filePrefixLbl.grid(
row=0, column=0, sticky='E', padx=5, pady=2)
self.filePrefixTxt = tkinter.Entry(self.frame1)
self.filePrefixTxt.grid(
row=0, column=1, sticky="W", pady=3)
self.multiplierLbl = tkinter.Label(self.frame1, text="Multiplier:")
self.multiplierLbl.grid(
row=1, column=0, sticky='E', padx=5, pady=2)
self.multiplierTxt = tkinter.Entry(self.frame1)
self.multiplierTxt.insert(0, "500")
self.multiplierTxt.grid(
row=1, column=1, sticky="W", pady=3)
self.selectInputCSV = tkinter.Button(
self.frame1, text="CSV File Location", command=self.openCSV)
self.selectInputCSV.grid(row=2, column=0, sticky='N', padx=5, pady=2)
self.selectOutputDirectory = tkinter.Button(
self.frame1, text="Output Directory", command=self.openDirectory)
self.selectOutputDirectory.grid(
row=2, column=1, sticky='NW', padx=5, pady=2)
self.processFile = tkinter.Button(
self.frame1, text="Process Files", command=self.processFiles)
self.processFile.grid(row=3, column=0, sticky='WN', padx=5, pady=2)
self.logText = ScrolledText.ScrolledText(
self.frame2, state='disabled', width=145,)
self.logText.configure(font='TkFixedFont')
self.logText.grid(row=1, column=1, sticky='nesw', padx=5, pady=2)
# Create textLogger
# threading.Thread(target=self.loggingHandler).start()
self.loggingHandler()
def loggingHandler(self):
text_handler = TextHandler(self.logText)
# Add the handler to logger
self.logger = logging.getLogger()
self.logger.addHandler(text_handler)
currTime = int(time.time())
if(not(os.path.isdir("{}\\logs".format(os.getcwd())))):
os.makedirs("{}\\logs".format(os.getcwd()))
self.logFileName = "CPIEEE_RESISTOR_{}.log".format(currTime)
self.logFileFullPath = "{}\\logs\\{}".format(
os.getcwd(), self.logFileName)
fh = logging.FileHandler(self.logFileFullPath, 'a')
formatter = logging.Formatter('%(asctime)s %(message)s')
fh.setFormatter(formatter)
fh.setLevel(logging.WARNING)
self.logger.setLevel(logging.WARNING)
self.logger.addHandler(fh)
self.logger.warning(
"Resistor Picture Generator\n--------------------------------\n"
"Created by Dominic Gaiero and Russell Caletena for the CP IEEE SB (https://calpolyieee.com)\n--------------------------------\n")
self.logger.warning(
"Log File located at: {}".format(self.logFileFullPath))
redir = RedirectText(self.logger.warning)
sys.stdout = redir
def openCSV(self):
if messagebox.askyesno("Open CSV", "The CSV file should be formatted as follows\nvalue,tolerence,num. bands.\nIf this is true, click 'Yes'. Otherwise click 'No'."):
self.filename = filedialog.askopenfilename(
initialdir="/", title="Select file", filetypes=(("csv files", "*.csv"), ("all files", "*.*")))
self.csvFileName = self.filename
if self.csvFileName == "":
self.logger.warning("Invalid File Name. Please re-select")
return
csvFile = ("CSV Location: {}".format(self.csvFileName))
self.logger.warning("CSV Location: {}".format(self.csvFileName))
self.csvTest()
def openDirectory(self):
# from tkinter.filedialog import askdirectory
self.directoryLocation = askdirectory(
parent=self, initialdir="/", title='Please select a directory')
# print(self.directoryLocation)
# print (test)
self.logger.warning(("Directory Location: {}").format(
self.directoryLocation))
# print(self.directoryLocation)
def show_error(self, *args):
err = traceback.format_exception(*args)
# messagebox.showerror('Exception',err)
print("--------------------------------")
logging.warning("Exception Encountered:")
err_message = ''
for error in err:
err_message += error
print(err_message)
if messagebox.askyesno("Unstable State", "The application has entered an unstable state. It is recommended to quit. Do you want to quit?\n{}".format(err_message)):
self.destroy()
            os._exit(1)
def processFiles(self):
try:
self.directoryLocation
self.filePrefixTxt.get()
# print(self.directoryLocation)
# print(self.filePrefixTxt.get())
except AttributeError:
messagebox.showerror(
"Error", "Data entered is invalid. Try again.")
self.logger.warning("Data entered is invalid. Try again.")
# print("Error")
return
csvLocation = self.csvFileName
cwd = self.directoryLocation
prefix = self.filePrefixTxt.get()
multiplier = int(self.multiplierTxt.get())
f = open(csvLocation, "rt")
try:
reader = csv.reader(f)
next(reader)
for row in reader:
resistorValue = row[0]
resistorTolerance = float(row[1])
numBands = int(row[2])
resistorData = getResistorData(
resistorValue, resistorTolerance, numBands)
self.logger.warning(
"--------------------------------\nGenerated data for:")
self.logger.warning(
"|{:>8s}|{:>8s}|{:>8s}|{:>8s}|{:>8s}|{:>8s}|{:>8s}|".format(
"Value", "Tolerence", "Band 1", "Band 2", "Band 3", "Band 4", "Band 5")
)
self.logger.warning(
"|{:>8}|{:>8}%|{:>8}|{:>8}|{:>8}|{:>8}|{:>8}|".format(
resistorData[0][0], resistorData[0][1], resistorData[1][0], resistorData[1][1], resistorData[1][2], resistorData[1][3], resistorData[1][4])
)
pictureStatus = generatePicture(
resistorData, cwd, multiplier, prefix)
if pictureStatus[0]:
self.logger.warning(
"Wrote file: {}".format(pictureStatus[1]))
finally:
f.close()
self.logger.warning("--------------------------------\n")
self.logger.warning("Done\n")
self.logger.warning("--------------------------------\n")
copyfile(self.logFileFullPath, os.path.join(self.directoryLocation, self.logFileName))
if messagebox.askyesno("Open output folder", "Do you want to open the folder?"):
os.startfile(cwd)
def csvTest(self):
f = open(self.csvFileName, "rt")
header = f.readline()
header = header.strip()
# self.logger.warning("CSV Header:\n{}".format(header))
headerMore = header.split(",")
headerString = '|'
# print(tuple(headerMore))
for i in range(len(headerMore)):
headerString += "{:>10s}|"
headerString = headerString.strip()
self.logger.warning(headerString.format(*tuple(headerMore)))
line1 = f.readline().strip()
line1More = line1.split(",")
self.logger.warning(headerString.format(*tuple(line1More)))
# self.logger.warning("CSV Line 1:\n{}".format(line1))
self.logger.warning("--------------------------------\n")
def main():
form = configForm()
form.mainloop()
if __name__ == '__main__':
main()
| 38.382576 | 172 | 0.583045 | 1,093 | 10,133 | 5.360476 | 0.297347 | 0.039256 | 0.055129 | 0.010241 | 0.160949 | 0.083632 | 0.028332 | 0.009558 | 0.009558 | 0 | 0 | 0.014947 | 0.267147 | 10,133 | 263 | 173 | 38.528517 | 0.774037 | 0.11803 | 0 | 0.068421 | 0 | 0.015789 | 0.153264 | 0.04145 | 0 | 0 | 0 | 0 | 0 | 1 | 0.073684 | false | 0.005263 | 0.078947 | 0 | 0.178947 | 0.010526 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ceb9bb743972c989b0226d741be69fd8e44a007a | 4,822 | py | Python | 2019-06-28-Nitrogen-grid-optimization/send_mols_to_server.py | btjanaka/qca-dataset-submission | 36c6861219dc522262e105d4c5b99644cb87cdfd | [
"BSD-3-Clause"
] | 15 | 2019-06-28T19:33:37.000Z | 2022-03-23T18:38:14.000Z | 2019-06-28-Nitrogen-grid-optimization/send_mols_to_server.py | btjanaka/qca-dataset-submission | 36c6861219dc522262e105d4c5b99644cb87cdfd | [
"BSD-3-Clause"
] | 251 | 2019-06-26T01:14:52.000Z | 2022-03-31T12:48:40.000Z | 2019-06-28-Nitrogen-grid-optimization/send_mols_to_server.py | btjanaka/qca-dataset-submission | 36c6861219dc522262e105d4c5b99644cb87cdfd | [
"BSD-3-Clause"
] | 5 | 2019-06-25T22:26:55.000Z | 2021-02-17T22:16:39.000Z | #imports
import time
import pprint
import re
import numpy as np
from openeye import oechem
from openeye import oeomega
import qcportal as ptl
import cmiles
# Custom exception for the case when there is no nitrogen
class NoNitrogenException(Exception): pass
#identifies the invertible nitrogen that the grid optimization will occur around
def find_nitrogen(mol):
"""Returns the trivalent nitrogen atom in a molecule"""
for atom in mol.GetAtoms():
if oechem.OEIsInvertibleNitrogen()(atom):
return atom, atom.GetIdx()
raise NoNitrogenException()
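# Minimal usage sketch (hypothetical molecule; illustrative only):
# mol = oechem.OEMol()
# oechem.OESmilesToMol(mol, "CN(C)C")  # trimethylamine has one trivalent N
# atom, idx = find_nitrogen(mol)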
# Initialize Omega
omega = oeomega.OEOmega()
omega.SetMaxConfs(1)
omega.SetIncludeInput(True)
omega.SetCanonOrder(True)
omega.SetSampleHydrogens(True) # Word to the wise: skipping this step can lead to significantly different charges!
omega.SetStrictStereo(True)
omega.SetStrictAtomTypes(True)
omega.SetIncludeInput(False) # don't include input
client = ptl.FractalClient("https://localhost:7777/", verify=False)
def make_ptl_mol(oemol):
"""Builds a QCPortal Molecule from an OpenEye molecule"""
coords = oemol.GetCoords()
    symbols_list = [oechem.OEGetAtomicSymbol(atom.GetAtomicNum()) for atom in oemol.GetAtoms()]
#convert to bohr
print(coords)
for key, item in coords.items():
coords[key] = (item[0]*1.88973, item[1]*1.88973, item[2]*1.88973)
    coord_list = [c for atom in oemol.GetAtoms() for c in coords[atom.GetIdx()]]
    conn_list = np.array([[bond.GetBgnIdx(),
                           bond.GetEndIdx(),
                           bond.GetOrder()] for bond
                          in oemol.GetBonds()])
ptl_mol = ptl.Molecule.from_data(
{'geometry':coord_list,
'symbols':symbols_list,
'connectivity':conn_list})
return ptl_mol
def send_qm_job(ptl_mol, nitrogen, nitrogen_i, mol):
"""Sends a job to the QM Client - returns a submitted object"""
indices = [nitrogen_i] + [nbor.GetIdx() for nbor in list(nitrogen.GetAtoms())]
print(f"indices: {indices}")
keywords = ptl.models.KeywordSet(values={"scf_properties":["wiberg_lowdin_indices"]})
try:
#keywords_id = (client.add_keywords([keywords])[0])
keywords_id = str(client.add_keywords([keywords])[0])
smiles=cmiles.utils.mol_to_smiles(mol, mapped=False, explicit_hydrogen=False)
mol_id = cmiles.get_molecule_ids(smiles, toolkit='openeye', strict=False)
connectivity=np.array(ptl_mol.connectivity).tolist()
geometry=np.array([[ptl_mol.geometry]]).ravel().tolist()
symbols=np.array([[ptl_mol.symbols]]).ravel().tolist()
jsonDict={
"cmiles_ids":mol_id,
"keywords": {
"preoptimization": True,
"scans": [{
"type": "dihedral",
"indices": list(indices),
"steps": [-52 ,-48,-44,-40, -36, -32, -28, -24, -20, -16, -12, -8, -4, 0, 4, 8, 12, 16, 20, 24, 28, 32, 36, 40, 44, 48, 52],
"step_type": "absolute"
}]
},
"optimization_spec": {
"program": "geometric",
"keywords": {
"coordsys": "tric",
}
},
"qc_spec": {
"driver": "gradient",
"method": "mp2",
"basis": "def2-SV(P)",
"keywords": keywords_id,
"program": "psi4",
},
"initial_molecule":{
"geometry":geometry,
"symbols":symbols,
"connectivity":connectivity
}}
return jsonDict, smiles
except:
pass
return
#This is where we submit the job.
#The molecule we ran this example with stored as a smile string in the tiny.smi file.
#This should be adapted for the directory "Molecules_to_run" for the .sdf files
first = True
results = [] # {"molecule": <OEMol>, "nitrogen": <OEAtom>, "nitrogen_i": <int>,
# "ptl_molecule": <PtlMol>, submitted": <submitted object>,
# "res": <result object> from QCPortal}
import glob
file_list = glob.glob('./Molecules_to_run/*.*')
jobsDict={}
for f in file_list:
tmp_mol = oechem.OEMol()
ifs = oechem.oemolistream(f)
oechem.OEReadMolecule(ifs, tmp_mol)
mol = oechem.OEMol(tmp_mol)
status = omega(mol)
nitrogen, nitrogen_i = find_nitrogen(mol)
ptl_mol = make_ptl_mol(mol)
subDict = send_qm_job(ptl_mol, nitrogen, nitrogen_i, mol)
try:
jobsDict[subDict[1]]=subDict[0]
except:
pass
import json
with open('nitrogen_Jobs_updateBohr.json', 'w') as fp:
json.dump(jobsDict, fp, indent=2, sort_keys=True)
| 33.255172 | 148 | 0.596018 | 561 | 4,822 | 5.010695 | 0.4082 | 0.021345 | 0.009605 | 0.012807 | 0.064746 | 0.024902 | 0.024902 | 0.024902 | 0.024902 | 0 | 0 | 0.023919 | 0.280382 | 4,822 | 144 | 149 | 33.486111 | 0.786167 | 0.174824 | 0 | 0.076923 | 0 | 0 | 0.101519 | 0.018228 | 0 | 0 | 0 | 0 | 0 | 1 | 0.028846 | false | 0.028846 | 0.096154 | 0 | 0.173077 | 0.028846 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cebccf985605aa3dbdd934a205cf63b9cd91905c | 3,868 | py | Python | canvasxpress/data/text.py | docinfosci/canvasxpress-python | 532a981b04d0f50bbde1852c695117a6220f4589 | [
"MIT"
] | 4 | 2021-03-18T17:23:40.000Z | 2022-02-01T19:07:01.000Z | canvasxpress/data/text.py | docinfosci/canvasxpress-python | 532a981b04d0f50bbde1852c695117a6220f4589 | [
"MIT"
] | 8 | 2021-04-30T20:46:57.000Z | 2022-03-10T07:25:31.000Z | canvasxpress/data/text.py | docinfosci/canvasxpress-python | 532a981b04d0f50bbde1852c695117a6220f4589 | [
"MIT"
] | 1 | 2022-02-03T00:35:14.000Z | 2022-02-03T00:35:14.000Z | import json
from typing import Union
from canvasxpress.data.base import CXData
class CXTextData(CXData):
"""
`CXTextData` is a `CXData` class that provides plain-text data directly to
the CanvasXpress for Javascript object. In this manner, the Python tier
makes no assumptions about the data content and permits the Javascript tier
to address any required adjustments in order to properly display the data
within a chart. If the data is erroneously formatted then the only
feedback will be at the Javascript tier.
"""
__raw_text = ""
"""
`__raw_text` tracks a block of text to be passed directly to the
CanvasXpress for Javascript constructor.
"""
@property
def text(self) -> str:
"""
Returns the raw text form of the data.
:returns: `str`
The text to be provided to CanvasXpress.
"""
return self.__raw_text
@text.setter
def text(
self,
value: str
) -> None:
"""
Sets the text to be provided to CanvasXpress.
:param value: `str`
The text to provide as-is to CanvasXpress. `None` will be
converted to an empty `str`. Values of type other than `str`
will be converted using `str()`.
"""
if value is None:
self.__raw_text = ""
elif isinstance(value, str):
self.__raw_text = value
else:
self.__raw_text = str(value)
@property
def data(self) -> dict:
"""
A property accessor for the data managed by the object. Regardless of
the input data the returned data structure will be a dict-type for use
with CanvasXpress.
:returns: `dict`
A dictionary representing a data map suitable for use with a chart.
"""
return self.get_raw_dict_form()
def get_raw_dict_form(self) -> dict:
""""
Provides a simple dict perspective of the data with no metadata or other
contextual transformations performed. For example, if the data is
natively in `dict` form then it would be passed-through with no
modification or enhancement.
:returns: `dict`
The `dict` perspective of the data with as little modification or
interpretation as is reasonable.
"""
try:
# Check the data as a JSON object. If the JSON object equates to
# a dict, list, or str then pass the Python form along as it will be
# converted back into a string as part of the HTML render. For
# anything else treat the content as a standard string to be
# passed along.
candidate = {
'raw': json.loads(self.text)
}
if isinstance(candidate['raw'], (dict, list, str)):
return {
'raw': json.loads(self.text)
}
else:
return {
'raw': self.text
}
except Exception as e:
return {
'raw': self.text
}
def render_to_dict(
self,
**kwargs
) -> dict:
"""
Converts the object into a dict representation.
:returns: `dict`
A dictionary representation of the object, such as what might be
needed for a JSON export.
"""
return self.get_raw_dict_form()
def __init__(
self,
data: Union[object, None] = None
) -> None:
"""
Initializes the CXData object with data.
:param data: `Union[object, None]`
Given an object or no data prepares a new CXData instance ready for
use by a `CanvasXpress` object.
"""
self.text = data
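# Minimal usage sketch (illustrative only):
# data = CXTextData('{"y": {"vars": ["A"], "data": [[1, 2, 3]]}}')
# data.render_to_dict()  # -> {'raw': {'y': {'vars': ['A'], 'data': [[1, 2, 3]]}}}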
| 31.966942 | 80 | 0.569804 | 472 | 3,868 | 4.599576 | 0.322034 | 0.029019 | 0.020267 | 0.019346 | 0.1345 | 0.116076 | 0.055274 | 0 | 0 | 0 | 0 | 0 | 0.363237 | 3,868 | 120 | 81 | 32.233333 | 0.881445 | 0.507239 | 0 | 0.367347 | 0 | 0 | 0.010512 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.122449 | false | 0 | 0.061224 | 0 | 0.346939 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cebd60d351f6f279f46b2324ca335141e5da4451 | 300 | py | Python | pset6/mario.py | Caleb-Ellis/CS50 | 469a4f5d85a3f149aef19570b4b968c41147e7fd | [
"MIT"
] | 1 | 2018-08-16T12:37:43.000Z | 2018-08-16T12:37:43.000Z | pset6/mario.py | Caleb-Ellis/CS50x | 469a4f5d85a3f149aef19570b4b968c41147e7fd | [
"MIT"
] | null | null | null | pset6/mario.py | Caleb-Ellis/CS50x | 469a4f5d85a3f149aef19570b4b968c41147e7fd | [
"MIT"
] | 1 | 2017-02-20T20:15:04.000Z | 2017-02-20T20:15:04.000Z | import cs50
while True:
print("Please give me a number of floors between 0 and 23 inclusive: ", end="")
n = cs50.get_int()
if n < 0 or n > 23:
print("Error in floor number.")
else:
break
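# Each row right-aligns the pyramid: (n - i - 1) spaces of padding, then a
# block of hashes that starts two wide and grows by one per row.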
for i in range(n):
print(" " * (n - i - 1), end="")
print("#" * (i + 2)) | 25 | 83 | 0.53 | 48 | 300 | 3.291667 | 0.6875 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.057971 | 0.31 | 300 | 12 | 84 | 25 | 0.705314 | 0 | 0 | 0 | 0 | 0 | 0.285714 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.090909 | 0 | 0.090909 | 0.363636 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cebea3835ebb4e975c2e0fe2e1a909758c7e2bc8 | 3,508 | py | Python | src/dictionary/gen_suffix_data.py | dancerj/mozc | a5a4927c1f709d2ff0c681585c746f73a434e4c9 | [
"BSD-3-Clause"
] | null | null | null | src/dictionary/gen_suffix_data.py | dancerj/mozc | a5a4927c1f709d2ff0c681585c746f73a434e4c9 | [
"BSD-3-Clause"
] | 1 | 2021-06-30T14:59:51.000Z | 2021-06-30T15:31:56.000Z | src/dictionary/gen_suffix_data.py | dancerj/mozc | a5a4927c1f709d2ff0c681585c746f73a434e4c9 | [
"BSD-3-Clause"
] | 1 | 2022-03-25T09:01:39.000Z | 2022-03-25T09:01:39.000Z | # -*- coding: utf-8 -*-
# Copyright 2010-2020, Google Inc.
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are
# met:
#
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following disclaimer
# in the documentation and/or other materials provided with the
# distribution.
# * Neither the name of Google Inc. nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
# A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
# OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
# SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
# LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
# DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
# THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "taku"
import codecs
import optparse
import struct
from build_tools import serialized_string_array_builder
def _ParseOptions():
parser = optparse.OptionParser()
parser.add_option('--input', dest='input', help='Input suffix file')
parser.add_option('--output_key_array', dest='output_key_array',
help='Output serialized string array for keys')
parser.add_option('--output_value_array', dest='output_value_array',
help='Output serialized string array for values')
parser.add_option('--output_token_array', dest='output_token_array',
help='Output uint32 array for lid, rid and cost.')
return parser.parse_args()[0]
def main():
opts = _ParseOptions()
result = []
with codecs.open(opts.input, 'r', encoding='utf-8') as stream:
for line in stream:
line = line.rstrip('\r\n')
fields = line.split('\t')
key = fields[0]
lid = int(fields[1])
rid = int(fields[2])
cost = int(fields[3])
value = fields[4]
if key == value:
value = ''
result.append((key, value, lid, rid, cost))
# Sort entries in ascending order of key.
result.sort(key=lambda e: e[0])
# Write keys to serialized string array.
serialized_string_array_builder.SerializeToFile(
list(entry[0] for entry in result), opts.output_key_array)
# Write values to serialized string array.
serialized_string_array_builder.SerializeToFile(
list(entry[1] for entry in result), opts.output_value_array)
# Write a sequence of (lid, rid, cost) to uint32 array:
# {lid[0], rid[0], cost[0], lid[1], rid[1], cost[1], ...}
# So the final array has 3 * len(result) elements.
with open(opts.output_token_array, 'wb') as f:
for _, _, lid, rid, cost in result:
f.write(struct.pack('<I', lid))
f.write(struct.pack('<I', lid))
f.write(struct.pack('<I', cost))
if __name__ == '__main__':
main()
| 37.319149 | 72 | 0.707811 | 497 | 3,508 | 4.897384 | 0.404427 | 0.046015 | 0.060394 | 0.034511 | 0.214051 | 0.19433 | 0.172966 | 0.14092 | 0.14092 | 0.14092 | 0 | 0.010604 | 0.193558 | 3,508 | 93 | 73 | 37.72043 | 0.84977 | 0.507127 | 0 | 0.095238 | 0 | 0 | 0.173066 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.047619 | false | 0 | 0.095238 | 0 | 0.166667 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cebf10244bfb1982239e84e68f0790e22262bd5c | 2,149 | py | Python | static/py/handler.py | bokonV2/WebRemoteControl | 0b070eb42678cc0c00d0c9f37f5df7424cf7ce1f | [
"MIT"
] | null | null | null | static/py/handler.py | bokonV2/WebRemoteControl | 0b070eb42678cc0c00d0c9f37f5df7424cf7ce1f | [
"MIT"
] | 1 | 2022-03-11T07:05:12.000Z | 2022-03-11T07:05:12.000Z | static/py/handler.py | bokonV2/WebRemoteControl | 0b070eb42678cc0c00d0c9f37f5df7424cf7ce1f | [
"MIT"
] | null | null | null | import pyautogui as pag
import subprocess
import os
from datetime import datetime
from static.py.utilsDB import *
desktop = os.path.join(os.environ['USERPROFILE'], 'Desktop')
def getCords():
return pag.position()
def openLocal(port):
subprocess.call(f"start http://localhost:{port}/welcome",
creationflags=subprocess.CREATE_NEW_CONSOLE, shell=True)
def mouseClick(bt):
x = True if bt.x != 'None' else False
y = True if bt.y != 'None' else False
if x and y:
pag.click(x=bt.x, y=bt.y, clicks=bt.clicks,
interval=bt.interval, button=bt.button)
else:
pag.click(clicks=bt.clicks, interval=bt.interval, button=bt.button)
def mouseMove(bt):
x = True if bt.x != 'None' else False
y = True if bt.y != 'None' else False
if x and y:
events = [[
lambda: pag.moveTo(bt.x, bt.y, duration=bt.duration),
lambda: pag.moveRel(bt.x, bt.y, duration=bt.duration)
],[
lambda: pag.dragTo(bt.x, bt.y,
duration=bt.duration, button=bt.button),
lambda: pag.dragRel(bt.x, bt.y,
duration=bt.duration, button=bt.button)
]]
events[bt.mode][bt.move]()
def mouseScroll(bt):
x = True if bt.x != 'None' else False
y = True if bt.y != 'None' else False
if x and y:
        pag.scroll(int(bt.scroll), bt.x, bt.y)
else:
pag.scroll(int(bt.scroll))
def keyboard(bt):
events = [
lambda: pag.typewrite(bt.text, interval=bt.interval),
lambda: pag.hotkey(*bt.text.split(' '), interval=bt.interval),
lambda: pag.press(bt.text, presses=bt.presses, interval=bt.interval),
]
events[bt.mode]()
def cmd(bt):
subprocess.call(bt.text,
creationflags=subprocess.CREATE_NEW_CONSOLE, shell=True)
def handl(id):
button = getButton(id)
events = {
0: mouseClick,
1: mouseMove,
2: mouseScroll,
3: keyboard,
        4: lambda bt: pag.screenshot(os.path.join(desktop, f"{datetime.now().strftime('%m-%d-%Y %H-%M')}.png")),
5: cmd,
}
events[button.type](button)
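# Minimal usage sketch (assumes a button row with this id exists in the DB):
# handl(3)  # look up the stored button and dispatch it via the table above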
| 29.438356 | 99 | 0.593299 | 301 | 2,149 | 4.222591 | 0.282392 | 0.028324 | 0.037766 | 0.023603 | 0.441385 | 0.398899 | 0.398899 | 0.398899 | 0.318647 | 0.194335 | 0 | 0.003774 | 0.260121 | 2,149 | 72 | 100 | 29.847222 | 0.795597 | 0 | 0 | 0.213115 | 0 | 0 | 0.063751 | 0.020475 | 0 | 0 | 0 | 0 | 0 | 1 | 0.131148 | false | 0 | 0.081967 | 0.016393 | 0.229508 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cebf63ab8a6cc2d6019e6de4a865ad7d0e9b0671 | 13,358 | py | Python | train_ssd_mobilenet.py | 10183308/tf-mobilenet-SSD | 593b9c2007a2e5991b800ce9bb5e444ee1b43796 | [
"MIT"
] | 1 | 2020-09-19T07:27:32.000Z | 2020-09-19T07:27:32.000Z | train_ssd_mobilenet.py | 10183308/tf-mobilenet-SSD | 593b9c2007a2e5991b800ce9bb5e444ee1b43796 | [
"MIT"
] | null | null | null | train_ssd_mobilenet.py | 10183308/tf-mobilenet-SSD | 593b9c2007a2e5991b800ce9bb5e444ee1b43796 | [
"MIT"
] | null | null | null | # -*- coding: utf-8 -*-
import tensorflow as tf
from tensorflow.python.ops import control_flow_ops
import ssd_mobilenet_v1 as ssd
from datasets import dataset_factory
from preprocessing import preprocessing_factory
import tf_utils
import os
import pdb
slim = tf.contrib.slim
# ssd network flags
tf.app.flags.DEFINE_float(
'match_threshold', 0.5, 'Matching threshold in the loss function.')
tf.app.flags.DEFINE_float(
'loss_alpha', 1., 'Alpha parameter in the loss function.')
tf.app.flags.DEFINE_float(
'negative_ratio', 3., 'Negative ratio in the loss function.')
# General flags
tf.app.flags.DEFINE_integer(
'num_readers', 4,
'The number of parallel readers that read data from the dataset.')
tf.app.flags.DEFINE_string(
'train_dir', './logs',
'Directory where checkpoints and event logs are written to.')
tf.app.flags.DEFINE_integer(
'num_preprocessing_threads', 4,
'The number of threads used to create the batches.')
tf.app.flags.DEFINE_integer(
'log_every_n_steps', 10,
'The frequency with which logs are print.')
tf.app.flags.DEFINE_integer(
'save_summaries_secs', 600,
'The frequency with which summaries are saved, in seconds.')
tf.app.flags.DEFINE_integer(
'save_interval_secs', 600,
'The frequency with which the model is saved, in seconds.')
tf.app.flags.DEFINE_float(
'gpu_memory_fraction', 0.5, 'GPU memory fraction to use.')
# learning rate flags.
tf.app.flags.DEFINE_string(
'learning_rate_decay_type',
'exponential',
'Specifies how the learning rate is decayed. One of "fixed", "exponential",'
' or "polynomial"')
tf.app.flags.DEFINE_float(
"learning_rate_decay_factor",
0.94,"Learning rate decay factor.")
tf.app.flags.DEFINE_float(
"num_epochs_per_decay",2.0,
"Number of epochs after which learning rate decays.")
tf.app.flags.DEFINE_float(
"learning_rate",0.01,"Initial learning rate.")
tf.app.flags.DEFINE_float(
"end_learning_rate",0.0001,"The minimum end learning rate used by polynomial decay learning rate.")
tf.app.flags.DEFINE_float(
'moving_average_decay', 0.9999,
'The decay to use for the moving average.'
'If left as None, then moving averages are not used.')
# optimization flags; only RMSProp is supported in this version
tf.app.flags.DEFINE_float(
"weight_decay",0.00004,"The weight decay on the model weights.")
tf.app.flags.DEFINE_float(
'label_smoothing', 0.0, 'The amount of label smoothing.')
tf.app.flags.DEFINE_string(
"optimizer","rmsprop",
"The name of the optimizer, only support `rmsprop`.")
tf.app.flags.DEFINE_float(
'momentum', 0.9,
'The momentum for the MomentumOptimizer and RMSPropOptimizer.')
tf.app.flags.DEFINE_float('rmsprop_momentum', 0.9, 'Momentum.')
tf.app.flags.DEFINE_float('rmsprop_decay', 0.9, 'Decay term for RMSProp.')
tf.app.flags.DEFINE_float('opt_epsilon', 1.0, 'Epsilon term for the optimizer.')
# dataset flags
tf.app.flags.DEFINE_string(
'dataset_name', 'pascalvoc_2007', 'The name of the dataset to load.')
tf.app.flags.DEFINE_integer(
'num_classes', 21, 'Number of classes to use in the dataset.')
tf.app.flags.DEFINE_string(
'dataset_split_name', 'train', 'The name of the train/test split.')
tf.app.flags.DEFINE_string(
'dataset_dir', None, 'The directory where the dataset files are stored.')
tf.app.flags.DEFINE_string(
'preprocessing_name', "ssd_512_vgg", 'The name of the preprocessing to use.')
tf.app.flags.DEFINE_integer(
'batch_size', 32, 'The number of samples in each batch.')
tf.app.flags.DEFINE_integer(
'train_image_size', None, 'Train image size')
tf.app.flags.DEFINE_integer('max_number_of_steps', None,
'The maximum number of training steps.')
# fine-tuning flags
tf.app.flags.DEFINE_string(
'checkpoint_path', None,
'The path to a checkpoint from which to fine-tune.')
tf.app.flags.DEFINE_string(
'trainable_scopes', None,
'Comma-separated list of scopes to filter the set of variables to train.'
'By default, None would train all the variables.')
tf.app.flags.DEFINE_boolean(
    'ignore_missing_vars', True,
    'When restoring a checkpoint, ignore variables that are missing from it.')
tf.app.flags.DEFINE_boolean(
    'train_on_cpu', False,
    'Set to `True` to train on the CPU instead of the GPU.')
tf.app.flags.DEFINE_string(
"gpu_device","0",
"Set used gpu id for training.")
tf.app.flags.DEFINE_boolean("allow_growth",True,
"If allow increasing use of memory of GPU.")
FLAGS = tf.app.flags.FLAGS
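# Example invocation (paths are placeholders):
# python train_ssd_mobilenet.py --dataset_dir=./tfrecords \
#     --dataset_name=pascalvoc_2007 --checkpoint_path=./mobilenet_v1.ckpt \
#     --train_dir=./logs --batch_size=32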
def main(_):
if FLAGS.train_on_cpu:
os.environ["CUDA_VISIBLE_DEVICES"]="-1"
else:
os.environ["CUDA_VISIBLE_DEVICES"]=FLAGS.gpu_device
if not FLAGS.dataset_dir:
raise ValueError("You must supply the dataset directory with --dataset-dir.")
tf.logging.set_verbosity(tf.logging.DEBUG)
g = tf.Graph()
with g.as_default():
# select the dataset
dataset = dataset_factory.get_dataset(
FLAGS.dataset_name, FLAGS.dataset_split_name,FLAGS.dataset_dir)
# create global step, used for optimizer moving average decay
with tf.device("/cpu:0"):
global_step = tf.train.create_global_step()
# pdb.set_trace()
# get the ssd network and its anchors
ssd_cls = ssd.SSDnet
ssd_params = ssd_cls.default_params._replace(num_classes=FLAGS.num_classes)
ssd_net = ssd_cls(ssd_params)
image_size = ssd_net.params.img_shape
ssd_anchors = ssd_net.anchors(img_shape=image_size)
# select the preprocessing function
preprocessing_name = FLAGS.preprocessing_name
image_preprocessing_fn = preprocessing_factory.get_preprocessing(
preprocessing_name,is_training=True)
tf_utils.print_configuration(FLAGS.__flags,ssd_params,
dataset.data_sources,FLAGS.train_dir)
# create a dataset provider and batches.
with tf.device("/cpu:0"):
with tf.name_scope(FLAGS.dataset_name+"_data_provider"):
provider = slim.dataset_data_provider.DatasetDataProvider(
dataset,
num_readers=FLAGS.num_readers,
common_queue_capacity=20*FLAGS.batch_size,
common_queue_min=10*FLAGS.batch_size,
shuffle=True)
# get for ssd network: image,labels,bboxes
[image,shape,glabels,gbboxes] = provider.get(["image","shape",
"object/label",
"object/bbox"])
# pdb.set_trace()
# preprocessing
image,glabels,gbboxes = \
image_preprocessing_fn(image,
glabels,gbboxes,
out_shape=image_size,
data_format="NHWC")
# encode groundtruth labels and bboxes
gclasses,glocalisations,gscores= \
ssd_net.bboxes_encode(glabels,gbboxes,ssd_anchors)
batch_shape = [1] + [len(ssd_anchors)] * 3
# training batches and queue
r = tf.train.batch(
tf_utils.reshape_list([image, gclasses, glocalisations, gscores]),
batch_size=FLAGS.batch_size,
num_threads=FLAGS.num_preprocessing_threads,
capacity=5*FLAGS.batch_size)
b_image,b_gclasses,b_glocalisations,b_gscores = \
tf_utils.reshape_list(r,batch_shape)
# prefetch queue
batch_queue = slim.prefetch_queue.prefetch_queue(
tf_utils.reshape_list([b_image,b_gclasses,b_glocalisations,b_gscores]),
capacity = 8)
# dequeue batch
b_image, b_gclasses, b_glocalisations, b_gscores = \
tf_utils.reshape_list(batch_queue.dequeue(), batch_shape)
# gather initial summaries
summaries = set(tf.get_collection(tf.GraphKeys.SUMMARIES))
arg_scope = ssd_net.arg_scope(weight_decay=FLAGS.weight_decay)
with slim.arg_scope(arg_scope):
predictions,localisations,logits,end_points,mobilenet_var_list = \
ssd_net.net(b_image,is_training=True)
# add loss function
ssd_net.losses(logits,localisations,
b_gclasses,b_glocalisations,b_gscores,
match_threshold=FLAGS.match_threshold,
negative_ratio=FLAGS.negative_ratio,
alpha=FLAGS.loss_alpha,
label_smoothing=FLAGS.label_smoothing)
update_ops = tf.get_collection(tf.GraphKeys.UPDATE_OPS)
# add summaries for end_points
for end_point in end_points:
x = end_points[end_point]
summaries.add(tf.summary.histogram("activations/"+end_point,x))
summaries.add(tf.summary.scalar("sparsity/"+end_point,
tf.nn.zero_fraction(x)))
# add summaries for losses and extra losses
for loss in tf.get_collection(tf.GraphKeys.LOSSES):
summaries.add(tf.summary.scalar(loss.op.name,loss))
for loss in tf.get_collection("EXTRA_LOSSES"):
summaries.add(tf.summary.scalar(loss.op.name,loss))
# add summaries for variables
for var in slim.get_model_variables():
summaries.add(tf.summary.histogram(var.op.name,var))
# configure the moving averages
if FLAGS.moving_average_decay: # use moving average decay on weights variables
moving_average_variables = slim.get_model_variables()
variable_averages = tf.train.ExponentialMovingAverage(
FLAGS.moving_average_decay,global_step)
else:
moving_average_variables,variable_averages = None,None
# configure the optimization procedure
with tf.device("/cpu:0"):
learning_rate = tf_utils.configure_learning_rate(FLAGS,
dataset.num_samples,global_step)
optimizer = tf_utils.configure_optimizer(FLAGS,learning_rate)
summaries.add(tf.summary.scalar("learning_rate",learning_rate))
if FLAGS.moving_average_decay:
# update ops executed by trainer
update_ops.append(variable_averages.apply(moving_average_variables))
# get variables to train
variables_to_train = tf_utils.get_variables_to_train(FLAGS)
# return a train tensor and summary op
total_losses = tf.get_collection(tf.GraphKeys.LOSSES)
total_loss = tf.add_n(total_losses,name="total_loss")
summaries.add(tf.summary.scalar("total_loss",total_loss))
# create gradient updates
grads = optimizer.compute_gradients(total_loss,var_list=variables_to_train)
grad_updates = optimizer.apply_gradients(grads,global_step=global_step)
update_ops.append(grad_updates)
# create train op
update_op = tf.group(*update_ops)
train_tensor = control_flow_ops.with_dependencies([update_op],total_loss,
name="train_op")
# merge all summaries together
summary_op = tf.summary.merge(list(summaries),name="summary_op")
# start training
gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=FLAGS.gpu_memory_fraction,allow_growth=FLAGS.allow_growth)
config = tf.ConfigProto(log_device_placement=False,
gpu_options=gpu_options)
saver = tf.train.Saver(max_to_keep=2,
keep_checkpoint_every_n_hours=1.0,
write_version=2,
pad_step_number=False)
# create initial assignment op
init_assign_op,init_feed_dict = slim.assign_from_checkpoint(
FLAGS.checkpoint_path,mobilenet_var_list,
ignore_missing_vars=FLAGS.ignore_missing_vars)
# create an initial assignment function
for k,v in init_feed_dict.items():
if "global_step" in k.name:
g_step = k
init_feed_dict[g_step] = 0 # change the global_step to zero.
init_fn = lambda sess: sess.run(init_assign_op,init_feed_dict)
# run training
slim.learning.train(train_tensor,logdir=FLAGS.train_dir,
init_fn=init_fn,
summary_op=summary_op,
number_of_steps=FLAGS.max_number_of_steps,
save_summaries_secs=FLAGS.save_summaries_secs,
save_interval_secs=FLAGS.save_interval_secs,
session_config=config,
saver=saver,
)
# slim.learning.train(
# train_tensor,
# logdir=FLAGS.train_dir,
# init_fn =tf_utils.get_init_fn(FLAGS,mobilenet_var_list),
# summary_op=summary_op,
# global_step=global_step,
# number_of_steps=FLAGS.max_number_of_steps,
# log_every_n_steps=FLAGS.log_every_n_steps,
# save_summaries_secs=FLAGS.save_summaries_secs,
# saver=saver,
# save_interval_secs =FLAGS.save_interval_secs,
# session_config=config,
# sync_optimizer=None)
if __name__ == '__main__':
tf.app.run()
| 39.173021 | 126 | 0.655338 | 1,704 | 13,358 | 4.88615 | 0.198357 | 0.023421 | 0.04564 | 0.071103 | 0.271079 | 0.197214 | 0.1135 | 0.082753 | 0.060053 | 0.050925 | 0 | 0.008118 | 0.253032 | 13,358 | 340 | 127 | 39.288235 | 0.826318 | 0.114239 | 0 | 0.186957 | 0 | 0 | 0.217757 | 0.006372 | 0 | 0 | 0 | 0 | 0 | 1 | 0.004348 | false | 0 | 0.034783 | 0 | 0.03913 | 0.008696 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec13449fd212cb9f3dc4042fa9b1b5cbb28bdd5 | 3,104 | py | Python | Yaml2ProbaTree/yaml2probatree.py | SCOTT-HAMILTON/Yaml2ProbaTree | f3566f4b26b0bcb43fff04a17b43074d5f952346 | [
"MIT"
] | null | null | null | Yaml2ProbaTree/yaml2probatree.py | SCOTT-HAMILTON/Yaml2ProbaTree | f3566f4b26b0bcb43fff04a17b43074d5f952346 | [
"MIT"
] | null | null | null | Yaml2ProbaTree/yaml2probatree.py | SCOTT-HAMILTON/Yaml2ProbaTree | f3566f4b26b0bcb43fff04a17b43074d5f952346 | [
"MIT"
] | null | null | null | from yaml import load, Loader
import re
import sys
class Yaml2ProbaTree:
def __init__(self, debug=False):
self.debug = debug
def indent(self, text):
if not text:
return ""
text = "\t"+text
return text.replace("\n", "\n\t")
def parse_weight(self, weight):
if not weight is str:
weight = str(weight)
        p = re.compile(r'([0-9]*)/([0-9]*)')
return p.sub(r'$\\frac{\1}{\2}$', weight)
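        # e.g. parse_weight("1/3") -> "$\frac{1}{3}$", a LaTeX fraction for edge labels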
def recurse_node(self, node, name, n=0):
if not node:
print(f"[log] node `{name}` is corrupted")
return
if not "_v" in node.keys():
weight = None
else:
weight = self.parse_weight(node["_v"])
last = len(node.keys()) == 1
text = ""
if n > 0:
if last:
text = """node[end, label=right:\n"""
text += """\t{"""+name+"""}] {}\n"""
else:
text = """node[mytree] {"""+name+"""}\n"""
texts = [self.recurse_node(child_node, child_name, n=n+1)
for child_name, child_node in node.items()
if child_name != "_v"]
text += '\n'.join(list(map(lambda t: """child {\n"""+self.indent(t)+"""\n}""", reversed(texts))))+'\n'
if n > 0:
text = text.strip()
text += """\nedge from parent\n"""
text += """node[above] {"""+weight+"""}"""
if name == "Root":
text = """\\node[mytree] {}\n\t""" + self.indent(text).strip()+ """;\n"""
return text
def yaml2tikz(self, input_yaml_file=None, yaml_text=None):
if yaml_text != None:
# Yaml doesn't work with tabs
text = yaml_text.replace('\t', ' ')
data = load(text, Loader=Loader)
elif input_yaml_file == None:
text = ''.join([ line for line in sys.stdin])
# Yaml doesn't work with tabs
text = text.replace('\t', ' ')
if self.debug:
print(f"[log] stdin : `{text}`")
data = load(text, Loader=Loader)
elif input_yaml_file:
with open(input_yaml_file, "r") as input_tree:
# Yaml doesn't work with tabs
input_tree = ''.join(input_tree.readlines()).replace('\t', ' ')
if self.debug:
print(f"[log] yaml : `{input_tree}`")
data = load(input_tree, Loader=Loader)
if not "root" in data.keys():
print("[error] No root node, exiting...")
exit(1)
result = """
% Set the overall layout of the tree
\\tikzstyle{level 1}=[level distance=3.5cm, sibling distance=3.5cm]
\\tikzstyle{level 2}=[level distance=3.5cm, sibling distance=2cm]
% Define styles for mytree and leafs
\\tikzstyle{mytree} = [text width=4em, text centered]
\\tikzstyle{end} = [circle, minimum width=3pt,fill, inner sep=0pt]
\\begin{tikzpicture}[grow=right, sloped]
"""
result += self.recurse_node(data["root"], "Root")
result += """\end{tikzpicture}"""
return result
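# Minimal usage sketch (illustrative only):
# yaml_text = "root:\n  Heads:\n    _v: 1/2\n  Tails:\n    _v: 1/2\n"
# print(Yaml2ProbaTree().yaml2tikz(yaml_text=yaml_text))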
| 35.678161 | 110 | 0.509665 | 385 | 3,104 | 4.023377 | 0.303896 | 0.016139 | 0.03357 | 0.027114 | 0.178179 | 0.178179 | 0.12266 | 0.08909 | 0.052937 | 0 | 0 | 0.012328 | 0.320554 | 3,104 | 86 | 111 | 36.093023 | 0.722143 | 0.02674 | 0 | 0.108108 | 0 | 0.013514 | 0.235158 | 0.010614 | 0 | 0 | 0 | 0 | 0 | 1 | 0.067568 | false | 0 | 0.040541 | 0 | 0.202703 | 0.054054 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec183997379179ede3e87e510d994afb4f980c1 | 1,009 | py | Python | server.py | validatedid/vidchain-prize-social-good-web | 45ea40e0273846b6ad7753aac86e3d7ffd80282f | [
"Apache-2.0"
] | null | null | null | server.py | validatedid/vidchain-prize-social-good-web | 45ea40e0273846b6ad7753aac86e3d7ffd80282f | [
"Apache-2.0"
] | null | null | null | server.py | validatedid/vidchain-prize-social-good-web | 45ea40e0273846b6ad7753aac86e3d7ffd80282f | [
"Apache-2.0"
] | null | null | null | import http.server
import socketserver
import multiprocessing, time
DIRECTORY = "demo/"
class Handler(http.server.SimpleHTTPRequestHandler):
def __init__(self, *args, **kwargs):
super().__init__(*args, directory=DIRECTORY, **kwargs)
def start_demo_server(host='0.0.0.0', port=8181):
with socketserver.TCPServer((host, port), Handler) as httpd:
print(' * Starting VidChain demo server at http://' + host + ':' + str(port))
httpd.serve_forever()
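# Note: serve_forever() blocks, which is why the server runs in a separate
# multiprocessing.Process below while the parent sleeps and handles Ctrl-C.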
if __name__ == "__main__":
print (" * Starting VidChain demo")
# Start the job processes
try:
web_demo_server_proc = multiprocessing.Process(target=start_demo_server)
# launch servers
web_demo_server_proc.start()
# Keep the main thread running, otherwise signals are ignored.
while True:
time.sleep(0.5)
except KeyboardInterrupt:
# Terminate the running processes.
web_demo_server_proc.terminate()
print('\n * Exiting VidChain demo') | 31.53125 | 85 | 0.670961 | 118 | 1,009 | 5.483051 | 0.525424 | 0.092736 | 0.060278 | 0.078825 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.012723 | 0.221011 | 1,009 | 32 | 86 | 31.53125 | 0.810433 | 0.130823 | 0 | 0 | 0 | 0 | 0.13173 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.142857 | 0 | 0.285714 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec2cf3c1e1a2962f84baa27e64fd12c30c65a57 | 10,914 | py | Python | PythonVirtEnv/Lib/site-packages/pythonwin/pywin/tools/regedit.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 64 | 2020-07-22T06:24:18.000Z | 2022-03-27T10:48:15.000Z | PythonVirtEnv/Lib/site-packages/pythonwin/pywin/tools/regedit.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 20 | 2021-05-03T18:02:23.000Z | 2022-03-12T12:01:04.000Z | PythonVirtEnv/Lib/site-packages/pythonwin/pywin/tools/regedit.py | zuhorski/EPL_Project | 2d2417652879cfbe33c44c003ad77b7222590849 | [
"MIT"
] | 18 | 2021-11-12T03:15:45.000Z | 2022-03-25T05:29:00.000Z | # Regedit - a Registry Editor for Python
import win32api, win32ui, win32con, commctrl
from pywin.mfc import window, docview, dialog
from . import hierlist
import regutil
import string
def SafeApply( fn, args, err_desc = "" ):
try:
fn(*args)
return 1
except win32api.error as exc:
msg = "Error " + err_desc + "\r\n\r\n" + exc.strerror
win32ui.MessageBox(msg)
return 0
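# e.g. SafeApply(win32api.RegDeleteKey, (root, subkey), "deleting registry key")
# returns 1 on success, or shows a message box and returns 0 on an API error.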
class SplitterFrame(window.MDIChildWnd):
def __init__(self):
# call base CreateFrame
self.images = None
window.MDIChildWnd.__init__(self)
def OnCreateClient(self, cp, context):
splitter = win32ui.CreateSplitter()
doc = context.doc
frame_rect = self.GetWindowRect()
size = ((frame_rect[2] - frame_rect[0]),
(frame_rect[3] - frame_rect[1])//2)
sub_size = (size[0]//3, size[1])
splitter.CreateStatic (self, 1, 2)
# CTreeControl view
self.keysview = RegistryTreeView(doc)
# CListControl view
self.valuesview = RegistryValueView(doc)
splitter.CreatePane (self.keysview, 0, 0, (sub_size))
splitter.CreatePane (self.valuesview, 0, 1, (0,0)) # size ignored.
splitter.SetRowInfo(0, size[1] ,0)
# Setup items in the imagelist
return 1
def OnItemDoubleClick(self, info, extra):
(hwndFrom, idFrom, code) = info
if idFrom==win32ui.AFX_IDW_PANE_FIRST:
# Tree control
return None
elif idFrom==win32ui.AFX_IDW_PANE_FIRST + 1:
item = self.keysview.SelectedItem()
self.valuesview.EditValue(item)
return 0
# List control
else:
return None # Pass it on
def PerformItemSelected(self,item):
return self.valuesview.UpdateForRegItem(item)
def OnDestroy(self, msg):
window.MDIChildWnd.OnDestroy(self, msg)
if self.images:
self.images.DeleteImageList()
self.images = None
class RegistryTreeView(docview.TreeView):
def OnInitialUpdate(self):
rc = self._obj_.OnInitialUpdate()
self.frame = self.GetParent().GetParent()
self.hierList = hierlist.HierListWithItems( self.GetHLIRoot(), win32ui.IDB_HIERFOLDERS, win32ui.AFX_IDW_PANE_FIRST)
self.hierList.HierInit(self.frame, self.GetTreeCtrl())
self.hierList.SetStyle(commctrl.TVS_HASLINES | commctrl.TVS_LINESATROOT | commctrl.TVS_HASBUTTONS)
self.hierList.PerformItemSelected = self.PerformItemSelected
self.frame.HookNotify(self.frame.OnItemDoubleClick, commctrl.NM_DBLCLK)
self.frame.HookNotify(self.OnItemRightClick, commctrl.NM_RCLICK)
# self.HookMessage(self.OnItemRightClick, win32con.WM_RBUTTONUP)
def GetHLIRoot(self):
doc = self.GetDocument()
regroot = doc.root
subkey = doc.subkey
return HLIRegistryKey(regroot, subkey, "Root")
def OnItemRightClick(self, notify_data, extra):
# First select the item we right-clicked on.
pt = self.ScreenToClient(win32api.GetCursorPos())
flags, hItem = self.HitTest(pt)
        if hItem == 0 or (flags & commctrl.TVHT_ONITEM) == 0:
return None
self.Select(hItem, commctrl.TVGN_CARET)
menu = win32ui.CreatePopupMenu()
menu.AppendMenu(win32con.MF_STRING|win32con.MF_ENABLED,1000, "Add Key")
menu.AppendMenu(win32con.MF_STRING|win32con.MF_ENABLED,1001, "Add Value")
menu.AppendMenu(win32con.MF_STRING|win32con.MF_ENABLED,1002, "Delete Key")
self.HookCommand(self.OnAddKey, 1000)
self.HookCommand(self.OnAddValue, 1001)
self.HookCommand(self.OnDeleteKey, 1002)
menu.TrackPopupMenu(win32api.GetCursorPos()) # track at mouse position.
return None
def OnDeleteKey(self,command, code):
hitem = self.hierList.GetSelectedItem()
item = self.hierList.ItemFromHandle(hitem)
msg = "Are you sure you wish to delete the key '%s'?" % (item.keyName,)
id = win32ui.MessageBox(msg, None, win32con.MB_YESNO)
if id != win32con.IDYES:
return
if SafeApply(win32api.RegDeleteKey, (item.keyRoot, item.keyName), "deleting registry key" ):
            # Get the item's parent.
try:
hparent = self.GetParentItem(hitem)
except win32ui.error:
hparent = None
self.hierList.Refresh(hparent)
def OnAddKey(self,command, code):
from pywin.mfc import dialog
val = dialog.GetSimpleInput("New key name", '', "Add new key")
if val is None: return # cancelled.
hitem = self.hierList.GetSelectedItem()
item = self.hierList.ItemFromHandle(hitem)
if SafeApply(win32api.RegCreateKey, (item.keyRoot, item.keyName + "\\" + val)):
self.hierList.Refresh(hitem)
def OnAddValue(self,command, code):
from pywin.mfc import dialog
val = dialog.GetSimpleInput("New value", "", "Add new value")
if val is None: return # cancelled.
hitem = self.hierList.GetSelectedItem()
item = self.hierList.ItemFromHandle(hitem)
if SafeApply(win32api.RegSetValue, (item.keyRoot, item.keyName, win32con.REG_SZ, val)):
            # Simply re-select the current item to refresh the right splitter.
self.PerformItemSelected(item)
# self.Select(hitem, commctrl.TVGN_CARET)
def PerformItemSelected(self, item):
return self.frame.PerformItemSelected(item)
def SelectedItem(self):
return self.hierList.ItemFromHandle(self.hierList.GetSelectedItem())
def SearchSelectedItem(self):
handle = self.hierList.GetChildItem(0)
while 1:
# print "State is", self.hierList.GetItemState(handle, -1)
if self.hierList.GetItemState(handle, commctrl.TVIS_SELECTED):
# print "Item is ", self.hierList.ItemFromHandle(handle)
return self.hierList.ItemFromHandle(handle)
handle = self.hierList.GetNextSiblingItem(handle)
class RegistryValueView(docview.ListView):
def OnInitialUpdate(self):
hwnd = self._obj_.GetSafeHwnd()
        style = win32api.GetWindowLong(hwnd, win32con.GWL_STYLE)
        win32api.SetWindowLong(hwnd, win32con.GWL_STYLE, (style & ~commctrl.LVS_TYPEMASK) | commctrl.LVS_REPORT)
itemDetails = (commctrl.LVCFMT_LEFT, 100, "Name", 0)
self.InsertColumn(0, itemDetails)
itemDetails = (commctrl.LVCFMT_LEFT, 500, "Data", 0)
self.InsertColumn(1, itemDetails)
def UpdateForRegItem(self, item):
self.DeleteAllItems()
hkey = win32api.RegOpenKey(item.keyRoot, item.keyName)
try:
valNum = 0
ret = []
while 1:
try:
res = win32api.RegEnumValue(hkey, valNum)
except win32api.error:
break
name = res[0]
if not name: name = "(Default)"
self.InsertItem(valNum, name)
self.SetItemText(valNum, 1, str(res[1]))
valNum = valNum + 1
finally:
win32api.RegCloseKey(hkey)
def EditValue(self, item):
# Edit the current value
class EditDialog(dialog.Dialog):
def __init__(self, item):
self.item = item
dialog.Dialog.__init__(self, win32ui.IDD_LARGE_EDIT)
def OnInitDialog(self):
self.SetWindowText("Enter new value")
self.GetDlgItem(win32con.IDCANCEL).ShowWindow(win32con.SW_SHOW)
self.edit = self.GetDlgItem(win32ui.IDC_EDIT1)
                # Modify the edit window's style
style = win32api.GetWindowLong(self.edit.GetSafeHwnd(), win32con.GWL_STYLE)
style = style & (~win32con.ES_WANTRETURN)
win32api.SetWindowLong(self.edit.GetSafeHwnd(), win32con.GWL_STYLE, style)
self.edit.SetWindowText(str(self.item))
self.edit.SetSel(-1)
return dialog.Dialog.OnInitDialog(self)
def OnDestroy(self,msg):
self.newvalue = self.edit.GetWindowText()
try:
index = self.GetNextItem(-1, commctrl.LVNI_SELECTED)
except win32ui.error:
return # No item selected.
if index==0:
keyVal = ""
else:
keyVal = self.GetItemText(index,0)
# Query for a new value.
try:
newVal = self.GetItemsCurrentValue(item, keyVal)
except TypeError as details:
            win32ui.MessageBox(str(details))
return
d = EditDialog(newVal)
if d.DoModal()==win32con.IDOK:
try:
self.SetItemsCurrentValue(item, keyVal, d.newvalue)
except win32api.error as exc:
win32ui.MessageBox("Error setting value\r\n\n%s" % exc.strerror)
self.UpdateForRegItem(item)
def GetItemsCurrentValue(self, item, valueName):
hkey = win32api.RegOpenKey(item.keyRoot, item.keyName)
try:
val, type = win32api.RegQueryValueEx(hkey, valueName)
if type != win32con.REG_SZ:
raise TypeError("Only strings can be edited")
return val
finally:
win32api.RegCloseKey(hkey)
def SetItemsCurrentValue(self, item, valueName, value):
        # ** Assumes the value has already been checked to be a string.
hkey = win32api.RegOpenKey(item.keyRoot, item.keyName , 0, win32con.KEY_SET_VALUE)
try:
win32api.RegSetValueEx(hkey, valueName, 0, win32con.REG_SZ, value)
finally:
win32api.RegCloseKey(hkey)
class RegTemplate(docview.DocTemplate):
def __init__(self):
docview.DocTemplate.__init__(self, win32ui.IDR_PYTHONTYPE, None, SplitterFrame, None)
# def InitialUpdateFrame(self, frame, doc, makeVisible=1):
# self._obj_.InitialUpdateFrame(frame, doc, makeVisible) # call default handler.
# frame.InitialUpdateFrame(doc, makeVisible)
def OpenRegistryKey(self, root = None, subkey = None): # Use this instead of OpenDocumentFile.
# Look for existing open document
if root is None: root = regutil.GetRootKey()
if subkey is None: subkey = regutil.BuildDefaultPythonKey()
for doc in self.GetDocumentList():
if doc.root==root and doc.subkey==subkey:
doc.GetFirstView().ActivateFrame()
return doc
# not found - new one.
doc = RegDocument(self, root, subkey)
frame = self.CreateNewFrame(doc)
doc.OnNewDocument()
self.InitialUpdateFrame(frame, doc, 1)
return doc
class RegDocument (docview.Document):
def __init__(self, template, root, subkey):
docview.Document.__init__(self, template)
self.root = root
self.subkey = subkey
self.SetTitle("Registry Editor: " + subkey)
    def OnOpenDocument (self, name):
        raise TypeError("This template can not open files")
class HLIRegistryKey(hierlist.HierListItem):
def __init__( self, keyRoot, keyName, userName ):
self.keyRoot = keyRoot
self.keyName = keyName
self.userName = userName
hierlist.HierListItem.__init__(self)
    def __lt__(self, other):
        return self.userName < other.userName
def __eq__(self, other):
return self.keyRoot==other.keyRoot and \
self.keyName == other.keyName and \
self.userName == other.userName
def __repr__(self):
return "<%s with root=%s, key=%s>" % (self.__class__.__name__, self.keyRoot, self.keyName)
def GetText(self):
return self.userName
def IsExpandable(self):
# All keys are expandable, even if they currently have zero children.
return 1
## hkey = win32api.RegOpenKey(self.keyRoot, self.keyName)
## try:
## keys, vals, dt = win32api.RegQueryInfoKey(hkey)
## return (keys>0)
## finally:
## win32api.RegCloseKey(hkey)
def GetSubList(self):
hkey = win32api.RegOpenKey(self.keyRoot, self.keyName)
win32ui.DoWaitCursor(1)
try:
keyNum = 0
ret = []
while 1:
try:
key = win32api.RegEnumKey(hkey, keyNum)
except win32api.error:
break
ret.append(HLIRegistryKey(self.keyRoot, self.keyName + "\\" + key, key))
keyNum = keyNum + 1
finally:
win32api.RegCloseKey(hkey)
win32ui.DoWaitCursor(0)
return ret
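# --- Added sketch (not in the original file): the enumeration idiom used by
# GetSubList, as a standalone helper. win32api.RegEnumKey raises
# win32api.error once it runs past the last subkey, which terminates the loop.
def _list_subkeys(key_root, key_name):
    hkey = win32api.RegOpenKey(key_root, key_name)
    try:
        names, index = [], 0
        while 1:
            try:
                names.append(win32api.RegEnumKey(hkey, index))
            except win32api.error:
                break
            index = index + 1
        return names
    finally:
        win32api.RegCloseKey(hkey)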
template = RegTemplate()
def EditRegistry(root = None, key = None):
doc=template.OpenRegistryKey(root, key)
if __name__=='__main__':
EditRegistry()
| 33.072727 | 117 | 0.729064 | 1,359 | 10,914 | 5.758646 | 0.264165 | 0.030667 | 0.019934 | 0.016867 | 0.165474 | 0.136468 | 0.110912 | 0.083823 | 0.053795 | 0.045234 | 0 | 0.024277 | 0.154572 | 10,914 | 329 | 118 | 33.173252 | 0.823886 | 0.111508 | 0 | 0.273077 | 0 | 0 | 0.033775 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.134615 | false | 0 | 0.026923 | 0.030769 | 0.292308 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec30aede34527486729ccff4a99b8e41c23a401 | 1,061 | py | Python | fish-bowl-rounds-counter-process-data.py | jonkracht/fish-bowl-round-counter | 3a468fd05f05d6873d811fbc31930dfe5eb3736e | [
"MIT"
] | null | null | null | fish-bowl-rounds-counter-process-data.py | jonkracht/fish-bowl-round-counter | 3a468fd05f05d6873d811fbc31930dfe5eb3736e | [
"MIT"
] | null | null | null | fish-bowl-rounds-counter-process-data.py | jonkracht/fish-bowl-round-counter | 3a468fd05f05d6873d811fbc31930dfe5eb3736e | [
"MIT"
] | null | null | null | import pandas as pd
def load_data(file_name):
'''Load saved data (in csv form) into a Pandas dataframe.'''
return pd.read_csv(file_name)
def main():
'''Clean up data scraped from PDGA.'''
file_name = 'fish-bowl-rounds-counter-data.csv'
# Load saved data
data = load_data(file_name)
# Make names upper case
data['Name'] = data['Name'].apply(lambda Name: Name.upper())
data = data.sort_values(by='Number')
newData = []
for number in data['Number'].unique():
matches = data[data['Number'] == number]
years = sorted(matches['Year'])
names = list(sorted(matches['Name'].unique()))
counts = len(years)
newData.append([names, number, years, counts])
monkey = pd.DataFrame(newData, columns=['Names', 'Number', 'Years', 'Counts']).sort_values(by='Counts', ascending=False)
print(data.value_counts().head(50))
#print(data['Player Name'].value_counts().head(20))
monkey.to_csv('processed-data.csv')
print('hi')
if __name__ == '__main__':
main() | 23.065217 | 124 | 0.626767 | 140 | 1,061 | 4.607143 | 0.45 | 0.049612 | 0.037209 | 0.049612 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.004751 | 0.206409 | 1,061 | 46 | 125 | 23.065217 | 0.761283 | 0.165881 | 0 | 0 | 0 | 0 | 0.140893 | 0.037801 | 0 | 0 | 0 | 0 | 0 | 1 | 0.095238 | false | 0 | 0.047619 | 0 | 0.190476 | 0.095238 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec3376e12dfbf15bb9e0b77a397e1ab26ed5b6a | 19,286 | py | Python | trace-viewer/third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py | yinquan529/platform-external-chromium-trace | 8252ae6b83ea65cf871e7981e981da07379f5a0f | [
"BSD-3-Clause"
] | 231 | 2015-01-08T09:04:44.000Z | 2021-12-30T03:03:10.000Z | third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py | 1065672644894730302/Chromium | 239dd49e906be4909e293d8991e998c9816eaa35 | [
"BSD-3-Clause"
] | 5 | 2015-03-27T14:29:23.000Z | 2019-09-25T13:23:12.000Z | third_party/closure_linter/closure_linter/closurizednamespacesinfo_test.py | 1065672644894730302/Chromium | 239dd49e906be4909e293d8991e998c9816eaa35 | [
"BSD-3-Clause"
] | 268 | 2015-01-21T05:53:28.000Z | 2022-03-25T22:09:01.000Z | #!/usr/bin/env python
#
# Copyright 2010 The Closure Linter Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS-IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Unit tests for ClosurizedNamespacesInfo."""
import unittest as googletest
from closure_linter import closurizednamespacesinfo
from closure_linter import javascriptstatetracker
from closure_linter import javascripttokenizer
from closure_linter import javascripttokens
from closure_linter import tokenutil
# pylint: disable-msg=C6409
TokenType = javascripttokens.JavaScriptTokenType
class ClosurizedNamespacesInfoTest(googletest.TestCase):
"""Tests for ClosurizedNamespacesInfo."""
_test_cases = {
'goog.global.anything': None,
'package.CONSTANT': 'package',
'package.methodName': 'package',
'package.subpackage.methodName': 'package.subpackage',
'package.subpackage.methodName.apply': 'package.subpackage',
'package.ClassName.something': 'package.ClassName',
'package.ClassName.Enum.VALUE.methodName': 'package.ClassName',
'package.ClassName.CONSTANT': 'package.ClassName',
'package.namespace.CONSTANT.methodName': 'package.namespace',
'package.ClassName.inherits': 'package.ClassName',
'package.ClassName.apply': 'package.ClassName',
'package.ClassName.methodName.apply': 'package.ClassName',
'package.ClassName.methodName.call': 'package.ClassName',
'package.ClassName.prototype.methodName': 'package.ClassName',
'package.ClassName.privateMethod_': 'package.ClassName',
'package.className.privateProperty_': 'package.className',
'package.className.privateProperty_.methodName': 'package.className',
'package.ClassName.PrivateEnum_': 'package.ClassName',
'package.ClassName.prototype.methodName.apply': 'package.ClassName',
'package.ClassName.property.subProperty': 'package.ClassName',
'package.className.prototype.something.somethingElse': 'package.className'
}
_tokenizer = javascripttokenizer.JavaScriptTokenizer()
def testGetClosurizedNamespace(self):
"""Tests that the correct namespace is returned for various identifiers."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'], ignored_extra_namespaces=[])
for identifier, expected_namespace in self._test_cases.items():
actual_namespace = namespaces_info.GetClosurizedNamespace(identifier)
self.assertEqual(
expected_namespace,
actual_namespace,
'expected namespace "' + str(expected_namespace) +
'" for identifier "' + str(identifier) + '" but was "' +
str(actual_namespace) + '"')
def testIgnoredExtraNamespaces(self):
"""Tests that ignored_extra_namespaces are ignored."""
token = self._GetRequireTokens('package.Something')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=['package'],
ignored_extra_namespaces=['package.Something'])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should be valid since it is in ignored namespaces.')
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be invalid since it is not in ignored namespaces.')
def testIsExtraProvide_created(self):
"""Tests that provides for created namespaces are not extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_createdIdentifier(self):
"""Tests that provides for created identifiers are not extra."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is created.')
def testIsExtraProvide_notCreated(self):
"""Tests that provides for non-created namespaces are extra."""
input_lines = ['goog.provide(\'package.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is not created.')
def testIsExtraProvide_duplicate(self):
"""Tests that providing a namespace twice makes the second one extra."""
input_lines = [
'goog.provide(\'package.Foo\');',
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
# Advance to the second goog.provide token.
token = tokenutil.Search(token.next, TokenType.IDENTIFIER)
self.assertTrue(namespaces_info.IsExtraProvide(token),
'Should be extra since it is already provided.')
def testIsExtraProvide_notClosurized(self):
"""Tests that provides of non-closurized namespaces are not extra."""
input_lines = ['goog.provide(\'notclosurized.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraProvide(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_used(self):
"""Tests that requires for used namespaces are not extra."""
input_lines = [
'goog.require(\'package.Foo\');',
'var x = package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is used.')
def testIsExtraRequire_usedIdentifier(self):
"""Tests that requires for used methods on classes are extra."""
input_lines = [
'goog.require(\'package.Foo.methodName\');',
'var x = package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should require the package, not the method specifically.')
def testIsExtraRequire_notUsed(self):
"""Tests that requires for unused namespaces are extra."""
input_lines = ['goog.require(\'package.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
'Should be extra since it is not used.')
def testIsExtraRequire_notClosurized(self):
"""Tests that requires of non-closurized namespaces are not extra."""
input_lines = ['goog.require(\'notclosurized.Foo\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is not closurized.')
def testIsExtraRequire_objectOnClass(self):
"""Tests that requiring an object on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The whole class, not the object, should be required.')
def testIsExtraRequire_constantOnClass(self):
"""Tests that requiring a constant on a class is extra."""
input_lines = [
'goog.require(\'package.Foo.CONSTANT\');',
'var x = package.Foo.CONSTANT',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsExtraRequire(token),
                    'The class, not the constant, should be required.')
def testIsExtraRequire_constantNotOnClass(self):
"""Tests that requiring a constant not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.CONSTANT\');',
'var x = package.subpackage.CONSTANT',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Constants can be required except on classes.')
def testIsExtraRequire_methodNotOnClass(self):
"""Tests that requiring a method not on a class is OK."""
input_lines = [
'goog.require(\'package.subpackage.method\');',
'var x = package.subpackage.method()',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
                     'Methods can be required except on classes.')
def testIsExtraRequire_defaults(self):
"""Tests that there are no warnings about extra requires for test utils"""
input_lines = ['goog.require(\'goog.testing.jsunit\');']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['goog'], [])
self.assertFalse(namespaces_info.IsExtraRequire(token),
'Should not be extra since it is for testing.')
def testGetMissingProvides_provided(self):
"""Tests that provided functions don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedIdentifier(self):
"""Tests that provided identifiers don't cause a missing provide."""
input_lines = [
'goog.provide(\'package.Foo.methodName\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_providedParentIdentifier(self):
"""Tests that provided identifiers on a class don't cause a missing provide
on objects attached to that class."""
input_lines = [
'goog.provide(\'package.foo.ClassName\');',
'package.foo.ClassName.methodName = function() {};',
'package.foo.ClassName.ObjectName = 1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_unprovided(self):
"""Tests that unprovided functions cause a missing provide."""
input_lines = ['package.Foo = function() {};']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingProvides()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingProvides())
def testGetMissingProvides_privatefunction(self):
"""Tests that unprovided private functions don't cause a missing provide."""
input_lines = ['package.Foo_ = function() {};']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingProvides_required(self):
"""Tests that required namespaces don't cause a missing provide."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName = function() {};'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_required(self):
"""Tests that required namespaces don't cause a missing require."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredIdentifier(self):
"""Tests that required namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingProvides()))
def testGetMissingRequires_requiredParentClass(self):
"""Tests that requiring a parent class of an object is sufficient to prevent
a missing require on that object."""
input_lines = [
'goog.require(\'package.Foo\');',
'package.Foo.methodName();',
'package.Foo.methodName(package.Foo.ObjectName);'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_unrequired(self):
"""Tests that unrequired namespaces cause a missing require."""
input_lines = ['package.Foo();']
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
def testGetMissingRequires_provided(self):
"""Tests that provided namespaces satisfy identifiers on that namespace."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_created(self):
"""Tests that created namespaces do not satisfy usage of an identifier."""
input_lines = [
'package.Foo = function();',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()))
self.assertTrue('package.Foo' in namespaces_info.GetMissingRequires())
def testGetMissingRequires_createdIdentifier(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.methodName = function();',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(0, len(namespaces_info.GetMissingRequires()))
def testGetMissingRequires_objectOnClass(self):
"""Tests that we should require a class, not the object on the class."""
input_lines = [
'goog.require(\'package.Foo.Enum\');',
'var x = package.Foo.Enum.VALUE1;',
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertEquals(1, len(namespaces_info.GetMissingRequires()),
                      'The whole class, not the object, should be required.')
def testIsFirstProvide(self):
"""Tests operation of the isFirstProvide method."""
input_lines = [
'goog.provide(\'package.Foo\');',
'package.Foo.methodName();'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = self._GetInitializedNamespacesInfo(token, ['package'], [])
self.assertTrue(namespaces_info.IsFirstProvide(token))
def testGetWholeIdentifierString(self):
"""Tests that created identifiers satisfy usage of the identifier."""
input_lines = [
'package.Foo.',
' veryLong.',
' identifier;'
]
token = self._tokenizer.TokenizeFile(input_lines)
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo([], [])
self.assertEquals('package.Foo.veryLong.identifier',
namespaces_info._GetWholeIdentifierString(token))
self.assertEquals(None,
namespaces_info._GetWholeIdentifierString(token.next))
def _GetInitializedNamespacesInfo(self, token, closurized_namespaces,
ignored_extra_namespaces):
"""Returns a namespaces info initialized with the given token stream."""
namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
closurized_namespaces=closurized_namespaces,
ignored_extra_namespaces=ignored_extra_namespaces)
state_tracker = javascriptstatetracker.JavaScriptStateTracker()
while token:
namespaces_info.ProcessToken(token, state_tracker)
token = token.next
return namespaces_info
  def _GetProvideTokens(self, namespace):
    """Returns a list of tokens for a goog.provide of the given namespace."""
    line_text = 'goog.provide(\'' + namespace + '\');\n'
    return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
def _GetRequireTokens(self, namespace):
"""Returns a list of tokens for a goog.require of the given namespace."""
line_text = 'goog.require(\'' + namespace + '\');\n'
return javascripttokenizer.JavaScriptTokenizer().TokenizeFile([line_text])
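# --- Added sketch (not in the original file): driving ClosurizedNamespacesInfo
# outside of a unittest, reusing only helpers already imported in this module.
def _DemoIsExtraRequire():
  lines = ["goog.require('package.Foo');", 'var x = package.Foo.bar();']
  token = javascripttokenizer.JavaScriptTokenizer().TokenizeFile(lines)
  namespaces_info = closurizednamespacesinfo.ClosurizedNamespacesInfo(
      closurized_namespaces=['package'], ignored_extra_namespaces=[])
  tracker = javascriptstatetracker.JavaScriptStateTracker()
  current = token
  while current:
    namespaces_info.ProcessToken(current, tracker)
    current = current.next
  return namespaces_info.IsExtraRequire(token)  # False: the require is used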
if __name__ == '__main__':
googletest.main()
| 42.668142 | 80 | 0.704034 | 1,935 | 19,286 | 6.873385 | 0.136434 | 0.077895 | 0.030301 | 0.067669 | 0.691053 | 0.637669 | 0.59015 | 0.577669 | 0.552331 | 0.529474 | 0 | 0.001826 | 0.176397 | 19,286 | 451 | 81 | 42.762749 | 0.835495 | 0.155087 | 0 | 0.490446 | 0 | 0 | 0.218527 | 0.071682 | 0 | 0 | 0 | 0 | 0.117834 | 1 | 0.111465 | false | 0 | 0.019108 | 0 | 0.149682 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec6da0c9630fff655c3d1d3633942fd2491337a | 12,216 | py | Python | rllib/contrib/sumo/utils.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 22 | 2018-05-08T05:52:34.000Z | 2020-04-01T10:09:55.000Z | rllib/contrib/sumo/utils.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 73 | 2021-09-25T07:11:39.000Z | 2022-03-26T07:10:59.000Z | rllib/contrib/sumo/utils.py | mgelbart/ray | 4cec2286572e368a4bd64aae467751a384eff62d | [
"Apache-2.0"
] | 10 | 2018-04-27T10:50:59.000Z | 2020-02-24T02:41:43.000Z | """ RLLIB SUMO Utils - SUMO Connector Wrapper
Author: Lara CODECA lara.codeca@gmail.com
See:
https://github.com/lcodeca/rllibsumoutils
https://github.com/lcodeca/rllibsumodocker
for further details.
"""
import collections
from copy import deepcopy
import logging
import os
from pprint import pformat
import sys
from lxml import etree
from ray.rllib.contrib.sumo.connector import SUMOConnector, DEFAULT_CONFIG
# """ Import SUMO library """
if "SUMO_HOME" in os.environ:
sys.path.append(os.path.join(os.environ["SUMO_HOME"], "tools"))
# from traci.exceptions import TraCIException
import traci.constants as tc
else:
sys.exit("please declare environment variable 'SUMO_HOME'")
###############################################################################
logging.basicConfig()
logger = logging.getLogger(__name__)
###############################################################################
def sumo_default_config():
"""Return the default configuration for the SUMO Connector."""
return deepcopy(DEFAULT_CONFIG)
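# --- Added usage sketch (not in the original file): because of the deepcopy,
# the returned dict can be edited freely without mutating DEFAULT_CONFIG.
# "tripinfo_keyword" is a key this module itself reads in
# process_tripinfo_file(); the value assigned here is illustrative.
def _demo_custom_config():
    config = sumo_default_config()
    config["tripinfo_keyword"] = "tripinfo.xml"
    return config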
###############################################################################
class SUMOUtils(SUMOConnector):
"""
A wrapper for the interaction with the SUMO simulation that adds
functionalities.
"""
def _initialize_metrics(self):
"""Specific metrics initialization"""
# Default TripInfo file metrics
self.tripinfo = collections.defaultdict(dict)
self.personinfo = collections.defaultdict(dict)
###########################################################################
# TRIPINFO FILE
def process_tripinfo_file(self):
"""
        Closes the TraCI connections, then reads and processes the tripinfo
        data. It requires the "tripinfo_keyword" and "tripinfo_xml_schema"
        configuration parameters to be set.
"""
if "tripinfo_keyword" not in self._config:
raise Exception(
"Function process_tripinfo_file requires the parameter "
"'tripinfo_keyword' set.",
self._config,
)
if "tripinfo_xml_schema" not in self._config:
raise Exception(
"Function process_tripinfo_file requires the parameter "
"'tripinfo_xml_schema' set.",
self._config,
)
# Make sure that the simulation is finished and the tripinfo file is
# written.
self.end_simulation()
# Reset the data structures.
self.tripinfo = collections.defaultdict(dict)
self.personinfo = collections.defaultdict(dict)
schema = etree.XMLSchema(file=self._config["tripinfo_xml_schema"])
parser = etree.XMLParser(schema=schema)
tripinfo_file = "{}{}".format(
self._sumo_output_prefix, self._config["tripinfo_keyword"]
)
tree = etree.parse(tripinfo_file, parser)
logger.info("Processing %s tripinfo file.", tripinfo_file)
for element in tree.getroot():
if element.tag == "tripinfo":
self.tripinfo[element.attrib["id"]] = dict(element.attrib)
elif element.tag == "personinfo":
self.personinfo[element.attrib["id"]] = dict(element.attrib)
stages = []
for stage in element:
stages.append([stage.tag, dict(stage.attrib)])
self.personinfo[element.attrib["id"]]["stages"] = stages
else:
raise Exception("Unrecognized element in the tripinfo file.")
logger.debug("TRIPINFO: \n%s", pformat(self.tripinfo))
logger.debug("PERSONINFO: \n%s", pformat(self.personinfo))
def get_timeloss(self, entity, default=float("NaN")):
"""Returns the timeLoss computed by SUMO for the given entity."""
if entity in self.tripinfo:
logger.debug("TRIPINFO for %s", entity)
if "timeLoss" in self.tripinfo[entity]:
logger.debug("timeLoss %s", self.tripinfo[entity]["timeLoss"])
return float(self.tripinfo[entity]["timeLoss"])
logger.debug("timeLoss not found.")
return default
elif entity in self.personinfo:
logger.debug("PERSONINFO for %s", entity)
logger.debug("%s", pformat(self.personinfo[entity]))
time_loss, ts_found = 0.0, False
for _, stage in self.personinfo[entity]["stages"]:
if "timeLoss" in stage:
logger.debug("timeLoss %s", stage["timeLoss"])
time_loss += float(stage["timeLoss"])
ts_found = True
if not ts_found:
logger.debug("timeLoss not found.")
return default
if time_loss <= 0:
logger.debug("ERROR: timeLoss is %.2f", time_loss)
return default
logger.debug("total timeLoss %.2f", time_loss)
return time_loss
else:
logger.debug("Entity %s not found.", entity)
return default
def get_depart(self, entity, default=float("NaN")):
"""
Returns the departure recorded by SUMO for the given entity.
The functions process_tripinfo_file() needs to be called in advance
to initialize the data structures required.
If the entity does not exist or does not have the value, it returns
the default value.
"""
if entity in self.tripinfo:
logger.debug("TRIPINFO for %s", entity)
if "depart" in self.tripinfo[entity]:
logger.debug("depart %s", self.tripinfo[entity]["depart"])
return float(self.tripinfo[entity]["depart"])
logger.debug("depart not found.")
elif entity in self.personinfo:
logger.debug("PERSONINFO for %s", entity)
logger.debug("%s", pformat(self.personinfo[entity]))
if "depart" in self.personinfo[entity]:
logger.debug("depart %s", self.personinfo[entity]["depart"])
return float(self.personinfo[entity]["depart"])
logger.debug("depart not found.")
else:
logger.debug("Entity %s not found.", entity)
return default
def get_duration(self, entity, default=float("NaN")):
"""
Returns the duration computed by SUMO for the given entity.
The functions process_tripinfo_file() needs to be called in advance
to initialize the data structures required.
If the entity does not exist or does not have the value, it returns
the default value.
"""
if entity in self.tripinfo:
logger.debug("TRIPINFO for %s", entity)
if "duration" in self.tripinfo[entity]:
logger.debug("duration %s", self.tripinfo[entity]["duration"])
return float(self.tripinfo[entity]["duration"])
logger.debug("duration not found.")
elif entity in self.personinfo:
logger.debug("PERSONINFO for %s", entity)
logger.debug("%s", pformat(self.personinfo[entity]))
if "depart" in self.personinfo[entity]:
depart = float(self.personinfo[entity]["depart"])
arrival = depart
for _, stage in self.personinfo[entity]["stages"]:
if "arrival" in stage:
arrival = float(stage["arrival"])
duration = arrival - depart
if duration > 0:
logger.debug("duration %d", duration)
return duration
logger.debug("duration impossible to compute.")
else:
logger.debug("Entity %s not found.", entity)
return default
def get_arrival(self, entity, default=float("NaN")):
"""
Returns the arrival computed by SUMO for the given entity.
The functions process_tripinfo_file() needs to be called in advance
to initialize the data structures required.
If the entity does not exist or does not have the value, it returns
the default value.
"""
if entity in self.tripinfo:
logger.debug("TRIPINFO for %s", entity)
if "arrival" in self.tripinfo[entity]:
logger.debug("arrival %s", self.tripinfo[entity]["arrival"])
return float(self.tripinfo[entity]["arrival"])
logger.debug("arrival not found.")
return default
elif entity in self.personinfo:
logger.debug("PERSONINFO for %s", entity)
arrival, arrival_found = 0.0, False
for _, stage in self.personinfo[entity]["stages"]:
if "arrival" in stage:
logger.debug("arrival %s", stage["arrival"])
arrival = float(stage["arrival"])
arrival_found = True
if not arrival_found:
logger.debug("arrival not found.")
return default
if arrival <= 0:
logger.debug("ERROR: arrival is %.2f", arrival)
return default
logger.debug("total arrival %.2f", arrival)
return arrival
else:
logger.debug("Entity %s not found.", entity)
return default
def get_global_travel_time(self):
"""
Returns the global travel time computed from SUMO tripinfo data.
The functions process_tripinfo_file() needs to be called in advance
to initialize the data structures required.
"""
gtt = 0
for entity in self.tripinfo:
gtt += self.get_duration(entity, default=0.0)
for entity in self.personinfo:
gtt += self.get_duration(entity, default=0.0)
return gtt
###########################################################################
# ROUTING
@staticmethod
def get_mode_parameters(mode):
"""
        Return the correct TraCI parameters for the requested mode.
See: https://sumo.dlr.de/docs/TraCI/Simulation_Value_Retrieval.html
#command_0x87_find_intermodal_route
Param: mode, String.
Returns: _mode, _ptype, _vtype
"""
if mode == "public":
return "public", "", ""
if mode == "bicycle":
return "bicycle", "", "bicycle"
if mode == "walk":
return "", "pedestrian", ""
return "car", "", mode # (but car is not always necessary, and it may
# creates unusable alternatives)
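    # --- Added sketch (not in the original file): how the returned tuple is
    # meant to feed traci.simulation.findIntermodalRoute. The keyword names
    # modes/pType/vType follow the SUMO docs linked in the docstring above and
    # are an assumption here; no live TraCI connection is used, only unpacking.
    @staticmethod
    def _demo_mode_parameters():
        _mode, _ptype, _vtype = SUMOUtils.get_mode_parameters("walk")
        return {"modes": _mode, "pType": _ptype, "vType": _vtype}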
def is_valid_route(self, mode, route):
"""
Handle findRoute and findIntermodalRoute results.
Params:
mode, String.
route, return value of findRoute or findIntermodalRoute.
"""
if route is None:
# traci failed
return False
_mode, _ptype, _vtype = self.get_mode_parameters(mode)
if not isinstance(route, (list, tuple)):
# only for findRoute
if len(route.edges) >= 2:
return True
elif _mode == "public":
for stage in route:
if stage.line:
return True
elif _mode in ("car", "bicycle"):
for stage in route:
if stage.type == tc.STAGE_DRIVING and len(stage.edges) >= 2:
return True
else:
for stage in route:
if len(stage.edges) >= 2:
return True
return False
@staticmethod
def cost_from_route(route):
"""
Compute the route cost.
Params:
route, return value of findRoute or findIntermodalRoute.
"""
cost = 0.0
for stage in route:
cost += stage.cost
return cost
@staticmethod
def travel_time_from_route(route):
"""
Compute the route estimated travel time.
Params:
route, return value of findRoute or findIntermodalRoute.
"""
ett = 0.0
for stage in route:
ett += stage.estimatedTime
return ett
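# --- Added sketch (not in the original file): cost_from_route and
# travel_time_from_route only need objects exposing .cost and .estimatedTime,
# which TraCI route stages provide; a namedtuple stands in here.
def _demo_route_totals():
    Stage = collections.namedtuple("Stage", ["cost", "estimatedTime"])
    route = [Stage(cost=1.5, estimatedTime=60.0),
             Stage(cost=0.5, estimatedTime=30.0)]
    return (SUMOUtils.cost_from_route(route),         # 2.0
            SUMOUtils.travel_time_from_route(route))  # 90.0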
| 37.018182 | 79 | 0.56205 | 1,296 | 12,216 | 5.218364 | 0.177469 | 0.06018 | 0.031938 | 0.017004 | 0.498447 | 0.446843 | 0.386958 | 0.327665 | 0.301937 | 0.301937 | 0 | 0.0031 | 0.313523 | 12,216 | 329 | 80 | 37.130699 | 0.803363 | 0.201539 | 0 | 0.402985 | 0 | 0 | 0.139669 | 0.007102 | 0 | 0 | 0 | 0 | 0 | 1 | 0.059701 | false | 0 | 0.044776 | 0 | 0.268657 | 0.004975 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec7b47707484b15c2fd0bb69be9ff24859673ff | 5,224 | py | Python | tests/unit/fake_data_root/openstack/var/lib/juju/agents/unit-ceph-osd-0/charm/unit_tests/test_actions_get_availability_zone.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 17 | 2016-04-17T04:00:39.000Z | 2020-05-06T11:20:15.000Z | tests/unit/fake_data_root/openstack/var/lib/juju/agents/unit-ceph-osd-0/charm/unit_tests/test_actions_get_availability_zone.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 111 | 2021-10-01T18:18:17.000Z | 2022-03-29T12:23:20.000Z | tests/unit/fake_data_root/openstack/var/lib/juju/agents/unit-ceph-osd-0/charm/unit_tests/test_actions_get_availability_zone.py | KellenRenshaw/hotsos | e3fc51ab7f8af606a5846a3486a7fda23d761583 | [
"Apache-2.0"
] | 24 | 2016-03-07T09:07:20.000Z | 2020-10-15T13:41:40.000Z | # Copyright 2021 Canonical Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import json
from actions import get_availability_zone
from lib.charms_ceph.utils import CrushLocation
from test_utils import CharmTestCase
TABULATE_OUTPUT = """
+-------------+---------+-------------+
| unit | root | region |
+=============+=========+=============+
| juju-ceph-0 | default | juju-ceph-0 |
+-------------+---------+-------------+
| juju-ceph-1 | default | juju-ceph-1 |
+-------------+---------+-------------+
| juju-ceph-2 | default | juju-ceph-2 |
+-------------+---------+-------------+
"""
AVAILABILITY_ZONES = {
"unit": {"root": "default", "host": "juju-ceph-0"},
"all-units": {
"juju-ceph-0": {"root": "default", "host": "juju-ceph-0"},
"juju-ceph-1": {"root": "default", "host": "juju-ceph-1"},
"juju-ceph-2": {"root": "default", "host": "juju-ceph-2"}
}
}
class GetAvailabilityZoneActionTests(CharmTestCase):
def setUp(self):
super(GetAvailabilityZoneActionTests, self).setUp(
get_availability_zone,
["get_osd_tree", "get_unit_hostname", "tabulate"]
)
self.tabulate.return_value = TABULATE_OUTPUT
self.get_unit_hostname.return_value = "juju-ceph-0"
def test_get_human_readable(self):
"""Test formatting as human readable."""
table = get_availability_zone._get_human_readable(AVAILABILITY_ZONES)
        self.assertEqual(table, TABULATE_OUTPUT)
def test_get_crush_map(self):
"""Test get Crush Map hierarchy from CrushLocation."""
crush_location = CrushLocation(
name="test", identifier="t1", host="test", rack=None, row=None,
datacenter=None, chassis=None, root="default")
crush_map = get_availability_zone._get_crush_map(crush_location)
self.assertDictEqual(crush_map, {"root": "default", "host": "test"})
crush_location = CrushLocation(
name="test", identifier="t1", host="test", rack="AZ",
row="customAZ", datacenter=None, chassis=None, root="default")
crush_map = get_availability_zone._get_crush_map(crush_location)
self.assertDictEqual(crush_map, {"root": "default", "row": "customAZ",
"rack": "AZ", "host": "test"})
def test_get_availability_zones(self):
"""Test function to get information about availability zones."""
self.get_unit_hostname.return_value = "test_1"
self.get_osd_tree.return_value = [
CrushLocation(name="test_1", identifier="t1", host="test_1",
rack="AZ1", row="AZ", datacenter=None,
chassis=None, root="default"),
CrushLocation(name="test_2", identifier="t2", host="test_2",
rack="AZ1", row="AZ", datacenter=None,
chassis=None, root="default"),
CrushLocation(name="test_3", identifier="t3", host="test_3",
rack="AZ2", row="AZ", datacenter=None,
chassis=None, root="default"),
CrushLocation(name="test_4", identifier="t4", host="test_4",
rack="AZ2", row="AZ", datacenter=None,
chassis=None, root="default"),
]
results = get_availability_zone.get_availability_zones()
self.assertDictEqual(results, {
"unit": dict(root="default", row="AZ", rack="AZ1", host="test_1")})
results = get_availability_zone.get_availability_zones(show_all=True)
self.assertDictEqual(results, {
"unit": dict(root="default", row="AZ", rack="AZ1", host="test_1"),
"all-units": {
"test_1": dict(root="default", row="AZ", rack="AZ1",
host="test_1"),
"test_2": dict(root="default", row="AZ", rack="AZ1",
host="test_2"),
"test_3": dict(root="default", row="AZ", rack="AZ2",
host="test_3"),
"test_4": dict(root="default", row="AZ", rack="AZ2",
host="test_4"),
}})
def test_format_availability_zones(self):
"""Test function to formatted availability zones."""
# human readable format
results_table = get_availability_zone.format_availability_zones(
AVAILABILITY_ZONES, True)
self.assertEqual(results_table, TABULATE_OUTPUT)
# json format
results_json = get_availability_zone.format_availability_zones(
AVAILABILITY_ZONES, False)
self.assertDictEqual(json.loads(results_json), AVAILABILITY_ZONES)
| 43.533333 | 79 | 0.582887 | 577 | 5,224 | 5.097054 | 0.240901 | 0.067324 | 0.058143 | 0.044883 | 0.470248 | 0.457327 | 0.375383 | 0.344101 | 0.303978 | 0.268276 | 0 | 0.014612 | 0.253254 | 5,224 | 119 | 80 | 43.89916 | 0.739298 | 0.147779 | 0 | 0.235294 | 0 | 0 | 0.207607 | 0.044148 | 0 | 0 | 0 | 0 | 0.082353 | 1 | 0.058824 | false | 0 | 0.047059 | 0 | 0.117647 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec80887325ea6ac747bc04eeaa3bb10035ce664 | 5,483 | py | Python | see/classifier_fitness.py | emmaline11235/see-segment | df4b8f1524114c92b9fc16a5f751d9f60c0ee2fc | [
"MIT"
] | 2 | 2022-01-10T20:34:50.000Z | 2022-01-14T19:35:00.000Z | see/classifier_fitness.py | chenqili2020/see-segment | f8b9f2376e0b1713e287152bf6797282036d1579 | [
"MIT"
] | 27 | 2020-06-12T13:07:36.000Z | 2020-09-11T17:44:21.000Z | see/classifier_fitness.py | chenqili2020/see-segment | f8b9f2376e0b1713e287152bf6797282036d1579 | [
"MIT"
] | 12 | 2020-09-08T18:34:33.000Z | 2022-01-14T19:35:12.000Z | import numpy as np
from see.base_classes import algorithm
from sklearn.model_selection import cross_val_score
from sklearn.metrics import accuracy_score
class ClassifierFitness(algorithm):
"""Contains functions to return result of fitness function.
and run classifier algorithm.
Attributes
----------
metric : string
The metric to be used to test the classifier.
Methods
-------
evaluate(predictions, targets)
Returns the error/fitness rate of predictions.
pipe_evaluate(data)
Calls the evaluate method within the context of the
pipeline.
pipe(data)
Evaluates the classifier on the dataset as the final stage
of the classifier pipeline.
"""
def __init__(self, paramlist=None, metric="simple"):
"""Generate algorithm params from parameter list."""
super(ClassifierFitness, self).__init__(paramlist)
self.metric = metric
def evaluate(self, predictions, targets):
"""
Returns the error rate/fitness score of predictions.
Parameters
----------
predictions : array-like of shape (n_samples,)
The predicted labels of each item.
targets : array-like of shape (n_samples,)
The target labels to predict.
Returns
-------
The error/fitness rate of predictions.
"""
return 1 - accuracy_score(targets, predictions)
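    # --- Added sketch (not in the original file): evaluate() is simply
    # 1 - accuracy, so three correct predictions out of four give 0.25.
    def _demo_evaluate(self):
        predictions = [0, 1, 1, 0]
        targets = [0, 1, 1, 1]
        return self.evaluate(predictions, targets)  # 0.25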
def pipe_evaluate(self, data):
"""
Determines the fitness value of the attached classifier.
Parameters
----------
data : PipelineClassifyDataset
Returns
-------
fitness : float
The fitness score of the classifier (data.clf) after
trained on the training set and tested on the testing
set.
Notes
-----
This method should be overridden by subclasses.
"""
if data.testing_set is None:
raise ValueError("Testing set cannot be none")
if len(data.testing_set.X) <= 0:
raise ValueError("Testing set must have at least one item")
clf = data.clf
clf.fit(data.training_set.X, data.training_set.y)
predictions = clf.predict(data.testing_set.X)
return self.evaluate(predictions, data.testing_set.y)
def pipe(self, data):
"""
Evaluates the classifier on the dataset as the final stage
of the classifier pipeline.
Parameters
----------
data : PipelineClassifyDataset
Returns
-------
data : PipelineClassifyDataset
Attaches the fitness score to the data object.
Notes
-----
Unless there is good reason to, one should not override this
method.
"""
if data.clf is None:
print(
"ERROR: classifier cannot be None. This must be set prior in the pipeline"
)
data.fitness = self.pipe_evaluate(data)
return data
class CVFitness(ClassifierFitness):
"""Uses the Stratified Cross-Validaiton scheme to measure
the fitness of a classifier algorithm.
Attributes
----------
cv : int
The number of folds to split the dataset.
Methods
-------
set_cv(cv)
Class method that sets the cv class attribute.
    pipe_evaluate(data)
        Returns the average cross-validation error
        of the classifier (data.clf).
Notes
-----
When this is used during the classifier pipeline (i.e. as the
last item of a Workflow), the class attribute cv will be
used to initialize this fitness instance by default. The
default cv class attribute is 5. To change this use
the class method CVFitness#set_cv(cv).
"""
cv = 5
def __init__(self, paramlist=None, cv=None):
super(CVFitness, self).__init__(paramlist=paramlist, metric="CV")
if cv is None:
self.cv = CVFitness.cv
else:
self.cv = cv
def pipe_evaluate(self, data):
"""
Determines the fitness value of the attached classifier.
Parameters
----------
data : PipelineClassifyDataset
        Returns
        -------
        cv_fitness : float
            The average cross-validation error (1 - mean CV accuracy).
        """
if data.training_set is None:
raise ValueError("Training set cannot be none")
if len(data.training_set.X) <= 0:
raise ValueError("Training set must have at least one item")
cv_fitness = cross_val_score(
data.clf, data.training_set.X, data.training_set.y, cv=self.cv
).mean()
cv_fitness = 1 - cv_fitness
print("cv_fitness: ", cv_fitness)
print("type cv_fitness: ", type(cv_fitness))
return cv_fitness
@classmethod
    def set_cv(cls, cv):
"""
Class method that sets the cv class attribute.
Parameters
----------
cv : int
The number of folds to split a dataset.
Side Effects
------------
Sets the class attribute cv. This should be done only once at the beginning.
Instances of this class will use the class cv attribute to determine the
number of splits to use for cross validation.
Returns
-------
None
"""
        if not isinstance(cv, int):
            raise ValueError("cv must be an int")
        cls.cv = cv
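# --- Added sketch (not in the original file): set_cv changes the class-level
# default, so CVFitness instances created afterwards without an explicit cv
# pick it up. Assumes the see algorithm base accepts paramlist=None.
def _demo_cv_default():
    CVFitness.set_cv(10)
    fitness = CVFitness()
    return fitness.cv  # 10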
| 26.877451 | 90 | 0.596024 | 641 | 5,483 | 5.017161 | 0.263651 | 0.030784 | 0.027985 | 0.026119 | 0.36847 | 0.28296 | 0.252488 | 0.18097 | 0.143657 | 0.118781 | 0 | 0.001608 | 0.319351 | 5,483 | 203 | 91 | 27.009852 | 0.860129 | 0.482035 | 0 | 0.039216 | 0 | 0 | 0.119722 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.137255 | false | 0 | 0.078431 | 0 | 0.352941 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cec82a491e48e098ad32e60bf03ec9fd31eb84bc | 696 | py | Python | py/jpy/src/test/python/jpy_obj_test.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | [
"MIT"
] | 55 | 2021-05-11T16:01:59.000Z | 2022-03-30T14:30:33.000Z | py/jpy/src/test/python/jpy_obj_test.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | [
"MIT"
] | 943 | 2021-05-10T14:00:02.000Z | 2022-03-31T21:28:15.000Z | py/jpy/src/test/python/jpy_obj_test.py | devinrsmith/deephaven-core | 3a6930046faf1cd556f62a914ce1cfd7860147b9 | [
"MIT"
] | 29 | 2021-05-10T11:33:16.000Z | 2022-03-30T21:01:54.000Z | import unittest
import jpyutil
jpyutil.init_jvm(jvm_maxmem='32M', jvm_classpath=['target/test-classes'])
import jpy
class TestJavaArrays(unittest.TestCase):
def setUp(self):
self.Fixture = jpy.get_type('org.jpy.fixtures.ConstructionTestFixture')
self.assertIsNotNone(self.Fixture)
def test_large_obj_by_constructor_alloc(self):
# 100 * 1MB
for _ in range(100):
fixture = self.Fixture(1000000) # 1MB
def test_large_obj_by_static_alloc(self):
# 100 * 1MB
for _ in range(100):
fixture = self.Fixture.viaStatic(1000000) # 1MB
if __name__ == '__main__':
print('\nRunning ' + __file__)
unittest.main()
| 24.857143 | 79 | 0.668103 | 84 | 696 | 5.202381 | 0.52381 | 0.100687 | 0.05492 | 0.06865 | 0.28833 | 0.210526 | 0.210526 | 0.210526 | 0.210526 | 0.210526 | 0 | 0.05915 | 0.222701 | 696 | 27 | 80 | 25.777778 | 0.748614 | 0.038793 | 0 | 0.117647 | 0 | 0 | 0.120482 | 0.060241 | 0 | 0 | 0 | 0 | 0.058824 | 1 | 0.176471 | false | 0 | 0.176471 | 0 | 0.411765 | 0.058824 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cecb29da119a7171119d53a5ce1d7ff348e1a70a | 6,225 | py | Python | dags/experimental_results_calculation.py | mboverell/docker-airflow | 2712de5fb907a072af9da767e4f579634387205e | [
"Apache-2.0"
] | null | null | null | dags/experimental_results_calculation.py | mboverell/docker-airflow | 2712de5fb907a072af9da767e4f579634387205e | [
"Apache-2.0"
] | null | null | null | dags/experimental_results_calculation.py | mboverell/docker-airflow | 2712de5fb907a072af9da767e4f579634387205e | [
"Apache-2.0"
] | null | null | null | import statsd
from airflow import DAG
from airflow.hooks import PostgresHook
from airflow.operators.python_operator import PythonOperator
from airflow.operators.dummy_operator import DummyOperator
# from airflow.operators.sensors import ExternalTaskSensor
from datetime import datetime, timedelta
from dateutil import parser
from experimental_platform_modules import result_calculator
import os
import uuid
RESULTS_METADATA_TABLE = 'ab_platform.results_run'
def _create_results_run_table(conn_id):
pg_hook = PostgresHook(conn_id)
query = '''
CREATE TABLE IF NOT EXISTS {} (
run_id VARCHAR(36) ENCODE ZSTD distkey,
status VARCHAR(128) ENCODE ZSTD,
intermediate_results_date TIMESTAMP,
createdat TIMESTAMP DEFAULT sysdate
)
COMPOUND SORTKEY(createdat)
;
'''.format(RESULTS_METADATA_TABLE)
pg_hook.run(query)
def _callback(state, ctx):
task_instance = ctx['task_instance']
conn_id = 'analytics_redshift'
pg_hook = PostgresHook(conn_id)
intermediate_results_run_date = task_instance.xcom_pull(
key='intermediate_results_run_date'
)
run_id = uuid.uuid4()
_create_results_run_table(conn_id)
query = '''
INSERT INTO {} (run_id, status, intermediate_results_date) VALUES
('{}', '{}', '{}'::TIMESTAMP)
'''.format(RESULTS_METADATA_TABLE, run_id, state, intermediate_results_run_date.isoformat())
pg_hook.run(query)
conf = ctx['conf']
if conf.getboolean('scheduler', 'statsd_on'):
client = statsd.StatsClient(
host=conf.get('scheduler', 'statsd_host'),
port=conf.get('scheduler', 'statsd_port'),
prefix=conf.get('scheduler', 'statsd_prefix'),
)
client.incr('results_dag.%s' % state, 1)
def success_callback(ctx):
_callback('success', ctx)
def failure_callback(ctx):
_callback('failure', ctx)
def get_date_to_calculate(ts, **kwargs):
# Get the last days worth of stuff
# Use this instead of the provided 'ds' so we can do some date operations
task_instance = kwargs['task_instance']
dt = parser.parse(ts)
yesterday = dt.date() - timedelta(days=1)
task_instance.xcom_push(
key='intermediate_results_run_date', value=yesterday)
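# --- Added sketch (not in the original file): the timestamp-to-date logic used
# by get_date_to_calculate, shown without the Airflow context machinery.
def _demo_yesterday_from_ts():
    dt = parser.parse("2020-06-26T17:00:00")
    return dt.date() - timedelta(days=1)  # datetime.date(2020, 6, 25)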
def get_active_experiment_and_population_map(analytics_conn_id, ts, **kwargs):
task_instance = kwargs['task_instance']
yesterday = task_instance.xcom_pull(
key='intermediate_results_run_date')
return result_calculator.get_active_experiment_and_population_map(
analytics_conn_id, yesterday)
def create_intermediate_results_table(frontend_conn_id, ts, **kwargs):
result_calculator.create_intermediate_results_table(frontend_conn_id)
def calculate_intermediate_results(analytics_conn_id, ts, **kwargs):
task_instance = kwargs['task_instance']
yesterday = task_instance.xcom_pull(
key='intermediate_results_run_date')
experiment_to_population_map = task_instance.xcom_pull(
task_ids='get_active_experiment_and_population_map'
)
return result_calculator.calculate_intermediate_result_for_day(analytics_conn_id, yesterday, experiment_to_population_map, timeout=True)
def insert_intermediate_records(frontend_conn_id, ts, **kwargs):
task_instance = kwargs['task_instance']
records = task_instance.xcom_pull(
task_ids='calculate_intermediate_results'
)
result_calculator.insert_intermediate_records(frontend_conn_id, records)
def calculate_results(frontend_conn_id, ts, **kwargs):
result_calculator.calculate_results(frontend_conn_id)
print("Done writing results to RDS")
# Default settings applied to all tasks
default_args = {
'owner': 'airflow',
'depends_on_past': False,
'email_on_failure': False,
'email_on_retry': False,
'retries': 2,
'retry_delay': timedelta(minutes=5)
}
default_task_kwargs = {
'analytics_conn_id': 'analytics_redshift',
'frontend_conn_id': 'ab_platform_frontend',
}
with DAG('experimental_results_calculator',
start_date=datetime(2020, 6, 25, 17), # Starts at 5pm PST
max_active_runs=1,
catchup=False,
schedule_interval='@daily',
default_args=default_args,
on_failure_callback=failure_callback,
on_success_callback=success_callback,
) as dag:
# start_task = ExternalTaskSensor(
# task_id="start",
# external_dag_id="experiment_population_creation"
# )
start_task = DummyOperator(
task_id='start'
)
get_date_to_calculate_task = PythonOperator(
task_id='get_date_to_calculate',
python_callable=get_date_to_calculate,
op_kwargs=default_task_kwargs,
provide_context=True
)
create_intermediate_results_table_task = PythonOperator(
task_id='create_intermediate_results_table',
python_callable=create_intermediate_results_table,
op_kwargs=default_task_kwargs,
provide_context=True
)
get_active_experiment_and_population_map_task = PythonOperator(
task_id='get_active_experiment_and_population_map',
python_callable=get_active_experiment_and_population_map,
op_kwargs=default_task_kwargs,
provide_context=True
)
calculate_intermediate_results_task = PythonOperator(
task_id='calculate_intermediate_results',
python_callable=calculate_intermediate_results,
op_kwargs=default_task_kwargs,
provide_context=True
)
insert_intermediate_records_task = PythonOperator(
task_id='insert_intermediate_results',
python_callable=insert_intermediate_records,
op_kwargs=default_task_kwargs,
provide_context=True
)
calculate_results_task = PythonOperator(
task_id='calculate_results',
python_callable=calculate_results,
op_kwargs=default_task_kwargs,
provide_context=True,
)
start_task >> [get_date_to_calculate_task,
create_intermediate_results_table_task] >> \
get_active_experiment_and_population_map_task >> \
calculate_intermediate_results_task >> insert_intermediate_records_task >> \
calculate_results_task
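# For reference, the chain above expands to the following task graph (comment sketch):
#   start >> [get_date_to_calculate, create_intermediate_results_table]
#         >> get_active_experiment_and_population_map
#         >> calculate_intermediate_results
#         >> insert_intermediate_results
#         >> calculate_results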
| 31.439394 | 140 | 0.72755 | 733 | 6,225 | 5.75307 | 0.230559 | 0.094617 | 0.029879 | 0.036519 | 0.377282 | 0.304245 | 0.21769 | 0.163623 | 0.14323 | 0.052644 | 0 | 0.004165 | 0.19004 | 6,225 | 197 | 141 | 31.598985 | 0.832209 | 0.052369 | 0 | 0.171233 | 0 | 0 | 0.196808 | 0.075055 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068493 | false | 0 | 0.068493 | 0 | 0.150685 | 0.006849 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cecb49aea49398c231413ffc466a07b9326fb51a | 12,759 | py | Python | scripts/creacion_items_actividades.py | SintecDigital/Optimizador_Red_Distribucion | 5dbe0744ad147b893f5b46e689307f4fa706d7ae | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | scripts/creacion_items_actividades.py | SintecDigital/Optimizador_Red_Distribucion | 5dbe0744ad147b893f5b46e689307f4fa706d7ae | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | scripts/creacion_items_actividades.py | SintecDigital/Optimizador_Red_Distribucion | 5dbe0744ad147b893f5b46e689307f4fa706d7ae | [
"Apache-2.0",
"BSD-3-Clause"
] | null | null | null | import pandas as pd
import numpy as np
def build_items(master_red: pd.DataFrame, master_ubicaciones: pd.DataFrame, master_demanda, master_producto):
"""
Crea un df de items con 5 columnas donde se especifica tiempo, producto, nodo, tipo, y valor. Estamos
ignorando material importado, ya que toca hacer cambios a la tabla de ubicación para agregar a CGNA_PLANT como
CGNA_PLANT_DISTR
:param master_producto:
:param master_demanda:
:param master_ubicaciones:
:param master_red:
:return:
"""
# De hecho, se debe crear primero la sección de restricciones estáticas y dinámicas, ya que no dependen de producto.
# Delimitar cantidad de tiempo
MONTHS = sorted(master_demanda['fecha'].unique())
    # All unique nodes in the network
nodos = pd.concat([master_red.loc[:, 'id_locacion_origen'], master_red.loc[:, 'id_locacion_destino']],
ignore_index=True).unique()
    # Build the final DF with the structure defined in the documentation: `tiempo`, `producto`, `nodo`, `tipo`, `valor`
item_df = pd.DataFrame(columns=['tiempo', 'producto', 'nodo', 'tipo', 'valor'])
for t in MONTHS:
        # DYNAMIC AND STATIC CONSTRAINTS: extract both and reshape them into the `item_df` format
nodos_restr = master_ubicaciones.loc[:, ['id_locacion', 'capacidad_din', 'capacidad_est']]
nodos_restr = pd.melt(nodos_restr, id_vars=['id_locacion'], value_vars=['capacidad_din', 'capacidad_est'])
nodos_restr.columns = item_df.columns[-3:]
        # Drop the rows where `nodos_restr['valor']` is NaN
nodos_restr = nodos_restr.dropna(subset=['valor'])
        # Add time `t` and product `NaN` so these constraints can be concatenated into `item_df`
nodos_restr['tiempo'] = t
nodos_restr['producto'] = np.nan
        # PRODUCTS: select the products (families) from the demand master for the month in question
PRODUCTS = master_demanda.loc[master_demanda['fecha'] == t, 'familia'].unique()
for k in PRODUCTS:
            # PRODUCTION: look up the product's origin site and its max production in the product master.
            # There should be exactly ONE origin
nodos_prod = master_producto.loc[master_producto['familia'] == k, ['familia',
'ubicacion_producto', 'produccion_max']]
            # Rename and add the type and time columns
nodos_prod.columns = ['producto', 'nodo', 'valor']
nodos_prod['tipo'] = 'produccion'
nodos_prod['tiempo'] = t
            # DEMAND: find every client for product k at time t. Clients are treated as cities
clientes_demanda = master_demanda.loc[(master_demanda['fecha'] == t) & (master_demanda['familia'] == k),
['id_ciudad', 'cantidad']]
            # Rename and create the remaining columns so the frame matches the `item_df` structure
clientes_demanda.columns = ['nodo', 'valor']
clientes_demanda['tiempo'] = t
clientes_demanda['producto'] = k
clientes_demanda['tipo'] = 'demanda'
            # FLOW: the remaining nodes are flow nodes. They are the set difference between all network
            # nodes, the production node, and the demand node. Remember to drop CLIENTE from the unique
            # nodes, since in ITEMS it is already represented by `clientes_demanda`
nodos_flujo = list(set(nodos) - ({'CLIENTE'} | set(nodos_prod['nodo'])))
nodos_flujo = pd.DataFrame(data={'tiempo': t, 'producto': k, 'nodo': nodos_flujo,
'tipo': 'flujo', 'valor': 0})
            # ITEMS: concatenate the per-product sections into `item_df`
item_df = pd.concat([item_df, nodos_prod, nodos_flujo, clientes_demanda], ignore_index=True)
        # ITEMS: concatenate the static and dynamic constraints into `item_df`
item_df = pd.concat([item_df, nodos_restr], ignore_index=True)
return item_df
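# A minimal, hypothetical usage sketch (the tiny frames below only illustrate the column
# schemas assumed above; real inputs come from the master files):
#
#   red = pd.DataFrame({'id_locacion_origen': ['PLANTA'], 'id_locacion_destino': ['CLIENTE']})
#   ubicaciones = pd.DataFrame({'id_locacion': ['PLANTA'], 'capacidad_din': [100.0],
#                               'capacidad_est': [50.0], 'costo_almacenamiento': [2.0]})
#   demanda = pd.DataFrame({'fecha': [1], 'familia': ['F1'], 'id_ciudad': ['BOG'], 'cantidad': [10]})
#   producto = pd.DataFrame({'familia': ['F1'], 'ubicacion_producto': ['PLANTA'], 'produccion_max': [200]})
#   items = build_items(red, ubicaciones, demanda, producto)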
def build_activities(master_red, master_tarifario, master_demanda, master_ubicaciones):
"""
Construye la tabla de Actividades que contiene 6 columnas: 'tiempo', 'producto', 'transporte', 'origen', 'destino', 'costo'.
Esos origenes y destinos pueden ser id_locaciones para comunicaciones entre nodos de la infraestructura de Esenttia,
o pueden ser id_ciudades para las entregas a clientes. En esta tabla se evidencian todas las actividades de distribución
y almacenamiento de la red, así como sus costos
:param master_ubicaciones:
:param master_demanda:
:param master_red:
:param master_tarifario:
:return:
"""
# Delimitar cuantos meses hay para t
MONTHS = sorted(master_demanda['fecha'].unique())
# Abrir red infraestructra, seleccionar columnas relevantes ['origen', 'destino']
master_red = master_red.loc[:, ['id_locacion_origen', 'id_locacion_destino']]
# Abrir master tarifario, seleccionar columnas relevantes
master_tarifario = master_tarifario[['id_ciudad_origen', 'id_ciudad_destino', 'capacidad', 'costo']]
# Crear DF final con estructura definida en documentación
actividad_df = pd.DataFrame(columns=['tiempo', 'producto', 'transporte', 'origen', 'destino', 'costo'])
for t in MONTHS:
        # PRODUCTS: select the products (families) from the demand master for month `t`
PRODUCTS = master_demanda.loc[master_demanda['fecha'] == t, 'familia'].unique()
for k in PRODUCTS:
            # STORAGE: create a storage activity from the nodes whose capacidad_est in the
            # locations master is non-null (i.e. not NaN)
nodos_alm = master_ubicaciones.loc[~master_ubicaciones['capacidad_est'].isna(),
['id_locacion', 'costo_almacenamiento']]
            # To distinguish storage (movement along the time dimension) from the other activities, append 'ALMACENAMIENTO'
nodos_alm['id_locacion'] = nodos_alm['id_locacion'] + '_ALMACENAMIENTO'
            # Rename the columns
nodos_alm.columns = ['origen', 'costo']
            # Add the destination column (a copy of the origin column) plus product, time, and transport
nodos_alm['destino'] = nodos_alm['origen'].copy()
nodos_alm['tiempo'] = t
nodos_alm['producto'] = k
nodos_alm['transporte'] = np.nan
            # TRANSPORT: replace CLIENTE in master_red with `id_ciudad` from `master_demanda`. We build a
            # demand DF and join it with master_red according to the sites that can supply CLIENTE
clientes_demanda = master_demanda.loc[(master_demanda['fecha'] == t) & (master_demanda['familia'] == k),
'id_ciudad'].to_frame()
clientes_demanda['key'] = 'CLIENTE'
            # Split master_red into arcs whose destination is CLIENTE and the rest
master_red_cliente = master_red.loc[master_red['id_locacion_destino'] == 'CLIENTE', :]
master_red_no_cliente = master_red.loc[~(master_red['id_locacion_destino'] == 'CLIENTE'), :]
            # Join `master_red_cliente` with `clientes_demanda`
master_red_cliente = master_red_cliente.merge(clientes_demanda, left_on=['id_locacion_destino'],
right_on=['key'], how='inner')
master_red_cliente = master_red_cliente.drop(columns=['id_locacion_destino', 'key'])
master_red_cliente = master_red_cliente.rename(columns={'id_ciudad': 'id_locacion_destino'})
            # Reattach master_red_cliente to master_red
master_red_clean = pd.concat([master_red_no_cliente, master_red_cliente], ignore_index=True)
            # Join the tariff master with the network master.
            # Inner join: if no vehicle covers a route, that arc cannot exist in `master_red`.
nodos_trans = master_red_clean.merge(master_tarifario,
left_on=['id_locacion_origen', 'id_locacion_destino'],
right_on=['id_ciudad_origen', 'id_ciudad_destino'], how='inner')
            # Rename specific columns so they follow the `actividad_df` format
nodos_trans = nodos_trans.rename(columns={'id_locacion_origen': 'origen',
'id_locacion_destino': 'destino',
'capacidad': 'transporte'})
            # Keep the relevant columns
nodos_trans = nodos_trans.loc[:, ['transporte', 'origen', 'destino', 'costo']]
            # Create the remaining columns to match the `actividad_df` structure
nodos_trans['tiempo'] = t
nodos_trans['producto'] = k
            # ACTIVITIES: concatenate the transport and storage nodes into `actividad_df`
actividad_df = pd.concat([actividad_df, nodos_trans, nodos_alm], ignore_index=True)
return actividad_df
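# Continuing the hypothetical sketch above, `tarifario` needs the columns
# ['id_ciudad_origen', 'id_ciudad_destino', 'capacidad', 'costo']:
#
#   tarifario = pd.DataFrame({'id_ciudad_origen': ['PLANTA'], 'id_ciudad_destino': ['BOG'],
#                             'capacidad': [30.0], 'costo': [5.0]})
#   actividades = build_activities(red, tarifario, demanda, ubicaciones)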
def matriz_coef(items_df: pd.DataFrame, actividades_df: pd.DataFrame):
"""
v.2
Función optimizada para crear la matriz de coeficientes con base a las actividades (columnas) e ítems (filas)
ingresadas. Explota la velocidad de procesamiento de pd.merge() para realizar el cruce de condiciones por escenario
o flujo.
Retorna un np.array de coeficientes, siendo los indices `items_df`, y las columnas `actividades_df`.
:param items_df: pd.DataFrame con los items del problema
:param actividades_df: pd.DataFrame con las actividades (flujos) del problema
:return: np.array con los coeficientes de entrada y salida de las actividades, en relación a las restricciones
"""
coef_mat = np.zeros((items_df.shape[0], actividades_df.shape[0]))
# Crear DFs para manejar tema de mutabilidad y columnas de indice de items y actividades
actividades_df = actividades_df.copy()
items_df = items_df.copy()
actividades_df['idy'] = actividades_df.index
items_df['idx'] = items_df.index
    # There are six groups of conditions, hence 6 JOINs. CONDITIONS:
    # FLOW INPUT. Being an INNER join, there will be no null values
cond1 = pd.merge(items_df, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'origen'], how='inner')
cond1['valor_mat'] = cond1['transporte'].copy()
    # FLOW OUTPUT
cond2 = pd.merge(items_df, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'destino'], how='inner')
cond2['valor_mat'] = -cond2['transporte'].copy()
    # INPUT INTO STORAGE
cond3_items = items_df.copy()
cond3_items.loc[:, 'nodo'] = cond3_items.loc[:, 'nodo'] + '_ALMACENAMIENTO'
cond3 = pd.merge(cond3_items, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'origen'], how='inner')
cond3['valor_mat'] = 1
del cond3_items
    # OUTPUT FROM STORAGE
cond4_items = items_df.copy()
cond4_items.loc[:, 'tiempo'] -= 1
cond4_items.loc[:, 'nodo'] = cond4_items.loc[:, 'nodo'] + '_ALMACENAMIENTO'
cond4 = pd.merge(cond4_items, actividades_df, left_on=['tiempo', 'producto', 'nodo'],
right_on=['tiempo', 'producto', 'destino'], how='inner')
cond4['valor_mat'] = -1
del cond4_items
    # MAXIMUM STORAGE (STATIC CAPACITY)
cond5_items = items_df.loc[items_df['tipo'] == 'capacidad_est'].copy()
cond5_items.loc[:, 'nodo'] = cond5_items.loc[:, 'nodo'] + '_ALMACENAMIENTO'
cond5 = pd.merge(cond5_items, actividades_df, left_on=['tiempo', 'nodo'], right_on=['tiempo', 'origen'],
how='inner')
cond5['valor_mat'] = 1
del cond5_items
    # MAXIMUM FLOW (DYNAMIC CAPACITY)
cond6_items = items_df.loc[items_df['tipo'] == 'capacidad_din']
cond6 = pd.merge(cond6_items, actividades_df, left_on=['tiempo', 'nodo'], right_on=['tiempo', 'destino'],
how='inner')
cond6['valor_mat'] = cond6['transporte'].copy()
del cond6_items
condiciones = pd.concat([cond1, cond2, cond3, cond4, cond5, cond6], ignore_index=True)
    # Build the coefficient matrix from the conditions table
for index, condicion in condiciones.iterrows():
coef_mat[condicion['idx'], condicion['idy']] = condicion['valor_mat']
return coef_mat
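# Note: the iterrows() loop above can be replaced by NumPy fancy indexing, which is usually
# much faster for large condition tables and is behaviour-equivalent as long as each
# (idx, idy) pair is unique:
#
#   coef_mat[condiciones['idx'].to_numpy(), condiciones['idy'].to_numpy()] = \
#       condiciones['valor_mat'].to_numpy()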
| 53.835443 | 128 | 0.648405 | 1,563 | 12,759 | 5.102367 | 0.202815 | 0.034984 | 0.020063 | 0.014295 | 0.24489 | 0.209655 | 0.137429 | 0.137429 | 0.128652 | 0.128652 | 0 | 0.005405 | 0.245944 | 12,759 | 236 | 129 | 54.063559 | 0.823511 | 0.371032 | 0 | 0.145455 | 0 | 0 | 0.18154 | 0 | 0 | 0 | 0 | 0.004237 | 0 | 1 | 0.027273 | false | 0 | 0.018182 | 0 | 0.072727 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cecd13141e3cc698f8d0c254a829013ed111a1a6 | 2,907 | py | Python | app.py | abarahonar/brilab_back | 0f057d7ada6553dd7ad3ac8e837e3b341c4371e9 | [
"MIT"
] | null | null | null | app.py | abarahonar/brilab_back | 0f057d7ada6553dd7ad3ac8e837e3b341c4371e9 | [
"MIT"
] | null | null | null | app.py | abarahonar/brilab_back | 0f057d7ada6553dd7ad3ac8e837e3b341c4371e9 | [
"MIT"
] | null | null | null | # TODO page in search
from flask import Flask, request, jsonify
from json import dumps
from psycopg2 import connect
from os import getenv
from functools import cache
from elasticsearch import Elasticsearch
from flask_cors import CORS
PAGE_SIZE = 10
app = Flask(__name__)
cors = CORS(app, resources={r"/api/*": {"origins": "*"}})
conn = connect(dbname=getenv("DBNAME"), user=getenv("DBUSER"),
password=getenv("DBPASS"), host=getenv("DBHOST"))
es = Elasticsearch([{"host": getenv("ESHOST"), "port": int(getenv("ESPORT"))}])
@cache
def get_sectors():
cur = conn.cursor()
cur.execute("SELECT nombre from sectores;")
data = cur.fetchall()
cur.close()
return data
@cache
def get_regions():
cur = conn.cursor()
cur.execute("SELECT nombre FROM regiones;")
data = cur.fetchall()
cur.close()
return data
def process_search(text: str, page: int):
res = es.search(index="files", body={
"from": PAGE_SIZE * (page - 1),
"size": PAGE_SIZE,
"query": {
"match": {
"attachment.content": {
"query": text
}
}
},
"fields": [
"filename"
],
"_source": False
})
filenames = []
print(res)
for hit in res["hits"]["hits"]:
filenames.append(hit["fields"]["filename"][0])
if not filenames:
return {}
filenames = tuple(filenames)
cur = conn.cursor()
cur.execute("SELECT filename, name, region, sector, year, content FROM conflictos " +
"WHERE filename IN %s;", (filenames, ))
data = dumps(cur.fetchall())
cur.close()
return data
def process_get(data: dict):
offset = PAGE_SIZE * (data.get("page", 1) - 1)
from_year = data.get("from", 1900)
till_year = data.get("until", 2100)
sector = tuple(data.get("sector", get_sectors()))
region = tuple(data.get("region", get_regions()))
cur = conn.cursor()
cur.execute(
"SELECT filename, name, region, sector, year, content FROM conflictos " +
"WHERE year BETWEEN %s AND %s AND region IN %s AND sector IN %s " +
"OFFSET %s ROWS FETCH FIRST %s ROWS ONLY;",
(from_year, till_year, region, sector, offset, PAGE_SIZE)
)
data = dumps(cur.fetchall())
cur.close()
return data
@app.route("/api/search", methods=["GET"])
def search():
text = request.args.get("text")
page = request.args.get("page", type=int)
page = 1 if page is None else page
payload = process_search(text, page)
return payload
@app.route("/api/get", methods=["POST"])
def get():
print("Entre")
data = request.json
payload = process_get(data)
return payload
@app.route("/api/filters", methods=["GET"])
def filters():
regions = get_regions()
sectors = get_sectors()
return jsonify({"regiones": regions, "sectores": sectors})
| 26.669725 | 89 | 0.601651 | 356 | 2,907 | 4.842697 | 0.308989 | 0.023202 | 0.030162 | 0.037123 | 0.274942 | 0.2471 | 0.2471 | 0.225638 | 0.096288 | 0.096288 | 0 | 0.007323 | 0.248366 | 2,907 | 108 | 90 | 26.916667 | 0.781693 | 0.006536 | 0 | 0.222222 | 0 | 0 | 0.19404 | 0 | 0 | 0 | 0 | 0.009259 | 0 | 1 | 0.077778 | false | 0.011111 | 0.077778 | 0 | 0.244444 | 0.022222 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ced04082136716def374a7bb6adbf5ab73983ab6 | 926 | py | Python | appinit_backend/app/lib/jobs/notify.py | lost-osiris/webplatform-backend | 8b1b7c94dbc5314450fbe75b8ca4625d39608d4a | [
"MIT"
] | null | null | null | appinit_backend/app/lib/jobs/notify.py | lost-osiris/webplatform-backend | 8b1b7c94dbc5314450fbe75b8ca4625d39608d4a | [
"MIT"
] | null | null | null | appinit_backend/app/lib/jobs/notify.py | lost-osiris/webplatform-backend | 8b1b7c94dbc5314450fbe75b8ca4625d39608d4a | [
"MIT"
] | null | null | null | from lib.imports.default import *
import lib.notifications.email as email_notifications
def call(action, job=None):
manager = Manager()
users = set()
title = None
body = None
if action == "stopped":
title = "Jobs-Scheduler has stopped"
# groups.add("jobs.scheduler.stopped")
users.add("mowens")
body = "All runners have finished their remaining jobs, and the scheduler has stopped. The container is safe for stopping or restarting."
elif job is not None:
jid = None
if "_id" in job:
jid = job["_id"]
else:
jid = job["id"]
users.add(job["uid"])
title = """Job %s has %s""" % (jid, action)
body = """Job <a href="https://%s/jobs/%s/results/">%s</a> running '%s' has %s.""" % (manager.get_hostname(), jid, jid, job["api"], action)
else:
return None
email_notifications.call("Job Runner", title, users, body, job=False) | 34.296296 | 145 | 0.614471 | 127 | 926 | 4.440945 | 0.472441 | 0.031915 | 0.067376 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.237581 | 926 | 27 | 146 | 34.296296 | 0.798867 | 0.038877 | 0 | 0.086957 | 0 | 0.086957 | 0.307087 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.043478 | false | 0 | 0.086957 | 0 | 0.173913 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ced0bd963d0d3a0ba5187ad08a33c6c3946a9ac3 | 308 | py | Python | q2/environments/maker.py | tdb-alcorn/q2 | ca03e419b1c62660ca65981ff790b70fe979c51f | [
"MIT"
] | 3 | 2018-07-03T06:14:58.000Z | 2018-07-10T22:56:21.000Z | q2/environments/maker.py | tdb-alcorn/q2 | ca03e419b1c62660ca65981ff790b70fe979c51f | [
"MIT"
] | 10 | 2018-07-02T09:02:44.000Z | 2022-02-09T23:45:31.000Z | q2/environments/maker.py | tdb-alcorn/q2 | ca03e419b1c62660ca65981ff790b70fe979c51f | [
"MIT"
] | null | null | null | from typing import NamedTuple, Callable, List, Any
from .environment import Environment, NullEnv
Maker = NamedTuple('Maker', [
('name', str),
('make', Callable[[Any], Environment]),
('states', List[str]),
])
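# A minimal sketch of a concrete Maker for a hypothetical environment class
# (`MyEnv` is illustrative, not part of this package):
#
#   my_maker = Maker(name='my-env',
#                    make=lambda *args, **kwargs: MyEnv(),
#                    states=['idle', 'running'])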
NullMaker = Maker(name='null', make=lambda *args, **kwargs: NullEnv(), states=list()) | 28 | 85 | 0.665584 | 35 | 308 | 5.857143 | 0.542857 | 0.087805 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.149351 | 308 | 11 | 85 | 28 | 0.782443 | 0 | 0 | 0 | 0 | 0 | 0.074434 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.25 | 0 | 0.25 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ced23ed86a06bdc1b89bc63274a029695c6fd2ca | 3,755 | py | Python | DOM/base/object.py | AlexDev-py/CubIC | 7932d2789c0b45ebc9ce631d21f6bed99d3a3a51 | [
"MIT"
] | 2 | 2022-02-05T13:06:28.000Z | 2022-02-09T07:07:11.000Z | DOM/base/object.py | AlexDev-py/CubIC | 7932d2789c0b45ebc9ce631d21f6bed99d3a3a51 | [
"MIT"
] | null | null | null | DOM/base/object.py | AlexDev-py/CubIC | 7932d2789c0b45ebc9ce631d21f6bed99d3a3a51 | [
"MIT"
] | 2 | 2022-01-24T13:42:45.000Z | 2022-02-08T09:18:58.000Z | """
Описание базового объекта.
"""
from __future__ import annotations
import typing as ty
from abc import ABC, abstractmethod
from loguru import logger
if ty.TYPE_CHECKING:
import pygame as pg
from .group import Group
FIELDS = [
"_x",
"_y",
"_width",
"_height",
"_text",
"_sprite",
"_padding",
"_font",
"_border_width",
"_color",
"_background",
"_anchor",
"_border_color",
"_inactive_background",
"_active_background",
]  # Attributes an object may contain
class Object(ABC):
def __init__(
self,
parent: Group | None,
name: str = None,
*,
        hidden: bool = False,
    ):
        """
        Base object.
        :param parent: The group this object belongs to.
        :param name: Name of the object.
        :param hidden: Whether the object starts hidden.
        """
        self._name = name
        self.__parent = parent
        self._hidden = hidden
        self._enabled = True  # Whether the object is active
        logger.opt(colors=True).trace(f"Initializing {self}")
        # Add this object to its group.
if parent is not None:
parent.add(self)
def show(self) -> None:
"""
Снимает скрытие с объекта.
"""
self._hidden = False
logger.opt(colors=True).trace(f"show {self}")
def hide(self) -> None:
"""
Скрывает объект.
"""
self._hidden = True
logger.opt(colors=True).trace(f"hide {self}")
@property
    def hidden(self) -> bool:
return self._hidden
def enable(self) -> None:
"""
Включает объект.
"""
self._enabled = True
logger.opt(colors=True).trace(f"enable {self}")
def disable(self) -> None:
"""
Выключает объект.
"""
self._enabled = False
logger.opt(colors=True).trace(f"disable {self}")
@property
    def enabled(self) -> bool:
return self._enabled
@property
def name(self) -> str | None:
return self._name
@name.setter
def name(self, value: str | None):
logger.opt(colors=True).trace(f"{self} -> <c>{value}</c>")
self._name = value
@property
def parent(self) -> Group | None:
return self.__parent
@parent.setter
def parent(self, parent: Group | None):
self.__parent = parent
@abstractmethod
def update(self, *args, **kwargs) -> None:
"""
Метод должен быть определен в классе-наследнике.
Обновляет объект.
"""
@abstractmethod
def handle_event(self, event: pg.event.Event) -> None:
"""
Метод должен быть определен в классе-наследнике.
Обрабатывает событие.
:param event: Событие.
"""
@abstractmethod
def draw(self, surface: pg.Surface) -> None:
"""
Метод должен быть определен в классе-наследнике.
Отображает объект.
:param surface: Поверхность.
"""
def __setattr__(self, key: str, value: ...) -> None:
"""
Изменение атрибута объекта.
:param key: Название атрибута.
:param value: Новое значение.
"""
# Если это 1 из атрибутов объекта
if key in self.__dict__ and key in FIELDS:
logger.opt(colors=True).trace(
"{self} <le>{key}</le>=<y>{value}</y>", self=self, key=key, value=value
)
super(Object, self).__setattr__(key, value)
(self.parent or self).update()
return
super(Object, self).__setattr__(key, value)
def __repr__(self):
return f"<y>{self.__class__.__name__}</y> - <c>{self.name}</c>"
| 23.765823 | 87 | 0.559787 | 407 | 3,755 | 4.982801 | 0.309582 | 0.031065 | 0.051775 | 0.065582 | 0.213511 | 0.178994 | 0.124753 | 0.066568 | 0 | 0 | 0 | 0.00039 | 0.317443 | 3,755 | 157 | 88 | 23.917197 | 0.79087 | 0.191744 | 0 | 0.149425 | 0 | 0.011494 | 0.114077 | 0.022303 | 0 | 0 | 0 | 0 | 0 | 1 | 0.183908 | false | 0 | 0.068966 | 0.057471 | 0.333333 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ced627649b731e703817a3ed29c13f93b1156100 | 907 | py | Python | chap7/heap_sort_key.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | chap7/heap_sort_key.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | chap7/heap_sort_key.py | marble-git/python-laoqi | 74c4bb5459113e54ce64443e5da5a9c6a3052d6a | [
"MIT"
] | null | null | null | #coding:utf-8
'''
filename:heap_sort_key.py
chap:7
subject:6-1
conditions:books_price,heapq,operator.itemgetter
solution:fun heapsort
'''
import heapq
import operator
from pprint import pprint
books_price = [
{'book':'Python', 'price':69.99},
{'book':'Java', 'price':59.99},
{'book':'Rust', 'price':79.99},
{'book':'JavaScript', 'price':49.99},
{'book':'C++','price':89.99},
{'book':'Ruby', 'price':39.99},
{'book':'hadoop', 'price':99.99},
{'book':'HTML5', 'price':29.99},
]
def heapsort(iterable, /, *, key=None, reverse=False):
    sortfunc = heapq.nlargest if reverse else heapq.nsmallest
    return sortfunc(len(iterable), iterable, key=key)
if __name__ == '__main__':
by_book = heapsort(books_price,key=operator.itemgetter('book'))
print('print:',by_book)
pprint(by_book,indent=4,depth=2)
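    # Extra illustrative calls: ascending and descending sorts by price.
    by_price = heapsort(books_price, key=operator.itemgetter('price'))
    pprint(by_price, indent=4)
    by_price_desc = heapsort(books_price, key=operator.itemgetter('price'), reverse=True)
    pprint(by_price_desc, indent=4)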
| 24.513514 | 67 | 0.605292 | 117 | 907 | 4.555556 | 0.529915 | 0.078799 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.053793 | 0.200662 | 907 | 36 | 68 | 25.194444 | 0.681379 | 0.145535 | 0 | 0 | 0 | 0 | 0.176707 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.05 | false | 0 | 0.15 | 0 | 0.25 | 0.15 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
ced998f9ed290ae30a6a3a4fbe74e777957b0e18 | 4,055 | py | Python | saleor/site/models.py | glosoftgroup/restaurant | 5b10a8f5199103e5bee01b45952c9638e63f28af | [
"BSD-3-Clause"
] | 1 | 2018-05-03T06:17:02.000Z | 2018-05-03T06:17:02.000Z | saleor/site/models.py | glosoftgroup/restaurant | 5b10a8f5199103e5bee01b45952c9638e63f28af | [
"BSD-3-Clause"
] | 8 | 2018-05-07T16:42:35.000Z | 2022-02-26T03:31:56.000Z | saleor/site/models.py | glosoftgroup/tenants | a6b229ad1f6d567b7078f83425a532830b71e1bb | [
"BSD-3-Clause"
] | null | null | null | from django.contrib.sites.models import _simple_domain_name_validator
from django.db import models
from django.utils.encoding import python_2_unicode_compatible
from django.utils.translation import pgettext_lazy
from . import AuthenticationBackends
from django.core.validators import MinValueValidator
import datetime as t
@python_2_unicode_compatible
class SiteSettings(models.Model):
domain = models.CharField(
pgettext_lazy('Site field', 'domain'), max_length=100,
validators=[_simple_domain_name_validator],blank=True, null=True,default='')
name = models.CharField(pgettext_lazy('Site field', 'name'),
max_length=50,blank=True, null=True)
email = models.EmailField(pgettext_lazy('Site field', 'email'),
max_length=50,blank=True, null=True)
header_text = models.CharField(
pgettext_lazy('Site field', 'header text'), max_length=200, blank=True)
description = models.CharField(
pgettext_lazy('Site field', 'site description'), max_length=500,
blank=True)
    loyalty_point_equiv = models.IntegerField(pgettext_lazy('Site field', 'loyalty points equivalency'),
                                              validators=[MinValueValidator(0)], default=0)
    floors = models.IntegerField(pgettext_lazy('Site field', 'floors'),
                                 validators=[MinValueValidator(0)], default=6)
    max_credit_date = models.IntegerField(pgettext_lazy('Site field', 'Maximum credit sale expiration in days'),
                                          validators=[MinValueValidator(0)], unique=True, default=0)
opening_time = models.TimeField(pgettext_lazy('Site field', 'opening time'),
default=t.time(6, 00))
closing_time = models.TimeField(pgettext_lazy('Site field', 'closing time'),
default=t.time(21, 00))
sms_gateway_username = models.CharField(
pgettext_lazy('Site field', 'sms gateway username'), max_length=500,
blank=True)
sms_gateway_apikey = models.CharField(
pgettext_lazy('Site field', 'sms gateway api key'), max_length=500,
blank=True)
image = models.ImageField(upload_to='employee', null=True, blank=True)
def __str__(self):
return self.name
def available_backends(self):
return self.authorizationkey_set.values_list('name', flat=True)
@python_2_unicode_compatible
class AuthorizationKey(models.Model):
    site_settings = models.ForeignKey(SiteSettings, on_delete=models.CASCADE)
name = models.CharField(
pgettext_lazy('Authentication field', 'name'), max_length=20,
choices=AuthenticationBackends.BACKENDS)
key = models.TextField(pgettext_lazy('Authentication field', 'key'))
password = models.TextField(
pgettext_lazy('Authentication field', 'password'))
class Meta:
unique_together = (('site_settings', 'name'),)
def __str__(self):
return self.name
def key_and_secret(self):
return self.key, self.password
class Bank(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return str(self.name)
class BankBranch(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
    bank = models.ForeignKey(Bank, related_name='branch', on_delete=models.CASCADE, null=True, blank=True)
def __str__(self):
return str(self.name)
class Department(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return str(self.name)
class UserRole(models.Model):
name = models.CharField(max_length=100, null=True, blank=True)
def __str__(self):
return str(self.name)
class Files(models.Model):
file = models.TextField(null=True, blank=True)
check = models.CharField(max_length=256, null=True, blank=True)
created = models.DateTimeField(auto_now_add=True)
modified = models.DateTimeField(auto_now=True)
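# Illustrative ORM usage (a sketch, assuming migrations have been applied; the backend
# name below is hypothetical and must match AuthenticationBackends.BACKENDS):
#
#   settings = SiteSettings.objects.create(name='Main', domain='example.com')
#   AuthorizationKey.objects.create(site_settings=settings, name='facebook',
#                                   key='app-id', password='app-secret')
#   list(settings.available_backends())  # -> ['facebook']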
| 39.368932 | 114 | 0.691739 | 487 | 4,055 | 5.564682 | 0.24846 | 0.070849 | 0.070849 | 0.092989 | 0.47417 | 0.385609 | 0.255351 | 0.191144 | 0.145018 | 0.145018 | 0 | 0.016902 | 0.197534 | 4,055 | 102 | 115 | 39.754902 | 0.815919 | 0 | 0 | 0.283951 | 0 | 0 | 0.102454 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.098765 | false | 0.037037 | 0.111111 | 0.098765 | 0.728395 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cedae3379f64f962505b6c4f646a4773f71a3a7b | 1,341 | py | Python | models/vos_net.py | hynekdav/SimCLR | dc5e6000a1afabb5ab32ad62b849547f95360300 | [
"MIT"
] | null | null | null | models/vos_net.py | hynekdav/SimCLR | dc5e6000a1afabb5ab32ad62b849547f95360300 | [
"MIT"
] | null | null | null | models/vos_net.py | hynekdav/SimCLR | dc5e6000a1afabb5ab32ad62b849547f95360300 | [
"MIT"
] | null | null | null | # -*- encoding: utf-8 -*-
# ! python3
import torch.nn as nn
from models.resnet import resnet18, resnet50, resnet101
class VOSNet(nn.Module):
def __init__(self, model='resnet18'):
super(VOSNet, self).__init__()
self.model = model
if model == 'resnet18':
resnet = resnet18(pretrained=True)
self.backbone = nn.Sequential(*list(resnet.children())[0:8])
elif model == 'resnet50':
resnet = resnet50(pretrained=True)
self.backbone = nn.Sequential(*list(resnet.children())[0:8])
self.adjust_dim = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0, bias=False)
self.bn256 = nn.BatchNorm2d(256)
elif model == 'resnet101':
resnet = resnet101(pretrained=True)
self.backbone = nn.Sequential(*list(resnet.children())[0:8])
self.adjust_dim = nn.Conv2d(1024, 256, kernel_size=1, stride=1, padding=0, bias=False)
self.bn256 = nn.BatchNorm2d(256)
else:
raise NotImplementedError
def forward(self, x):
if self.model == 'resnet18':
x = self.backbone(x)
elif self.model == 'resnet50' or self.model == 'resnet101':
x = self.backbone(x)
x = self.adjust_dim(x)
x = self.bn256(x)
return x
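# Shape sketch (illustrative): push a half-precision batch through the resnet18 variant.
# The exact spatial stride depends on the local models.resnet implementation.
#
#   net = VOSNet(model='resnet18').cuda().half()
#   x = torch.randn(2, 3, 256, 256, device='cuda', dtype=torch.half)
#   feats = net(x)  # e.g. (2, 512, 8, 8) for a standard stride-32 resnet18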
| 31.186047 | 98 | 0.58091 | 160 | 1,341 | 4.7875 | 0.325 | 0.058747 | 0.070496 | 0.101828 | 0.456919 | 0.456919 | 0.456919 | 0.456919 | 0.456919 | 0.456919 | 0 | 0.080376 | 0.285608 | 1,341 | 42 | 99 | 31.928571 | 0.719207 | 0.024609 | 0 | 0.310345 | 0 | 0 | 0.044444 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.068966 | false | 0 | 0.068966 | 0 | 0.206897 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
cedfa2733099c2586c209590f544edfd847fe927 | 978 | py | Python | Python/minimum-path-sum.py | xiaohalo/LeetCode | 68211ba081934b21bb1968046b7e3c1459b3da2d | [
"MIT"
] | 9 | 2019-06-30T07:15:18.000Z | 2022-02-10T20:13:40.000Z | Python/minimum-path-sum.py | xiaohalo/LeetCode | 68211ba081934b21bb1968046b7e3c1459b3da2d | [
"MIT"
] | null | null | null | Python/minimum-path-sum.py | xiaohalo/LeetCode | 68211ba081934b21bb1968046b7e3c1459b3da2d | [
"MIT"
] | 9 | 2019-01-16T22:16:49.000Z | 2022-02-06T17:33:41.000Z | from __future__ import print_function
# Time: O(m * n)
# Space: O(n)
#
# Given a m x n grid filled with non-negative numbers,
# find a path from top left to bottom right which minimizes the sum of all numbers along its path.
#
# Note: You can only move either down or right at any point in time.
#
class Solution:
# @param grid, a list of lists of integers
# @return an integer
    def minPathSum(self, grid):
        # Keep a single row of DP state; rename to avoid shadowing the builtin sum().
        dp = list(grid[0])
        for j in range(1, len(grid[0])):
            dp[j] = dp[j - 1] + grid[0][j]
        for i in range(1, len(grid)):
            dp[0] += grid[i][0]
            for j in range(1, len(grid[0])):
                dp[j] = min(dp[j - 1], dp[j]) + grid[i][j]
        return dp[-1]
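    # Recurrence: dp[j] = grid[i][j] + min(dp[j], dp[j-1]), so one row of state
    # suffices. E.g. for [[1,3,1],[1,5,1],[4,2,1]] the row evolves
    # [1,4,5] -> [2,7,6] -> [6,8,7], giving 7.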
if __name__ == "__main__":
print(Solution().minPathSum([[0,1]
,[1,0]]))
print(Solution().minPathSum([[1,3,1]
,[1,5,1]
,[4,2,1]])) | 31.548387 | 98 | 0.517382 | 150 | 978 | 3.286667 | 0.486667 | 0.040568 | 0.054767 | 0.073022 | 0.137931 | 0.105477 | 0.105477 | 0.105477 | 0.105477 | 0.105477 | 0 | 0.0387 | 0.339468 | 978 | 31 | 99 | 31.548387 | 0.724458 | 0.314928 | 0 | 0.117647 | 0 | 0 | 0.012121 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.058824 | false | 0 | 0.058824 | 0 | 0.235294 | 0.176471 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c69f097de600135ef9e6e552535de5926f581f1 | 7,127 | py | Python | tests/unit/test_fp16.py | kyuhyoung/DeepSpeed | 59758ab1d2612014406c804bd25b6d32de937570 | [
"MIT"
] | 1 | 2020-02-20T07:23:31.000Z | 2020-02-20T07:23:31.000Z | tests/unit/test_fp16.py | arita37/DeepSpeed | f2d7513561eb72f6b9c5188b5a227ecb7b05a2ee | [
"MIT"
] | 5 | 2020-11-13T17:43:04.000Z | 2022-03-12T00:16:21.000Z | tests/unit/test_fp16.py | arita37/DeepSpeed | f2d7513561eb72f6b9c5188b5a227ecb7b05a2ee | [
"MIT"
] | null | null | null | import torch
import deepspeed
import argparse
import pytest
import json
import os
from common import distributed_test
def create_config_from_dict(tmpdir, config_dict):
config_path = os.path.join(tmpdir, 'temp_config.json')
with open(config_path, 'w') as fd:
json.dump(config_dict, fd)
return config_path
class SimpleModel(torch.nn.Module):
def __init__(self, hidden_dim, empty_grad=False):
super(SimpleModel, self).__init__()
self.linear = torch.nn.Linear(hidden_dim, hidden_dim)
if empty_grad:
self.layers2 = torch.nn.ModuleList([torch.nn.Linear(hidden_dim, hidden_dim)])
self.cross_entropy_loss = torch.nn.CrossEntropyLoss()
def forward(self, x, y):
hidden_dim = x
hidden_dim = self.linear(hidden_dim)
return self.cross_entropy_loss(hidden_dim, y)
def test_temp_config_json(tmpdir):
config_dict = {
"train_batch_size": 1,
}
config_path = create_config_from_dict(tmpdir, config_dict)
config_json = json.load(open(config_path, 'r'))
assert 'train_batch_size' in config_json
def prepare_optimizer_parameters(model):
param_optimizer = list(model.named_parameters())
optimizer_grouped_parameters = [{
'params': [p for n,
p in param_optimizer],
'weight_decay': 0.0
}]
return optimizer_grouped_parameters
def get_data_loader(model, total_samples, hidden_dim, device):
batch_size = model.train_micro_batch_size_per_gpu()
train_data = torch.randn(total_samples, hidden_dim, device=device, dtype=torch.half)
train_label = torch.empty(total_samples,
dtype=torch.long,
device=device).random_(hidden_dim)
train_dataset = torch.utils.data.TensorDataset(train_data, train_label)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batch_size)
return train_loader
def get_args(tmpdir, config_dict):
config_path = create_config_from_dict(tmpdir, config_dict)
parser = argparse.ArgumentParser()
args = parser.parse_args(args='')
args.deepspeed = True
args.deepspeed_config = config_path
args.local_rank = 0
return args
def test_lamb_fp16_basic(tmpdir):
config_dict = {
"train_batch_size": 2,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015,
"max_grad_norm": 1.0
}
},
"fp16": {
"enabled": True
}
}
args = get_args(tmpdir, config_dict)
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
@distributed_test(world_size=[1, 2])
def _test_lamb_fp16_basic(args, model, hidden_dim):
model, _, _,_ = deepspeed.initialize(args=args,
model=model,
model_parameters=model.parameters(),
dist_init_required=False)
data_loader = get_data_loader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
_test_lamb_fp16_basic(args=args, model=model, hidden_dim=hidden_dim)
def test_lamb_fp16_empty_grad(tmpdir):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"optimizer": {
"type": "Lamb",
"params": {
"lr": 0.00015,
"max_grad_norm": 1.0
}
},
"fp16": {
"enabled": True
}
}
args = get_args(tmpdir, config_dict)
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=True)
@distributed_test(world_size=[1])
def _test_lamb_fp16_empty_grad(args, model, hidden_dim):
model, _, _,_ = deepspeed.initialize(args=args,
model=model,
model_parameters=model.parameters(),
dist_init_required=False)
data_loader = get_data_loader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
_test_lamb_fp16_empty_grad(args=args, model=model, hidden_dim=hidden_dim)
def test_adamw_fp16_basic(tmpdir):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"fp16": {
"enabled": True
}
}
args = get_args(tmpdir, config_dict)
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=False)
@distributed_test(world_size=[1])
def _test_adamw_fp16_basic(args, model, hidden_dim):
optimizer = torch.optim.AdamW(params=model.parameters())
model, _, _,_ = deepspeed.initialize(args=args,
model=model,
optimizer=optimizer,
dist_init_required=False)
data_loader = get_data_loader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
_test_adamw_fp16_basic(args=args, model=model, hidden_dim=hidden_dim)
def test_adamw_fp16_empty_grad(tmpdir):
config_dict = {
"train_batch_size": 1,
"steps_per_print": 1,
"fp16": {
"enabled": True
}
}
args = get_args(tmpdir, config_dict)
hidden_dim = 10
model = SimpleModel(hidden_dim, empty_grad=True)
@distributed_test(world_size=[1])
def _test_adamw_fp16_empty_grad(args, model, hidden_dim):
optimizer = torch.optim.AdamW(params=model.parameters())
model, _, _,_ = deepspeed.initialize(args=args,
model=model,
optimizer=optimizer,
dist_init_required=False)
data_loader = get_data_loader(model=model,
total_samples=50,
hidden_dim=hidden_dim,
device=model.device)
for n, batch in enumerate(data_loader):
loss = model(batch[0], batch[1])
model.backward(loss)
model.step()
_test_adamw_fp16_empty_grad(args=args, model=model, hidden_dim=hidden_dim)
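# The four tests above share one skeleton; a pytest.mark.parametrize consolidation could
# look like the sketch below (illustrative refactor, not part of the original suite):
#
#   @pytest.mark.parametrize('use_lamb,empty_grad',
#                            [(True, False), (True, True), (False, False), (False, True)])
#   def test_fp16(tmpdir, use_lamb, empty_grad):
#       ...  # build config_dict/model accordingly and run the shared training loop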
| 34.100478 | 89 | 0.561948 | 778 | 7,127 | 4.81491 | 0.155527 | 0.096103 | 0.055526 | 0.048051 | 0.698879 | 0.665243 | 0.65937 | 0.628671 | 0.604111 | 0.572878 | 0 | 0.019326 | 0.346569 | 7,127 | 208 | 90 | 34.264423 | 0.785055 | 0 | 0 | 0.56 | 0 | 0 | 0.043777 | 0 | 0 | 0 | 0 | 0 | 0.005714 | 1 | 0.085714 | false | 0 | 0.04 | 0 | 0.16 | 0.022857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
0c6be64c4ca2d676baf997db0f797093726c9ded | 9,754 | py | Python | code_samples/graphics/node_graphics_node_resizeable.py | lcopey/node_editor | 04d56ae4c7f2149e46903d5dd2e46f3906ef69e6 | [
"MIT"
] | 1 | 2021-04-30T11:28:42.000Z | 2021-04-30T11:28:42.000Z | code_samples/graphics/node_graphics_node_resizeable.py | lcopey/node_editor | 04d56ae4c7f2149e46903d5dd2e46f3906ef69e6 | [
"MIT"
] | null | null | null | code_samples/graphics/node_graphics_node_resizeable.py | lcopey/node_editor | 04d56ae4c7f2149e46903d5dd2e46f3906ef69e6 | [
"MIT"
] | null | null | null | from PyQt5.QtCore import Qt, QRectF, QPointF
from PyQt5.QtGui import QBrush, QPainterPath, QPainter, QColor, QPen, QFont
from PyQt5.QtWidgets import QGraphicsRectItem, QGraphicsItem, QWidget, QGraphicsSceneMouseEvent, \
    QGraphicsSceneHoverEvent, QStyleOptionGraphicsItem, QGraphicsProxyWidget, QGraphicsTextItem
from node_editor.node_content_widget import NodeContentWidget
from .const import Handle, handleCursors, handleUpdate
from typing import Optional
DEBUG = False
OUTLINE_WIDTH = 1.0
class QGraphicsResizableRectItem(QGraphicsRectItem):
def __init__(self, min_height, min_width, *args):
super().__init__(*args)
# Diverse parameters for drawing
self.handleSelected = None
self.handles = {}
self.min_width = min_width
self.min_height = min_height
self.initSizes()
self.initContent()
self.initAssets()
self.initUI()
self.initTitle()
def initUI(self):
# set flags
self.setAcceptHoverEvents(True)
self.setFlag(QGraphicsItem.ItemSendsGeometryChanges, True)
self.setFlag(QGraphicsItem.ItemIsFocusable, True)
self.updateHandles()
self.setFlag(QGraphicsItem.ItemIsMovable, True)
self.setFlag(QGraphicsItem.ItemIsSelectable, True)
def initContent(self):
self.content = NodeContentWidget(None)
self.grContent = QGraphicsProxyWidget(self)
self.setContentGeometry()
self.grContent.setWidget(self.content)
def initAssets(self):
self._title_color = Qt.white
self._title_font = QFont('Ubuntu', 8)
self._color = QColor("#7F00000")
self._color_selected = QColor("#FFFFA637")
self._color_hovered = QColor("#FF37A6FF")
self._pen_default = QPen(self._color)
self._pen_default.setWidthF(OUTLINE_WIDTH)
self._pen_selected = QPen(self._color_selected)
self._pen_selected.setWidthF(OUTLINE_WIDTH)
self._pen_hovered = QPen(self._color_hovered)
self._pen_hovered.setWidthF(OUTLINE_WIDTH + 1)
self._brush_title = QBrush(QColor("#FF313131"))
self._brush_background = QBrush(QColor("#E3212121"))
def initSizes(self):
# self.width = 180
# self.height = 240
# Diverse parameters for drawing
self.handleSize = 5
self.edge_roundness = 15.
self.edge_padding = 10.
self.title_height = 24
self.title_horizontal_padding = 5.
self.title_vertical_padding = 4.
def setContentGeometry(self):
self.content.setGeometry(self.edge_roundness, self.title_height + self.edge_roundness,
self.width - 2 * self.edge_roundness,
self.height - 2 * self.edge_roundness - self.title_height)
def initTitle(self):
# Draw the _title
self._title_color = Qt.white
self._title_font = QFont('Ubuntu', 10)
self._padding = 5.
self.title_height = 24
self.title_item = QGraphicsTextItem(self)
# self.title_item.node = self.node
self.title_item.setDefaultTextColor(self._title_color)
self.title_item.setFont(self._title_font)
self.title_item.setPos(self._padding, 0)
self.title_item.setTextWidth(self.width - 2 * self._padding)
self.title_item.setPlainText('Resizeable node')
@property
def height(self):
return self.rect().height()
@property
def width(self):
return self.rect().width()
def updateHandles(self):
rect = self.boundingRect()
left, width, top, height = rect.left(), rect.width(), rect.top(), rect.height()
offset = self.handleSize
self.handles[Handle.TopLeft] = QRectF(left, top, offset, offset)
self.handles[Handle.TopMiddle] = QRectF(left + offset, top, width - 2 * offset, offset)
self.handles[Handle.TopRight] = QRectF(left + width - offset, top, offset, offset)
self.handles[Handle.BottomLeft] = QRectF(left, top + height - offset, offset, offset)
self.handles[Handle.MiddleLeft] = QRectF(left, top + offset, offset, height - 2 * offset)
self.handles[Handle.BottomRight] = QRectF(left + width - offset, top + height - offset, offset, offset)
self.handles[Handle.MiddleRight] = QRectF(left + width - offset, top + offset, offset, height - 2 * offset)
self.handles[Handle.BottomMiddle] = QRectF(left + offset, top + height - offset, width - 2 * offset, offset)
def boundingRect(self):
# Return rectangle for selection detection
return self.rect().normalized()
    def handleAt(self, point):
        for handle, rect in self.handles.items():
            if rect.contains(point):
                if DEBUG: print(handle, rect)
                return handle
        return None
def hoverMoveEvent(self, event: 'QGraphicsSceneHoverEvent') -> None:
# if self.isSelected():
handle = self.handleAt(event.pos())
if handle is not None:
self.setCursor(handleCursors[handle])
else:
self.setCursor(Qt.ArrowCursor)
super().hoverMoveEvent(event)
def hoverLeaveEvent(self, event: 'QGraphicsSceneHoverEvent') -> None:
# if self.isSelected():
self.setCursor(Qt.ArrowCursor)
super().hoverLeaveEvent(event)
def mousePressEvent(self, event: 'QGraphicsSceneMouseEvent') -> None:
"""
Executed when the mouse is pressed on the item.
"""
try:
self.handleSelected = self.handleAt(event.pos())
if self.handleSelected:
# record the position where the mouse was pressed
self.currentPos = event.pos()
# current rectangle at mouse pressed
self.currentRect = self.boundingRect()
super().mousePressEvent(event)
except Exception as e:
print(e)
def mouseReleaseEvent(self, event: 'QGraphicsSceneMouseEvent') -> None:
"""
Executed when the mouse is released from the item.
"""
super().mouseReleaseEvent(event)
self.handleSelected = None
self.currentPos = None
self.currentRect = None
self.update()
def mouseMoveEvent(self, event: 'QGraphicsSceneMouseEvent') -> None:
"""
Executed when the mouse is being moved over the item while being pressed.
"""
if self.handleSelected is not None:
self.resize(event.pos())
else:
super().mouseMoveEvent(event)
def resize(self, pos):
"""Update rectangle and bounding rectangle"""
rect = self.rect()
boundingRect = self.boundingRect()
from_left = self.currentRect.left()
from_right = self.currentRect.right()
from_top = self.currentRect.top()
from_bottom = self.currentRect.bottom()
to_left = from_left + pos.x() - self.currentPos.x()
to_right = from_right + pos.x() - self.currentPos.x()
to_top = from_top + pos.y() - self.currentPos.y()
to_bottom = from_bottom + pos.y() - self.currentPos.y()
self.prepareGeometryChange()
update_left, update_top, update_right, update_bottom = handleUpdate[self.handleSelected]
if update_left:
if from_right - to_left <= self.min_width:
boundingRect.setLeft(from_right - self.min_width)
else:
boundingRect.setLeft(to_left)
rect.setLeft(boundingRect.left())
if update_top:
if from_bottom - to_top <= self.min_height:
boundingRect.setTop(from_bottom - self.min_height)
else:
boundingRect.setTop(to_top)
rect.setTop(boundingRect.top())
if update_bottom:
if to_bottom - from_top <= self.min_height:
boundingRect.setBottom(from_top + self.min_height)
else:
boundingRect.setBottom(to_bottom)
rect.setBottom(boundingRect.bottom())
if update_right:
if to_right - from_left <= self.min_width:
boundingRect.setRight(from_left + self.min_width)
else:
boundingRect.setRight(to_right)
rect.setRight(boundingRect.right())
self.setRect(rect)
self.updateHandles()
self.setContentGeometry()
def shape(self):
"""
Returns the shape of this item as a QPainterPath in local coordinates.
"""
path = QPainterPath()
# path.addRoundedRect(self.rect(), self.edge_size, self.edge_size)
path.addRect(self.rect())
return path
def paint(self, painter: QPainter, option: 'QStyleOptionGraphicsItem', widget: Optional[QWidget] = ...) -> None:
# content
rect = self.rect()
path_content = QPainterPath()
path_content.setFillRule(Qt.WindingFill)
path_content.addRoundedRect(rect, self.edge_roundness, self.edge_roundness)
painter.setPen(Qt.NoPen)
painter.setBrush(self._brush_background)
painter.drawPath(path_content.simplified())
# outline
path_outline = QPainterPath()
path_outline.addRoundedRect(rect, self.edge_roundness, self.edge_roundness)
painter.setPen(self._pen_default if not self.isSelected() else self._pen_selected)
painter.setBrush(Qt.NoBrush)
painter.drawPath(path_outline.simplified())
for handle in self.handles.values():
path_handle = QPainterPath()
path_handle.addRect(handle)
painter.drawPath(path_handle)
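# Minimal embedding sketch (illustrative): drop the item into a scene and view.
#
#   from PyQt5.QtWidgets import QApplication, QGraphicsScene, QGraphicsView
#   app = QApplication([])
#   scene = QGraphicsScene()
#   item = QGraphicsResizableRectItem(120, 160, 0, 0, 240, 320)  # min_height, min_width, then x, y, w, h
#   scene.addItem(item)
#   view = QGraphicsView(scene)
#   view.show()
#   app.exec_()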
| 38.25098 | 116 | 0.636867 | 1,039 | 9,754 | 5.831569 | 0.213667 | 0.028222 | 0.025252 | 0.026572 | 0.255653 | 0.155966 | 0.121802 | 0.0949 | 0.080376 | 0.03631 | 0 | 0.008364 | 0.264507 | 9,754 | 254 | 117 | 38.401575 | 0.836214 | 0.070638 | 0 | 0.12234 | 0 | 0 | 0.024017 | 0.016086 | 0 | 0 | 0 | 0 | 0 | 1 | 0.106383 | false | 0 | 0.037234 | 0.015957 | 0.180851 | 0.015957 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |