| Column | Type |
|---|---|
| hexsha | string |
| size | int64 |
| ext | string |
| lang | string |
| max_stars_repo_path | string |
| max_stars_repo_name | string |
| max_stars_repo_head_hexsha | string |
| max_stars_repo_licenses | list |
| max_stars_count | int64 |
| max_stars_repo_stars_event_min_datetime | string |
| max_stars_repo_stars_event_max_datetime | string |
| max_issues_repo_path | string |
| max_issues_repo_name | string |
| max_issues_repo_head_hexsha | string |
| max_issues_repo_licenses | list |
| max_issues_count | int64 |
| max_issues_repo_issues_event_min_datetime | string |
| max_issues_repo_issues_event_max_datetime | string |
| max_forks_repo_path | string |
| max_forks_repo_name | string |
| max_forks_repo_head_hexsha | string |
| max_forks_repo_licenses | list |
| max_forks_count | int64 |
| max_forks_repo_forks_event_min_datetime | string |
| max_forks_repo_forks_event_max_datetime | string |
| content | string |
| avg_line_length | float64 |
| max_line_length | int64 |
| alphanum_fraction | float64 |
| qsc_code_num_words_quality_signal | int64 |
| qsc_code_num_chars_quality_signal | float64 |
| qsc_code_mean_word_length_quality_signal | float64 |
| qsc_code_frac_words_unique_quality_signal | float64 |
| qsc_code_frac_chars_top_2grams_quality_signal | float64 |
| qsc_code_frac_chars_top_3grams_quality_signal | float64 |
| qsc_code_frac_chars_top_4grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_5grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_6grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_7grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_8grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_9grams_quality_signal | float64 |
| qsc_code_frac_chars_dupe_10grams_quality_signal | float64 |
| qsc_code_frac_chars_replacement_symbols_quality_signal | float64 |
| qsc_code_frac_chars_digital_quality_signal | float64 |
| qsc_code_frac_chars_whitespace_quality_signal | float64 |
| qsc_code_size_file_byte_quality_signal | float64 |
| qsc_code_num_lines_quality_signal | float64 |
| qsc_code_num_chars_line_max_quality_signal | float64 |
| qsc_code_num_chars_line_mean_quality_signal | float64 |
| qsc_code_frac_chars_alphabet_quality_signal | float64 |
| qsc_code_frac_chars_comments_quality_signal | float64 |
| qsc_code_cate_xml_start_quality_signal | float64 |
| qsc_code_frac_lines_dupe_lines_quality_signal | float64 |
| qsc_code_cate_autogen_quality_signal | float64 |
| qsc_code_frac_lines_long_string_quality_signal | float64 |
| qsc_code_frac_chars_string_length_quality_signal | float64 |
| qsc_code_frac_chars_long_word_length_quality_signal | float64 |
| qsc_code_frac_lines_string_concat_quality_signal | float64 |
| qsc_code_cate_encoded_data_quality_signal | float64 |
| qsc_code_frac_chars_hex_words_quality_signal | float64 |
| qsc_code_frac_lines_prompt_comments_quality_signal | float64 |
| qsc_code_frac_lines_assert_quality_signal | float64 |
| qsc_codepython_cate_ast_quality_signal | float64 |
| qsc_codepython_frac_lines_func_ratio_quality_signal | float64 |
| qsc_codepython_cate_var_zero_quality_signal | bool |
| qsc_codepython_frac_lines_pass_quality_signal | float64 |
| qsc_codepython_frac_lines_import_quality_signal | float64 |
| qsc_codepython_frac_lines_simplefunc_quality_signal | float64 |
| qsc_codepython_score_lines_no_logic_quality_signal | float64 |
| qsc_codepython_frac_lines_print_quality_signal | float64 |
| qsc_code_num_words | int64 |
| qsc_code_num_chars | int64 |
| qsc_code_mean_word_length | int64 |
| qsc_code_frac_words_unique | null |
| qsc_code_frac_chars_top_2grams | int64 |
| qsc_code_frac_chars_top_3grams | int64 |
| qsc_code_frac_chars_top_4grams | int64 |
| qsc_code_frac_chars_dupe_5grams | int64 |
| qsc_code_frac_chars_dupe_6grams | int64 |
| qsc_code_frac_chars_dupe_7grams | int64 |
| qsc_code_frac_chars_dupe_8grams | int64 |
| qsc_code_frac_chars_dupe_9grams | int64 |
| qsc_code_frac_chars_dupe_10grams | int64 |
| qsc_code_frac_chars_replacement_symbols | int64 |
| qsc_code_frac_chars_digital | int64 |
| qsc_code_frac_chars_whitespace | int64 |
| qsc_code_size_file_byte | int64 |
| qsc_code_num_lines | int64 |
| qsc_code_num_chars_line_max | int64 |
| qsc_code_num_chars_line_mean | int64 |
| qsc_code_frac_chars_alphabet | int64 |
| qsc_code_frac_chars_comments | int64 |
| qsc_code_cate_xml_start | int64 |
| qsc_code_frac_lines_dupe_lines | int64 |
| qsc_code_cate_autogen | int64 |
| qsc_code_frac_lines_long_string | int64 |
| qsc_code_frac_chars_string_length | int64 |
| qsc_code_frac_chars_long_word_length | int64 |
| qsc_code_frac_lines_string_concat | null |
| qsc_code_cate_encoded_data | int64 |
| qsc_code_frac_chars_hex_words | int64 |
| qsc_code_frac_lines_prompt_comments | int64 |
| qsc_code_frac_lines_assert | int64 |
| qsc_codepython_cate_ast | int64 |
| qsc_codepython_frac_lines_func_ratio | int64 |
| qsc_codepython_cate_var_zero | int64 |
| qsc_codepython_frac_lines_pass | int64 |
| qsc_codepython_frac_lines_import | int64 |
| qsc_codepython_frac_lines_simplefunc | int64 |
| qsc_codepython_score_lines_no_logic | int64 |
| qsc_codepython_frac_lines_print | int64 |
| effective | string |
| hits | int64 |
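The rows below follow this schema. As a minimal sketch (not part of the dump), assuming the rows have been exported to a Parquet file, they can be loaded and filtered on a few of the quality-signal columns; the file name is a placeholder:

import pandas as pd

# Hypothetical export of the rows shown below.
df = pd.read_parquet("code_rows.parquet")

# Keep Python files that are not flagged as auto-generated and have short lines on average.
mask = (
    (df["lang"] == "Python")
    & (df["qsc_code_cate_autogen_quality_signal"] == 0)
    & (df["avg_line_length"] < 100)
)
print(df.loc[mask, ["max_stars_repo_name", "max_stars_repo_path", "size"]])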
46175a845f983668cd1561f331edd1b1e7cee344 | 12,398 | py | Python | main_gat.py | basiralab/RG-Select | 074274f61667611205724c4423fc498ee4a0a9d0 | ["MIT"] | 1 | 2021-09-07T06:55:37.000Z | 2021-09-07T06:55:37.000Z | main_gat.py | basiralab/RG-Select | 074274f61667611205724c4423fc498ee4a0a9d0 | ["MIT"] | null | null | null | main_gat.py | basiralab/RG-Select | 074274f61667611205724c4423fc498ee4a0a9d0 | ["MIT"] | null | null | null |
# -*- coding: utf-8 -*-
from sklearn import preprocessing
from torch.autograd import Variable
from models_gat import GAT
import os
import torch
import numpy as np
import argparse
import pickle
import sklearn.metrics as metrics
import cross_val
import time
import random
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
device = torch.device('cuda:0' if torch.cuda.is_available() else 'cpu')
def evaluate(dataset, model_GAT, args, threshold_value, model_name):
"""
Parameters
----------
dataset : dataloader (dataloader for the validation/test dataset).
model_GCN : nn model (GAT model).
args : arguments
threshold_value : float (threshold for adjacency matrices).
Description
----------
This methods performs the evaluation of the model on test/validation dataset
Returns
-------
test accuracy.
"""
model_GAT.eval()
labels = []
preds = []
for batch_idx, data in enumerate(dataset):
adj = Variable(data['adj'].float(), requires_grad=False).to(device)
labels.append(data['label'].long().numpy())
adj = torch.squeeze(adj)
features = np.identity(adj.shape[0])
features = Variable(torch.from_numpy(features).float(), requires_grad=False).cpu()
if args.threshold in ["median", "mean"]:
adj = torch.where(adj > threshold_value, torch.tensor([1.0]), torch.tensor([0.0]))
ypred = model_GAT(features, adj)
_, indices = torch.max(ypred, 1)
preds.append(indices.cpu().data.numpy())
labels = np.hstack(labels)
preds = np.hstack(preds)
simple_r = {'labels':labels,'preds':preds}
with open("./gat/Labels_and_preds/"+model_name+".pickle", 'wb') as f:
pickle.dump(simple_r, f)
result = {'prec': metrics.precision_score(labels, preds, average='macro'),
'recall': metrics.recall_score(labels, preds, average='macro'),
'acc': metrics.accuracy_score(labels, preds),
'F1': metrics.f1_score(labels, preds, average="micro")}
if args.evaluation_method == 'model assessment':
name = 'Test'
if args.evaluation_method == 'model selection':
name = 'Validation'
print(name, " accuracy:", result['acc'])
return result['acc']
def minmax_sc(x):
min_max_scaler = preprocessing.MinMaxScaler()
x = min_max_scaler.fit_transform(x)
return x
def train(args, train_dataset, val_dataset, model_GAT, threshold_value, model_name):
"""
Parameters
----------
args : arguments
train_dataset : dataloader (dataloader for the validation/test dataset).
val_dataset : dataloader (dataloader for the validation/test dataset).
model_GAT : nn model (GAT model).
threshold_value : float (threshold for adjacency matrices).
Description
----------
This methods performs the training of the model on train dataset and calls evaluate() method for evaluation.
Returns
-------
test accuracy.
"""
params = list(model_GAT.parameters())
optimizer = torch.optim.Adam(params, lr=args.lr, weight_decay=args.weight_decay)
test_accs = []
train_loss=[]
val_acc=[]
for epoch in range(args.num_epochs):
print("Epoch ",epoch)
print("Size of Training Set:" + str(len(train_dataset)))
print("Size of Validation Set:" + str(len(val_dataset)))
model_GAT.train()
total_time = 0
avg_loss = 0.0
preds = []
labels = []
for batch_idx, data in enumerate(train_dataset):
begin_time = time.time()
adj = Variable(data['adj'].float(), requires_grad=False).to(device)
label = Variable(data['label'].long()).to(device)
#adj_id = Variable(data['id'].int()).to(device)
adj = torch.squeeze(adj)
features = np.identity(adj.shape[0])
features = Variable(torch.from_numpy(features).float(), requires_grad=False).cpu()
if args.threshold in ["median", "mean"]:
adj = torch.where(adj > threshold_value, torch.tensor([1.0]), torch.tensor([0.0]))
ypred = model_GAT(features, adj)
_, indices = torch.max(ypred, 1)
preds.append(indices.cpu().data.numpy())
labels.append(data['label'].long().numpy())
loss = model_GAT.loss(ypred, label)
model_GAT.zero_grad()
loss.backward()
#nn.utils.clip_grad_norm_(model_DIFFPOOL.parameters(), args.clip)
optimizer.step()
avg_loss += loss
elapsed = time.time() - begin_time
total_time += elapsed
if epoch == args.num_epochs-1:
model_GAT.is_trained = True
preds = np.hstack(preds)
labels = np.hstack(labels)
print("Train accuracy : ", np.mean( preds == labels ))
test_acc = evaluate(val_dataset, model_GAT, args, threshold_value, model_name)
print('Avg loss: ', avg_loss, '; epoch time: ', total_time)
test_accs.append(test_acc)
train_loss.append(avg_loss)
val_acc.append(test_acc)
path = './gat/weights/W_'+model_name+'.pickle'
if os.path.exists(path):
os.remove(path)
os.rename('GAT_W.pickle',path)
los_p = {'loss':train_loss}
with open("./gat/training_loss/Training_loss_"+model_name+".pickle", 'wb') as f:
pickle.dump(los_p, f)
torch.save(model_GAT,"./gat/models/GAT_"+model_name+".pt")
return test_acc
def load_data(args):
"""
Parameters
----------
args : arguments
Description
----------
This methods loads the adjacency matrices representing the args.view -th view in dataset
Returns
-------
List of dictionaries{adj, label, id}
"""
#Load graphs and labels
with open('data/'+args.dataset+'/'+args.dataset+'_edges','rb') as f:
multigraphs = pickle.load(f)
with open('data/'+args.dataset+'/'+args.dataset+'_labels','rb') as f:
labels = pickle.load(f)
adjacencies = [multigraphs[i][:,:,args.view] for i in range(len(multigraphs))]
#Normalize inputs
if args.NormalizeInputGraphs==True:
for subject in range(len(adjacencies)):
adjacencies[subject] = minmax_sc(adjacencies[subject])
#Create List of Dictionaries
G_list=[]
for i in range(len(labels)):
G_element = {"adj": adjacencies[i],"label": labels[i],"id": i,}
G_list.append(G_element)
return G_list
def arg_parse(dataset, view, num_shots=2, cv_number=5):
"""
arguments definition method
"""
parser = argparse.ArgumentParser(description='Graph Classification')
parser.add_argument('--mode', type=str, default='train', choices=['train', 'test'])
parser.add_argument('--v', type=str, default=1)
parser.add_argument('--data', type=str, default='Sample_dataset', choices = [ f.path[5:] for f in os.scandir("data") if f.is_dir() ])
parser.add_argument('--dataset', type=str, default=dataset,
help='Dataset')
parser.add_argument('--view', type=int, default=view,
help = 'view index in the dataset')
parser.add_argument('--num_epochs', type=int, default=1, #50
help='Training Epochs')
parser.add_argument('--num_shots', type=int, default=num_shots, #100
help='number of shots')
parser.add_argument('--cv_number', type=int, default=cv_number,
help='number of validation folds.')
parser.add_argument('--NormalizeInputGraphs', default=False, action='store_true',
help='Normalize Input adjacency matrices of graphs')
parser.add_argument('--evaluation_method', type=str, default='model assessment',
help='evaluation method, possible values : model selection, model assessment')
parser.add_argument('--threshold', dest='threshold', default='mean',
help='threshold the graph adjacency matrix. Possible values: no_threshold, median, mean')
parser.add_argument('--no-cuda', action='store_true', default=False,
help='Disables CUDA training.')
parser.add_argument('--num-classes', dest='num_classes', type=int, default=2,
help='Number of label classes')
parser.add_argument('--lr', type=float, default=0.001,
help='Initial learning rate.')
parser.add_argument('--weight_decay', type=float, default=5e-4,
help='Weight decay (L2 loss on parameters).')
parser.add_argument('--hidden', type=int, default=8,
help='Number of hidden units.')
parser.add_argument('--nb_heads', type=int, default=8,
help='Number of head attentions.')
parser.add_argument('--dropout', type=float, default=0.8,
help='Dropout rate (1 - keep probability).')
parser.add_argument('--alpha', type=float, default=0.2,
help='Alpha for the leaky_relu.')
return parser.parse_args()
def benchmark_task(args, model_name):
"""
Parameters
----------
args : Arguments
Description
----------
Initiates the model and performs train/test or train/validation splits and calls train() to execute training and evaluation.
Returns
-------
test_accs : test accuracies (list)
"""
G_list = load_data(args)
num_nodes = G_list[0]['adj'].shape[0]
test_accs = []
folds = cross_val.stratify_splits(G_list,args)
[random.shuffle(folds[i]) for i in range(len(folds))]
for i in range(args.cv_number):
train_set, validation_set, test_set = cross_val.datasets_splits(folds, args, i)
if args.evaluation_method =='model selection':
train_dataset, val_dataset, threshold_value = cross_val.model_selection_split(train_set, validation_set, args)
if args.evaluation_method =='model assessment':
train_dataset, val_dataset, threshold_value = cross_val.model_assessment_split(train_set, validation_set, test_set, args)
print("CV : ",i)
model_GAT = GAT(nfeat=num_nodes,
nhid=args.hidden,
nclass=args.num_classes,
dropout=args.dropout,
nheads=args.nb_heads,
alpha=args.alpha)
test_acc = train(args, train_dataset, val_dataset, model_GAT, threshold_value, model_name+"_CV_"+str(i)+"_view_"+str(args.view))
test_accs.append(test_acc)
return test_accs
def test_scores(dataset, view, model_name, cv_number):
args = arg_parse(dataset, view, cv_number=cv_number)
print("Main : ",args)
test_accs = benchmark_task(args, model_name)
print("test accuracies ",test_accs)
return test_accs
def two_shot_trainer(dataset, view, num_shots):
args = arg_parse(dataset, view, num_shots=num_shots)
torch.manual_seed(0)
np.random.seed(0)
random.seed(0)
start = time.time()
for i in range(args.num_shots):
model = "gat"
model_name = "Few_Shot_"+dataset+"_"+model + str(i)
print("Shot : ",i)
with open('./Two_shot_samples_views/'+dataset+'_view_'+str(view)+'_shot_'+str(i)+'_train','rb') as f:
train_set = pickle.load(f)
with open('./Two_shot_samples_views/'+dataset+'_view_'+str(view)+'_shot_'+str(i)+'_test','rb') as f:
test_set = pickle.load(f)
num_nodes = train_set[0]['adj'].shape[0]
model_GAT = GAT(nfeat=num_nodes,
nhid=args.hidden,
nclass=args.num_classes,
dropout=args.dropout,
nheads=args.nb_heads,
alpha=args.alpha)
train_dataset, val_dataset, threshold_value = cross_val.two_shot_loader(train_set, test_set, args)
test_acc = train(args, train_dataset, val_dataset, model_GAT, threshold_value, model_name+"_view_"+str(view))
print("Test accuracy:"+str(test_acc))
print('load data using ------>', time.time()-start)
| 37.008955 | 137 | 0.607275 | 1,514 | 12,398 | 4.795244 | 0.178336 | 0.022039 | 0.04449 | 0.018182 | 0.372176 | 0.322865 | 0.273003 | 0.256198 | 0.222865 | 0.193113 | 0 | 0.005985 | 0.258751 | 12,398 | 335 | 138 | 37.008955 | 0.784004 | 0.116793 | 0 | 0.267943 | 0 | 0 | 0.135067 | 0.012066 | 0 | 0 | 0 | 0 | 0 | 1 | 0.038278 | false | 0 | 0.057416 | 0 | 0.129187 | 0.057416 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
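A minimal sketch (not part of the record above) of driving main_gat.py programmatically; it assumes the repository's companion modules (models_gat, cross_val), the data/ directory with pickled edge and label files, and the ./gat/ output folders are in place. The dataset name, view index, and model name are placeholders:

from main_gat import test_scores

# Runs cross-validated training/evaluation of the GAT model and returns per-fold test accuracies.
accs = test_scores(dataset="Sample_dataset", view=0, model_name="gat_demo", cv_number=5)
print("CV test accuracies:", accs)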
4617a69a551305b5f32925b26d808534b36117fc | 12,752 | py | Python | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/piexif/_dump.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | ["AML"] | 8 | 2019-10-07T16:33:47.000Z | 2020-12-07T03:59:58.000Z | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/piexif/_dump.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | ["AML"] | 1 | 2018-02-18T22:24:55.000Z | 2018-02-21T18:36:09.000Z | dev/Gems/CloudGemDefectReporter/v1/AWS/common-code/Python/piexif/_dump.py | jeikabu/lumberyard | 07228c605ce16cbf5aaa209a94a3cb9d6c1a4115 | ["AML"] | 5 | 2020-08-27T20:44:18.000Z | 2021-08-21T22:54:11.000Z |
import copy
import numbers
import struct
from ._common import *
from ._exif import *
TIFF_HEADER_LENGTH = 8
def dump(exif_dict_original):
"""
py:function:: piexif.dump(exif_dict)
Return exif as bytes.
:param dict exif: Exif data({"0th":dict, "Exif":dict, "GPS":dict, "Interop":dict, "1st":dict, "thumbnail":bytes})
:return: Exif
:rtype: bytes
"""
exif_dict = copy.deepcopy(exif_dict_original)
header = b"Exif\x00\x00\x4d\x4d\x00\x2a\x00\x00\x00\x08"
exif_is = False
gps_is = False
interop_is = False
first_is = False
if "0th" in exif_dict:
zeroth_ifd = exif_dict["0th"]
else:
zeroth_ifd = {}
if (("Exif" in exif_dict) and len(exif_dict["Exif"]) or
("Interop" in exif_dict) and len(exif_dict["Interop"]) ):
zeroth_ifd[ImageIFD.ExifTag] = 1
exif_is = True
exif_ifd = exif_dict["Exif"]
if ("Interop" in exif_dict) and len(exif_dict["Interop"]):
exif_ifd[ExifIFD.InteroperabilityTag] = 1
interop_is = True
interop_ifd = exif_dict["Interop"]
elif ExifIFD.InteroperabilityTag in exif_ifd:
exif_ifd.pop(ExifIFD.InteroperabilityTag)
elif ImageIFD.ExifTag in zeroth_ifd:
zeroth_ifd.pop(ImageIFD.ExifTag)
if ("GPS" in exif_dict) and len(exif_dict["GPS"]):
zeroth_ifd[ImageIFD.GPSTag] = 1
gps_is = True
gps_ifd = exif_dict["GPS"]
elif ImageIFD.GPSTag in zeroth_ifd:
zeroth_ifd.pop(ImageIFD.GPSTag)
if (("1st" in exif_dict) and
("thumbnail" in exif_dict) and
(exif_dict["thumbnail"] is not None)):
first_is = True
exif_dict["1st"][ImageIFD.JPEGInterchangeFormat] = 1
exif_dict["1st"][ImageIFD.JPEGInterchangeFormatLength] = 1
first_ifd = exif_dict["1st"]
zeroth_set = _dict_to_bytes(zeroth_ifd, "0th", 0)
zeroth_length = (len(zeroth_set[0]) + exif_is * 12 + gps_is * 12 + 4 +
len(zeroth_set[1]))
if exif_is:
exif_set = _dict_to_bytes(exif_ifd, "Exif", zeroth_length)
exif_length = len(exif_set[0]) + interop_is * 12 + len(exif_set[1])
else:
exif_bytes = b""
exif_length = 0
if gps_is:
gps_set = _dict_to_bytes(gps_ifd, "GPS", zeroth_length + exif_length)
gps_bytes = b"".join(gps_set)
gps_length = len(gps_bytes)
else:
gps_bytes = b""
gps_length = 0
if interop_is:
offset = zeroth_length + exif_length + gps_length
interop_set = _dict_to_bytes(interop_ifd, "Interop", offset)
interop_bytes = b"".join(interop_set)
interop_length = len(interop_bytes)
else:
interop_bytes = b""
interop_length = 0
if first_is:
offset = zeroth_length + exif_length + gps_length + interop_length
first_set = _dict_to_bytes(first_ifd, "1st", offset)
thumbnail = _get_thumbnail(exif_dict["thumbnail"])
thumbnail_max_size = 64000
if len(thumbnail) > thumbnail_max_size:
raise ValueError("Given thumbnail is too large. max 64kB")
else:
first_bytes = b""
if exif_is:
pointer_value = TIFF_HEADER_LENGTH + zeroth_length
pointer_str = struct.pack(">I", pointer_value)
key = ImageIFD.ExifTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
exif_pointer = key_str + type_str + length_str + pointer_str
else:
exif_pointer = b""
if gps_is:
pointer_value = TIFF_HEADER_LENGTH + zeroth_length + exif_length
pointer_str = struct.pack(">I", pointer_value)
key = ImageIFD.GPSTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
gps_pointer = key_str + type_str + length_str + pointer_str
else:
gps_pointer = b""
if interop_is:
pointer_value = (TIFF_HEADER_LENGTH +
zeroth_length + exif_length + gps_length)
pointer_str = struct.pack(">I", pointer_value)
key = ExifIFD.InteroperabilityTag
key_str = struct.pack(">H", key)
type_str = struct.pack(">H", TYPES.Long)
length_str = struct.pack(">I", 1)
interop_pointer = key_str + type_str + length_str + pointer_str
else:
interop_pointer = b""
if first_is:
pointer_value = (TIFF_HEADER_LENGTH + zeroth_length +
exif_length + gps_length + interop_length)
first_ifd_pointer = struct.pack(">L", pointer_value)
thumbnail_pointer = (pointer_value + len(first_set[0]) + 24 +
4 + len(first_set[1]))
thumbnail_p_bytes = (b"\x02\x01\x00\x04\x00\x00\x00\x01" +
struct.pack(">L", thumbnail_pointer))
thumbnail_length_bytes = (b"\x02\x02\x00\x04\x00\x00\x00\x01" +
struct.pack(">L", len(thumbnail)))
first_bytes = (first_set[0] + thumbnail_p_bytes +
thumbnail_length_bytes + b"\x00\x00\x00\x00" +
first_set[1] + thumbnail)
else:
first_ifd_pointer = b"\x00\x00\x00\x00"
zeroth_bytes = (zeroth_set[0] + exif_pointer + gps_pointer +
first_ifd_pointer + zeroth_set[1])
if exif_is:
exif_bytes = exif_set[0] + interop_pointer + exif_set[1]
return (header + zeroth_bytes + exif_bytes + gps_bytes +
interop_bytes + first_bytes)
def _get_thumbnail(jpeg):
segments = split_into_segments(jpeg)
while (b"\xff\xe0" <= segments[1][0:2] <= b"\xff\xef"):
segments.pop(1)
thumbnail = b"".join(segments)
return thumbnail
def _pack_byte(*args):
return struct.pack("B" * len(args), *args)
def _pack_signed_byte(*args):
return struct.pack("b" * len(args), *args)
def _pack_short(*args):
return struct.pack(">" + "H" * len(args), *args)
def _pack_signed_short(*args):
return struct.pack(">" + "h" * len(args), *args)
def _pack_long(*args):
return struct.pack(">" + "L" * len(args), *args)
def _pack_slong(*args):
return struct.pack(">" + "l" * len(args), *args)
def _pack_float(*args):
return struct.pack(">" + "f" * len(args), *args)
def _pack_double(*args):
return struct.pack(">" + "d" * len(args), *args)
def _value_to_bytes(raw_value, value_type, offset):
four_bytes_over = b""
value_str = b""
if value_type == TYPES.Byte:
length = len(raw_value)
if length <= 4:
value_str = (_pack_byte(*raw_value) +
b"\x00" * (4 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_byte(*raw_value)
elif value_type == TYPES.Short:
length = len(raw_value)
if length <= 2:
value_str = (_pack_short(*raw_value) +
b"\x00\x00" * (2 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_short(*raw_value)
elif value_type == TYPES.Long:
length = len(raw_value)
if length <= 1:
value_str = _pack_long(*raw_value)
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_long(*raw_value)
elif value_type == TYPES.SLong:
length = len(raw_value)
if length <= 1:
value_str = _pack_slong(*raw_value)
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_slong(*raw_value)
elif value_type == TYPES.Ascii:
try:
new_value = raw_value.encode("latin1") + b"\x00"
except:
try:
new_value = raw_value + b"\x00"
except TypeError:
raise ValueError("Got invalid type to convert.")
length = len(new_value)
if length > 4:
value_str = struct.pack(">I", offset)
four_bytes_over = new_value
else:
value_str = new_value + b"\x00" * (4 - length)
elif value_type == TYPES.Rational:
if isinstance(raw_value[0], numbers.Integral):
length = 1
num, den = raw_value
new_value = struct.pack(">L", num) + struct.pack(">L", den)
elif isinstance(raw_value[0], tuple):
length = len(raw_value)
new_value = b""
for n, val in enumerate(raw_value):
num, den = val
new_value += (struct.pack(">L", num) +
struct.pack(">L", den))
value_str = struct.pack(">I", offset)
four_bytes_over = new_value
elif value_type == TYPES.SRational:
if isinstance(raw_value[0], numbers.Integral):
length = 1
num, den = raw_value
new_value = struct.pack(">l", num) + struct.pack(">l", den)
elif isinstance(raw_value[0], tuple):
length = len(raw_value)
new_value = b""
for n, val in enumerate(raw_value):
num, den = val
new_value += (struct.pack(">l", num) +
struct.pack(">l", den))
value_str = struct.pack(">I", offset)
four_bytes_over = new_value
elif value_type == TYPES.Undefined:
length = len(raw_value)
if length > 4:
value_str = struct.pack(">I", offset)
try:
four_bytes_over = b"" + raw_value
except TypeError:
raise ValueError("Got invalid type to convert.")
else:
try:
value_str = raw_value + b"\x00" * (4 - length)
except TypeError:
raise ValueError("Got invalid type to convert.")
elif value_type == TYPES.SByte: # Signed Byte
length = len(raw_value)
if length <= 4:
value_str = (_pack_signed_byte(*raw_value) +
b"\x00" * (4 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_signed_byte(*raw_value)
elif value_type == TYPES.SShort: # Signed Short
length = len(raw_value)
if length <= 2:
value_str = (_pack_signed_short(*raw_value) +
b"\x00\x00" * (2 - length))
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_signed_short(*raw_value)
elif value_type == TYPES.Float:
length = len(raw_value)
if length <= 1:
value_str = _pack_float(*raw_value)
else:
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_float(*raw_value)
elif value_type == TYPES.DFloat: # Double
length = len(raw_value)
value_str = struct.pack(">I", offset)
four_bytes_over = _pack_double(*raw_value)
length_str = struct.pack(">I", length)
return length_str, value_str, four_bytes_over
def _dict_to_bytes(ifd_dict, ifd, ifd_offset):
tag_count = len(ifd_dict)
entry_header = struct.pack(">H", tag_count)
if ifd in ("0th", "1st"):
entries_length = 2 + tag_count * 12 + 4
else:
entries_length = 2 + tag_count * 12
entries = b""
values = b""
for n, key in enumerate(sorted(ifd_dict)):
if (ifd == "0th") and (key in (ImageIFD.ExifTag, ImageIFD.GPSTag)):
continue
elif (ifd == "Exif") and (key == ExifIFD.InteroperabilityTag):
continue
elif (ifd == "1st") and (key in (ImageIFD.JPEGInterchangeFormat, ImageIFD.JPEGInterchangeFormatLength)):
continue
raw_value = ifd_dict[key]
key_str = struct.pack(">H", key)
value_type = TAGS[ifd][key]["type"]
type_str = struct.pack(">H", value_type)
four_bytes_over = b""
if isinstance(raw_value, numbers.Integral) or isinstance(raw_value, float):
raw_value = (raw_value,)
offset = TIFF_HEADER_LENGTH + entries_length + ifd_offset + len(values)
try:
length_str, value_str, four_bytes_over = _value_to_bytes(raw_value,
value_type,
offset)
except ValueError:
raise ValueError(
'"dump" got wrong type of exif value.\n' +
'{0} in {1} IFD. Got as {2}.'.format(key, ifd, type(ifd_dict[key]))
)
entries += key_str + type_str + length_str + value_str
values += four_bytes_over
return (entry_header + entries, values)
| 36.74928 | 117 | 0.571361 | 1,602 | 12,752 | 4.27216 | 0.093633 | 0.068673 | 0.051286 | 0.038866 | 0.522501 | 0.497224 | 0.468147 | 0.419053 | 0.403565 | 0.336061 | 0 | 0.020273 | 0.311481 | 12,752 | 346 | 118 | 36.855491 | 0.759226 | 0.018036 | 0 | 0.363036 | 0 | 0.0033 | 0.049808 | 0.008648 | 0 | 0 | 0 | 0 | 0 | 1 | 0.039604 | false | 0 | 0.016502 | 0.026403 | 0.09571 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
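A minimal usage sketch (not part of the record above) for the dump() function shown, assuming the public piexif package, whose piexif.dump and piexif.insert entry points wrap this module; the tag values and JPEG path are placeholders:

import piexif

# Build the nested dict structure described in the dump() docstring.
exif_dict = {
    "0th": {piexif.ImageIFD.Make: u"ExampleCam"},
    "Exif": {piexif.ExifIFD.DateTimeOriginal: u"2020:01:01 00:00:00"},
    "GPS": {},
    "1st": {},
    "thumbnail": None,
}
exif_bytes = piexif.dump(exif_dict)     # the Exif block as bytes
piexif.insert(exif_bytes, "image.jpg")  # write it into an existing JPEG (placeholder path)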
4617fb2b1109fbc2c363e6e5ce80547d57eca614 | 8,000 | py | Python | portal.py | mrahman4782/portalhoop | 29e87ccaca5544590aa9ed20096c3628c1ab57b1 | ["Apache-2.0"] | null | null | null | portal.py | mrahman4782/portalhoop | 29e87ccaca5544590aa9ed20096c3628c1ab57b1 | ["Apache-2.0"] | null | null | null | portal.py | mrahman4782/portalhoop | 29e87ccaca5544590aa9ed20096c3628c1ab57b1 | ["Apache-2.0"] | null | null | null |
import pygame
import random
from pygame import *
pygame.init()
width, height = 740, 500
screen = pygame.display.set_mode((width, height))
player = [pygame.transform.scale(pygame.image.load("Resources/Balljump-1(2).png"), (100,100)), pygame.transform.scale(pygame.image.load("Resources/Balljump-1.png"),(100,100))]
launch = [pygame.transform.scale(pygame.image.load("Resources/Balljump-1.png"), (100,100)), pygame.transform.scale(pygame.image.load("Resources/Balljump-1(2).png"), (100,100)),pygame.transform.scale(pygame.image.load("Resources/Balljump-2.png"), (100,100)),pygame.transform.scale(pygame.image.load("Resources/Balljump-3.png"), (100,100)), pygame.transform.scale(pygame.image.load("Resources/Balljump-4.png"),(100,100))]
shoot = [pygame.transform.scale(pygame.image.load("Resources/Balljump-5.png"), (100, 100)), pygame.transform.scale(pygame.image.load("Resources/Balljump-6.png"), (100, 100))]
ball = pygame.transform.scale(pygame.image.load("Resources/ball.png"), (100,100))
blue = (0, 0, 128)
white = (255, 255, 255)
janimation, danimation, movable, motionactivate, limit_reached, nojump = False, False, False, False, False, False
jumplock = True
ballrelease, ballregain = False, False
fr = pygame.time.Clock()
c = 0
i = 0
p = 0
x, y = 0, 300
score = 0
a, b, rpos = 0, 0, 0
xpos, ypos = 17, 313
# Background image source: https://www.freepik.com/free-vector/floral-ornamental-abstract-background_6189902.htm#page=1&query=black%20background&position=40
background = pygame.image.load("Resources/back.jpg")
gamestart = False
def basketball():
#Draw basketball
global rpos, xpos, ypos, ballregain
if gamestart == True and ballrelease == False:
if nojump == True:
if c % 2 == 0:
screen.blit(ball, (xpos, ypos + 24))
if c % 2 == 1:
screen.blit(ball, (xpos + 2 , ypos ))
if nojump == False and motionactivate == True:
if p // 4 == 0:
screen.blit(ball, (xpos, ypos))
if p // 4 == 1:
screen.blit(ball, (xpos-2, ypos-5))
if p // 4 == 2:
screen.blit(ball, (xpos-2, ypos-7))
if p // 4 == 3:
screen.blit(ball, (xpos-2, ypos-11))
if p// 4 == 4:
screen.blit(ball, (xpos-2, ypos-13))
if janimation == True:
rpos = y -13
screen.blit(ball, (xpos, rpos))
rposNew = 400 - rpos
if gamestart == True and ballrelease == True:
if rpos <= 325:
screen.blit(ball, (xpos, rpos))
if xpos <= 700:
ballregain = False
xpos += (rposNew / 20)
print("rpos is: " + str(rpos) + " xpos is: " + str(xpos))
rpos = (-1*((xpos/600)**2))+((xpos)/150)+rpos
if xpos > 700 or rpos > 325:
xpos = 17
ballregain = True
def player_animations():
# Animations while the user makes no input
global c
global player
global i
if nojump == True:
if c % 2 == 0 and i<= 10:
if i<10:
screen.blit(player[c], (0, 300))
i += 1
if i == 10:
c += 1
i += 1
elif c % 2 == 1 and i<= 20:
if i>10 and i<20:
screen.blit(player[c], (0, 300))
i += 1
if i == 20:
c -= 1
i += 1
elif i>20:
i = 0
screen.blit(player[c], (0, 300))
if nojump == False:
screen.fill(0)
def screen_text():
global score
global nojump
global movable
if nojump == True:
font = pygame.font.Font("Resources/android.ttf", 16)
text2 = font.render("Hold space to throw the ball", True, white)
textRect2 = text2.get_rect()
textRect2.center = (width // 2, height // 2 + 200)
screen.blit(text2, textRect2)
movable = True
font = pygame.font.Font("Resources/android.ttf", 16)
text2 = font.render("Score: "+ str(score), True, white)
textRect2 = text2.get_rect()
textRect2.center = (width // 2 - 300, height // 2 - 200)
screen.blit(text2, textRect2)
def player_jump():
# Initial animations before the player jumps
global p, nojump, movable, x, y, janimation, danimation, a, b, motionactivate, limit_reached
global jumplock, ballrelease, ballregain
if movable == True and keypress[K_SPACE]:
#print(pygame.time.get_ticks())
motionactivate = True
#print(nojump)
#if p >= 19:
# p = 0
if motionactivate == True:
#screen.fill(0)
nojump = False
if p < 21:
screen.blit(launch[p // 4], (0, 300))
p += 1
if p == 20:
a = pygame.time.get_ticks()
janimation = True
p += 1
#elif keypress[K_SPACE]:
# what to do when jump is completed
if janimation == True and limit_reached == False:
if keypress[K_SPACE] and pygame.KEYDOWN and jumplock == True:
b = pygame.time.get_ticks()
if y > 239:
y = ((b - a) / -25) + 310
if y >= 305:
screen.fill(0)
screen.blit(shoot[0], (x, y))
if y < 305 and y > 240:
screen.blit(shoot[1], (x,y))
if y <= 239:
screen.blit(shoot[0], (x, y))
danimation = True
limit_reached = True
#print(danimation)
if event.type == pygame.KEYUP:
if event.key == K_SPACE:
danimation = True
motionactivate = False
ballrelease = True
if danimation == True:
jumplock = False
if danimation == True or limit_reached == True:
#print("poopc "+ str(y))
if y < 310:
screen.blit(shoot[0], (x, y))
y += 2
#
# print("zag")
#print("poop: " + str(pygame.KEYUP) + " key down is: " + str(pygame.KEYDOWN))
if y >= 310:
nojump = True
danimation = False
janimation = False
movable = False
limit_reached = False
p = 0
jumplock = True
if ballregain == True:
ballrelease = False
#print("y value is: "+ str(y)+ " a is: "+ str(a) + " b is: "+ str(b))
while 1:
keypress = pygame.key.get_pressed()
fr.tick(30)
screen.fill(0)
if keypress[K_RETURN]:
gamestart = True
if gamestart == False:
#screen.fill(0)
screen.blit(background, (0,0))
# Draw opening texts
font = pygame.font.Font("Resources/android.ttf", 64)
text = font.render("Portal Hoop", True, white)
textRect = text.get_rect()
textRect.center = (width // 2, height // 2 - 100)
screen.blit(text, textRect)
font = pygame.font.Font("Resources/android.ttf", 18)
text2 = font.render("Press Return to start", True, white)
textRect2 = text2.get_rect()
textRect2.center = (width // 2, height // 2 + 100)
screen.blit(text2, textRect2)
nojump = True
# Check if any
if gamestart == True:
#screen.fill(0)
player_animations()
player_jump()
basketball()
screen_text()
pygame.display.flip()
pygame.display.set_caption("Portal Hoop")
for event in pygame.event.get():
if event.type == pygame.QUIT:
pygame.quit()
exit(0)
| 28.776978 | 419 | 0.5115 | 948 | 8,000 | 4.28692 | 0.194093 | 0.054134 | 0.0406 | 0.064961 | 0.380906 | 0.336614 | 0.290108 | 0.225886 | 0.202018 | 0.202018 | 0 | 0.062708 | 0.360125 | 8,000 | 277 | 420 | 28.880866 | 0.731198 | 0.081 | 0 | 0.213483 | 0 | 0 | 0.059924 | 0.041769 | 0 | 0 | 0 | 0 | 0 | 1 | 0.022472 | false | 0 | 0.016854 | 0 | 0.039326 | 0.005618 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
461ab2e151fa2b1e92504b3b4e0d6a8160278475 | 843 | py | Python | prev_ob_models/KaplanLansner2014/plotting_and_analysis/plot_results.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | ["MIT"] | 5 | 2019-10-03T14:49:02.000Z | 2022-01-13T13:37:34.000Z | prev_ob_models/KaplanLansner2014/plotting_and_analysis/plot_results.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | ["MIT"] | 4 | 2019-12-30T15:57:24.000Z | 2020-10-07T22:42:50.000Z | prev_ob_models/KaplanLansner2014/plotting_and_analysis/plot_results.py | fameshpatel/olfactorybulb | 8d7a644b4560309ef177c0590ff73ed4c2432604 | ["MIT"] | 2 | 2020-05-19T20:12:48.000Z | 2020-11-04T17:17:44.000Z |
import pylab
import numpy
import sys
if (len(sys.argv) < 2):
fn = raw_input("Please enter data file to be plotted\n")
else:
fn = sys.argv[1]
data = numpy.loadtxt(fn)
# if the first line contains crap use skiprows=1
#data = numpy.loadtxt(fn, skiprows=1)
fig = pylab.figure()
ax = fig.add_subplot(111)
# if you want to use multiple figures in one, use
#ax1 = fig.add_subplot(211)
#ax2 = fig.add_subplot(212)
# and
if (data.ndim == 1):
x_axis = numpy.arange(data.size)
ax.plot(x_axis, data)
else:
# ax.errorbar(data[:,0], data[:,1], yerr=data[:, 2])
# print 'mean y-value:', data[:, 1].mean()
ax.plot(data[:, 0], data[:, 1], ls='-', lw=3, c='b')
# ax.scatter(data[:,0], data[:,2])
# ax.plot(data[:,3], data[:,6])
# saving:
# fig.savefig('output_figure.png')
# otherwise nothing is shown
pylab.show()
| 21.075 | 60 | 0.618031 | 142 | 843 | 3.619718 | 0.535211 | 0.035019 | 0.075875 | 0.054475 | 0.062257 | 0 | 0 | 0 | 0 | 0 | 0 | 0.039531 | 0.189798 | 843 | 39 | 61 | 21.615385 | 0.713031 | 0.500593 | 0 | 0.125 | 0 | 0 | 0.09828 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.1875 | 0 | 0.1875 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1c9d4e8ece8e1713ad5f9d3791c348c5f5f83733 | 2,691 | py | Python | lib/take2/main.py | zacharyfrederick/deep_q_gaf | 6b712b17a6c89c1cba0d22e18fa336369c521d4e | ["MIT"] | null | null | null | lib/take2/main.py | zacharyfrederick/deep_q_gaf | 6b712b17a6c89c1cba0d22e18fa336369c521d4e | ["MIT"] | null | null | null | lib/take2/main.py | zacharyfrederick/deep_q_gaf | 6b712b17a6c89c1cba0d22e18fa336369c521d4e | ["MIT"] | null | null | null |
from __future__ import division
from lib import env_config
from lib.senior_env import BetterEnvironment
from keras.optimizers import Adam
from rl.agents.dqn import DQNAgent
from rl.policy import LinearAnnealedPolicy, BoltzmannQPolicy, EpsGreedyQPolicy
from rl.memory import SequentialMemory
from lib import models
import random
choices = [0,1,2]
def gen_action():
return random.choice(choices)
if __name__ == '__main__':
config_ = env_config.EnvConfig('config/debug.json')
env = BetterEnvironment(config_)
INPUT_SHAPE = (30, 180)
WINDOW_LENGTH = 4
model = models.build_paper_model()
# Get the environment and extract the number of actions.
nb_actions = 3
# Next, we build our model. We use the same model that was described by Mnih et al. (2015).
input_shape = (WINDOW_LENGTH,) + INPUT_SHAPE
# Finally, we configure and compile our agent. You can use every built-in Keras optimizer and
# even the metrics!
memory = SequentialMemory(limit=10000000, window_length=WINDOW_LENGTH)
# Select a policy. We use eps-greedy action selection, which means that a random action is selected
# with probability eps. We anneal eps from 1.0 to 0.1 over the course of 1M steps. This is done so that
# the agent initially explores the environment (high eps) and then gradually sticks to what it knows
# (low eps). We also set a dedicated eps value that is used during testing. Note that we set it to 0.05
# so that the agent still performs some random actions. This ensures that the agent cannot get stuck.
policy = LinearAnnealedPolicy(EpsGreedyQPolicy(), attr='eps', value_max=1., value_min=.1, value_test=.05,
nb_steps=1000000)
# The trade-off between exploration and exploitation is difficult and an on-going research topic.
# If you want, you can experiment with the parameters or use a different policy. Another popular one
# is Boltzmann-style exploration:
# policy = BoltzmannQPolicy(tau=1.)
# Feel free to give it a try!
dqn = DQNAgent(model=model, nb_actions=nb_actions, policy=policy, memory=memory,
nb_steps_warmup=50000, gamma=.99, target_model_update=10000,
train_interval=4, delta_clip=1.)
dqn.compile(Adam(lr=.00025), metrics=['mae'])
# Okay, now it's time to learn something! We capture the interrupt exception so that training
# can be prematurely aborted. Notice that now you can use the built-in Keras callbacks!
weights_filename = 'dqn_{}_weights.h5f'.format('god_help_me.weights')
dqn.fit(env, nb_steps=100000, log_interval=10000)
print(env.portfolio.print_portfolio_results())
| 42.714286 | 109 | 0.726867 | 393 | 2,691 | 4.852417 | 0.519084 | 0.02517 | 0.018878 | 0.014683 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.03389 | 0.199554 | 2,691 | 62 | 110 | 43.403226 | 0.851439 | 0.454106 | 0 | 0 | 0 | 0 | 0.046832 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.033333 | false | 0 | 0.3 | 0.033333 | 0.366667 | 0.033333 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1c9df7adb0b666ca2019704ffcc93ac51a469c6b | 1,752 | py | Python | src/clcore.py | ShepardPower/PyMCBuilder | b247151864cc6b7bca0dc23aa87cdbb44b52defc | ["MIT"] | 1 | 2019-07-09T04:45:07.000Z | 2019-07-09T04:45:07.000Z | src/clcore.py | ShepardPower/PyMCBuilder | b247151864cc6b7bca0dc23aa87cdbb44b52defc | ["MIT"] | null | null | null | src/clcore.py | ShepardPower/PyMCBuilder | b247151864cc6b7bca0dc23aa87cdbb44b52defc | ["MIT"] | null | null | null |
# I'm just the one that executes the instructions!
import sys, math, json, operator, time
import mcpi.minecraft as minecraft
from PIL import Image as pillow
from blockid import get_block
import mcpi.block as block
import functions as pymc
from tqdm import tqdm
import tkinter as tk
# Functions
# Main code
mc = minecraft.Minecraft.create()
try:
json_file = open("blocks.json")
json_put = json.load(json_file)
except:
pymc.chat(mc, "blocks.json not found, exiting!", 0)
sys.exit(1)
try:
rim = pillow.open(sys.argv[1])
except:
pymc.chat(mc, "bad image, exiting!", 0)
sys.exit(1)
orders = []
used = []
imwid, imhei = rim.size
if imhei > 200:
maxheight = 200
rim.thumbnail((200, maxheight), pillow.ANTIALIAS)
imwid, imhei = rim.size
pymc.chat(mc, "image is over 200 pixels, reducing height.", 1)
rim.convert('RGB')
im = rim.load()
pbar = tqdm(total=imhei*imwid)
for hei in range(imhei):
for wid in range(imwid):
smal = pymc.comp_pixel((im[wid, hei][0], im[wid, hei][1], im[wid, hei][2]), json_put)
im[wid, hei] = smal[1]
used.append(str(smal[2]))
pbar.update(1)
pbar.close()
rim.save("result.GIF") # The result
json_file.close()
oldPos = mc.player.getPos()
playerPos = [round(oldPos.x), round(oldPos.y), round(oldPos.z)]
pymc.chat(mc, "Ready!")
pbar = tqdm(total=imhei*imwid)
num_temp = imhei*imwid-1
for hei in range(imhei):
for wid in range(imwid):
#print(used[wid + (imhei * hei)])
gblock = get_block(used[num_temp])
mc.setBlock(playerPos[0]+wid, playerPos[1]+hei, playerPos[2], gblock)
num_temp -= 1
pbar.update(1)
pbar.close()
pymc.chat(mc, "Done!!")
pymc.chat(mc, "Please star us on github if you like the result!", 2)
| 26.545455 | 93 | 0.66153 | 279 | 1,752 | 4.114695 | 0.387097 | 0.041812 | 0.052265 | 0.027875 | 0.165505 | 0.062718 | 0.062718 | 0.062718 | 0.062718 | 0.062718 | 0 | 0.021862 | 0.190639 | 1,752 | 65 | 94 | 26.953846 | 0.787729 | 0.063356 | 0 | 0.333333 | 0 | 0 | 0.107645 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0 | 0.148148 | 0 | 0.148148 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1c9e901d1d765a0acf44b58e4a7dca2f74accdab | 4,377 | py | Python | gaphor/tools/gaphorconvert.py | 987Frogh/project-makehuman | 3afc838b03c50f8e574d8c87cb71de4435a18a6d | ["Apache-2.0"] | 1 | 2020-11-27T12:39:15.000Z | 2020-11-27T12:39:15.000Z | gaphor/tools/gaphorconvert.py | 987Frogh/project-makehuman | 3afc838b03c50f8e574d8c87cb71de4435a18a6d | ["Apache-2.0"] | null | null | null | gaphor/tools/gaphorconvert.py | 987Frogh/project-makehuman | 3afc838b03c50f8e574d8c87cb71de4435a18a6d | ["Apache-2.0"] | 3 | 2020-01-23T14:13:59.000Z | 2020-02-18T18:21:47.000Z |
#!/usr/bin/python
import optparse
import os
import re
import sys
import cairo
from gaphas.painter import Context, ItemPainter
from gaphas.view import View
import gaphor.UML as UML
from gaphor.application import Application
from gaphor.storage import storage
def pkg2dir(package):
"""
Return directory path from UML package class.
"""
name = []
while package:
name.insert(0, package.name)
package = package.package
return "/".join(name)
def paint(view, cr):
view.painter.paint(Context(cairo=cr, items=view.canvas.get_all_items(), area=None))
def main(argv=sys.argv[1:]):
def message(msg):
"""
Print message if user set verbose mode.
"""
if options.verbose:
print(msg, file=sys.stderr)
usage = "usage: %prog [options] file1 file2..."
parser = optparse.OptionParser(usage=usage)
parser.add_option(
"-v", "--verbose", dest="verbose", action="store_true", help="verbose output"
)
parser.add_option(
"-u",
"--use-underscores",
dest="underscores",
action="store_true",
help="use underscores instead of spaces for output filenames",
)
parser.add_option(
"-d", "--dir", dest="dir", metavar="directory", help="output to directory"
)
parser.add_option(
"-f",
"--format",
dest="format",
metavar="format",
help="output file format, default pdf",
default="pdf",
choices=["pdf", "svg", "png"],
)
parser.add_option(
"-r",
"--regex",
dest="regex",
metavar="regex",
help="process diagrams which name matches given regular expresion;"
" name includes package name; regular expressions are case insensitive",
)
(options, args) = parser.parse_args(argv)
if not args:
parser.print_help()
Application.init(
services=["event_manager", "component_registry", "element_factory"]
)
factory = Application.get_service("element_factory")
name_re = None
if options.regex:
name_re = re.compile(options.regex, re.I)
# we should have some gaphor files to be processed at this point
for model in args:
message(f"loading model {model}")
storage.load(model, factory)
message("ready for rendering")
for diagram in factory.select(lambda e: e.isKindOf(UML.Diagram)):
odir = pkg2dir(diagram.package)
# just diagram name
dname = diagram.name
# full diagram name including package path
pname = f"{odir}/{dname}"
if options.underscores:
odir = odir.replace(" ", "_")
dname = dname.replace(" ", "_")
if name_re and not name_re.search(pname):
message(f"skipping {pname}")
continue
if options.dir:
odir = f"{options.dir}/{odir}"
outfilename = f"{odir}/{dname}.{options.format}"
if not os.path.exists(odir):
message(f"creating dir {odir}")
os.makedirs(odir)
message(f"rendering: {pname} -> {outfilename}...")
view = View(diagram.canvas)
view.painter = ItemPainter()
tmpsurface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 0, 0)
tmpcr = cairo.Context(tmpsurface)
view.update_bounding_box(tmpcr)
tmpcr.show_page()
tmpsurface.flush()
w, h = view.bounding_box.width, view.bounding_box.height
if options.format == "pdf":
surface = cairo.PDFSurface(outfilename, w, h)
elif options.format == "svg":
surface = cairo.SVGSurface(outfilename, w, h)
elif options.format == "png":
surface = cairo.ImageSurface(
cairo.FORMAT_ARGB32, int(w + 1), int(h + 1)
)
else:
assert False, f"unknown format {options.format}"
cr = cairo.Context(surface)
view.matrix.translate(-view.bounding_box.x, -view.bounding_box.y)
paint(view, cr)
cr.show_page()
if options.format == "png":
surface.write_to_png(outfilename)
surface.flush()
surface.finish()
| 28.796053 | 87 | 0.570482 | 484 | 4,377 | 5.088843 | 0.373967 | 0.021924 | 0.030451 | 0.015428 | 0.051969 | 0.024361 | 0 | 0 | 0 | 0 | 0 | 0.004639 | 0.310487 | 4,377 | 151 | 88 | 28.986755 | 0.811465 | 0.051177 | 0 | 0.045045 | 0 | 0 | 0.172951 | 0.007541 | 0 | 0 | 0 | 0 | 0.009009 | 1 | 0.036036 | false | 0 | 0.09009 | 0 | 0.135135 | 0.018018 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
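A minimal sketch (not part of the record above) of invoking the converter programmatically rather than from the command line; the model file name and output directory are placeholders, and Gaphor and its dependencies are assumed to be installed:

from gaphor.tools.gaphorconvert import main

# Render every diagram whose full name matches "class" to SVG files under ./out.
main(["-f", "svg", "-d", "out", "-r", "class", "my_model.gaphor"])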
1ca0182af54f9900fd8dd3f4d8ff457375adde5e | 1,140 | py | Python | test/test_ID.py | a-buntjer/tsib | 9d6ddcdca55c9b8afb5324c0da8d0910cb1a326e | ["MIT"] | 14 | 2019-12-16T16:54:43.000Z | 2021-11-08T11:46:51.000Z | test/test_ID.py | a-buntjer/tsib | 9d6ddcdca55c9b8afb5324c0da8d0910cb1a326e | ["MIT"] | null | null | null | test/test_ID.py | a-buntjer/tsib | 9d6ddcdca55c9b8afb5324c0da8d0910cb1a326e | ["MIT"] | 7 | 2020-05-27T19:49:58.000Z | 2022-02-02T12:45:33.000Z |
# -*- coding: utf-8 -*-
"""
Created on Fri Apr 08 11:33:01 2016
@author: Leander Kotzur
"""
import tsib
def test_get_ID():
# parameterize a building
bdgcfg = tsib.BuildingConfiguration(
{
"refurbishment": False,
"nightReduction": False,
"occControl": False,
"capControl": True,
"n_persons": 2,
"roofOrientation": 0.0,
"n_apartments": 1,
"latitude": 49.,
"longitude": 12.,
}
)
bdgObj = tsib.Building(configurator=bdgcfg)
print('ID is : ' + str(bdgObj.ID))
return
def test_set_ID():
# parameterize a building
bdgcfg = tsib.BuildingConfiguration(
{
"buildingYear": 1980,
"n_persons": 2,
"roofOrientation": 0.0,
"n_apartments": 2,
"a_ref": 300.,
"surrounding": "Detached",
"latitude": 52.,
"longitude": 13.,
}
)
bdgObj = tsib.Building(configurator=bdgcfg)
bdgObj.ID = 'custom'
if not bdgObj.ID == 'custom':
raise ValueError()
return
| 20.727273 | 47 | 0.509649 | 106 | 1,140 | 5.396226 | 0.59434 | 0.041958 | 0.052448 | 0.08042 | 0.444056 | 0.318182 | 0.318182 | 0.129371 | 0 | 0 | 0 | 0.049451 | 0.361404 | 1,140 | 54 | 48 | 21.111111 | 0.736264 | 0.114912 | 0 | 0.277778 | 0 | 0 | 0.209209 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.055556 | false | 0 | 0.027778 | 0 | 0.138889 | 0.027778 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
1ca13538bfcd1f5b4948e14f8020f58d87fb25a8 | 25,791 | py | Python | dislib/model_selection/_search.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | ["Apache-2.0"] | 36 | 2018-10-22T19:21:14.000Z | 2022-03-22T12:10:01.000Z | dislib/model_selection/_search.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | ["Apache-2.0"] | 329 | 2018-11-22T18:04:57.000Z | 2022-03-18T01:26:55.000Z | dislib/model_selection/_search.py | alexbarcelo/dislib | 989f81f235ae30b17410a8d805df258c7d931b38 | ["Apache-2.0"] | 21 | 2019-01-10T11:46:39.000Z | 2022-03-17T12:59:45.000Z |
from abc import ABC, abstractmethod
from collections import defaultdict
from collections.abc import Sequence
from functools import partial
from itertools import product
import numpy as np
from pycompss.api.api import compss_wait_on
from scipy.stats import rankdata
from sklearn import clone
from sklearn.model_selection import ParameterGrid, ParameterSampler
from numpy.ma import MaskedArray
from dislib.model_selection._split import infer_cv
from dislib.model_selection._validation import check_scorer, fit_and_score, \
validate_score, aggregate_score_dicts
class BaseSearchCV(ABC):
"""Abstract base class for hyper parameter search with cross-validation."""
def __init__(self, estimator, scoring=None, cv=None, refit=True):
self.estimator = estimator
self.scoring = scoring
self.cv = cv
self.refit = refit
@abstractmethod
def _run_search(self, evaluate_candidates):
"""Abstract method to perform the search. The parameter
`evaluate_candidates` is a function that evaluates a ParameterGrid at a
time """
pass
def fit(self, x, y=None, **fit_params):
"""Run fit with all sets of parameters.
Parameters
----------
x : ds-array
Training data samples.
y : ds-array, optional (default = None)
Training data labels or values.
**fit_params : dict of string -> object
Parameters passed to the ``fit`` method of the estimator
"""
estimator = self.estimator
cv = infer_cv(self.cv)
scorers, refit_metric = self._infer_scorers()
base_estimator = clone(estimator)
n_splits = None
all_candidate_params = []
all_out = []
def evaluate_candidates(candidate_params):
"""Evaluate some parameters"""
candidate_params = list(candidate_params)
out = [fit_and_score(clone(base_estimator), train, validation,
scorer=scorers, parameters=parameters,
fit_params=fit_params)
for parameters, (train, validation)
in product(candidate_params, cv.split(x, y))]
nonlocal n_splits
n_splits = cv.get_n_splits()
all_candidate_params.extend(candidate_params)
all_out.extend(out)
self._run_search(evaluate_candidates)
for params_result in all_out:
scores = params_result[0]
for scorer_name, score in scores.items():
score = compss_wait_on(score)
scores[scorer_name] = validate_score(score, scorer_name)
results = self._format_results(all_candidate_params, scorers,
n_splits, all_out)
# For multi-metric evaluation, store the best_index_, best_params_ and
# best_score_ iff refit is one of the scorer names
# In single metric evaluation, refit_metric is "score"
if self.refit or not self.multimetric_:
# If callable, refit is expected to return the index of the best
# parameter set.
if callable(self.refit):
self.best_index_ = self.refit(results)
if not isinstance(self.best_index_, (int, np.integer)):
raise TypeError('best_index_ returned is not an integer')
if (self.best_index_ < 0 or
self.best_index_ >= len(results["params"])):
raise IndexError('best_index_ index out of range')
else:
self.best_index_ = results["rank_test_%s"
% refit_metric].argmin()
self.best_score_ = results["mean_test_%s" % refit_metric][
self.best_index_]
self.best_params_ = results["params"][self.best_index_]
if self.refit:
self.best_estimator_ = clone(base_estimator).set_params(
**self.best_params_)
self.best_estimator_.fit(x, y, **fit_params)
# Store the only scorer not as a dict for single metric evaluation
self.scorer_ = scorers if self.multimetric_ else scorers['score']
self.cv_results_ = results
self.n_splits_ = n_splits
return self
@staticmethod
def _format_results(candidate_params, scorers, n_splits, out):
n_candidates = len(candidate_params)
(test_score_dicts,) = zip(*out)
test_scores = aggregate_score_dicts(test_score_dicts)
results = {}
def _store(key_name, array, splits=False, rank=False):
"""A small helper to store the scores/times to the cv_results_"""
array = np.array(array, dtype=np.float64).reshape(n_candidates,
n_splits)
if splits:
for split_i in range(n_splits):
# Uses closure to alter the results
results["split%d_%s"
% (split_i, key_name)] = array[:, split_i]
array_means = np.mean(array, axis=1)
results['mean_%s' % key_name] = array_means
array_stds = np.std(array, axis=1)
results['std_%s' % key_name] = array_stds
if rank:
results["rank_%s" % key_name] = np.asarray(
rankdata(-array_means, method='min'), dtype=np.int32)
# Use one MaskedArray and mask all the places where the param is not
# applicable for that candidate. Use defaultdict as each candidate may
# not contain all the params
param_results = defaultdict(partial(MaskedArray,
np.empty(n_candidates, ),
mask=True,
dtype=object))
for cand_i, params in enumerate(candidate_params):
for name, value in params.items():
# An all masked empty array gets created for the key
# `"param_%s" % name` at the first occurrence of `name`.
# Setting the value at an index also unmasks that index
param_results["param_%s" % name][cand_i] = value
results.update(param_results)
# Store a list of param dicts at the key 'params'
results['params'] = candidate_params
for scorer_name in scorers.keys():
_store('test_%s' % scorer_name, test_scores[scorer_name],
splits=True, rank=True)
return results
def _infer_scorers(self):
estimator = self.estimator
scoring = self.scoring
refit = self.refit
if scoring is None or callable(scoring):
scorers = {"score": check_scorer(estimator, scoring)}
refit_metric = 'score'
self.multimetric_ = False
elif isinstance(scoring, dict):
scorers = {key: check_scorer(estimator, scorer)
for key, scorer in scoring.items()}
if refit is not False and (
not isinstance(refit, str) or
refit not in scorers) and not callable(refit):
raise ValueError("For multi-metric scoring, the parameter "
"refit must be set to a scorer key or a "
"callable to refit an estimator with the "
"best parameter setting on the whole "
"data and make the best_* attributes "
"available for that metric. If this is "
"not needed, refit should be set to "
"False explicitly. %r was passed."
% refit)
refit_metric = refit
self.multimetric_ = True
else:
raise ValueError('scoring is not valid')
return scorers, refit_metric
class GridSearchCV(BaseSearchCV):
"""Exhaustive search over specified parameter values for an estimator.
GridSearchCV implements a "fit" and a "score" method.
The parameters of the estimator used to apply these methods are optimized
by cross-validated grid-search over a parameter grid.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_grid : dict or list of dictionaries
Dictionary with parameters names (string) as keys and lists of
parameter settings to try as values, or a list of such
dictionaries, in which case the grids spanned by each dictionary
in the list are explored. This enables searching over any sequence
of parameter settings.
scoring : callable, dict or None, optional (default=None)
A callable to evaluate the predictions on the test set. It should take
3 parameters, estimator, x and y, and return a score (higher meaning
better). For evaluating multiple metrics, give a dict with names as
keys and callables as values. If None, the estimator's score method is
used.
cv : int or cv generator, optional (default=None)
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `KFold`,
- custom cv generator.
refit : boolean, string, or callable, optional (default=True)
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer. ``best_score_`` is not returned if refit is callable.
See ``scoring`` parameter to know more about multiple metric
evaluation.
Examples
--------
>>> import dislib as ds
>>> from dislib.model_selection import GridSearchCV
>>> from dislib.classification import RandomForestClassifier
>>> import numpy as np
>>> from sklearn import datasets
>>>
>>>
>>> if __name__ == '__main__':
>>> x_np, y_np = datasets.load_iris(return_X_y=True)
>>> x = ds.array(x_np, (30, 4))
>>> y = ds.array(y_np[:, np.newaxis], (30, 1))
>>> param_grid = {'n_estimators': (2, 4), 'max_depth': range(3, 5)}
>>> rf = RandomForestClassifier()
>>> searcher = GridSearchCV(rf, param_grid)
>>> searcher.fit(x, y)
>>> searcher.cv_results_
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
        For instance, the table below:
+------------+------------+-----------------+---+---------+
|param_kernel|param_degree|split0_test_score|...|rank_t...|
+============+============+=================+===+=========+
| 'poly' | 2 | 0.80 |...| 2 |
+------------+------------+-----------------+---+---------+
| 'poly' | 3 | 0.70 |...| 4 |
+------------+------------+-----------------+---+---------+
| 'rbf' | -- | 0.80 |...| 3 |
+------------+------------+-----------------+---+---------+
| 'rbf' | -- | 0.93 |...| 1 |
+------------+------------+-----------------+---+---------+
will be represented by a ``cv_results_`` dict of::
{
'param_kernel': masked_array(data = ['poly', 'poly', 'rbf', 'rbf'],
mask = [False False False False]...),
'param_degree': masked_array(data = [2.0 3.0 -- --],
mask = [False False True True]...),
'split0_test_score' : [0.80, 0.70, 0.80, 0.93],
'split1_test_score' : [0.82, 0.50, 0.68, 0.78],
'split2_test_score' : [0.79, 0.55, 0.71, 0.93],
...
'mean_test_score' : [0.81, 0.60, 0.75, 0.85],
'std_test_score' : [0.01, 0.10, 0.05, 0.08],
'rank_test_score' : [2, 4, 3, 1],
'params' : [{'kernel': 'poly', 'degree': 2}, ...],
}
NOTES:
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above ('split0_test_precision', 'mean_train_precision' etc.).
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is present only if ``refit`` is
specified.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is present only if ``refit`` is
specified.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
"""
def __init__(self, estimator, param_grid, scoring=None, cv=None,
refit=True):
super().__init__(estimator=estimator, scoring=scoring, cv=cv,
refit=refit)
self.param_grid = param_grid
self._check_param_grid(param_grid)
def _run_search(self, evaluate_candidates):
evaluate_candidates(ParameterGrid(self.param_grid))
@staticmethod
def _check_param_grid(param_grid):
if hasattr(param_grid, 'items'):
param_grid = [param_grid]
for p in param_grid:
for name, v in p.items():
if isinstance(v, np.ndarray) and v.ndim > 1:
raise ValueError("Parameter array should be "
"one-dimensional.")
if (isinstance(v, str) or
not isinstance(v, (np.ndarray, Sequence))):
raise ValueError(
"Parameter values for parameter ({0}) need "
"to be a sequence (but not a string) or"
" np.ndarray.".format(name))
if len(v) == 0:
raise ValueError(
"Parameter values for parameter ({0}) need "
"to be a non-empty sequence.".format(name))
class RandomizedSearchCV(BaseSearchCV):
"""Randomized search on hyper parameters.
RandomizedSearchCV implements a "fit" and a "score" method.
The parameters of the estimator used to apply these methods are optimized
by cross-validated search over parameter settings.
In contrast to GridSearchCV, not all parameter values are tried out, but
rather a fixed number of parameter settings is sampled from the specified
distributions. The number of parameter settings that are tried is
given by n_iter.
If all parameters are presented as a list,
sampling without replacement is performed. If at least one parameter
is given as a distribution, sampling with replacement is used.
Parameters
----------
estimator : estimator object.
This is assumed to implement the scikit-learn estimator interface.
Either estimator needs to provide a ``score`` function,
or ``scoring`` must be passed.
param_distributions : dict
Dictionary with parameters names (string) as keys and distributions
or lists of parameters to try. Distributions must provide a ``rvs``
method for sampling (such as those from scipy.stats.distributions).
If a list is given, it is sampled uniformly.
n_iter : int, optional (default=10)
Number of parameter settings that are sampled.
scoring : callable, dict or None, optional (default=None)
        A callable to evaluate the predictions on the test set. It should
        take three parameters (estimator, x, y) and return a score (higher
        meaning better). For evaluating multiple metrics, give a dict with
        names as keys and callables as values. If None, the estimator's
        score method is used.
cv : int or cv generator, optional (default=None)
Determines the cross-validation splitting strategy.
Possible inputs for cv are:
- None, to use the default 5-fold cross validation,
- integer, to specify the number of folds in a `KFold`,
- custom cv generator.
refit : boolean, string, or callable, optional (default=True)
Refit an estimator using the best found parameters on the whole
dataset.
For multiple metric evaluation, this needs to be a string denoting the
scorer that would be used to find the best parameters for refitting
the estimator at the end.
Where there are considerations other than maximum score in
choosing a best estimator, ``refit`` can be set to a function which
returns the selected ``best_index_`` given ``cv_results_``.
The refitted estimator is made available at the ``best_estimator_``
attribute and permits using ``predict`` directly on this
``GridSearchCV`` instance.
Also for multiple metric evaluation, the attributes ``best_index_``,
``best_score_`` and ``best_params_`` will only be available if
``refit`` is set and all of them will be determined w.r.t this specific
scorer. ``best_score_`` is not returned if refit is callable.
See ``scoring`` parameter to know more about multiple metric
evaluation.
random_state : int, RandomState instance or None, optional, default=None
Pseudo random number generator state used for random sampling of params
in param_distributions. This is not passed to each estimator.
If int, random_state is the seed used by the random number generator;
If RandomState instance, random_state is the random number generator;
If None, the random number generator is the RandomState instance used
by `np.random`.
Examples
--------
>>> import dislib as ds
>>> from dislib.model_selection import RandomizedSearchCV
>>> from dislib.classification import CascadeSVM
>>> import numpy as np
>>> import scipy.stats as stats
>>> from sklearn import datasets
>>>
>>>
>>> if __name__ == '__main__':
>>> x_np, y_np = datasets.load_iris(return_X_y=True)
>>> # Pre-shuffling required for CSVM
>>> p = np.random.permutation(len(x_np))
>>> x = ds.array(x_np[p], (30, 4))
>>> y = ds.array((y_np[p] == 0)[:, np.newaxis], (30, 1))
>>> param_distributions = {'c': stats.expon(scale=0.5),
>>> 'gamma': stats.expon(scale=10)}
>>> csvm = CascadeSVM()
>>> searcher = RandomizedSearchCV(csvm, param_distributions, n_iter=10)
>>> searcher.fit(x, y)
>>> searcher.cv_results_
Attributes
----------
cv_results_ : dict of numpy (masked) ndarrays
A dict with keys as column headers and values as columns, that can be
imported into a pandas ``DataFrame``.
        For instance, the table below:
+---------+-------------+-------------------+---+---------------+
| param_c | param_gamma | split0_test_score |...|rank_test_score|
+=========+=============+===================+===+===============+
| 0.193 | 1.883 | 0.82 |...| 3 |
+---------+-------------+-------------------+---+---------------+
| 1.452 | 0.327 | 0.81 |...| 2 |
+---------+-------------+-------------------+---+---------------+
| 0.926 | 3.452 | 0.94 |...| 1 |
+---------+-------------+-------------------+---+---------------+
will be represented by a ``cv_results_`` dict of::
{
        'param_c' : masked_array(data = [0.193, 1.452, 0.926],
                                 mask = False),
        'param_gamma' : masked_array(data = [1.883, 0.327, 3.452], mask = False),
'split0_test_score' : [0.82, 0.81, 0.94],
'split1_test_score' : [0.66, 0.75, 0.79],
'split2_test_score' : [0.82, 0.87, 0.84],
...
'mean_test_score' : [0.76, 0.84, 0.86],
'std_test_score' : [0.01, 0.20, 0.04],
'rank_test_score' : [3, 2, 1],
'params' : [{'c' : 0.193, 'gamma' : 1.883}, ...],
}
NOTE
The key ``'params'`` is used to store a list of parameter
settings dicts for all the parameter candidates.
The ``mean_fit_time``, ``std_fit_time``, ``mean_score_time`` and
``std_score_time`` are all in seconds.
For multi-metric evaluation, the scores for all the scorers are
available in the ``cv_results_`` dict at the keys ending with that
scorer's name (``'_<scorer_name>'``) instead of ``'_score'`` shown
above. ('split0_test_precision', 'mean_train_precision' etc.)
best_estimator_ : estimator or dict
Estimator that was chosen by the search, i.e. estimator
which gave highest score (or smallest loss if specified)
on the left out data. Not available if ``refit=False``.
For multi-metric evaluation, this attribute is present only if
``refit`` is specified.
See ``refit`` parameter for more information on allowed values.
best_score_ : float
Mean cross-validated score of the best_estimator.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_params_ : dict
Parameter setting that gave the best results on the hold out data.
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
best_index_ : int
The index (of the ``cv_results_`` arrays) which corresponds to the best
candidate parameter setting.
The dict at ``search.cv_results_['params'][search.best_index_]`` gives
the parameter setting for the best model, that gives the highest
mean score (``search.best_score_``).
For multi-metric evaluation, this is not available if ``refit`` is
``False``. See ``refit`` parameter for more information.
scorer_ : function or a dict
Scorer function used on the held out data to choose the best
parameters for the model.
For multi-metric evaluation, this attribute holds the validated
``scoring`` dict which maps the scorer key to the scorer callable.
n_splits_ : int
The number of cross-validation splits (folds/iterations).
"""
def __init__(self, estimator, param_distributions, n_iter=10, scoring=None,
cv=None, refit=True, random_state=None):
super().__init__(estimator=estimator, scoring=scoring, cv=cv,
refit=refit)
self.param_distributions = param_distributions
self.n_iter = n_iter
self.random_state = random_state
def _run_search(self, evaluate_candidates):
"""Search n_iter candidates from param_distributions"""
ps = ParameterSampler(self.param_distributions, self.n_iter,
random_state=self.random_state)
evaluate_candidates(ps)
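# A minimal sketch of the callable ``refit`` form documented above: the
# callable receives ``cv_results_`` and returns the selected ``best_index_``.
# The tie-breaking criterion below is an illustrative assumption.
#
#     import numpy as np
#
#     def refit_lowest_std(cv_results):
#         # among candidates within 0.01 of the best mean test score,
#         # pick the one with the smallest std
#         means = np.asarray(cv_results['mean_test_score'])
#         stds = np.asarray(cv_results['std_test_score'])
#         close = means >= means.max() - 0.01
#         return int(np.argmin(np.where(close, stds, np.inf)))
#
#     RandomizedSearchCV(estimator, param_distributions, refit=refit_lowest_std)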
| 44.087179
| 79
| 0.584351
| 3,088
| 25,791
| 4.737047
| 0.145078
| 0.021876
| 0.012442
| 0.019688
| 0.51962
| 0.49166
| 0.470263
| 0.466229
| 0.460213
| 0.460213
| 0
| 0.013298
| 0.306037
| 25,791
| 584
| 80
| 44.162671
| 0.804
| 0.614129
| 0
| 0.098266
| 0
| 0
| 0.080383
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.069364
| false
| 0.011561
| 0.075145
| 0
| 0.179191
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca1a822fc7884017cd3ac1dd4d887f9f5596b45
| 1,407
|
py
|
Python
|
webapp/ui/tests/test_parse_search_results.py
|
robseed/botanist
|
2f4fbab5d26499105a5057c86e0e6e37a2e8a727
|
[
"MIT"
] | null | null | null |
webapp/ui/tests/test_parse_search_results.py
|
robseed/botanist
|
2f4fbab5d26499105a5057c86e0e6e37a2e8a727
|
[
"MIT"
] | null | null | null |
webapp/ui/tests/test_parse_search_results.py
|
robseed/botanist
|
2f4fbab5d26499105a5057c86e0e6e37a2e8a727
|
[
"MIT"
] | null | null | null |
import os
from django.test import TestCase
from mock import patch
from ui.views import parse_search_results
FIXTURES_ROOT = os.path.join(os.path.dirname(__file__), 'fixtures')
FX = lambda *relpath: os.path.join(FIXTURES_ROOT, *relpath)
@patch('ui.views.get_repo_type')
@patch('ui.views.CODE_ROOT', '/opt/botanist/repos')
class ParseSearchResults(TestCase):
def test_duplicate_repositories_in_github_and_bitbucket(self, get_repo_type):
def se(filepath):
if 'bitbucket' in filepath:
return 'hg'
elif 'github' in filepath:
return 'git'
else:
                raise Exception("that's odd")
get_repo_type.side_effect = se
with open(FX('duplicate_repositories_in_github_and_bitbucket.results.txt')) as f:
output = f.read()
results, count = parse_search_results(output, 'AbstractSendTimeJob', True)
self.assertEqual(2, count)
self.assertListEqual(['bitbucket', 'github'], results['sproutjobs'].keys())
self.assertEqual('public abstract class AbstractJob implements Job {', results['sproutjobs']['bitbucket']['files']['src/main/java/com/sproutsocial/AbstractJob.java'][0]['srcline'])
self.assertEqual('public abstract class AbstractJob implements Job {', results['sproutjobs']['github']['files']['src/main/java/com/sproutsocial/AbstractJob.java'][0]['srcline'])
| 43.96875
| 188
| 0.686567
| 169
| 1,407
| 5.544379
| 0.47929
| 0.022412
| 0.035219
| 0.0619
| 0.36286
| 0.36286
| 0.275347
| 0.275347
| 0.275347
| 0.275347
| 0
| 0.002611
| 0.183369
| 1,407
| 32
| 189
| 43.96875
| 0.812881
| 0
| 0
| 0
| 0
| 0
| 0.320313
| 0.12358
| 0
| 0
| 0
| 0
| 0.16
| 1
| 0.08
| false
| 0
| 0.16
| 0
| 0.36
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca27636ecfdf6c794d529bed48bdedc1987bdf3
| 4,598
|
py
|
Python
|
tsdl/tools/extensions.py
|
burgerdev/hostload
|
93142628bb32923c5e6f3a8b791488d72a5c9077
|
[
"MIT"
] | null | null | null |
tsdl/tools/extensions.py
|
burgerdev/hostload
|
93142628bb32923c5e6f3a8b791488d72a5c9077
|
[
"MIT"
] | null | null | null |
tsdl/tools/extensions.py
|
burgerdev/hostload
|
93142628bb32923c5e6f3a8b791488d72a5c9077
|
[
"MIT"
] | null | null | null |
"""
Extensions for pylearn2 training algorithms. Those are either reimplemented to
suit the execution model of this package, or new ones for recording metrics.
"""
import os
import cPickle as pkl
import numpy as np
from pylearn2.train_extensions import TrainExtension
from .abcs import Buildable
class BuildableTrainExtension(TrainExtension, Buildable):
"""
makes a pylearn2 TrainExtension buildable
"""
@classmethod
def build(cls, config, parent=None, graph=None, workingdir=None):
"""
build an instance of this class with given configuration dict
"""
config_copy = config.copy()
if "wd" not in config_copy:
config_copy["wd"] = workingdir
obj = super(BuildableTrainExtension, cls).build(config_copy)
return obj
def __init__(self, **kwargs):
if "workingdir" in kwargs:
self._wd = kwargs["workingdir"]
super(BuildableTrainExtension, self).__init__()
@classmethod
def get_default_config(cls):
"""
override to provide your own default configuration
"""
conf = super(BuildableTrainExtension, cls).get_default_config()
conf["wd"] = None
return conf
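# A minimal sketch of the ``build`` entry point defined above: callers pass a
# plain config dict and a working directory, which is injected under the "wd"
# key before Buildable's construction machinery runs. Keys and paths shown
# here are illustrative assumptions.
#
#     config = {"channel": "valid_objective"}
#     monitor = ProgressMonitor.build(config, workingdir="/tmp/experiment")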
class PersistentTrainExtension(BuildableTrainExtension):
"""
abstract extension that can store its results (on disk, probably)
"""
def store(self):
"""
store the findings of this extension
"""
pass
class WeightKeeper(PersistentTrainExtension):
"""
keeps track of the model's weights at each monitor step
This model stores weights *per monitor step* - the list grows large pretty
quickly.
"""
_weights = []
def on_monitor(self, model, dataset, algorithm):
"""
save the model's weights
"""
self._weights.append(model.get_param_values())
def setup(self, model, dataset, algorithm):
"""
initialize the weight list
"""
self._weights = []
def get_weights(self):
"""
get weights history
"""
return self._weights
def store(self):
path = os.path.join(self._wd, "weightkeeper.pkl")
with open(path, "w") as file_:
pkl.dump(self._weights, file_)
class ProgressMonitor(PersistentTrainExtension):
"""
Makes the monitor channel's history accessible to us.
"""
_progress = np.NaN
@classmethod
def get_default_config(cls):
config = super(ProgressMonitor, cls).get_default_config()
config["channel"] = "valid_objective"
return config
def on_monitor(self, model, dataset, algorithm):
"""
save the desired channel
"""
monitor = model.monitor
channels = monitor.channels
channel = channels[self._channel]
self._progress = channel.val_record
def get_progress(self):
"""
get the value's history
"""
return self._progress
def store(self):
filename = "progress_{}.pkl".format(self._channel)
path = os.path.join(self._wd, filename)
with open(path, "w") as file_:
pkl.dump(self._progress, file_)
class MonitorBasedSaveBest(BuildableTrainExtension):
"""
similar to pylearn2's MonitorBasedSaveBest, but avoids memory hogging
(see https://github.com/lisa-lab/pylearn2/issues/1567)
"""
best_cost = np.inf
best_params = None
@classmethod
def get_default_config(cls):
config = super(MonitorBasedSaveBest, cls).get_default_config()
config["channel"] = "valid_objective"
return config
def setup(self, model, dataset, algorithm):
self.best_cost = np.inf
self.best_params = model.get_param_values()
def on_monitor(self, model, dataset, algorithm):
"""
        Checks whether the model performs better than it did previously and,
        if so, saves its parameters.
Parameters
----------
model : pylearn2.models.model.Model
model.monitor must contain a channel with name given by
self.channel_name
dataset : pylearn2.datasets.dataset.Dataset
Not used
algorithm : TrainingAlgorithm
Not used
"""
monitor = model.monitor
channels = monitor.channels
channel = channels[self._channel]
val_record = channel.val_record
new_cost = val_record[-1]
if new_cost < self.best_cost:
self.best_cost = new_cost
self.best_params = model.get_param_values()
| 27.047059
| 78
| 0.626577
| 506
| 4,598
| 5.549407
| 0.332016
| 0.021368
| 0.034188
| 0.044516
| 0.266738
| 0.259972
| 0.211538
| 0.174858
| 0.143519
| 0.09188
| 0
| 0.003633
| 0.281644
| 4,598
| 169
| 79
| 27.207101
| 0.846503
| 0.257503
| 0
| 0.381579
| 0
| 0
| 0.033971
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.197368
| false
| 0.013158
| 0.065789
| 0
| 0.460526
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca288015e226ac02375d4c474cc5edb714fc3e0
| 6,189
|
py
|
Python
|
nogi/utils/post_extractor.py
|
Cooomma/nogi-backup-blog
|
19868511d2b0f0c4d06bf4a88981bbfadc4121e4
|
[
"MIT"
] | null | null | null |
nogi/utils/post_extractor.py
|
Cooomma/nogi-backup-blog
|
19868511d2b0f0c4d06bf4a88981bbfadc4121e4
|
[
"MIT"
] | 164
|
2020-04-02T18:25:59.000Z
|
2022-02-17T17:09:32.000Z
|
nogi/utils/post_extractor.py
|
Cooomma/nogi-backup-blog
|
19868511d2b0f0c4d06bf4a88981bbfadc4121e4
|
[
"MIT"
] | null | null | null |
import asyncio
from io import BytesIO
import logging
import os
import random
import time
from typing import List
from urllib.parse import urlparse
import aiohttp
from aiohttp import ClientSession, TCPConnector
import requests
from requests import Response
from tqdm import tqdm
from nogi import REQUEST_HEADERS
from nogi.db.nogi_blog_content import NogiBlogContent
from nogi.db.nogi_blog_summary import NogiBlogSummary
from nogi.storages.gcs import GCS
from nogi.utils.parsers import PostParser, generate_post_key
logger = logging.getLogger()
HEADERS = {
'User-Agent': 'Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_3) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/80.0.3987.163 Safari/537.36'
}
class PostExecutor:
def __init__(self, member: dict, summary_db: NogiBlogSummary, content_db: NogiBlogContent, gcs_client: GCS, bucket: str, concurrent: int = 4):
self._waiting_limit = concurrent
self.member = member
# DB
self.summary_db = summary_db
self.content_db = content_db
# GCS Storage
self.bucket = bucket
self.storage = gcs_client
self.storage_blog_post_prefix = os.path.join(member['roma_name'], 'post')
self.storage_blog_image_prefix = os.path.join(member['roma_name'], 'img')
# Tasks
self.todos = self.summary_db.get_missing_blog_url(member['id'])
@staticmethod
def db_transform(post_url: str, obj: dict, **kwargs) -> dict:
return dict(
member_id=kwargs.get('member_id'),
blog_key=generate_post_key(post_url),
url=post_url,
title=obj['title'],
content=obj['content'],
image_gcs_paths=kwargs.get('image_gcs_paths'),
post_gcs_path=kwargs.get('post_gcs_path'),
blog_created_at=int(obj['created_at'].timestamp()))
@staticmethod
def _get_hd_image(url: str) -> BytesIO:
first_layer_response: Response = requests.get(url, headers=HEADERS)
logger.debug(first_layer_response.cookies)
resp = requests.get(
url=url.replace('http://dcimg.awalker.jp/v/', 'http://dcimg.awalker.jp/i/'),
cookies=first_layer_response.cookies)
logger.debug(resp.status_code)
logger.debug(resp.headers)
        return BytesIO(resp.content) if resp.status_code == 200 else BytesIO(b'')
def backup_images(self, image_urls: List[dict]) -> List[str]:
downloaded_image_urls = list()
for url in image_urls:
image_gcs_path = os.path.join(self.storage_blog_image_prefix,
'/'.join(urlparse(url['image_url']).path.split('/')[-5:]))
if url['high_resolution_url'] != url['image_url']:
hd_image = self._get_hd_image(url['high_resolution_url'])
if hd_image:
self.storage.upload_stream(
bucket=self.bucket,
blob_name=image_gcs_path,
content=hd_image.read(),
content_type='image/jpeg'
)
else:
image = requests.get(url=url['image_url'])
if image.status_code != 200:
logger.warning('Image Request Fail: %s', url)
continue
self.storage.upload_stream(
bucket=self.bucket,
blob_name=image_gcs_path,
content=image.content,
content_type='image/jpeg'
)
downloaded_image_urls.append(url)
return downloaded_image_urls
async def backup_content(self, session: ClientSession, post_url: str) -> str:
post_gcs_path = os.path.join(self.storage_blog_post_prefix, '/'.join(urlparse(post_url).path.split('/')[-3:]))
try:
async with session.get(url=post_url, headers=REQUEST_HEADERS) as response:
self.storage.upload_stream(
bucket=self.bucket, blob_name=post_gcs_path,
content=await response.read(), content_type='text/html')
return post_gcs_path
except aiohttp.client_exceptions.InvalidURL:
print('Invalid URL: %s' % post_url)
except aiohttp.client_exceptions.ClientConnectorError:
print('Client Connector Error: %s' % post_url)
@staticmethod
    def crawl_post(url: str) -> dict:
return PostParser(requests.get(url, headers=REQUEST_HEADERS).text).to_dict()
async def _run(self, url: str):
try:
async with aiohttp.ClientSession(connector=TCPConnector(verify_ssl=False)) as session:
post_gcs_path = await self.backup_content(session, url)
post = self.crawl_post(url)
images_gcs_paths = self.backup_images(post['image_urls'])
result = self.db_transform(
post_url=url, obj=post, member_id=self.member['id'], image_gcs_paths=images_gcs_paths, post_gcs_path=post_gcs_path)
self.content_db.upsert_crawled_post(result)
self.summary_db.update_crawled_result(result)
except aiohttp.client_exceptions.InvalidURL:
print('Invalid URL: %s' % url)
except aiohttp.client_exceptions.ClientConnectorError:
print('Client Connector Error: %s' % url)
except Exception:
import traceback
print('Error URL: %s' % url)
print(traceback.format_exc())
def run(self):
loop = asyncio.get_event_loop()
if self.todos:
tasks = []
for url in tqdm(self.todos, desc='Current Member: {}'.format(self.member['kanji_name']), ncols=120):
tasks.append(asyncio.ensure_future(self._run(url)))
if len(tasks) > self._waiting_limit:
loop.run_until_complete(asyncio.gather(*tasks))
tasks = []
if tasks:
loop.run_until_complete(asyncio.gather(*tasks))
        sleep_second = random.randint(1, 15)
        print('Sleep for %s seconds' % sleep_second)
        time.sleep(sleep_second)
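# A minimal, generic sketch of the chunked scheduling used in run() above:
# futures are accumulated and awaited in batches so that only a bounded number
# of requests is in flight per asyncio.gather call. fetch() is a placeholder.
#
#     import asyncio
#
#     async def fetch(url):
#         await asyncio.sleep(0)  # stands in for an HTTP request
#         return url
#
#     def run_in_batches(urls, concurrent=4):
#         loop = asyncio.get_event_loop()
#         tasks = []
#         for url in urls:
#             tasks.append(asyncio.ensure_future(fetch(url)))
#             if len(tasks) > concurrent:
#                 loop.run_until_complete(asyncio.gather(*tasks))
#                 tasks = []
#         if tasks:
#             loop.run_until_complete(asyncio.gather(*tasks))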
| 41.26
| 146
| 0.618517
| 742
| 6,189
| 4.927224
| 0.256065
| 0.021061
| 0.02407
| 0.031729
| 0.213074
| 0.176422
| 0.176422
| 0.139223
| 0.121718
| 0.078775
| 0
| 0.009421
| 0.27969
| 6,189
| 149
| 147
| 41.536913
| 0.810677
| 0.003232
| 0
| 0.171875
| 0
| 0.007813
| 0.088078
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046875
| false
| 0
| 0.148438
| 0.015625
| 0.242188
| 0.054688
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca3e775b2d474b15aa148de47bee4762a8d884c
| 1,429
|
py
|
Python
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/gateway/dhcp.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 1
|
2020-10-05T08:53:57.000Z
|
2020-10-05T08:53:57.000Z
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/gateway/dhcp.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | 17
|
2019-11-14T08:41:37.000Z
|
2020-05-27T09:23:51.000Z
|
sandbox/lib/jumpscale/JumpscaleLibsExtra/sal_zos/gateway/dhcp.py
|
threefoldtech/threebot_prebuilt
|
1f0e1c65c14cef079cd80f73927d7c8318755c48
|
[
"Apache-2.0"
] | null | null | null |
from Jumpscale import j
import signal
from .. import templates
DNSMASQ = "/bin/dnsmasq --conf-file=/etc/dnsmasq.conf -d"
class DHCP:
def __init__(self, container, domain, networks):
self.container = container
self.domain = domain
self.networks = networks
def apply_config(self):
dnsmasq = templates.render("dnsmasq.conf", domain=self.domain, networks=self.networks)
self.container.upload_content("/etc/dnsmasq.conf", dnsmasq)
dhcp = templates.render("dhcp", networks=self.networks)
self.container.upload_content("/etc/dhcp", dhcp)
self.stop()
self.container.client.system(DNSMASQ, id="dhcp.{}".format(self.container.name))
        # wait until dnsmasq is up (is_running checks for a listener on UDP port 53)
if not j.tools.timer.execute_until(self.is_running, 10):
raise j.exceptions.Base("Failed to run dnsmasq")
def is_running(self):
for port in self.container.client.info.port():
if port["network"] == "udp" and port["port"] == 53:
return True
def stop(self):
for process in self.container.client.process.list():
if "dnsmasq" in process["cmdline"]:
self.container.client.process.kill(process["pid"], signal.SIGKILL)
if not j.tools.timer.execute_until(lambda: not self.is_running(), 10):
raise j.exceptions.Base("Failed to stop DNSMASQ")
| 36.641026
| 94
| 0.638908
| 179
| 1,429
| 5.03352
| 0.368715
| 0.129856
| 0.084351
| 0.053274
| 0.266371
| 0.266371
| 0.266371
| 0.204218
| 0.09545
| 0.09545
| 0
| 0.0055
| 0.236529
| 1,429
| 38
| 95
| 37.605263
| 0.820348
| 0.026592
| 0
| 0
| 0
| 0
| 0.12095
| 0.020878
| 0
| 0
| 0
| 0
| 0
| 1
| 0.142857
| false
| 0
| 0.107143
| 0
| 0.321429
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca4f326c31dc7913ff0486df63cbab12df18fbe
| 888
|
py
|
Python
|
LEDdebug/examples/led-demo.py
|
UrsaLeo/LEDdebug
|
228af02468e4f3b617a50e6195931a623a4ad848
|
[
"Apache-2.0"
] | null | null | null |
LEDdebug/examples/led-demo.py
|
UrsaLeo/LEDdebug
|
228af02468e4f3b617a50e6195931a623a4ad848
|
[
"Apache-2.0"
] | null | null | null |
LEDdebug/examples/led-demo.py
|
UrsaLeo/LEDdebug
|
228af02468e4f3b617a50e6195931a623a4ad848
|
[
"Apache-2.0"
] | null | null | null |
#!/usr/bin/env python3
"""UrsaLeo LEDdebug board LED demo
Turn the LEDs on one at a time, then all off"""
import time
ON = 1
OFF = 0
DELAY = 0.5 # seconds
try:
from LEDdebug import LEDdebug
except ImportError:
try:
import sys
import os
sys.path.append("..")
sys.path.append(os.path.join(os.path.dirname(__file__), '..',
'LEDdebug'))
from LEDdebug import LEDdebug
except ImportError:
print('LEDdebug import failed')
exit(0)
def main():
# Create device
device = LEDdebug()
# Turn on each LED in succession
for led in range(1, 7):
device.set_led(led, ON)
print(f'Turning LED{led} on')
time.sleep(DELAY)
print('Turning all LEDs off')
    # Turn all the LEDs off before leaving!
device.set_leds(OFF)
if __name__ == '__main__':
main()
| 20.651163
| 69
| 0.595721
| 121
| 888
| 4.256198
| 0.504132
| 0.081553
| 0.069903
| 0.100971
| 0.16699
| 0.16699
| 0
| 0
| 0
| 0
| 0
| 0.01278
| 0.295045
| 888
| 42
| 70
| 21.142857
| 0.809904
| 0.21509
| 0
| 0.222222
| 0
| 0
| 0.118421
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.037037
| false
| 0
| 0.296296
| 0
| 0.333333
| 0.111111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca57a9994de049d5edebf8ee7ac8544c88a916a
| 6,352
|
py
|
Python
|
modules/server.py
|
Nitin-Mane/SARS-CoV-2-xDNN-Classifier
|
abb6a82b8ee89a041b0e26e14ec1e416c4561266
|
[
"MIT"
] | null | null | null |
modules/server.py
|
Nitin-Mane/SARS-CoV-2-xDNN-Classifier
|
abb6a82b8ee89a041b0e26e14ec1e416c4561266
|
[
"MIT"
] | null | null | null |
modules/server.py
|
Nitin-Mane/SARS-CoV-2-xDNN-Classifier
|
abb6a82b8ee89a041b0e26e14ec1e416c4561266
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
###################################################################################
##
## Project: COVID -19 xDNN Classifier 2020
## Version: 1.0.0
## Module: Server
## Description: The COVID-19 xDNN Classifier 2020 server.
## License: MIT
## Copyright: 2021, Asociacion De Investigacion En Inteligencia Artificial Para
## La Leucemia Peter Moss.
## Author: Nitin Mane
## Maintainer: Nitin Mane
##
## Modified: 2021-2-19
##
###################################################################################
##
## Permission is hereby granted, free of charge, to any person obtaining a copy
## of this software and associated documentation files(the "Software"), to deal
## in the Software without restriction, including without limitation the rights
## to use, copy, modify, merge, publish, distribute, sublicense, and / or sell
## copies of the Software, and to permit persons to whom the Software is
## furnished to do so, subject to the following conditions:
##
## The above copyright notice and this permission notice shall be included in all
## copies or substantial portions of the Software.
##
## THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
## IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
## FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
## AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
## LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
## OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
## SOFTWARE.
##
###################################################################################
import cv2
import json
import jsonpickle
import os
import requests
import time
import numpy as np
import tensorflow as tf
from modules.AbstractServer import AbstractServer
from flask import Flask, request, Response
from io import BytesIO
from PIL import Image
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg16 import preprocess_input
class server(AbstractServer):
""" COVID 19 xDNN Classifier 2020 Server.
This object represents the COVID 19 xDNN Classifier 2020 Server.
"""
def predict(self, req):
""" Classifies an image sent via HTTP. """
if len(req.files) != 0:
img = Image.open(req.files['file'].stream).convert('RGB')
else:
img = Image.open(BytesIO(req.data)).convert('RGB')
img = img.resize((224, 224), Image.ANTIALIAS)
np_img = tf.keras.preprocessing.image.img_to_array(img)
np_img.transpose(1, 2, 0)
#img = keras.preprocessing.image.img_to_array(img)
#img = np.array([img]) # Convert single image to a batch.
img = np.expand_dims(np_img, axis=0)
img = preprocess_input(img)
#prediction = self.predict(img)
#img = img.resize((224, 224), Image.ANTIALIAS)
#img = image.img_to_array(img)
#img = np.expand_dims(img, axis=0)
#img = preprocess_input(img)
#img = img.reshape((1,224,224,3))
return self.model.predict(img)
def request(self, img_path):
""" Sends image to the inference API endpoint. """
self.helpers.logger.info("Sending request for: " + img_path)
_, img_encoded = cv2.imencode('.png', cv2.imread(img_path))
response = requests.post(
self.addr, data=img_encoded.tostring(), headers=self.headers)
response = json.loads(response.text)
return response
def start(self):
""" Starts the server. """
app = Flask(self.helpers.credentials["iotJumpWay"]["name"])
@app.route('/Inference', methods=['POST'])
def Inference():
""" Responds to HTTP POST requests. """
self.mqtt.publish("States", {
"Type": "Prediction",
"Name": self.helpers.credentials["iotJumpWay"]["name"],
"State": "Processing",
"Message": "Processing data"
})
message = ""
prediction = self.predict(request)
print(prediction)
if prediction == 1:
message = "Acute Lymphoblastic Leukemia detected!"
diagnosis = "Positive"
elif prediction == 0:
message = "Acute Lymphoblastic Leukemia not detected!"
diagnosis = "Negative"
self.mqtt.publish("States", {
"Type": "Prediction",
"Name": self.helpers.credentials["iotJumpWay"]["name"],
"State": diagnosis,
"Message": message
})
resp = jsonpickle.encode({
'Response': 'OK',
'Message': message,
'Diagnosis': diagnosis
})
return Response(response=resp, status=200, mimetype="application/json")
app.run(host=self.helpers.credentials["server"]["ip"],
port=self.helpers.credentials["server"]["port"])
def test(self):
""" Tests the trained model via HTTP. """
totaltime = 0
files = 0
tp = 0
fp = 0
tn = 0
fn = 0
self.addr = "http://" + self.helpers.credentials["server"]["ip"] + \
':'+str(self.helpers.credentials["server"]["port"]) + '/Inference'
self.headers = {'content-type': 'image/jpeg'}
for testFile in os.listdir(self.model.testing_dir):
if os.path.splitext(testFile)[1] in self.model.valid:
start = time.time()
prediction = self.request(self.model.testing_dir + "/" + testFile)
print(prediction)
end = time.time()
benchmark = end - start
totaltime += benchmark
msg = ""
status = ""
outcome = ""
if prediction["Diagnosis"] == "Positive" and "Non-Covid" in testFile:
fp += 1
status = "incorrectly"
outcome = "(False Positive)"
elif prediction["Diagnosis"] == "Negative" and "Non-Covid" in testFile:
tn += 1
status = "correctly"
outcome = "(True Negative)"
elif prediction["Diagnosis"] == "Positive" and "Covid" in testFile:
tp += 1
status = "correctly"
outcome = "(True Positive)"
elif prediction["Diagnosis"] == "Negative" and "Covid" in testFile:
fn += 1
status = "incorrectly"
outcome = "(False Negative)"
files += 1
self.helpers.logger.info("COVID-19 xDNN Classifier " + status +
" detected " + outcome + " in " + str(benchmark) + " seconds.")
self.helpers.logger.info("Images Classified: " + str(files))
self.helpers.logger.info("True Positives: " + str(tp))
self.helpers.logger.info("False Positives: " + str(fp))
self.helpers.logger.info("True Negatives: " + str(tn))
self.helpers.logger.info("False Negatives: " + str(fn))
self.helpers.logger.info("Total Time Taken: " + str(totaltime))
| 31.60199
| 83
| 0.65318
| 790
| 6,352
| 5.224051
| 0.349367
| 0.039981
| 0.032954
| 0.040708
| 0.258057
| 0.136903
| 0.109038
| 0.038769
| 0.038769
| 0.038769
| 0
| 0.016852
| 0.177897
| 6,352
| 200
| 84
| 31.76
| 0.773458
| 0.306675
| 0
| 0.133929
| 0
| 0
| 0.186047
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.044643
| false
| 0
| 0.125
| 0
| 0.205357
| 0.017857
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca66df25ee895df823541d354d97c61178071b8
| 4,107
|
py
|
Python
|
jsparse/meijiexia/meijiexia.py
|
PyDee/Spiders
|
6fc0a414060032b5ba4332302285e3fcc9a6113e
|
[
"Apache-2.0"
] | 6
|
2020-06-02T16:22:58.000Z
|
2021-09-18T03:20:16.000Z
|
jsparse/meijiexia/meijiexia.py
|
PyDee/Spiders
|
6fc0a414060032b5ba4332302285e3fcc9a6113e
|
[
"Apache-2.0"
] | 4
|
2021-03-31T19:54:37.000Z
|
2022-03-12T00:33:41.000Z
|
jsparse/meijiexia/meijiexia.py
|
PyDee/Spiders
|
6fc0a414060032b5ba4332302285e3fcc9a6113e
|
[
"Apache-2.0"
] | 5
|
2020-06-02T16:23:00.000Z
|
2021-09-03T02:16:15.000Z
|
import time
import random
import requests
from lxml import etree
import pymongo
from .url_file import mjx_weibo, mjx_dy, mjx_ks, mjx_xhs
class DBMongo:
def __init__(self):
self.my_client = pymongo.MongoClient("mongodb://localhost:27017/")
        # connect to the database
self.db = self.my_client["mcn"]
def insert_2_xt(self, success_item, collection_name):
try:
collection = self.db[collection_name]
            collection.insert_one(success_item)  # write the record into MongoDB
print('success!!!')
        except Exception:
            print('Failed to write data to MongoDB')
class MJX:
def __init__(self):
self.db = DBMongo()
self.headers = {
'Accept': 'text/html,application/xhtml+xml,application/xml;q=0.9,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9',
'Accept-Encoding': 'gzip, deflate, br',
'Accept-Language': 'zh-CN,zh;q=0.9',
'Cache-Control': 'max-age=0',
'Connection': 'keep-alive',
'Cookie': 'accessId=defba4d0-9ab2-11e8-b156-7b8f577687be; qimo_seokeywords_defba4d0-9ab2-11e8-b156-7b8f577687be=; href=https%3A%2F%2Fwww.meijiexia.com%2Fmedias-118.html; ci_session=ccb97bb846cd5e0ce6538c2cc8f11ca7abc296ee; Hm_lvt_c96abf7da979015953d1d22702db6de8=1591685037,1592274339,1592278224; qimo_seosource_defba4d0-9ab2-11e8-b156-7b8f577687be=%E7%AB%99%E5%86%85; Hm_lpvt_c96abf7da979015953d1d22702db6de8=1592278238; pageViewNum=34',
'Host': 'www.meijiexia.com',
'Referer': 'https://www.meijiexia.com/medias-118.html',
'Sec-Fetch-Dest': 'document',
'Sec-Fetch-Mode': 'navigate',
'Sec-Fetch-Site': 'same-origin',
'Sec-Fetch-User': '?1',
'Upgrade-Insecure-Requests': '1',
'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/83.0.4103.97 Safari/537.36',
}
def get_response(self, url, collection):
proxy = self.get_proxy()
proxies = {
"http": "http://{}:{}".format(proxy.get('IP'), proxy.get('Port')),
"https": "http://{}:{}".format(proxy.get('IP'), proxy.get('Port')),
}
ret = requests.get(url, headers=self.headers, proxies=proxies)
response = etree.HTML(ret.text)
tr_list = response.xpath('//tbody[@id="qu-con"]/tr')
for tr in tr_list:
item = dict()
user_id = tr.xpath('./td[@class="td1"]/input/@value')[0]
nick_name = tr.xpath('./td[@class="td2"]/div[@class="itemMsg"]//a/text()')[0]
place = tr.xpath('./td[@class="td3"]/text()')[0]
fans_num = tr.xpath('./td[@class="td6"]/p[@class="num"]/text()')[0]
price_list = tr.xpath('./td[@class="td4"]/p')
for price_element in price_list:
classify = price_element.xpath(
'./span[@class="money"]/preceding-sibling::span[1]/text()')[0]
price = price_element.xpath('./span[@class="money"]/text()')[0]
item[classify.strip()] = price.strip()
item['fans_num'] = fans_num.strip()
item['user_id'] = user_id.strip()
item['nick_name'] = nick_name.strip()
item['place'] = place.strip()
item['plant'] = collection.split('mjx_')[1]
self.db.insert_2_xt(item, collection)
@staticmethod
def get_proxy():
proxy = [{"IP": "180.123.199.105", "Port": 21730}]
return random.choice(proxy)
def run(self):
urls = ''
for item in {'mjx_weibo': mjx_weibo, 'mjx_dy': mjx_dy, 'mjx_ks': mjx_ks, 'mjx_xhs': mjx_xhs}.keys():
if item == 'mjx_weibo':
urls = mjx_weibo
if item == 'mjx_dy':
urls = mjx_dy
if item == 'mjx_ks':
urls = mjx_ks
if item == 'mjx_xhs':
urls = mjx_xhs
for url in urls:
time.sleep(3)
print(url)
self.get_response(url, item)
if __name__ == '__main__':
mjx = MJX()
mjx.run()
| 41.07
| 450
| 0.567324
| 504
| 4,107
| 4.468254
| 0.412698
| 0.017762
| 0.019982
| 0.031083
| 0.119449
| 0.05595
| 0.028419
| 0.028419
| 0
| 0
| 0
| 0.083306
| 0.266374
| 4,107
| 99
| 451
| 41.484848
| 0.664122
| 0.004139
| 0
| 0.023529
| 0
| 0.035294
| 0.356496
| 0.204306
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070588
| false
| 0
| 0.070588
| 0
| 0.176471
| 0.035294
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca67740a4b7ba54382fd28803af944938695c13
| 2,756
|
py
|
Python
|
MLModules/ABD/B_PCAQDA.py
|
jamster112233/ICS_IDS
|
dac6abc3c8d6e840a21adedcb9e8dcfaa304b499
|
[
"BSD-3-Clause"
] | null | null | null |
MLModules/ABD/B_PCAQDA.py
|
jamster112233/ICS_IDS
|
dac6abc3c8d6e840a21adedcb9e8dcfaa304b499
|
[
"BSD-3-Clause"
] | null | null | null |
MLModules/ABD/B_PCAQDA.py
|
jamster112233/ICS_IDS
|
dac6abc3c8d6e840a21adedcb9e8dcfaa304b499
|
[
"BSD-3-Clause"
] | null | null | null |
import numpy as np
from keras.utils import np_utils
import pandas as pd
import sys
from sklearn.preprocessing import LabelEncoder
from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis as QDA
from sklearn.decomposition import PCA
import os
from sklearn.externals import joblib
from sklearn.metrics import f1_score
trainName = sys.argv[1]
testName = sys.argv[2]
# Load the training and test data sets from CSV
dftrain = pd.read_csv(filepath_or_buffer=trainName, header=None, sep=',')
dftest = pd.read_csv(filepath_or_buffer=testName, header=None, sep=',')
cols = ['Proto']
for i in range(1,dftrain.shape[1]):
cols.append('Byte' + str(i))
dftrain.columns=cols
dftrain.dropna(how="all", inplace=True)
dftrain.tail()
dftest.columns=cols
dftest.dropna(how="all", inplace=True)
dftest.tail()
Xtrain = dftrain.ix[:,1:dftrain.shape[1]].values
Ytrain = dftrain.ix[:,0].values
Xtest = dftest.ix[:,1:dftrain.shape[1]].values
Ytest = dftest.ix[:,0].values
encoder = LabelEncoder()
encoder.fit(Ytrain)
encYtrain = encoder.transform(Ytrain)
encoder = LabelEncoder()
encoder.fit(Ytest)
encYtest = encoder.transform(Ytest)
directory = "models/ABD/QDA/"
if not os.path.exists(directory):
os.makedirs(directory)
logfile = directory + "log-0.csv"
with open(logfile, "w") as file:
file.write("PCAlevel,acc,val_acc,f1\n")
fscores = []
accs = []
for q in xrange(1,151):
pca = PCA(n_components=q)
Xtrain_pca = pca.fit_transform(Xtrain)
Xtest_pca = pca.transform(Xtest)
clf = QDA(priors=None, reg_param=0.0)
clf.fit(Xtrain_pca, encYtrain)
trainPred = clf.predict(Xtrain_pca)
testPred = clf.predict(Xtest_pca)
score = 0.0
for i in xrange(0, len(trainPred)):
if trainPred[i] == encYtrain[i]:
score += 1
trainAcc = float(score) / len(trainPred)
score = 0.0
for i in xrange(0, len(testPred)):
if testPred[i] == encYtest[i]:
score += 1
testAcc = float(score) / len(testPred)
f1 = f1_score(encYtest, testPred)
accs.append(testAcc)
fscores.append(f1)
print("Train " + str(trainAcc))
print("Test " + str(testAcc))
print("F1 " + str(f1))
with open(logfile, "a") as file:
file.write(str(q) + "," + str(trainAcc) + "," + str(testAcc) + "," + str(f1) + "\n")
if q == 2:
joblib.dump(clf, 'QDA2.pkl')
print("Val Acc max" + str(max(accs)))
print("FMAX " + str(max(fscores)))
# print(str(q) + ":" + str((float(score)/len(classesPred)*100)) + "%")
#
# preds = classesPred
# if(len(preds) > 0):
# preds = np.array(list(encoder.inverse_transform(preds)))
#
# df = pd.crosstab(dftest['Proto'], preds, rownames=['Actual Protocol'], colnames=['Predicted Protocol'])
# df.to_csv('ConfusionMatrixLDA.csv')
| 26.757282
| 105
| 0.675617
| 390
| 2,756
| 4.720513
| 0.348718
| 0.029875
| 0.009777
| 0.022814
| 0.101032
| 0.076046
| 0.024986
| 0.024986
| 0.024986
| 0
| 0
| 0.017001
| 0.167634
| 2,756
| 102
| 106
| 27.019608
| 0.785527
| 0.129898
| 0
| 0.084507
| 0
| 0
| 0.046502
| 0.010473
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.140845
| 0
| 0.140845
| 0.070423
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca68195c840c66d0de8f1f855f4ded2b7c95a94
| 2,850
|
py
|
Python
|
GR2-Save-Loader.py
|
203Null/Gravity-Rush-2-Save-Loader
|
40cf8a1748449c0e019a2e57ac2b8eccd50d8917
|
[
"MIT"
] | 2
|
2022-02-06T10:40:22.000Z
|
2022-02-06T10:45:51.000Z
|
GR2-Save-Loader.py
|
203Null/Gravity-Rush-2-Save-Loader
|
40cf8a1748449c0e019a2e57ac2b8eccd50d8917
|
[
"MIT"
] | null | null | null |
GR2-Save-Loader.py
|
203Null/Gravity-Rush-2-Save-Loader
|
40cf8a1748449c0e019a2e57ac2b8eccd50d8917
|
[
"MIT"
] | null | null | null |
import struct
import json
from collections import OrderedDict
file_path = "data0002.bin"
show_offset = True
show_hash = False
loaded_data = 0
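# Record layout as handled by unpack() below; each record header is 16 bytes:
#   +0x00  uint32  absolute offset of the NUL-terminated variable name (UTF-8)
#   +0x04  uint32  type code: 0x08 = list, low nibble 0x0b = string (the upper
#                  bits hold the offset of the string data), 0x09 = float,
#                  0x0C = boolean
#   +0x08  uint32  value payload (list length, string length, float or bool)
#   +0x0c  uint32  name hash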
def unpack(upstream_data_set):
global loaded_data
loaded_data = loaded_data + 1
currentCursor = file.tell()
print(hex(file.tell()))
file.seek(int.from_bytes(file.read(4), byteorder='little'), 0)
variable_name = file.read(200).split(b'\x00')[0].decode('UTF8') #Use UTF8 because some strings are in Japanese
print(hex(file.tell()))
print(variable_name)
file.seek(currentCursor + 4, 0)
type = int.from_bytes(file.read(4), byteorder='little')
data_location = file.tell()
if type == 0x08: # List
list_length = int.from_bytes(file.read(4), byteorder='little')
name_hash = file.read(4).hex()
data_location = file.tell()
value = {}
for i in range(0, list_length):
unpack(value)
value = OrderedDict(sorted(value.items()))
else:
if type % 0x10 == 0x0b: # String
string_length = int.from_bytes(file.read(4), byteorder='little') - 1
data_location = type // 0x10
file.seek(data_location, 0)
try:
value = file.read(string_length).decode('UTF8')
except:
value = "ERROR EXTRACTING STRING"
file.seek(currentCursor + 0x0c, 0)
elif type == 0x09: # Float
value = struct.unpack('f', file.read(4))[0]
elif type == 0x0C: # Boolean
value = int.from_bytes(file.read(1), byteorder='little') > 0
file.seek(3, 1)
else:
value = file.read(4).hex()
print("Warring!!! Unknow type!!! %s at %s with value %s" % (hex(type), hex(file.tell()-8), value))
print()
name_hash = file.read(4).hex()
    if variable_name is None:
        variable_name = hex(data_location)
    else:
        if show_hash:
            variable_name = "%s %s" % (variable_name, name_hash)
        if show_offset:
            variable_name = "%s %s" % (variable_name, hex(data_location))
print(value)
upstream_data_set[variable_name] = value
file = open(file_path, mode='rb')
data = file.read()
data_set = OrderedDict()
if len(data) > 0x40 and data[0:4] == b'ggdL':
file.seek(0x0c, 0)
numOfData = int.from_bytes(file.read(4), byteorder='little')
while loaded_data < numOfData:
unpack(data_set)
print()
print(data_set)
print()
print("Complete with %i/%i data" % (loaded_data, numOfData))
with open(r"%s.txt" % (file_path.split('.')[0]), 'w', encoding='utf-8') as json_file:
json.dump(data_set, json_file, indent=4, ensure_ascii=False)
else:
print("File Incorrect")
| 36.075949
| 115
| 0.587719
| 371
| 2,850
| 4.369272
| 0.283019
| 0.064158
| 0.049969
| 0.059223
| 0.228254
| 0.190006
| 0.16533
| 0.16533
| 0.05182
| 0
| 0
| 0.032023
| 0.276842
| 2,850
| 79
| 116
| 36.075949
| 0.754488
| 0.024912
| 0
| 0.180556
| 0
| 0
| 0.073786
| 0
| 0
| 0
| 0.013348
| 0
| 0
| 1
| 0.013889
| false
| 0
| 0.041667
| 0
| 0.055556
| 0.152778
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ca91ede49b4b76cb020ec83f9b1603af4b3c7c0
| 1,406
|
py
|
Python
|
pages/tests/test_views.py
|
andywar65/starter-fullstack
|
683d6282eb02a9b967d15cd254976e67549672e9
|
[
"BSD-2-Clause"
] | null | null | null |
pages/tests/test_views.py
|
andywar65/starter-fullstack
|
683d6282eb02a9b967d15cd254976e67549672e9
|
[
"BSD-2-Clause"
] | null | null | null |
pages/tests/test_views.py
|
andywar65/starter-fullstack
|
683d6282eb02a9b967d15cd254976e67549672e9
|
[
"BSD-2-Clause"
] | null | null | null |
from django.test import TestCase, override_settings
from django.urls import reverse
from pages.models import Article, HomePage
@override_settings(USE_I18N=False)
class PageViewTest(TestCase):
@classmethod
def setUpTestData(cls):
print("\nTest page views")
# Set up non-modified objects used by all test methods
HomePage.objects.create(title="Title")
Article.objects.create(title="First", date="2022-04-09")
def test_homepage_view(self):
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 200)
print("\n-Test Homepage status 200")
self.assertTemplateUsed(response, "pages/home.html")
print("\n-Test Homepage template")
def test_no_homepage(self):
HomePage.objects.all().delete()
response = self.client.get(reverse("home"))
self.assertEqual(response.status_code, 404)
print("\n-Test Homepage status 404")
def test_article_template(self):
response = self.client.get(
reverse(
"pages:article_detail",
kwargs={"year": 2022, "month": 4, "day": 9, "slug": "first"},
)
)
self.assertEqual(response.status_code, 200)
print("\n-Test Article status 200")
self.assertTemplateUsed(response, "pages/article_detail.html")
print("\n-Test Article template")
| 35.15
| 77
| 0.647226
| 165
| 1,406
| 5.430303
| 0.406061
| 0.033482
| 0.055804
| 0.070313
| 0.385045
| 0.34933
| 0.210938
| 0.210938
| 0.210938
| 0.145089
| 0
| 0.031481
| 0.231863
| 1,406
| 39
| 78
| 36.051282
| 0.798148
| 0.036984
| 0
| 0.125
| 0
| 0
| 0.188609
| 0.018491
| 0
| 0
| 0
| 0
| 0.15625
| 1
| 0.125
| false
| 0
| 0.09375
| 0
| 0.25
| 0.1875
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1caa879917346512e7a2dc23a9df954e997c28d0
| 26,030
|
py
|
Python
|
poco/services/batch/server.py
|
sunliwen/poco
|
a4b8c4ede63711eea42a444fb9d922c350855364
|
[
"MIT"
] | null | null | null |
poco/services/batch/server.py
|
sunliwen/poco
|
a4b8c4ede63711eea42a444fb9d922c350855364
|
[
"MIT"
] | 7
|
2019-03-22T06:26:39.000Z
|
2021-06-10T19:36:06.000Z
|
poco/services/batch/server.py
|
sunliwen/poco
|
a4b8c4ede63711eea42a444fb9d922c350855364
|
[
"MIT"
] | 1
|
2017-10-25T03:43:51.000Z
|
2017-10-25T03:43:51.000Z
|
#!/usr/bin/env python
import logging
import sys
sys.path.append("../../")
sys.path.append("pylib")
import time
import datetime
import pymongo
import uuid
import os
import subprocess
import os.path
import settings
from common.utils import getSiteDBCollection
sys.path.insert(0, "../../")
class LoggingManager:
def __init__(self):
self.h_console = None
self.h_file = None
logging.getLogger('').setLevel(logging.INFO)
def reconfig_h_console(self, site_id, calculation_id):
if self.h_console is not None:
self.h_console.flush()
logging.getLogger('').removeHandler(self.h_console)
self.h_console = logging.StreamHandler()
self.h_console.setLevel(logging.INFO)
formatter = logging.Formatter("%(asctime)s|" + calculation_id +
"|%(levelname)s|%(name)s|%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
self.h_console.setFormatter(formatter)
logging.getLogger('').addHandler(self.h_console)
def getLogFilePath(self, site_id, calculation_id):
site_log_dir = os.path.join(settings.log_dir, site_id)
if not os.path.isdir(site_log_dir):
os.makedirs(site_log_dir)
formatted_date_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
log_file_name = "%s_%s.log" % (formatted_date_time, calculation_id)
log_file_path = os.path.join(site_log_dir, log_file_name)
return log_file_path
def reconfig_h_file(self, site_id, calculation_id):
if self.h_file is not None:
self.h_file.flush()
self.h_file.close()
logging.getLogger('').removeHandler(self.h_file)
self.h_file = logging.FileHandler(
self.getLogFilePath(site_id, calculation_id))
self.h_file.setLevel(logging.INFO)
formatter = logging.Formatter(
"%(asctime)s|%(levelname)s|%(name)s|%(message)s", datefmt="%Y-%m-%d %H:%M:%S")
self.h_file.setFormatter(formatter)
logging.getLogger('').addHandler(self.h_file)
def reconfig(self, site_id, calculation_id):
self.reconfig_h_console(site_id, calculation_id)
self.reconfig_h_file(site_id, calculation_id)
logging_manager = LoggingManager()
def getLogger():
return logging.getLogger("Batch Server")
def getBaseWorkDir(site_id, calculation_id):
site_work_dir = os.path.join(settings.work_dir, site_id)
formatted_date_time = datetime.datetime.now().strftime("%Y%m%d_%H%M%S")
calculation_work_dir_name = "%s_%s" % (formatted_date_time, calculation_id)
calculation_work_dir_path = os.path.join(
site_work_dir, calculation_work_dir_name)
os.makedirs(calculation_work_dir_path)
return calculation_work_dir_path
def getConnection():
if(settings.replica_set):
return pymongo.MongoReplicaSetClient(settings.mongodb_host, replicaSet=settings.replica_set)
else:
return pymongo.Connection(settings.mongodb_host)
connection = getConnection()
class ShellExecutionError(Exception):
pass
class BaseFlow:
def __init__(self, name):
self.name = name
self.jobs = []
self.dependencies = []
def dependOn(self, flow):
self.parent = flow
flow.dependencies.append(self)
def getWorkDir(self):
work_dir = os.path.join(BASE_WORK_DIR, self.name)
if not os.path.exists(work_dir):
os.makedirs(work_dir)
return work_dir
def getWorkFile(self, file_name):
return os.path.join(self.getWorkDir(), file_name)
def __call__(self):
global CALC_SUCC
writeFlowBegin(SITE_ID, self.__class__.__name__)
if self.__class__.__name__ in DISABLEDFLOWS:
getLogger().info("Flow Skipped: %s" % self.__class__.__name__)
writeFlowEnd(SITE_ID, self.__class__.__name__,
is_successful=True, is_skipped=True)
return True
else:
for job_callable in self.jobs:
if not self._execJob(job_callable):
writeFlowEnd(
SITE_ID, self.__class__.__name__, is_successful=False, is_skipped=False,
err_msg="SOME_JOBS_FAILED")
CALC_SUCC = False
return False
writeFlowEnd(SITE_ID, self.__class__.__name__,
is_successful=True, is_skipped=False)
# execute downlines
for dependency in self.dependencies:
dependency()
return True
def _exec_shell(self, command):
getLogger().info("Execute %s" % command)
#ret_code = os.system(command)
# if ret_code != 0:
# raise ShellExecutionError("Shell Execution Failed, ret_code=%s" % ret_code)
ret_code = subprocess.call(command, shell=True)
if ret_code != 0:
getLogger().error("Failed %s" % sys.stderr)
raise ShellExecutionError(
"Shell Execution Failed, ret_code=%s" % ret_code)
def _execJob(self, callable):
try:
getLogger().info("Start Job: %s.%s" %
(self.__class__.__name__, callable.__name__))
callable()
getLogger().info("Job Succ: %s.%s" %
(self.__class__.__name__, callable.__name__))
return True
except:
getLogger(
).critical("An Exception happened while running Job: %s" % callable,
exc_info=True)
# TODO: send message (email, sms)
# TODO: record exception info.
writeFailedJob(SITE_ID, self.__class__.__name__, callable.__name__)
return False
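# A minimal sketch of how the concrete flows below are wired together: a flow
# declares its upstream with dependOn(), and calling the root flow runs its
# jobs and then every dependent flow. The wiring shown is illustrative.
#
#     preprocessing = PreprocessingFlow()
#     statistics = HiveBasedStatisticsFlow()
#     statistics.dependOn(preprocessing)
#     preprocessing()  # runs preprocessing jobs, then the statistics flow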
class PreprocessingFlow(BaseFlow):
def __init__(self):
BaseFlow.__init__(self, "preprocessing")
self.jobs += [self.do_backfill,
self.do_reverse_reversed_backfilled_raw_logs]
def do_backfill(self):
from preprocessing import backfiller
last_ts = None # FIXME: load correct last_ts from somewhere
bf = backfiller.BackFiller(connection, SITE_ID, last_ts,
self.getWorkFile("reversed_backfilled_raw_logs"))
last_ts = bf.start() # FIXME: save last_ts somewhere
def do_reverse_reversed_backfilled_raw_logs(self):
input_path = self.getWorkFile("reversed_backfilled_raw_logs")
output_path = self.getWorkFile("backfilled_raw_logs")
self._exec_shell("%s <%s >%s" %
(settings.tac_command, input_path, output_path))
class HiveBasedStatisticsFlow(BaseFlow):
def __init__(self):
BaseFlow.__init__(self, "hive-based-statistics")
self.jobs += [self.do_hive_based_calculations]
# Begin Hive Based Calculations
def do_hive_based_calculations(self):
from statistics.hive_based_calculations import hive_based_calculations
backfilled_raw_logs_path = self.parent.getWorkFile(
"backfilled_raw_logs")
hive_based_calculations(
connection, SITE_ID, self.getWorkDir(), backfilled_raw_logs_path)
#
# End Hive Based Calculations
class BaseSimilarityCalcFlow(BaseFlow):
def __init__(self, type):
BaseFlow.__init__(self, "similarities-calc:%s" % type)
self.type = type
        self.jobs += self.getExtractUserItemMatrixJobs() + [
            self.do_sort_user_item_matrix,
self.do_calc_item_prefer_count,
self.do_calc_user_count,
self.do_emit_cooccurances,
self.do_sort_cooccurances,
self.do_count_cooccurances,
self.do_format_cooccurances_counts,
self.do_calc_item_similarities,
self.do_make_item_similarities_bi_directional,
self.do_sort_item_similarities_bi_directional,
self.do_extract_top_n,
self.do_upload_item_similarities_result]
def do_sort_user_item_matrix(self):
input_path = self.getWorkFile("user_item_matrix")
output_path = self.getWorkFile("user_item_matrix_sorted")
self._exec_shell("sort -T /cube/services/batch/temp %s > %s" %
(input_path, output_path))
def do_calc_item_prefer_count(self):
if SITE["algorithm_type"] == "llh":
input_path = self.getWorkFile("user_item_matrix_sorted")
output_path = self.getWorkFile("item_prefer_count")
self._exec_shell(
"cut -d , -f 2 %s | sort -T /cube/services/batch/temp | uniq -c > %s" %
(input_path, output_path))
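    # Illustrative example (hypothetical data): given user_item_matrix_sorted
    # lines such as "user1,itemA" and "user2,itemA", the cut|sort|uniq -c
    # pipeline above yields item_prefer_count lines of the form "      2 itemA",
    # i.e. how many (user, item) rows reference each item.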
def do_calc_user_count(self):
if SITE["algorithm_type"] == "llh":
input_path = self.getWorkFile("user_item_matrix_sorted")
output_path = self.getWorkFile("user_count")
self._exec_shell("cut -d , -f 1 %s | uniq | wc -l > %s" %
(input_path, output_path))
def do_emit_cooccurances(self):
from similarity_calculation.amazon.emit_cooccurances import emit_cooccurances
input_path = self.getWorkFile("user_item_matrix_sorted")
output_path = self.getWorkFile("cooccurances_not_sorted")
emit_cooccurances(input_path, output_path)
def do_sort_cooccurances(self):
input_path = self.getWorkFile("cooccurances_not_sorted")
output_path = self.getWorkFile("cooccurances_sorted")
self._exec_shell("sort -T /cube/services/batch/temp %s > %s" %
(input_path, output_path))
def do_count_cooccurances(self):
input_path = self.getWorkFile("cooccurances_sorted")
output_path = self.getWorkFile("cooccurances_counts_raw")
self._exec_shell("uniq -c %s > %s" % (input_path, output_path))
def do_format_cooccurances_counts(self):
from similarity_calculation.amazon.format_item_similarities import format_item_similarities
input_path = self.getWorkFile("cooccurances_counts_raw")
output_path = self.getWorkFile("cooccurances_counts_formatted")
format_item_similarities(input_path, output_path)
def do_calc_item_similarities(self):
if SITE["algorithm_type"] == "llh":
from similarity_calculation.loglikelihood.calc_loglikelihood import calc_loglikelihood
cooccurances_counts_path = self.getWorkFile(
"cooccurances_counts_formatted")
user_counts_path = self.getWorkFile("user_count")
item_prefer_count_path = self.getWorkFile("item_prefer_count")
output_path = self.getWorkFile("item_similarities_formatted")
calc_loglikelihood(cooccurances_counts_path,
user_counts_path, item_prefer_count_path, output_path)
else:
input_path = self.getWorkFile("cooccurances_counts_formatted")
output_path = self.getWorkFile("item_similarities_formatted")
self._exec_shell("mv %s %s" % (input_path, output_path))
def do_make_item_similarities_bi_directional(self):
from similarity_calculation.make_similarities_bidirectional import make_similarities_bidirectional
input_path = self.getWorkFile("item_similarities_formatted")
output_path = self.getWorkFile("item_similarities_bi_directional")
make_similarities_bidirectional(input_path, output_path)
def do_sort_item_similarities_bi_directional(self):
input_path = self.getWorkFile("item_similarities_bi_directional")
output_path = self.getWorkFile(
"item_similarities_bi_directional_sorted")
self._exec_shell("sort -T /cube/services/batch/temp %s > %s" %
(input_path, output_path))
def do_extract_top_n(self):
from similarity_calculation.extract_top_n import extract_top_n
input_path = self.getWorkFile(
"item_similarities_bi_directional_sorted")
output_path = self.getWorkFile("item_similarities_top_n")
n = 20
extract_top_n(input_path, output_path, n)
def do_upload_item_similarities_result(self):
from common.utils import UploadItemSimilarities
input_path = self.getWorkFile("item_similarities_top_n")
uis = UploadItemSimilarities(connection, SITE_ID, self.type)
uis(input_path)
class VSimiliarityCalcFlow(BaseSimilarityCalcFlow):
def __init__(self):
BaseSimilarityCalcFlow.__init__(self, "V")
def getExtractUserItemMatrixJobs(self):
return [self.do_extract_user_item_matrix,
self.do_de_duplicate_user_item_matrix]
def do_extract_user_item_matrix(self):
from preprocessing.extract_user_item_matrix import v_extract_user_item_matrix
input_path = self.parent.getWorkFile("backfilled_raw_logs")
output_path = self.getWorkFile("user_item_matrix_maybe_dup")
v_extract_user_item_matrix(input_path, output_path)
def do_de_duplicate_user_item_matrix(self):
input_path = self.getWorkFile("user_item_matrix_maybe_dup")
output_path = self.getWorkFile("user_item_matrix")
self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
(input_path, output_path))
class PLOSimilarityCalcFlow(BaseSimilarityCalcFlow):
def __init__(self):
BaseSimilarityCalcFlow.__init__(self, "PLO")
def getExtractUserItemMatrixJobs(self):
return [self.do_extract_user_item_matrix,
self.do_de_duplicate_user_item_matrix]
def do_extract_user_item_matrix(self):
from preprocessing.extract_user_item_matrix import plo_extract_user_item_matrix
input_path = self.parent.getWorkFile("backfilled_raw_logs")
output_path = self.getWorkFile("user_item_matrix_maybe_dup")
plo_extract_user_item_matrix(input_path, output_path)
def do_de_duplicate_user_item_matrix(self):
input_path = self.getWorkFile("user_item_matrix_maybe_dup")
output_path = self.getWorkFile("user_item_matrix")
self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
(input_path, output_path))
class BuyTogetherSimilarityFlow(BaseSimilarityCalcFlow):
def __init__(self):
BaseSimilarityCalcFlow.__init__(self, "BuyTogether")
def getExtractUserItemMatrixJobs(self):
return [self.do_extract_user_item_matrix,
self.do_de_duplicate_user_item_matrix]
def do_extract_user_item_matrix(self):
from preprocessing.extract_user_item_matrix import buytogether_extract_user_item_matrix
input_path = self.parent.getWorkFile("backfilled_raw_logs")
output_path = self.getWorkFile("user_item_matrix_maybe_dup")
buytogether_extract_user_item_matrix(input_path, output_path)
def do_de_duplicate_user_item_matrix(self):
input_path = self.getWorkFile("user_item_matrix_maybe_dup")
output_path = self.getWorkFile("user_item_matrix")
self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
(input_path, output_path))
class ViewedUltimatelyBuyFlow(BaseFlow):
def __init__(self):
BaseFlow.__init__(self, "ViewedUltimatelyBuy")
self.jobs += [self.do_extract_user_view_buy_logs,
self.do_sort_user_view_buy_logs,
self.do_pair_view_buy,
self.count_pairs,
self.do_extract_user_item_matrix,
self.do_de_duplicate_user_item_matrix,
self.count_item_view,
self.upload_viewed_ultimately_buy]
def do_extract_user_view_buy_logs(self):
from viewed_ultimately_buy.extract_user_view_buy_logs import extract_user_view_buy_logs
input_path = self.parent.getWorkFile("backfilled_raw_logs")
output_path = self.getWorkFile("user_view_buy_logs")
extract_user_view_buy_logs(input_path, output_path)
def do_sort_user_view_buy_logs(self):
input_path = self.getWorkFile("user_view_buy_logs")
output_path = self.getWorkFile("user_view_buy_logs_sorted")
self._exec_shell("sort -T /cube/services/batch/temp <%s >%s" %
(input_path, output_path))
def do_pair_view_buy(self):
from viewed_ultimately_buy.pair_view_buy import pair_view_buy
input_path = self.getWorkFile("user_view_buy_logs_sorted")
output_path = self.getWorkFile("view_buy_pairs")
pair_view_buy(input_path, output_path)
def count_pairs(self):
input_path = self.getWorkFile("view_buy_pairs")
output_path = self.getWorkFile("view_buy_pairs_counted")
self._exec_shell("sort -T /cube/services/batch/temp <%s | uniq -c >%s" %
(input_path, output_path))
def do_extract_user_item_matrix(self):
from preprocessing.extract_user_item_matrix import v_extract_user_item_matrix
input_path = self.parent.getWorkFile("backfilled_raw_logs")
output_path = self.getWorkFile("user_item_matrix_maybe_dup")
v_extract_user_item_matrix(input_path, output_path)
def do_de_duplicate_user_item_matrix(self):
input_path = self.getWorkFile("user_item_matrix_maybe_dup")
output_path = self.getWorkFile("user_item_matrix")
self._exec_shell("sort -T /cube/services/batch/temp < %s | uniq > %s" %
(input_path, output_path))
def count_item_view(self):
# FIXME a hack
input_path = self.getWorkFile("user_item_matrix")
output_path = self.getWorkFile("item_view_times")
self._exec_shell(
"cut -d , -f 2 <%s | sort -T /cube/services/batch/temp | uniq -c >%s" %
(input_path, output_path))
def upload_viewed_ultimately_buy(self):
from viewed_ultimately_buy.upload_viewed_ultimately_buy import upload_viewed_ultimately_buy
item_view_times_path = self.getWorkFile("item_view_times")
view_buy_pairs_counted_path = self.getWorkFile(
"view_buy_pairs_counted")
upload_viewed_ultimately_buy(
connection, SITE_ID, item_view_times_path, view_buy_pairs_counted_path)
class EDMRelatedPreprocessingFlow(BaseFlow):
def __init__(self):
BaseFlow.__init__(self, "ViewedUltimatelyBuy")
self.jobs += [self.do_update_user_orders_collection,
self.do_generate_edm_emailing_list]
def do_update_user_orders_collection(self):
from edm_calculations import doUpdateUserOrdersCollection
doUpdateUserOrdersCollection(connection, SITE_ID)
def do_generate_edm_emailing_list(self):
from edm_calculations import generateEdmEmailingList
generateEdmEmailingList(connection, SITE_ID)
class BeginFlow(BaseFlow):
def __init__(self):
BaseFlow.__init__(self, "Root")
self.jobs += [self.begin]
def begin(self):
pass
# TODO: removed items' similarities should also be removed.
begin_flow = BeginFlow()
preprocessing_flow = PreprocessingFlow()
preprocessing_flow.dependOn(begin_flow)
hive_based_statistics_flow = HiveBasedStatisticsFlow()
hive_based_statistics_flow.dependOn(preprocessing_flow)
v_similarity_calc_flow = VSimiliarityCalcFlow()
v_similarity_calc_flow.dependOn(preprocessing_flow)
plo_similarity_calc_flow = PLOSimilarityCalcFlow()
plo_similarity_calc_flow.dependOn(preprocessing_flow)
buy_together_similarity_flow = BuyTogetherSimilarityFlow()
buy_together_similarity_flow.dependOn(preprocessing_flow)
viewed_ultimately_buy_flow = ViewedUltimatelyBuyFlow()
viewed_ultimately_buy_flow.dependOn(preprocessing_flow)
#edm_related_preprocessing_flow = EDMRelatedPreprocessingFlow()
# edm_related_preprocessing_flow.dependOn(preprocessing_flow)
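# Illustrative sketch (hypothetical flow name): a new flow is wired into the
# calculation graph by instantiating it and declaring its parent with
# dependOn(); it then runs automatically after the parent flow succeeds.
#
#     my_new_flow = MyNewFlow()            # a BaseFlow subclass
#     my_new_flow.dependOn(preprocessing_flow)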
def createCalculationRecord(site_id):
calculation_id = str(uuid.uuid4())
record = {
"calculation_id": calculation_id, "begin_datetime": datetime.datetime.now(),
"flows": {}}
calculation_records = getSiteDBCollection(
connection, site_id, "calculation_records")
calculation_records.save(record)
return calculation_id
def getCalculationRecord(site_id, calculation_id):
calculation_records = getSiteDBCollection(
connection, site_id, "calculation_records")
return calculation_records.find_one({"calculation_id": calculation_id})
def updateCalculationRecord(site_id, record):
calculation_records = getSiteDBCollection(
connection, site_id, "calculation_records")
calculation_records.save(record)
def writeFailedJob(site_id, flow_name, failed_job_name):
record = getCalculationRecord(SITE_ID, CALCULATION_ID)
flow_record = record["flows"][flow_name]
flow_record["failed_job_name"] = failed_job_name
updateCalculationRecord(SITE_ID, record)
def writeFlowBegin(site_id, flow_name):
record = getCalculationRecord(SITE_ID, CALCULATION_ID)
logging.info("FlowBegin: %s" % (flow_name, ))
record["flows"][flow_name] = {"begin_datetime": datetime.datetime.now()}
updateCalculationRecord(SITE_ID, record)
def writeFlowEnd(site_id, flow_name, is_successful, is_skipped, err_msg=None):
record = getCalculationRecord(SITE_ID, CALCULATION_ID)
logging.info("FlowEnd: %s" % (flow_name, ))
flow_record = record["flows"][flow_name]
flow_record["end_datetime"] = datetime.datetime.now()
flow_record["is_successful"] = is_successful
flow_record["is_skipped"] = is_skipped
if not is_successful:
flow_record["err_msg"] = err_msg
updateCalculationRecord(SITE_ID, record)
def writeCalculationEnd(site_id, is_successful, err_msg=None):
record = getCalculationRecord(SITE_ID, CALCULATION_ID)
record["end_datetime"] = datetime.datetime.now()
record["is_successful"] = is_successful
if not is_successful:
record["err_msg"] = err_msg
updateCalculationRecord(SITE_ID, record)
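# For reference, a calculation_records document produced by the helpers above
# ends up shaped roughly like this (illustrative values):
#
#     {"calculation_id": "<uuid4>",
#      "begin_datetime": ..., "end_datetime": ...,
#      "is_successful": True,
#      "flows": {"PreprocessingFlow": {"begin_datetime": ...,
#                                      "end_datetime": ...,
#                                      "is_successful": True,
#                                      "is_skipped": False}}}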
def getManualCalculationSites():
result = []
for site in loadSites(connection):
manual_calculation_list = connection[
"tjb-db"]["manual_calculation_list"]
record_in_db = manual_calculation_list.find_one(
{"site_id": site["site_id"]})
if record_in_db is not None:
result.append(site)
return result
def updateSiteLastUpdateTs(site_id):
sites = connection["tjb-db"]["sites"]
sites.update({"site_id": site_id},
{"$set": {"last_update_ts": time.time()}})
def is_time_okay_for_automatic_calculation():
now = datetime.datetime.now()
return now.hour >= 0 and now.hour < 6
def loadSites(connection, site_ids=None):
c_sites = connection["tjb-db"]["sites"]
if site_ids:
return [site for site in c_sites.find({'available': 'on'}) if site["site_id"] in site_ids]
else:
return [site for site in c_sites.find({'available': 'on'})]
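# Illustrative shape of a "sites" document, inferred from the fields this
# module accesses (values are hypothetical):
#
#     {"site_id": "example_site", "available": "on",
#      "calc_interval": 86400, "last_update_ts": 1370000000.0,
#      "algorithm_type": "llh", "disabledFlows": []}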
def workOnSite(site, is_manual_calculation=False):
calculation_result = None
# Pop a job
manual_calculation_list = connection["tjb-db"]["manual_calculation_list"]
record_in_db = manual_calculation_list.find_one(
{"site_id": site["site_id"]})
if record_in_db is not None:
manual_calculation_list.remove(record_in_db)
# Proceed the job
now = time.time()
is_time_interval_okay_for_auto = (site.get("last_update_ts", None) is None
or now - site.get("last_update_ts") > site["calc_interval"])
# print site["site_id"], is_time_interval_okay_for_auto,
# is_time_okay_for_automatic_calculation()
    is_automatic_calculation_okay = (is_time_okay_for_automatic_calculation()
                                     and is_time_interval_okay_for_auto)
if is_manual_calculation or is_automatic_calculation_okay:
global SITE
global SITE_ID
global DISABLEDFLOWS
global CALCULATION_ID
global CALC_SUCC
global BASE_WORK_DIR
SITE = site
SITE_ID = site["site_id"]
DISABLEDFLOWS = site.get("disabledFlows", [])
CALC_SUCC = True
CALCULATION_ID = createCalculationRecord(SITE_ID)
logging_manager.reconfig(SITE_ID, CALCULATION_ID)
BASE_WORK_DIR = getBaseWorkDir(SITE_ID, CALCULATION_ID)
try:
try:
getLogger().info("BEGIN CALCULATION ON:%s, CALCULATION_ID:%s" %
(SITE_ID, CALCULATION_ID))
# Begin workflow to do calculations
begin_flow()
writeCalculationEnd(
SITE_ID, CALC_SUCC, err_msg="SOME_FLOWS_FAILED")
if CALC_SUCC:
calculation_result = "SUCC"
else:
calculation_result = "FAIL"
except:
getLogger().critical("Unexpected Exception:", exc_info=True)
writeCalculationEnd(SITE_ID, False, "UNEXPECTED_EXCEPTION")
calculation_result = "FAIL"
finally:
            getLogger().info(
                "END CALCULATION ON:%s, RESULT:%s, CALCULATION_ID:%s" %
                (SITE_ID, calculation_result, CALCULATION_ID))
# FIXME: save last_update_ts
updateSiteLastUpdateTs(site["site_id"])
return calculation_result
def workOnSiteWithRetries(site, is_manual_calculation=False, max_attempts=2):
current_attempts = 0
while current_attempts < max_attempts:
calculation_result = workOnSite(site, is_manual_calculation)
if calculation_result != "FAIL":
break
current_attempts += 1
if __name__ == "__main__":
os.environ["PATH"] = "%s:%s" % (getattr(settings, "extra_shell_path", ""), os.environ["PATH"])
while True:
#site_ids = ["test_with_gdian_data"]
        for site in loadSites(connection):
            # Drain manually requested calculations first; use a separate loop
            # variable so the outer `site` is not shadowed.
            for manual_site in getManualCalculationSites():
                workOnSiteWithRetries(manual_site, is_manual_calculation=True)
            workOnSiteWithRetries(site)
sleep_seconds = 1
time.sleep(sleep_seconds)
| 39.142857
| 106
| 0.678871
| 3,047
| 26,030
| 5.383984
| 0.103709
| 0.021579
| 0.060226
| 0.039622
| 0.598049
| 0.490216
| 0.415239
| 0.317159
| 0.266809
| 0.255349
| 0
| 0.000749
| 0.230311
| 26,030
| 664
| 107
| 39.201807
| 0.818068
| 0.029466
| 0
| 0.274162
| 0
| 0.003945
| 0.12397
| 0.052338
| 0
| 0
| 0
| 0.001506
| 0
| 1
| 0.142012
| false
| 0.003945
| 0.055227
| 0.009862
| 0.2643
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1caaa79685649df41865169e49ad903c14174dcc
| 4,488
|
py
|
Python
|
tests/integration/basket/model_tests.py
|
makielab/django-oscar
|
0a325cd0f04a4278201872b2e163868b72b6fabe
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/basket/model_tests.py
|
makielab/django-oscar
|
0a325cd0f04a4278201872b2e163868b72b6fabe
|
[
"BSD-3-Clause"
] | null | null | null |
tests/integration/basket/model_tests.py
|
makielab/django-oscar
|
0a325cd0f04a4278201872b2e163868b72b6fabe
|
[
"BSD-3-Clause"
] | null | null | null |
from decimal import Decimal as D
from django.test import TestCase
from oscar.apps.basket.models import Basket
from oscar.apps.partner import strategy
from oscar.test import factories
from oscar.apps.catalogue.models import Option
class TestAddingAProductToABasket(TestCase):
def setUp(self):
self.basket = Basket()
self.basket.strategy = strategy.Default()
self.product = factories.create_product()
self.record = factories.create_stockrecord(
currency='GBP',
product=self.product, price_excl_tax=D('10.00'))
self.stockinfo = factories.create_stockinfo(self.record)
self.basket.add(self.product)
def test_creates_a_line(self):
self.assertEqual(1, self.basket.num_lines)
def test_sets_line_prices(self):
line = self.basket.lines.all()[0]
self.assertEqual(line.price_incl_tax, self.stockinfo.price.incl_tax)
self.assertEqual(line.price_excl_tax, self.stockinfo.price.excl_tax)
def test_means_another_currency_product_cannot_be_added(self):
product = factories.create_product()
factories.create_stockrecord(
currency='USD', product=product, price_excl_tax=D('20.00'))
with self.assertRaises(ValueError):
self.basket.add(product)
class TestANonEmptyBasket(TestCase):
def setUp(self):
self.basket = Basket()
self.basket.strategy = strategy.Default()
self.product = factories.create_product()
self.record = factories.create_stockrecord(
self.product, price_excl_tax=D('10.00'))
self.stockinfo = factories.create_stockinfo(self.record)
self.basket.add(self.product, 10)
def test_can_be_flushed(self):
self.basket.flush()
self.assertEqual(self.basket.num_items, 0)
def test_returns_correct_product_quantity(self):
self.assertEqual(10, self.basket.product_quantity(
self.product))
def test_returns_correct_line_quantity_for_existing_product_and_stockrecord(self):
self.assertEqual(10, self.basket.line_quantity(
self.product, self.record))
def test_returns_zero_line_quantity_for_alternative_stockrecord(self):
record = factories.create_stockrecord(
self.product, price_excl_tax=D('5.00'))
self.assertEqual(0, self.basket.line_quantity(
self.product, record))
def test_returns_zero_line_quantity_for_missing_product_and_stockrecord(self):
product = factories.create_product()
record = factories.create_stockrecord(
product, price_excl_tax=D('5.00'))
self.assertEqual(0, self.basket.line_quantity(
product, record))
def test_returns_correct_quantity_for_existing_product_and_stockrecord_and_options(self):
product = factories.create_product()
record = factories.create_stockrecord(
product, price_excl_tax=D('5.00'))
option = Option.objects.create(name="Message")
options = [{"option": option, "value": "2"}]
self.basket.add(product, options=options)
self.assertEqual(0, self.basket.line_quantity(
product, record))
self.assertEqual(1, self.basket.line_quantity(
product, record, options))
class TestMergingTwoBaskets(TestCase):
def setUp(self):
self.product = factories.create_product()
self.record = factories.create_stockrecord(
self.product, price_excl_tax=D('10.00'))
self.stockinfo = factories.create_stockinfo(self.record)
self.main_basket = Basket()
self.main_basket.strategy = strategy.Default()
self.main_basket.add(self.product, quantity=2)
self.merge_basket = Basket()
self.merge_basket.strategy = strategy.Default()
self.merge_basket.add(self.product, quantity=1)
self.main_basket.merge(self.merge_basket)
def test_doesnt_sum_quantities(self):
self.assertEquals(1, self.main_basket.num_lines)
def test_changes_status_of_merge_basket(self):
self.assertEquals(Basket.MERGED, self.merge_basket.status)
class TestASubmittedBasket(TestCase):
def setUp(self):
self.basket = Basket()
self.basket.strategy = strategy.Default()
self.basket.submit()
def test_has_correct_status(self):
self.assertTrue(self.basket.is_submitted)
def test_can_be_edited(self):
self.assertFalse(self.basket.can_be_edited)
| 35.619048
| 93
| 0.694296
| 543
| 4,488
| 5.504604
| 0.180479
| 0.076949
| 0.036132
| 0.044496
| 0.552024
| 0.456005
| 0.385413
| 0.385413
| 0.359318
| 0.337906
| 0
| 0.011771
| 0.204991
| 4,488
| 125
| 94
| 35.904
| 0.825953
| 0
| 0
| 0.361702
| 0
| 0
| 0.012701
| 0
| 0
| 0
| 0
| 0
| 0.159574
| 1
| 0.180851
| false
| 0
| 0.06383
| 0
| 0.287234
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cab057f92135b745b2c22597acdb2d7401a8e30
| 11,134
|
py
|
Python
|
experiments/render-tests-avg.py
|
piotr-karon/realworld-starter-kit
|
6285e4b5913fe5e99d72e9178eb4b1db246d02c9
|
[
"MIT"
] | null | null | null |
experiments/render-tests-avg.py
|
piotr-karon/realworld-starter-kit
|
6285e4b5913fe5e99d72e9178eb4b1db246d02c9
|
[
"MIT"
] | null | null | null |
experiments/render-tests-avg.py
|
piotr-karon/realworld-starter-kit
|
6285e4b5913fe5e99d72e9178eb4b1db246d02c9
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
import json
import os
from pathlib import Path
import numpy as np
from natsort import natsorted
try:
from docopt import docopt
from marko.ext.gfm import gfm
import pygal
from pygal.style import Style, DefaultStyle
except ImportError as e:
raise Exception('Some external dependencies not found, install them using: pip install -r requirements.txt') from e
def render():
suffix = '.avg.checks.bench.json'
suites = {}
for filepath in Path('').glob(f'*{suffix}'):
name = filepath.name[:-len(suffix)]
print(f'Loading {filepath} as {name}.')
with open(filepath) as fp:
suites[name] = json.load(fp)
names = natsorted(suites.keys())
figure_filenames = render_figures(names, suites)
out_filename = Path('bench-results.md')
with open(out_filename, 'w') as out:
cwd = os.getcwd().split(os.sep)[-2:]
print(f'# Benchmark of {", ".join(names)} in {cwd}', file=out)
notes_file = Path('notes.md')
if notes_file.exists():
print(f'Including {notes_file} in resulting Markdown.')
with notes_file.open() as fp:
out.write(fp.read())
else:
print(f'File {notes_file} does not exist, create it to include it in resulting Markdown.')
# print('## General Info & Checks', file=out)
# render_checks(names, suites, out)
print('## Graphs', file=out)
print('*The graphs are interactive, view the rendered HTML locally to enjoy it.*\n', file=out)
for filename in figure_filenames:
# Use HTML instead of Markdown image to specify the width
print(f'<img type="image/svg+xml" src="{filename}" alt="{filename}" width="49%"/>', file=out)
print(f'Markdown output written to {out_filename}.')
render_html(out_filename, Path('bench-results.html'))
def render_checks(names, suites, out):
print(f'|Check|{"|".join(names)}|', file=out)
print(f'|{"|".join(["---"] * (len(names) + 1))}|', file=out)
per_impl_checks = {name: suite['checks'] for name, suite in suites.items()}
check_names = sorted(set().union(*(checks.keys() for checks in per_impl_checks.values())))
def sanitize(value):
if type(value) is float:
value = float(f'{value:.3g}') # round to 3 significant figures
return str(int(value) if value >= 100 else value)
return str(value)
for check_name in check_names:
values = [sanitize(per_impl_checks[name].get(check_name)) for name in names]
if len(values) > 1 and len(set(values)) > 1:
values = [f'**{value}**' for value in values]
print(f'|{check_name}|{"|".join(values)}|', file=out)
FIGURE_FUNCS = []
def figure(func):
"""Simple decorator to mark a function as a figure generator."""
FIGURE_FUNCS.append(func)
return func
def render_figures(names, suites):
filenames = []
config = pygal.Config(legend_at_bottom=True, style=DefaultStyle)
for figure_func in FIGURE_FUNCS:
chart = figure_func(names, suites, config.copy())
filename = f'bench-results.{figure_func.__name__}.svg'
chart.render_to_file(filename)
filenames.append(filename)
return filenames
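# Adding another graph is a matter of defining one more @figure function; a
# minimal sketch (hypothetical metric key, assumes the same per-suite JSON
# layout used by the figures below):
#
#     @figure
#     def my_metric_figure(names, suites, config):
#         chart = pygal.Line(config)
#         chart.title = 'My metric'
#         for name in names:
#             chart.add(name, [s.get('my_metric_avg') for s in suites[name]['stats']])
#         return chart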
@figure
def startup_time_figure(names, suites, config):
all_vals = [suites[name]['startup_max'] for name in names]
mx = np.max(all_vals)
config.range = (0, mx + 0.1)
chart = pygal.Bar(config, value_formatter=lambda x: "{:0.2f}s".format(x))
chart.title = 'Czas uruchomienia (s)'
for name in names:
vals = [{'value': suites[name]['startup_avg'],
'ci': {'low': suites[name]['startup_min'], 'high': suites[name]['startup_max']}}]
# print(vals)
chart.add(name, vals)
return chart
@figure
def errors_vs_connections_figure(names, suites, config):
all_vals = [suites[name]['stats'] for name in names]
flat = [item for sublist in all_vals for item in sublist]
print(flat)
all_rates = [
div_or_none(s['request_errors_new_avg'], s['request_errors_new_avg'] + s['requests_new_avg'], scale=100) for s
in flat]
mx = np.max(all_rates)
config.range = (0, mx + mx * 0.1)
chart = pygal.Line(config, value_formatter=lambda x: "{:0.2f}%".format(x))
chart.title = 'Współczynnik liczby błędów względem liczby połączeń (%)'
connections_x_labels(chart, suites, skip=0)
for name in names:
chart.add(name, [
div_or_none(s['request_errors_new_avg'], s['request_errors_new_avg'] + s['requests_new_avg'], scale=100)
for s in suites[name]['stats'][0:]])
return chart
@figure
def requests_vs_connections_figure(names, suites, config):
vals = [[x['requests_per_s_avg'] for x in suites[name]['stats']] for name in names]
print(vals)
mx = np.max(vals)
config.range = (0, mx + mx * 0.1)
config.min_scale = 6
chart = pygal.Line(config, value_formatter=lambda x: "{:0.0f}".format(x))
chart.title = 'Liczba sukcesów na sekundę względem liczby połączeń (Zapytań/s)'
connections_x_labels(chart, suites, skip=0)
for name in names:
# print(suites[name]['stats'])
# vals = [{'value': x['requests_per_s_avg'], 'ci': {'low': x['requests_per_s_min'], 'high': x['requests_per_s_max']}} for x in suites[name]['stats']]
vals = [{'value': x['requests_per_s_avg']} for x in suites[name]['stats']]
chart.add(name, vals)
return chart
@figure
def latency_vs_connections_50_figure(names, suites, config):
return latency_vs_connections_figure(50, names, suites, config)
@figure
def latency_vs_connections_90_figure(names, suites, config):
return latency_vs_connections_figure(90, names, suites, config)
@figure
def latency_vs_connections_99_figure(names, suites, config):
return latency_vs_connections_figure(99, names, suites, config)
def latency_vs_connections_figure(percentile, names, suites, config):
all_vals = [[s[f'latency_{percentile}p_ms_avg'] for s in suites[name]['stats'][0:]] for name in names]
mx = np.max(all_vals)
mn = np.min(all_vals)
config.range = (mn - mn * .5, mx + mx * .5)
chart = pygal.Line(config, logarithmic=True, value_formatter=lambda x: "{:0.0f}".format(x))
chart.title = f'{percentile}. centyl czasu odpowiedzi względem liczby połączeń (ms)'
connections_x_labels(chart, suites, skip=0)
for name in names:
chart.add(name, [s[f'latency_{percentile}p_ms_avg']
for s in suites[name]['stats'][0:]])
return chart
@figure
def max_mem_usage_figure(names, suites, config):
all_vals = [[s['mem_usage_mb_avg'] for s in suites[name]['stats']] for name in names]
mx = np.max(all_vals)
config.range = (0, mx + .1 * mx)
chart = pygal.Line(config, value_formatter=lambda x: "{:0.0f}".format(x))
chart.title = 'Maksymalne zużycie pamięci względem liczby połączeń (MiB)'
connections_x_labels(chart, suites)
for name in names:
chart.add(name, [s['mem_usage_mb_avg'] for s in suites[name]['stats']])
return chart
@figure
def max_mem_usage_per_requests_figure(names, suites, config):
all_vals = [[div_or_none(s['mem_usage_mb_avg'], s['requests_per_s_avg']) for s in suites[name]['stats'][0:]] for name in names]
mx = np.max(all_vals)
config.range = (0, mx + .1 * mx)
config.min_scale = 6
chart = pygal.Line(config, value_formatter=lambda x: "{:0.3f}".format(x))
chart.title = 'Maksymalne zużycie pamięci per liczba sukcesów na sekundę (MiB-sekunda/Zapytanie)'
connections_x_labels(chart, suites, skip=0)
for name in names:
chart.add(name,
[div_or_none(s['mem_usage_mb_avg'], s['requests_per_s_avg']) for s in suites[name]['stats'][0:]])
return chart
@figure
def cpu_figure(names, suites, config):
mx = np.max([[s['cpu_new_s_avg'] for s in suites[name]['stats'][0:]] for name in names])
config.range = (0, mx + mx * 0.1)
chart = pygal.Line(config, value_formatter=lambda x: "{:0.3f}".format(x))
chart.title = 'Wykorzystanie czasu procesora w czasie rundy testów (sekundy CPU)'
connections_x_labels(chart, suites, skip=0)
for name in names:
chart.add(name, [s['cpu_new_s_avg'] for s in suites[name]['stats'][0:]])
return chart
@figure
def cpu_per_request_figure(names, suites, config):
mx = np.max([[div_or_none(s['cpu_new_s_avg'], s['requests_new_avg'], scale=1000) for s in
suites[name]['stats'][0:]] for name in names])
config.range = (0, mx + mx * 0.1)
chart = pygal.Line(config, value_formatter=lambda x: "{:0.3f}".format(x))
chart.title = 'Wykorzystanie czasu procesora per poprawna odpowiedź (milisekundy CPU/Req)'
connections_x_labels(chart, suites, skip=0)
for name in names:
chart.add(name, [div_or_none(s['cpu_new_s_avg'], s['requests_new_avg'], scale=1000) for s in
suites[name]['stats'][0:]])
return chart
@figure
def cpu_vs_requests_figure(names, suites, config):
all_vls = [[s['requests_total_avg'] for s in suites[name]['stats']] for name in names]
mx = np.max(all_vls)
config.range = (0, mx + mx * 0.1)
config.min_scale = 6
chart = pygal.XY(config, value_formatter=lambda x: "{:0.0f}".format(x), series_formatter=lambda x: "{:0.2f}".format(x))
chart.title = 'Skumulowana liczba poprawnych odpowiedzi względem skumulowanego czasu CPU'
chart.x_title = 'sekundy CPU'
chart.y_title = 'skumulowana liczba poprawnych odpowiedzi'
for name in names:
chart.add(name, [
{'value': (s['cpu_total_s_avg'], s['requests_total_avg']),
'label': f'After {s["connections"]} connections round.'}
for s in suites[name]['stats']
])
return chart
def connections_x_labels(chart, suites, skip=0):
chart.x_labels = [f"{s['connections']} conn's" if s['connections'] else s['message']
for s in next(iter(suites.values()))['stats']][skip:]
chart.x_label_rotation = -30
def div_or_none(numerator, denominator, scale=1):
if not denominator:
return None
return scale * numerator / denominator
HTML_PREFIX = '''<!DOCTYPE html>
<html lang="en">
<head>
<meta charset="utf-8">
<title>Benchmark Report</title>
</head>
<body>
'''
HTML_SUFFIX = ''' </body>
</html>
'''
def render_html(md_file, html_file):
with open(md_file) as in_fp, open(html_file, 'w') as out_fp:
rs = in_fp.read()
html = gfm(rs)
# Replace <img> by <embed> for pygal interactivity, http://www.pygal.org/en/latest/documentation/web.html
html = html.replace('<img', '<embed')
# Replace link to md with link to .html for better browsability at HTML level.
html = html.replace('/README.md">full benchmark', '/README.html">full benchmark')
out_fp.write(HTML_PREFIX)
out_fp.write(html)
out_fp.write(HTML_SUFFIX)
print(f'HTML output written to {html_file.resolve().as_uri()}.')
if __name__ == '__main__':
# args = docopt(__doc__)
render()
| 36.032362
| 157
| 0.647566
| 1,614
| 11,134
| 4.304833
| 0.185254
| 0.033103
| 0.024611
| 0.038284
| 0.483161
| 0.453224
| 0.41825
| 0.382556
| 0.335492
| 0.298503
| 0
| 0.011916
| 0.20855
| 11,134
| 308
| 158
| 36.149351
| 0.776555
| 0.057122
| 0
| 0.274336
| 0
| 0.004425
| 0.225477
| 0.030344
| 0
| 0
| 0
| 0
| 0
| 1
| 0.088496
| false
| 0
| 0.044248
| 0.013274
| 0.212389
| 0.061947
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cab32916328111ed29e8c7581e89b8013c63586
| 9,839
|
py
|
Python
|
litex/build/altera/quartus.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 1,501
|
2016-04-19T18:16:21.000Z
|
2022-03-31T17:46:31.000Z
|
litex/build/altera/quartus.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 1,135
|
2016-04-19T05:49:14.000Z
|
2022-03-31T15:21:19.000Z
|
litex/build/altera/quartus.py
|
osterwood/litex
|
db20cb172dc982c5879aa8080ec7aa18de181cc5
|
[
"ADSL"
] | 357
|
2016-04-19T05:00:24.000Z
|
2022-03-31T11:28:32.000Z
|
#
# This file is part of LiteX.
#
# Copyright (c) 2014-2019 Florent Kermarrec <florent@enjoy-digital.fr>
# Copyright (c) 2019 msloniewski <marcin.sloniewski@gmail.com>
# Copyright (c) 2019 vytautasb <v.buitvydas@limemicro.com>
# SPDX-License-Identifier: BSD-2-Clause
import os
import subprocess
import sys
import math
from shutil import which
from migen.fhdl.structure import _Fragment
from litex.build.generic_platform import Pins, IOStandard, Misc
from litex.build import tools
# IO/Placement Constraints (.qsf) ------------------------------------------------------------------
def _format_constraint(c, signame, fmt_r):
# IO location constraints
if isinstance(c, Pins):
tpl = "set_location_assignment -comment \"{name}\" -to {signame} Pin_{pin}"
return tpl.format(signame=signame, name=fmt_r, pin=c.identifiers[0])
# IO standard constraints
elif isinstance(c, IOStandard):
tpl = "set_instance_assignment -name io_standard -comment \"{name}\" \"{std}\" -to {signame}"
return tpl.format(signame=signame, name=fmt_r, std=c.name)
# Others constraints
elif isinstance(c, Misc):
if not isinstance(c.misc, str) and len(c.misc) == 2:
tpl = "set_instance_assignment -comment \"{name}\" -name {misc[0]} \"{misc[1]}\" -to {signame}"
return tpl.format(signame=signame, name=fmt_r, misc=c.misc)
else:
tpl = "set_instance_assignment -comment \"{name}\" -name {misc} -to {signame}"
return tpl.format(signame=signame, name=fmt_r, misc=c.misc)
def _format_qsf_constraint(signame, pin, others, resname):
fmt_r = "{}:{}".format(*resname[:2])
if resname[2] is not None:
fmt_r += "." + resname[2]
fmt_c = [_format_constraint(c, signame, fmt_r) for c in ([Pins(pin)] + others)]
return '\n'.join(fmt_c)
def _build_qsf_constraints(named_sc, named_pc):
qsf = []
for sig, pins, others, resname in named_sc:
if len(pins) > 1:
for i, p in enumerate(pins):
qsf.append(_format_qsf_constraint("{}[{}]".format(sig, i), p, others, resname))
else:
qsf.append(_format_qsf_constraint(sig, pins[0], others, resname))
if named_pc:
qsf.append("\n\n".join(named_pc))
return "\n".join(qsf)
# Timing Constraints (.sdc) ------------------------------------------------------------------------
def _build_sdc(clocks, false_paths, vns, named_sc, build_name, additional_sdc_commands):
sdc = []
# Clock constraints
for clk, period in sorted(clocks.items(), key=lambda x: x[0].duid):
is_port = False
for sig, pins, others, resname in named_sc:
if sig == vns.get_name(clk):
is_port = True
if is_port:
tpl = "create_clock -name {clk} -period {period} [get_ports {{{clk}}}]"
sdc.append(tpl.format(clk=vns.get_name(clk), period=str(period)))
else:
tpl = "create_clock -name {clk} -period {period} [get_nets {{{clk}}}]"
sdc.append(tpl.format(clk=vns.get_name(clk), period=str(period)))
# False path constraints
for from_, to in sorted(false_paths, key=lambda x: (x[0].duid, x[1].duid)):
tpl = "set_false_path -from [get_clocks {{{from_}}}] -to [get_clocks {{{to}}}]"
sdc.append(tpl.format(from_=vns.get_name(from_), to=vns.get_name(to)))
# Add additional commands
sdc += additional_sdc_commands
# Generate .sdc
tools.write_to_file("{}.sdc".format(build_name), "\n".join(sdc))
# Project (.qsf) -----------------------------------------------------------------------------------
def _build_qsf(device, ips, sources, vincpaths, named_sc, named_pc, build_name, additional_qsf_commands):
qsf = []
# Set device
qsf.append("set_global_assignment -name DEVICE {}".format(device))
# Add sources
for filename, language, library in sources:
if language == "verilog": language = "systemverilog" # Enforce use of SystemVerilog
tpl = "set_global_assignment -name {lang}_FILE {path} -library {lib}"
# Do not add None type files
if language is not None:
qsf.append(tpl.format(lang=language.upper(), path=filename.replace("\\", "/"), lib=library))
# Check if the file is a header. Those should not be explicitly added to qsf,
# but rather included in include search_path
else:
if filename.endswith(".svh") or filename.endswith(".vh"):
fpath = os.path.dirname(filename)
if fpath not in vincpaths:
vincpaths.append(fpath)
# Add ips
for filename in ips:
tpl = "set_global_assignment -name QSYS_FILE {filename}"
        qsf.append(tpl.format(filename=filename.replace("\\", "/")))
# Add include paths
for path in vincpaths:
qsf.append("set_global_assignment -name SEARCH_PATH {}".format(path.replace("\\", "/")))
# Set top level
qsf.append("set_global_assignment -name top_level_entity " + build_name)
# Add io, placement constraints
qsf.append(_build_qsf_constraints(named_sc, named_pc))
# Set timing constraints
qsf.append("set_global_assignment -name SDC_FILE {}.sdc".format(build_name))
# Add additional commands
qsf += additional_qsf_commands
# Generate .qsf
tools.write_to_file("{}.qsf".format(build_name), "\n".join(qsf))
# Script -------------------------------------------------------------------------------------------
def _build_script(build_name, create_rbf):
if sys.platform in ["win32", "cygwin"]:
script_contents = "REM Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
script_file = "build_" + build_name + ".bat"
else:
script_contents = "# Autogenerated by LiteX / git: " + tools.get_litex_git_revision()
script_file = "build_" + build_name + ".sh"
script_contents += """
quartus_map --read_settings_files=on --write_settings_files=off {build_name} -c {build_name}
quartus_fit --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_asm --read_settings_files=off --write_settings_files=off {build_name} -c {build_name}
quartus_sta {build_name} -c {build_name}"""
if create_rbf:
script_contents += """
if [ -f "{build_name}.sof" ]
then
quartus_cpf -c {build_name}.sof {build_name}.rbf
fi
"""
script_contents = script_contents.format(build_name=build_name)
tools.write_to_file(script_file, script_contents, force_unix=True)
return script_file
def _run_script(script):
if sys.platform in ["win32", "cygwin"]:
shell = ["cmd", "/c"]
else:
shell = ["bash"]
if which("quartus_map") is None:
msg = "Unable to find Quartus toolchain, please:\n"
msg += "- Add Quartus toolchain to your $PATH."
raise OSError(msg)
if subprocess.call(shell + [script]) != 0:
raise OSError("Error occured during Quartus's script execution.")
# AlteraQuartusToolchain ---------------------------------------------------------------------------
class AlteraQuartusToolchain:
attr_translate = {}
def __init__(self):
self.clocks = dict()
self.false_paths = set()
self.additional_sdc_commands = []
self.additional_qsf_commands = []
def build(self, platform, fragment,
build_dir = "build",
build_name = "top",
run = True,
**kwargs):
# Create build directory
cwd = os.getcwd()
os.makedirs(build_dir, exist_ok=True)
os.chdir(build_dir)
# Finalize design
if not isinstance(fragment, _Fragment):
fragment = fragment.get_fragment()
platform.finalize(fragment)
# Generate verilog
v_output = platform.get_verilog(fragment, name=build_name, **kwargs)
named_sc, named_pc = platform.resolve_signals(v_output.ns)
v_file = build_name + ".v"
v_output.write(v_file)
platform.add_source(v_file)
# Generate design timing constraints file (.sdc)
_build_sdc(
clocks = self.clocks,
false_paths = self.false_paths,
vns = v_output.ns,
named_sc = named_sc,
build_name = build_name,
additional_sdc_commands = self.additional_sdc_commands)
# Generate design project and location constraints file (.qsf)
_build_qsf(
device = platform.device,
ips = platform.ips,
sources = platform.sources,
vincpaths = platform.verilog_include_paths,
named_sc = named_sc,
named_pc = named_pc,
build_name = build_name,
additional_qsf_commands = self.additional_qsf_commands)
# Generate build script
script = _build_script(build_name, platform.create_rbf)
# Run
if run:
_run_script(script)
os.chdir(cwd)
return v_output.ns
def add_period_constraint(self, platform, clk, period):
clk.attr.add("keep")
period = math.floor(period*1e3)/1e3 # round to lowest picosecond
if clk in self.clocks:
if period != self.clocks[clk]:
raise ValueError("Clock already constrained to {:.2f}ns, new constraint to {:.2f}ns"
.format(self.clocks[clk], period))
self.clocks[clk] = period
def add_false_path_constraint(self, platform, from_, to):
from_.attr.add("keep")
to.attr.add("keep")
if (to, from_) not in self.false_paths:
self.false_paths.add((from_, to))
| 38.433594
| 107
| 0.598536
| 1,194
| 9,839
| 4.721106
| 0.207705
| 0.047898
| 0.014902
| 0.024481
| 0.292177
| 0.209331
| 0.161788
| 0.15008
| 0.115487
| 0.103424
| 0
| 0.0054
| 0.24718
| 9,839
| 255
| 108
| 38.584314
| 0.755637
| 0.146255
| 0
| 0.129412
| 0
| 0
| 0.186027
| 0.044264
| 0
| 0
| 0
| 0
| 0
| 1
| 0.064706
| false
| 0
| 0.047059
| 0
| 0.170588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cac38aa4a5a8e636d6285190a3fb18a56c06114
| 10,831
|
py
|
Python
|
polystores/stores/azure_store.py
|
polyaxon/polystores
|
141789ef75622c80d1f3875cec6952ad3c2d5ec7
|
[
"MIT"
] | 50
|
2018-12-10T14:46:12.000Z
|
2021-11-03T16:38:58.000Z
|
polystores/stores/azure_store.py
|
polyaxon/polystores
|
141789ef75622c80d1f3875cec6952ad3c2d5ec7
|
[
"MIT"
] | 17
|
2019-01-21T14:14:30.000Z
|
2019-08-23T20:39:07.000Z
|
polystores/stores/azure_store.py
|
polyaxon/polystores
|
141789ef75622c80d1f3875cec6952ad3c2d5ec7
|
[
"MIT"
] | 8
|
2019-01-21T14:52:37.000Z
|
2019-07-29T19:53:12.000Z
|
# -*- coding: utf-8 -*-
from __future__ import absolute_import, division, print_function
import os
from rhea import RheaError
from rhea import parser as rhea_parser
from azure.common import AzureHttpError
from azure.storage.blob.models import BlobPrefix
from polystores.clients.azure_client import get_blob_service_connection
from polystores.exceptions import PolyaxonStoresException
from polystores.stores.base_store import BaseStore
from polystores.utils import append_basename, check_dirname_exists, get_files_in_current_directory
# pylint:disable=arguments-differ
class AzureStore(BaseStore):
"""
Azure store Service.
"""
STORE_TYPE = BaseStore._AZURE_STORE # pylint:disable=protected-access
def __init__(self, connection=None, **kwargs):
self._connection = connection
self._account_name = kwargs.get('account_name') or kwargs.get('AZURE_ACCOUNT_NAME')
self._account_key = kwargs.get('account_key') or kwargs.get('AZURE_ACCOUNT_KEY')
self._connection_string = (
kwargs.get('connection_string') or kwargs.get('AZURE_CONNECTION_STRING'))
@property
def connection(self):
if self._connection is None:
self.set_connection(account_name=self._account_name,
account_key=self._account_key,
connection_string=self._connection_string)
return self._connection
def set_connection(self, account_name=None, account_key=None, connection_string=None):
"""
Sets a new Blob service connection.
Args:
account_name: `str`. The storage account name.
account_key: `str`. The storage account key.
connection_string: `str`. If specified, this will override all other parameters besides
request session.
Returns:
BlockBlobService instance
"""
self._connection = get_blob_service_connection(account_name=account_name,
account_key=account_key,
connection_string=connection_string)
def set_env_vars(self):
if self._account_name:
os.environ['AZURE_ACCOUNT_NAME'] = self._account_name
if self._account_key:
os.environ['AZURE_ACCOUNT_KEY'] = self._account_key
if self._connection_string:
os.environ['AZURE_CONNECTION_STRING'] = self._connection_string
@staticmethod
def parse_wasbs_url(wasbs_url):
"""
Parses and validates a wasbs url.
Returns:
tuple(container, storage_account, path).
"""
try:
spec = rhea_parser.parse_wasbs_path(wasbs_url)
return spec.container, spec.storage_account, spec.path
except RheaError as e:
raise PolyaxonStoresException(e)
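    # Illustrative example (hypothetical account/container names): a URL such
    # as "wasbs://outputs@myaccount.blob.core.windows.net/experiments/run1"
    # would be parsed into ("outputs", "myaccount", "experiments/run1"),
    # assuming rhea's wasbs parser follows the standard wasbs URL layout.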
def check_blob(self, blob, container_name=None):
"""
Checks if a blob exists.
Args:
blob: `str`. Name of existing blob.
container_name: `str`. Name of existing container.
Returns:
bool
"""
if not container_name:
container_name, _, blob = self.parse_wasbs_url(blob)
try:
return self.connection.get_blob_properties(
container_name,
blob
)
except AzureHttpError:
return None
def ls(self, path):
results = self.list(key=path)
return {'files': results['blobs'], 'dirs': results['prefixes']}
def list(self, key, container_name=None, path=None, delimiter='/', marker=None):
"""
        Lists blobs and prefixes under a key prefix.
Args:
key: `str`. key prefix.
container_name: `str`. Name of existing container.
path: `str`. an extra path to append to the key.
delimiter: `str`. the delimiter marks key hierarchy.
marker: `str`. An opaque continuation token.
"""
if not container_name:
container_name, _, key = self.parse_wasbs_url(key)
if key and not key.endswith('/'):
key += '/'
prefix = key
if path:
prefix = os.path.join(prefix, path)
if prefix and not prefix.endswith('/'):
prefix += '/'
list_blobs = []
list_prefixes = []
while True:
results = self.connection.list_blobs(container_name,
prefix=prefix,
delimiter=delimiter,
marker=marker)
for r in results:
if isinstance(r, BlobPrefix):
name = r.name[len(key):]
list_prefixes.append(name)
else:
name = r.name[len(key):]
list_blobs.append((name, r.properties.content_length))
if results.next_marker:
marker = results.next_marker
else:
break
return {
'blobs': list_blobs,
'prefixes': list_prefixes
}
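    # Illustrative return value (hypothetical blob names/sizes):
    #   {'blobs': [('models/weights.h5', 10240), ('metrics.json', 512)],
    #    'prefixes': ['logs/', 'checkpoints/']}
    # 'blobs' pairs each blob name (relative to the key) with its size in
    # bytes; 'prefixes' lists the immediate "sub-directories" under the key.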
def upload_file(self, filename, blob, container_name=None, use_basename=True):
"""
        Uploads a local file to Azure Blob Storage.
Args:
filename: `str`. the file to upload.
blob: `str`. blob to upload to.
container_name: `str`. the name of the container.
use_basename: `bool`. whether or not to use the basename of the filename.
"""
if not container_name:
container_name, _, blob = self.parse_wasbs_url(blob)
if use_basename:
blob = append_basename(blob, filename)
self.connection.create_blob_from_path(container_name, blob, filename)
def upload_dir(self, dirname, blob, container_name=None, use_basename=True):
"""
        Uploads a local directory to Azure Blob Storage.
Args:
dirname: `str`. name of the directory to upload.
blob: `str`. blob to upload to.
container_name: `str`. the name of the container.
use_basename: `bool`. whether or not to use the basename of the directory.
"""
if not container_name:
container_name, _, blob = self.parse_wasbs_url(blob)
if use_basename:
blob = append_basename(blob, dirname)
# Turn the path to absolute paths
dirname = os.path.abspath(dirname)
with get_files_in_current_directory(dirname) as files:
for f in files:
file_blob = os.path.join(blob, os.path.relpath(f, dirname))
self.upload_file(filename=f,
blob=file_blob,
container_name=container_name,
use_basename=False)
def download_file(self, blob, local_path, container_name=None, use_basename=True):
"""
        Downloads a file from Azure Blob Storage.
Args:
blob: `str`. blob to download.
local_path: `str`. the path to download to.
container_name: `str`. the name of the container.
use_basename: `bool`. whether or not to use the basename of the blob.
"""
if not container_name:
container_name, _, blob = self.parse_wasbs_url(blob)
local_path = os.path.abspath(local_path)
if use_basename:
local_path = append_basename(local_path, blob)
check_dirname_exists(local_path)
try:
self.connection.get_blob_to_path(container_name, blob, local_path)
except AzureHttpError as e:
raise PolyaxonStoresException(e)
def download_dir(self, blob, local_path, container_name=None, use_basename=True):
"""
        Downloads a directory from Azure Blob Storage.
Args:
blob: `str`. blob to download.
local_path: `str`. the path to download to.
container_name: `str`. the name of the container.
use_basename: `bool`. whether or not to use the basename of the key.
"""
if not container_name:
container_name, _, blob = self.parse_wasbs_url(blob)
local_path = os.path.abspath(local_path)
if use_basename:
local_path = append_basename(local_path, blob)
try:
check_dirname_exists(local_path, is_dir=True)
except PolyaxonStoresException:
os.makedirs(local_path)
results = self.list(container_name=container_name, key=blob, delimiter='/')
# Create directories
for prefix in sorted(results['prefixes']):
direname = os.path.join(local_path, prefix)
prefix = os.path.join(blob, prefix)
# Download files under
self.download_dir(blob=prefix,
local_path=direname,
container_name=container_name,
use_basename=False)
# Download files
for file_key in results['blobs']:
file_key = file_key[0]
filename = os.path.join(local_path, file_key)
file_key = os.path.join(blob, file_key)
self.download_file(blob=file_key,
local_path=filename,
container_name=container_name,
use_basename=False)
def delete(self, blob, container_name=None):
if not container_name:
container_name, _, blob = self.parse_wasbs_url(blob)
results = self.list(container_name=container_name, key=blob, delimiter='/')
if not any([results['prefixes'], results['blobs']]):
self.delete_file(blob=blob, container_name=container_name)
# Delete directories
for prefix in sorted(results['prefixes']):
prefix = os.path.join(blob, prefix)
# Download files under
self.delete(blob=prefix, container_name=container_name)
# Delete files
for file_key in results['blobs']:
file_key = file_key[0]
file_key = os.path.join(blob, file_key)
self.delete_file(blob=file_key, container_name=container_name)
def delete_file(self, blob, container_name=None):
"""
        Deletes a blob if it exists.
Args:
blob: `str`. Name of existing blob.
container_name: `str`. Name of existing container.
"""
if not container_name:
container_name, _, blob = self.parse_wasbs_url(blob)
try:
self.connection.delete_blob(container_name, blob)
except AzureHttpError:
pass
| 35.864238
| 98
| 0.587942
| 1,211
| 10,831
| 5.036334
| 0.141206
| 0.110838
| 0.057714
| 0.068208
| 0.489425
| 0.38285
| 0.360059
| 0.326611
| 0.311854
| 0.301361
| 0
| 0.000413
| 0.329886
| 10,831
| 301
| 99
| 35.983389
| 0.839901
| 0.206444
| 0
| 0.329268
| 0
| 0
| 0.029555
| 0.005736
| 0
| 0
| 0
| 0
| 0
| 1
| 0.085366
| false
| 0.006098
| 0.060976
| 0
| 0.195122
| 0.006098
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cad3cf72fd9e55c370708003b5cfc6962c4bf8e
| 22,217
|
py
|
Python
|
analysis/webservice/NexusHandler.py
|
dataplumber/nexus
|
f25a89e85eba098da9c6db1ff3d408dae8a6b310
|
[
"Apache-2.0"
] | 23
|
2016-08-09T22:45:14.000Z
|
2020-02-17T08:18:29.000Z
|
analysis/webservice/NexusHandler.py
|
lewismc/incubator-sdap-nexus
|
ff98fa346303431542b8391cc2a1bf7561d1bd03
|
[
"Apache-2.0"
] | 6
|
2017-04-27T21:22:17.000Z
|
2021-06-01T21:45:52.000Z
|
analysis/webservice/NexusHandler.py
|
dataplumber/nexus
|
f25a89e85eba098da9c6db1ff3d408dae8a6b310
|
[
"Apache-2.0"
] | 5
|
2016-08-31T13:47:29.000Z
|
2017-11-14T21:45:22.000Z
|
"""
Copyright (c) 2016 Jet Propulsion Laboratory,
California Institute of Technology. All rights reserved
"""
import sys
import numpy as np
import logging
import time
import types
from datetime import datetime
from netCDF4 import Dataset
from nexustiles.nexustiles import NexusTileService
from webservice.webmodel import NexusProcessingException
AVAILABLE_HANDLERS = []
AVAILABLE_INITIALIZERS = []
def nexus_initializer(clazz):
log = logging.getLogger(__name__)
try:
wrapper = NexusInitializerWrapper(clazz)
log.info("Adding initializer '%s'" % wrapper.clazz())
AVAILABLE_INITIALIZERS.append(wrapper)
except Exception as ex:
log.warn("Initializer '%s' failed to load (reason: %s)" % (clazz, ex.message), exc_info=True)
return clazz
def nexus_handler(clazz):
log = logging.getLogger(__name__)
try:
wrapper = AlgorithmModuleWrapper(clazz)
log.info("Adding algorithm module '%s' with path '%s' (%s)" % (wrapper.name(), wrapper.path(), wrapper.clazz()))
AVAILABLE_HANDLERS.append(wrapper)
except Exception as ex:
log.warn("Handler '%s' is invalid and will be skipped (reason: %s)" % (clazz, ex.message), exc_info=True)
return clazz
DEFAULT_PARAMETERS_SPEC = {
"ds": {
"name": "Dataset",
"type": "string",
"description": "One or more comma-separated dataset shortnames"
},
"minLat": {
"name": "Minimum Latitude",
"type": "float",
"description": "Minimum (Southern) bounding box Latitude"
},
"maxLat": {
"name": "Maximum Latitude",
"type": "float",
"description": "Maximum (Northern) bounding box Latitude"
},
"minLon": {
"name": "Minimum Longitude",
"type": "float",
"description": "Minimum (Western) bounding box Longitude"
},
"maxLon": {
"name": "Maximum Longitude",
"type": "float",
"description": "Maximum (Eastern) bounding box Longitude"
},
"startTime": {
"name": "Start Time",
"type": "long integer",
"description": "Starting time in milliseconds since midnight Jan. 1st, 1970 UTC"
},
"endTime": {
"name": "End Time",
"type": "long integer",
"description": "Ending time in milliseconds since midnight Jan. 1st, 1970 UTC"
},
"lowPassFilter": {
"name": "Apply Low Pass Filter",
"type": "boolean",
"description": "Specifies whether to apply a low pass filter on the analytics results"
},
"seasonalFilter": {
"name": "Apply Seasonal Filter",
"type": "boolean",
"description": "Specified whether to apply a seasonal cycle filter on the analytics results"
}
}
class NexusInitializerWrapper:
def __init__(self, clazz):
self.__log = logging.getLogger(__name__)
self.__hasBeenRun = False
self.__clazz = clazz
self.validate()
def validate(self):
if "init" not in self.__clazz.__dict__ or not type(self.__clazz.__dict__["init"]) == types.FunctionType:
raise Exception("Method 'init' has not been declared")
def clazz(self):
return self.__clazz
def hasBeenRun(self):
return self.__hasBeenRun
def init(self, config):
if not self.__hasBeenRun:
self.__hasBeenRun = True
instance = self.__clazz()
instance.init(config)
else:
self.log("Initializer '%s' has already been run" % self.__clazz)
class AlgorithmModuleWrapper:
def __init__(self, clazz):
self.__instance = None
self.__clazz = clazz
self.validate()
def validate(self):
if "calc" not in self.__clazz.__dict__ or not type(self.__clazz.__dict__["calc"]) == types.FunctionType:
raise Exception("Method 'calc' has not been declared")
if "path" not in self.__clazz.__dict__:
raise Exception("Property 'path' has not been defined")
if "name" not in self.__clazz.__dict__:
raise Exception("Property 'name' has not been defined")
if "description" not in self.__clazz.__dict__:
raise Exception("Property 'description' has not been defined")
if "params" not in self.__clazz.__dict__:
raise Exception("Property 'params' has not been defined")
def clazz(self):
return self.__clazz
def name(self):
return self.__clazz.name
def path(self):
return self.__clazz.path
def description(self):
return self.__clazz.description
def params(self):
return self.__clazz.params
def instance(self, algorithm_config=None, sc=None):
if "singleton" in self.__clazz.__dict__ and self.__clazz.__dict__["singleton"] is True:
if self.__instance is None:
self.__instance = self.__clazz()
try:
self.__instance.set_config(algorithm_config)
except AttributeError:
pass
try:
self.__instance.set_spark_context(sc)
except AttributeError:
pass
return self.__instance
else:
instance = self.__clazz()
try:
instance.set_config(algorithm_config)
except AttributeError:
pass
try:
                instance.set_spark_context(sc)
except AttributeError:
pass
return instance
def isValid(self):
try:
self.validate()
return True
except Exception as ex:
return False
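# Illustrative sketch (hypothetical endpoint): a handler registered with
# @nexus_handler must expose the class attributes checked by
# AlgorithmModuleWrapper.validate() and implement calc():
#
#     @nexus_handler
#     class ExampleHandlerImpl(CalcHandler):
#         name = "Example"
#         path = "/example"
#         description = "Illustrative handler"
#         params = DEFAULT_PARAMETERS_SPEC
#
#         def calc(self, computeOptions, **args):
#             return None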
class CalcHandler(object):
def calc(self, computeOptions, **args):
raise Exception("calc() not yet implemented")
class NexusHandler(CalcHandler):
def __init__(self, skipCassandra=False, skipSolr=False):
CalcHandler.__init__(self)
self.algorithm_config = None
self._tile_service = NexusTileService(skipCassandra, skipSolr)
def set_config(self, algorithm_config):
self.algorithm_config = algorithm_config
def _mergeDicts(self, x, y):
z = x.copy()
z.update(y)
return z
def _now(self):
millis = int(round(time.time() * 1000))
return millis
def _mergeDataSeries(self, resultsData, dataNum, resultsMap):
for entry in resultsData:
#frmtdTime = datetime.fromtimestamp(entry["time"] ).strftime("%Y-%m")
frmtdTime = entry["time"]
if frmtdTime not in resultsMap:
resultsMap[frmtdTime] = []
entry["ds"] = dataNum
resultsMap[frmtdTime].append(entry)
def _resultsMapToList(self, resultsMap):
resultsList = []
for key, value in resultsMap.iteritems():
resultsList.append(value)
resultsList = sorted(resultsList, key=lambda entry: entry[0]["time"])
return resultsList
def _mergeResults(self, resultsRaw):
resultsMap = {}
for i in range(0, len(resultsRaw)):
resultsSeries = resultsRaw[i]
resultsData = resultsSeries[0]
self._mergeDataSeries(resultsData, i, resultsMap)
resultsList = self._resultsMapToList(resultsMap)
return resultsList
class SparkHandler(NexusHandler):
class SparkJobContext(object):
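# Context manager that pops a named scheduler pool off the shared job stack on
# entry and pushes it back on exit; when the stack is empty it raises
# MaxConcurrentJobsReached, which the wrapped calc() turns into a
# NexusProcessingException with code 503.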
class MaxConcurrentJobsReached(Exception):
def __init__(self, *args, **kwargs):
Exception.__init__(self, *args, **kwargs)
def __init__(self, job_stack):
self.spark_job_stack = job_stack
self.job_name = None
self.log = logging.getLogger(__name__)
def __enter__(self):
try:
self.job_name = self.spark_job_stack.pop()
self.log.debug("Using %s" % self.job_name)
except IndexError:
raise SparkHandler.SparkJobContext.MaxConcurrentJobsReached()
return self
def __exit__(self, exc_type, exc_val, exc_tb):
if self.job_name is not None:
self.log.debug("Returning %s" % self.job_name)
self.spark_job_stack.append(self.job_name)
def __init__(self, **kwargs):
import inspect
NexusHandler.__init__(self, **kwargs)
self.log = logging.getLogger(__name__)
self._sc = None
self.spark_job_stack = []
def with_spark_job_context(calc_func):
from functools import wraps
@wraps(calc_func)
def wrapped(*args, **kwargs1):
try:
with SparkHandler.SparkJobContext(self.spark_job_stack) as job_context:
# TODO Pool and Job are forced to a 1-to-1 relationship
calc_func.im_self._sc.setLocalProperty("spark.scheduler.pool", job_context.job_name)
calc_func.im_self._sc.setJobGroup(job_context.job_name, "a spark job")
return calc_func(*args, **kwargs1)
except SparkHandler.SparkJobContext.MaxConcurrentJobsReached:
raise NexusProcessingException(code=503,
reason="Max concurrent requests reached. Please try again later.")
return wrapped
for member in inspect.getmembers(self, predicate=inspect.ismethod):
if member[0] == "calc":
setattr(self, member[0], with_spark_job_context(member[1]))
def set_spark_context(self, sc):
self._sc = sc
def set_config(self, algorithm_config):
max_concurrent_jobs = algorithm_config.getint("spark", "maxconcurrentjobs") if algorithm_config.has_section(
"spark") and algorithm_config.has_option("spark", "maxconcurrentjobs") else 10
self.spark_job_stack = list(["Job %s" % x for x in xrange(1, max_concurrent_jobs + 1)])
self.algorithm_config = algorithm_config
def _setQueryParams(self, ds, bounds, start_time=None, end_time=None,
start_year=None, end_year=None, clim_month=None,
fill=-9999., spark_master=None, spark_nexecs=None,
spark_nparts=None):
self._ds = ds
self._minLat, self._maxLat, self._minLon, self._maxLon = bounds
self._startTime = start_time
self._endTime = end_time
self._startYear = start_year
self._endYear = end_year
self._climMonth = clim_month
self._fill = fill
self._spark_master = spark_master
self._spark_nexecs = spark_nexecs
self._spark_nparts = spark_nparts
def _find_global_tile_set(self):
if type(self._ds) in (list,tuple):
ds = self._ds[0]
else:
ds = self._ds
ntiles = 0
##################################################################
# Temporary workaround until we have dataset metadata to indicate
# temporal resolution.
if "monthly" in ds.lower():
t_incr = 2592000 # 30 days
else:
t_incr = 86400 # 1 day
##################################################################
t = self._endTime
self._latRes = None
self._lonRes = None
while ntiles == 0:
nexus_tiles = self._tile_service.get_tiles_bounded_by_box(self._minLat, self._maxLat, self._minLon, self._maxLon, ds=ds, start_time=t-t_incr, end_time=t)
ntiles = len(nexus_tiles)
self.log.debug('find_global_tile_set got {0} tiles'.format(ntiles))
if ntiles > 0:
for tile in nexus_tiles:
self.log.debug('tile coords:')
self.log.debug('tile lats: {0}'.format(tile.latitudes))
self.log.debug('tile lons: {0}'.format(tile.longitudes))
if self._latRes is None:
lats = tile.latitudes.data
if (len(lats) > 1):
self._latRes = abs(lats[1]-lats[0])
if self._lonRes is None:
lons = tile.longitudes.data
if (len(lons) > 1):
self._lonRes = abs(lons[1]-lons[0])
if ((self._latRes is not None) and
(self._lonRes is not None)):
break
if (self._latRes is None) or (self._lonRes is None):
ntiles = 0
else:
lats_agg = np.concatenate([tile.latitudes.compressed()
for tile in nexus_tiles])
lons_agg = np.concatenate([tile.longitudes.compressed()
for tile in nexus_tiles])
self._minLatCent = np.min(lats_agg)
self._maxLatCent = np.max(lats_agg)
self._minLonCent = np.min(lons_agg)
self._maxLonCent = np.max(lons_agg)
t -= t_incr
return nexus_tiles
def _find_tile_bounds(self, t):
lats = t.latitudes
lons = t.longitudes
if (len(lats.compressed()) > 0) and (len(lons.compressed()) > 0):
min_lat = np.ma.min(lats)
max_lat = np.ma.max(lats)
min_lon = np.ma.min(lons)
max_lon = np.ma.max(lons)
good_inds_lat = np.where(lats.mask == False)[0]
good_inds_lon = np.where(lons.mask == False)[0]
min_y = np.min(good_inds_lat)
max_y = np.max(good_inds_lat)
min_x = np.min(good_inds_lon)
max_x = np.max(good_inds_lon)
bounds = (min_lat, max_lat, min_lon, max_lon,
min_y, max_y, min_x, max_x)
else:
self.log.warn('Nothing in this tile!')
bounds = None
return bounds
@staticmethod
def query_by_parts(tile_service, min_lat, max_lat, min_lon, max_lon,
dataset, start_time, end_time, part_dim=0):
nexus_max_tiles_per_query = 100
#print 'trying query: ',min_lat, max_lat, min_lon, max_lon, \
# dataset, start_time, end_time
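# The try/except below is used as control flow: if the box query would return
# more than nexus_max_tiles_per_query tiles, the assert fails and the except
# branch splits the query in half along the dimension selected by part_dim
# (0 = latitude, 1 = longitude, 2 = time) and recurses until each sub-query
# fits under the cap; only then is the tile data fetched and masked to the box.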
try:
tiles = \
tile_service.find_tiles_in_box(min_lat, max_lat,
min_lon, max_lon,
dataset,
start_time=start_time,
end_time=end_time,
fetch_data=False)
assert(len(tiles) <= nexus_max_tiles_per_query)
except:
#print 'failed query: ',min_lat, max_lat, min_lon, max_lon, \
# dataset, start_time, end_time
if part_dim == 0:
# Partition by latitude.
mid_lat = (min_lat + max_lat) / 2
nexus_tiles = SparkHandler.query_by_parts(tile_service,
min_lat, mid_lat,
min_lon, max_lon,
dataset,
start_time, end_time,
part_dim=part_dim)
nexus_tiles.extend(SparkHandler.query_by_parts(tile_service,
mid_lat,
max_lat,
min_lon,
max_lon,
dataset,
start_time,
end_time,
part_dim=part_dim))
elif part_dim == 1:
# Partition by longitude.
mid_lon = (min_lon + max_lon) / 2
nexus_tiles = SparkHandler.query_by_parts(tile_service,
min_lat, max_lat,
min_lon, mid_lon,
dataset,
start_time, end_time,
part_dim=part_dim)
nexus_tiles.extend(SparkHandler.query_by_parts(tile_service,
min_lat,
max_lat,
mid_lon,
max_lon,
dataset,
start_time,
end_time,
part_dim=part_dim))
elif part_dim == 2:
# Partition by time.
mid_time = (start_time + end_time) / 2
nexus_tiles = SparkHandler.query_by_parts(tile_service,
min_lat, max_lat,
min_lon, max_lon,
dataset,
start_time, mid_time,
part_dim=part_dim)
nexus_tiles.extend(SparkHandler.query_by_parts(tile_service,
min_lat,
max_lat,
min_lon,
max_lon,
dataset,
mid_time,
end_time,
part_dim=part_dim))
else:
# No exception, so query Cassandra for the tile data.
#print 'Making NEXUS query to Cassandra for %d tiles...' % \
# len(tiles)
#t1 = time.time()
#print 'NEXUS call start at time %f' % t1
#sys.stdout.flush()
nexus_tiles = list(tile_service.fetch_data_for_tiles(*tiles))
nexus_tiles = list(tile_service.mask_tiles_to_bbox(min_lat, max_lat,
min_lon, max_lon,
nexus_tiles))
#t2 = time.time()
#print 'NEXUS call end at time %f' % t2
#print 'Seconds in NEXUS call: ', t2-t1
#sys.stdout.flush()
#print 'Returning %d tiles' % len(nexus_tiles)
return nexus_tiles
@staticmethod
def _prune_tiles(nexus_tiles):
del_ind = np.where([np.all(tile.data.mask) for tile in nexus_tiles])[0]
for i in np.flipud(del_ind):
del nexus_tiles[i]
def _lat2ind(self,lat):
return int((lat-self._minLatCent)/self._latRes)
def _lon2ind(self,lon):
return int((lon-self._minLonCent)/self._lonRes)
def _ind2lat(self,y):
return self._minLatCent+y*self._latRes
def _ind2lon(self,x):
return self._minLonCent+x*self._lonRes
def _create_nc_file_time1d(self, a, fname, varname, varunits=None,
fill=None):
self.log.debug('a={0}'.format(a))
self.log.debug('shape a = {0}'.format(a.shape))
assert len(a.shape) == 1
time_dim = len(a)
rootgrp = Dataset(fname, "w", format="NETCDF4")
rootgrp.createDimension("time", time_dim)
vals = rootgrp.createVariable(varname, "f4", dimensions=("time",),
fill_value=fill)
times = rootgrp.createVariable("time", "f4", dimensions=("time",))
vals[:] = [d['mean'] for d in a]
times[:] = [d['time'] for d in a]
if varunits is not None:
vals.units = varunits
times.units = 'seconds since 1970-01-01 00:00:00'
rootgrp.close()
def _create_nc_file_latlon2d(self, a, fname, varname, varunits=None,
fill=None):
self.log.debug('a={0}'.format(a))
self.log.debug('shape a = {0}'.format(a.shape))
assert len(a.shape) == 2
lat_dim, lon_dim = a.shape
rootgrp = Dataset(fname, "w", format="NETCDF4")
rootgrp.createDimension("lat", lat_dim)
rootgrp.createDimension("lon", lon_dim)
vals = rootgrp.createVariable(varname, "f4",
dimensions=("lat","lon",),
fill_value=fill)
lats = rootgrp.createVariable("lat", "f4", dimensions=("lat",))
lons = rootgrp.createVariable("lon", "f4", dimensions=("lon",))
vals[:,:] = a
lats[:] = np.linspace(self._minLatCent,
self._maxLatCent, lat_dim)
lons[:] = np.linspace(self._minLonCent,
self._maxLonCent, lon_dim)
if varunits is not None:
vals.units = varunits
lats.units = "degrees north"
lons.units = "degrees east"
rootgrp.close()
def _create_nc_file(self, a, fname, varname, **kwargs):
self._create_nc_file_latlon2d(a, fname, varname, **kwargs)
def executeInitializers(config):
[wrapper.init(config) for wrapper in AVAILABLE_INITIALIZERS]
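# Illustrative sketch (hypothetical, not part of this module): the minimal shape
# of an algorithm class that AlgorithmModuleWrapper.validate() accepts -- class-level
# 'name', 'path', 'description' and 'params' attributes plus a 'calc' method defined
# on the class itself; 'singleton' is optional and only consulted by instance().
# The class below is an assumption for illustration only.
#
# class ExampleTimeSeriesHandlerImpl(NexusHandler):
#     name = "Example Time Series"
#     path = "/exampleTimeSeries"
#     description = "Toy handler illustrating the wrapper contract"
#     params = {}  # in practice, a parameter spec dict like the one near the top of this file
#     singleton = True
#
#     def calc(self, computeOptions, **args):
#         # a real handler would query self._tile_service here
#         return {"stats": [], "meta": {}}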
| 40.030631
| 165
| 0.508755
| 2,221
| 22,217
| 4.831607
| 0.178748
| 0.020129
| 0.010064
| 0.012301
| 0.31656
| 0.25841
| 0.232784
| 0.203802
| 0.152735
| 0.13652
| 0
| 0.009246
| 0.396363
| 22,217
| 554
| 166
| 40.102888
| 0.790918
| 0.04186
| 0
| 0.287946
| 0
| 0
| 0.088667
| 0
| 0
| 0
| 0
| 0.001805
| 0.006696
| 1
| 0.102679
| false
| 0.015625
| 0.024554
| 0.024554
| 0.203125
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cade1c54a41deec5844621516e8934dad9ba6ed
| 2,602
|
py
|
Python
|
utils/box/metric.py
|
ming71/SLA
|
7024b093bc0d456b274314ebeae3bc500c2db65a
|
[
"MIT"
] | 9
|
2021-05-26T05:51:19.000Z
|
2021-12-25T02:31:55.000Z
|
utils/box/metric.py
|
ming71/SLA
|
7024b093bc0d456b274314ebeae3bc500c2db65a
|
[
"MIT"
] | 4
|
2021-09-17T11:24:20.000Z
|
2022-03-16T02:07:33.000Z
|
utils/box/metric.py
|
ming71/SLA
|
7024b093bc0d456b274314ebeae3bc500c2db65a
|
[
"MIT"
] | null | null | null |
import numpy as np
from collections import defaultdict, Counter
from .rbbox_np import rbbox_iou
def get_ap(recall, precision):
recall = [0] + list(recall) + [1]
precision = [0] + list(precision) + [0]
for i in range(len(precision) - 1, 0, -1):
precision[i - 1] = max(precision[i - 1], precision[i])
ap = sum((recall[i] - recall[i - 1]) * precision[i] for i in range(1, len(recall)) if recall[i] != recall[i - 1])
return ap * 100
def get_ap_07(recall, precision):
ap = 0.
for t in np.linspace(0, 1, 11, endpoint=True):
mask = recall >= t
if np.any(mask):
ap += np.max(precision[mask]) / 11
return ap * 100
def get_det_aps(detect, target, num_classes, iou_thresh=0.5, use_07_metric=False):
# [[index, bbox, score, label], ...]
aps = []
for c in range(num_classes):
target_c = list(filter(lambda x: x[3] == c, target))
detect_c = filter(lambda x: x[3] == c, detect)
detect_c = sorted(detect_c, key=lambda x: x[2], reverse=True)
tp = np.zeros(len(detect_c))
fp = np.zeros(len(detect_c))
target_count = Counter([x[0] for x in target_c])
target_count = {index: np.zeros(count) for index, count in target_count.items()}
target_lut = defaultdict(list)
for index, bbox, conf, label in target_c:
target_lut[index].append(bbox)
detect_lut = defaultdict(list)
for index, bbox, conf, label in detect_c:
detect_lut[index].append(bbox)
iou_lut = dict()
for index, bboxes in detect_lut.items():
if index in target_lut:
iou_lut[index] = rbbox_iou(np.stack(bboxes), np.stack(target_lut[index]))
counter = defaultdict(int)
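# counter tracks how many detections have already been processed for this image
# index; it selects the corresponding row of the precomputed IoU matrix in
# iou_lut, whose columns correspond to the ground-truth boxes of that image.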
for i, (index, bbox, conf, label) in enumerate(detect_c):
count = counter[index]
counter[index] += 1
iou_max = -np.inf
hit_j = 0
if index in iou_lut:
for j, iou in enumerate(iou_lut[index][count]):
if iou > iou_max:
iou_max = iou
hit_j = j
if iou_max > iou_thresh and target_count[index][hit_j] == 0:
tp[i] = 1
target_count[index][hit_j] = 1
else:
fp[i] = 1
tp_sum = np.cumsum(tp)
fp_sum = np.cumsum(fp)
npos = len(target_c)
recall = tp_sum / npos
precision = tp_sum / (tp_sum + fp_sum)
aps.append((get_ap_07 if use_07_metric else get_ap)(recall, precision))
return aps
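# Usage sketch (illustrative only; the numbers below are made up):
# recall/precision arrays of the kind produced from the cumulative TP/FP counts above.
#   recall    = np.array([0.5, 1.0])
#   precision = np.array([1.0, 0.5])
#   get_ap(recall, precision)     -> 75.0   (all-point interpolated AP, in percent)
#   get_ap_07(recall, precision)  -> ~77.3  (VOC 2007 11-point interpolated AP, in percent)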
| 37.171429
| 117
| 0.563028
| 378
| 2,602
| 3.716931
| 0.21164
| 0.034875
| 0.023488
| 0.038434
| 0.193594
| 0.081139
| 0.058363
| 0.058363
| 0.058363
| 0
| 0
| 0.025224
| 0.314374
| 2,602
| 69
| 118
| 37.710145
| 0.762332
| 0.013067
| 0
| 0.033333
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.05
| false
| 0
| 0.05
| 0
| 0.15
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1caee980c9d28fcb7768f3cf4259dd89c12fcb4a
| 5,186
|
py
|
Python
|
app.py
|
winstonschroeder/setlistmanager
|
3c177a8da4bd56049964076f6ead51e3fffff5fa
|
[
"MIT"
] | null | null | null |
app.py
|
winstonschroeder/setlistmanager
|
3c177a8da4bd56049964076f6ead51e3fffff5fa
|
[
"MIT"
] | null | null | null |
app.py
|
winstonschroeder/setlistmanager
|
3c177a8da4bd56049964076f6ead51e3fffff5fa
|
[
"MIT"
] | null | null | null |
import logging
import pygame
from app import *
from pygame.locals import *
from werkzeug.serving import run_simple
from web import webapp as w
import data_access as da
logging.basicConfig(filename='setlistmanager.log', level=logging.DEBUG)
SCREEN_WIDTH = 160
SCREEN_HEIGHT = 128
class Button:
pass
class Text():
"""Create a text object."""
def __init__(self, surface, text, pos, **options):
self.text = text
self.surface = surface
self.pos = pos
self.bold = True
self.italic = False
self.underline = False
self.background = None # Color('white')
self.font = pygame.font.SysFont('Arial', 64)
self.fontname = None # 'Free Sans'
self.fontsize = 40
self.fontcolor = Color('black')
self.set_font()
da.connect_db('db.db')
songs = da.get_all_songs_as_json()
print (songs)
# self.words = [word.split(' ') for word in self.text.splitlines()] # 2D array where each row is a list of words.
# self.space = self.font.size(' ')[0] # The width of a space.
# max_width, max_height = self.surface.get_size()
# x, y = self.pos
# for line in self.words:
# for word in line:
# word_surface = self.font.render(word, 0, self.fontcolor)
# # print(word)
# word_width, word_height = word_surface.get_size()
# if x + word_width >= max_width:
# x = pos[0] # Reset the x.
# y += word_height # Start on new row.
# surface.blit(word_surface, (x, y))
# x += word_width + self.space
# x = pos[0] # Reset the x.
# y += word_height # Start on new row.
self.render()
def set_font(self):
"""Set the font from its name and size."""
self.font = pygame.font.Font(self.fontname, self.fontsize)
self.font.set_bold(self.bold)
self.font.set_italic(self.italic)
self.font.set_underline(self.underline)
def render(self):
"""Render the text into an image."""
self.img = self.font.render(self.text, True, self.fontcolor, self.background)
self.rect = self.img.get_rect()
self.rect.size = self.img.get_size()
self.rect.topleft = self.pos
def draw(self):
"""Draw the text image to the screen."""
# Put the center of surf at the center of the display
surf_center = (
(SCREEN_WIDTH - self.rect.width)/2,
(SCREEN_HEIGHT - self.rect.height)/2
)
App.screen.blit(self.img, surf_center)
# App.screen.blit(self.img, self.rect)
class App:
"""Create a single-window app with multiple scenes."""
def __init__(self):
"""Initialize pygame and the application."""
logging.debug('Initializing App')
pygame.init()
pygame.mouse.set_cursor((8, 8), (0, 0), (0, 0, 0, 0, 0, 0, 0, 0), (0, 0, 0, 0, 0, 0, 0, 0))
self.shortcuts = {
(K_x, KMOD_LMETA): 'print("cmd+X")',
(K_x, KMOD_LALT): 'print("alt+X")',
(K_x, KMOD_LCTRL): 'print("ctrl+X")',
(K_x, KMOD_LMETA + KMOD_LSHIFT): 'print("cmd+shift+X")',
(K_x, KMOD_LMETA + KMOD_LALT): 'print("cmd+alt+X")',
(K_x, KMOD_LMETA + KMOD_LALT + KMOD_LSHIFT): 'print("cmd+alt+shift+X")',
}
self.color = Color('green')
self.flags = RESIZABLE
self.rect = Rect(0, 0, SCREEN_WIDTH, SCREEN_HEIGHT)
App.screen = pygame.display.set_mode(self.rect.size, self.flags)
App.t = Text(App.screen, 'Chorus', pos=(0, 0))
App.running = True
def run(self):
"""Run the main event loop."""
logging.debug('entering method run')
app = w.create_app()
run_simple('127.0.0.1', 5000, app, use_debugger=True, use_reloader=True)
logging.debug('after start of flask')
while App.running:
logging.debug('.')
for event in pygame.event.get():
if event.type == QUIT:
App.running = False
if event.type == KEYDOWN:
self.do_shortcut(event)
App.screen.fill(self.color)
App.t.draw()
pygame.display.update()
logging.debug('exiting setlistmanager')
pygame.quit()
def do_shortcut(self, event):
"""Find the the key/mod combination in the dictionary and execute the cmd."""
k = event.key
m = event.mod
if (k, m) in self.shortcuts:
exec(self.shortcuts[k, m])
def toggle_fullscreen(self):
"""Toggle between full screen and windowed screen."""
self.flags ^= FULLSCREEN
pygame.display.set_mode((0, 0), self.flags)
def toggle_resizable(self):
"""Toggle between resizable and fixed-size window."""
self.flags ^= RESIZABLE
pygame.display.set_mode(self.rect.size, self.flags)
def toggle_frame(self):
"""Toggle between frame and noframe window."""
self.flags ^= NOFRAME
pygame.display.set_mode(self.rect.size, self.flags)
| 35.040541
| 122
| 0.571732
| 685
| 5,186
| 4.220438
| 0.265693
| 0.014528
| 0.016603
| 0.020754
| 0.112764
| 0.094431
| 0.088897
| 0.075061
| 0.075061
| 0.032515
| 0
| 0.014576
| 0.298882
| 5,186
| 147
| 123
| 35.278912
| 0.780528
| 0.244312
| 0
| 0.020833
| 0
| 0
| 0.061522
| 0.006257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.104167
| false
| 0.010417
| 0.072917
| 0
| 0.208333
| 0.072917
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1caf08d291951db640773cc4547ec6df82e53a36
| 4,488
|
py
|
Python
|
sim_keypoints.py
|
Praznat/annotationmodeling
|
014b8b94b2225f947691c18b26eb8a4b148d2c8a
|
[
"BSD-3-Clause"
] | 8
|
2020-05-03T20:01:03.000Z
|
2021-12-20T12:24:34.000Z
|
sim_keypoints.py
|
Praznat/annotationmodeling
|
014b8b94b2225f947691c18b26eb8a4b148d2c8a
|
[
"BSD-3-Clause"
] | 1
|
2021-11-19T02:33:19.000Z
|
2021-12-28T03:22:33.000Z
|
sim_keypoints.py
|
Praznat/annotationmodeling
|
014b8b94b2225f947691c18b26eb8a4b148d2c8a
|
[
"BSD-3-Clause"
] | 4
|
2020-05-04T15:04:57.000Z
|
2021-11-04T18:14:26.000Z
|
import json
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import simulation
from eval_functions import oks_score_multi
import utils
def alter_location(points, x_offset, y_offset):
x, y = points.T
return np.array([x + x_offset, y + y_offset]).T
def alter_rotation(points, radians):
centroid = np.mean(points, axis=0)
return utils.rotate_via_numpy((points - centroid).T, radians) + centroid
def alter_magnitude(points, percent_diff):
centroid = np.mean(points, axis=0)
return (points - centroid) * np.exp(percent_diff) + centroid
def alter_normal_jump(points, scale):
return points + np.random.normal(0, scale, points.shape)
def alter_cauchy_jump(points, scale, abs_bound):
return points + utils.bounded_cauchy(scale, points.shape, abs_bound)
def disappear(points, p_disappear):
return None if np.random.uniform() < p_disappear else points
def shift_by_uerr(annotation, uerr):
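# Perturbs one keypoint annotation by averaging four error modes (rotation,
# scaling, Gaussian jitter and heavy-tailed Cauchy jumps), each scaled by the
# simulated user error uerr; multiplying by np.abs(np.sign(annotation)) keeps
# coordinates that are exactly zero (presumably unannotated points) at zero.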
shifts = [
alter_rotation(annotation, np.random.normal(0, 0.5 * uerr) * np.pi / 8),
alter_magnitude(annotation, np.random.normal(0, 0.3 * uerr)),
alter_normal_jump(annotation, 30 * uerr),
alter_cauchy_jump(annotation, 30 * uerr, 100),
]
return np.mean(shifts, axis=0) * np.abs(np.sign(annotation))
def create_user_data(uid, df, pct_items, u_err, difficulty_dict=None, extraarg=None):
items = df["item"].unique()
n_items_labeled = int(np.round(pct_items * len(items)))
items_labeled = sorted(np.random.choice(items, n_items_labeled, replace=False))
labels = []
for item in items_labeled:
gold = df[df["item"] == item]["gold"].values[0]
shifted_kpobjs = [shift_by_uerr(kpobj, u_err) for kpobj in gold]
kpobjs = [shifted_kpobjs[0]] + [disappear(kp, u_err / 2) for kp in shifted_kpobjs[1:]]
kpobjs = [kp for kp in kpobjs if kp is not None]
labels.append(kpobjs)
dfdict = {
"uid": [uid] * len(items_labeled),
"item": items_labeled,
"annotation": labels,
}
return pd.DataFrame(dfdict)
class KeypointSimulator(simulation.Simulator):
def __init__(self, rawdata_dir='data/coco/person_keypoints_train2017.json', max_items=500, minlabelsperitem=4):
with open(rawdata_dir) as f:
dataset = json.load(f)
self.category_id_skeletons = {c["id"]: np.array(c["skeleton"])-1 for c in iter(dataset["categories"])}
img_label = {}
for dataset_annotation in iter(dataset["annotations"]):
v = img_label.setdefault(dataset_annotation["image_id"], [])
v.append(dataset_annotation)
img_label_minlen = {k: v for k, v in img_label.items() if len(v) >= minlabelsperitem}
i = 0
rows = []
item = []
annotation = []
category = []
for dataset_annotations in iter(img_label_minlen.values()):
for dataset_annotation in dataset_annotations:
kp = np.reshape(dataset_annotation["keypoints"], (-1,3))
kp = kp[kp[:,2]>-90][:,:2]
if len(kp) == 0:
continue
item.append(dataset_annotation["image_id"])
annotation.append(kp)
category.append(dataset_annotation["category_id"])
i += 1
if i > max_items:
break
kp_df = pd.DataFrame({"item":item, "gold":annotation, "category":category})
self.df = kp_df.groupby("item")["gold"].apply(list).reset_index()
self.itemdict = utils.make_categorical(self.df, "item")
def create_stan_data(self, n_users, pct_items, err_rates, difficulty_dict):
self.err_rates = err_rates
self.difficulty_dict = difficulty_dict
self.sim_df = simulation.create_sim_df(create_user_data, self.df, n_users, pct_items, err_rates, difficulty_dict)
stan_data = utils.calc_distances(self.sim_df, (lambda x,y: 1 - oks_score_multi(x, y)), label_colname="annotation", item_colname="item")
return stan_data
def sim_uerr_fn(self, uerr_a, uerr_b, n_users):
z = np.abs(np.random.normal(uerr_a, uerr_b, 10000))
return np.quantile(z, np.linspace(0,1,n_users+2)[1:-1])
def sim_diff_fn(self, difficulty_a, difficulty_b):
z = 1 * np.random.beta(difficulty_a, difficulty_b, 10000)
n_items = len(self.df["item"].unique())
return dict(zip(np.arange(n_items), np.quantile(z, np.linspace(0,1,n_items+2)[1:-1])))
| 43.153846
| 143
| 0.64951
| 627
| 4,488
| 4.440191
| 0.275917
| 0.020115
| 0.020115
| 0.016164
| 0.084052
| 0.084052
| 0.065374
| 0.043103
| 0
| 0
| 0
| 0.017796
| 0.223708
| 4,488
| 104
| 144
| 43.153846
| 0.781286
| 0
| 0
| 0.022222
| 0
| 0
| 0.040766
| 0.009133
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.077778
| 0.033333
| 0.344444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb1d08525c852f3c763a5bfd0e70b7e85abb9c4
| 6,592
|
py
|
Python
|
local/controller.py
|
Loptt/home-automation-system
|
f1878596905e022d1d626d485d1a29dc7212f480
|
[
"MIT"
] | null | null | null |
local/controller.py
|
Loptt/home-automation-system
|
f1878596905e022d1d626d485d1a29dc7212f480
|
[
"MIT"
] | null | null | null |
local/controller.py
|
Loptt/home-automation-system
|
f1878596905e022d1d626d485d1a29dc7212f480
|
[
"MIT"
] | null | null | null |
import requests
import time
import os
import sys
import json
import threading
from getpass import getpass
import schedule
import event as e
import configuration as c
import RPi.GPIO as GPIO
#SERVER_URL = "https://home-automation-289621.uc.r.appspot.com"
#SERVER_URL = "http://127.0.0.1:4747"
SERVER_URL = "http://192.168.11.117:4747"
pins = [2, 3, 4, 7, 8, 9, 10, 11, 14, 15, 17, 18, 22, 23, 24, 27]
def calculate_max_duration(time):
hours = 23 - time.hour
minutes = 60 - time.minute
return hours * 60 + minutes
def turn_on(pin):
print("Turn on " + str(pin))
GPIO.output(pin, GPIO.HIGH)
def turn_off(pin):
print("Turn off " + str(pin))
GPIO.output(pin, GPIO.LOW)
def schedule_off(time, day, duration, pin):
new_day = day
end_time = e.Time(0, 0)
if duration > calculate_max_duration(time):
# Next day calculation
new_day = day + 1
off_duration = duration - calculate_max_duration(time)
end_time.hour = off_duration // 60
end_time.minute = off_duration % 60
else:
# Same day calculation
end_time.hour = time.hour + \
(duration // 60) + (time.minute + (duration % 60)) // 60
end_time.minute = (time.minute + duration % 60) % 60
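# Worked example: an event starting at 23:30 with duration 90 exceeds the 30
# minutes left in the day (calculate_max_duration), so the off job rolls over
# to 01:00 on the next day; an event at 10:15 with duration 50 stays on the
# same day and is switched off at 11:05.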
if new_day > 7:
new_day = 1
if new_day == 1:
schedule.every().monday.at(str(end_time)).do(turn_off, pin)
elif new_day == 2:
schedule.every().tuesday.at(str(end_time)).do(turn_off, pin)
elif new_day == 3:
schedule.every().wednesday.at(str(end_time)).do(turn_off, pin)
elif new_day == 4:
schedule.every().thursday.at(str(end_time)).do(turn_off, pin)
elif new_day == 5:
schedule.every().friday.at(str(end_time)).do(turn_off, pin)
elif new_day == 6:
schedule.every().saturday.at(str(end_time)).do(turn_off, pin)
elif new_day == 7:
schedule.every().sunday.at(str(end_time)).do(turn_off, pin)
def schedule_job(event):
GPIO.setup(event.pin, GPIO.OUT)
if len(event.days) == 0 or len(event.days) == 7:
schedule.every().day.at(str(event.time)).do(turn_on, event.pin)
else:
if 1 in event.days:
schedule.every().monday.at(str(event.time)).do(turn_on, event.pin)
schedule_off(event.time, 1, event.duration, event.pin)
if 2 in event.days:
schedule.every().tuesday.at(str(event.time)).do(turn_on, event.pin)
schedule_off(event.time, 2, event.duration, event.pin)
if 3 in event.days:
schedule.every().wednesday.at(str(event.time)).do(turn_on, event.pin)
schedule_off(event.time, 3, event.duration, event.pin)
if 4 in event.days:
schedule.every().thursday.at(str(event.time)).do(turn_on, event.pin)
schedule_off(event.time, 4, event.duration, event.pin)
if 5 in event.days:
schedule.every().friday.at(str(event.time)).do(turn_on, event.pin)
schedule_off(event.time, 5, event.duration, event.pin)
if 6 in event.days:
schedule.every().saturday.at(str(event.time)).do(turn_on, event.pin)
schedule_off(event.time, 6, event.duration, event.pin)
if 7 in event.days:
schedule.every().sunday.at(str(event.time)).do(turn_on, event.pin)
schedule_off(event.time, 7, event.duration, event.pin)
def run_scheduling():
while True:
schedule.run_pending()
time.sleep(1)
def initial_setup():
username = input("Enter your username: ")
password = getpass("Enter your password: ")
pload = json.dumps({"username": username, "password": password})
r = requests.post(SERVER_URL + "/login", data=pload,
headers={'Content-type': 'application/json'})
r_dict = r.json()
if not r_dict["valid"]:
print("Invalid username/password")
print("Run program again to try again")
sys.exit()
print("Successful login...")
print("Saving configuration...")
f = open("config.txt", "w")
f.write(r_dict["user"])
f.close()
return r_dict["user"]
def get_user():
f = open("config.txt", "r")
user = f.readline()
r = requests.get(SERVER_URL + "/users/" + user)
if r.status_code == 200:
print("Successful login...")
return user
else:
print("Invalid user... Reinitializing configuration")
return initial_setup()
def get_configuration(user):
r = requests.get(SERVER_URL + "/configurations/by-user/" + user)
if r.status_code != 200:
print("Error retrieving configuration, check internet connection.")
sys.exit()
r_dict = r.json()
return c.Configuration(
r_dict["systemStatus"], r_dict["rainPercentage"], r_dict["defaultDuration"], r_dict["update"], r_dict["_id"])
def set_update_off(configuration):
r = requests.put(
SERVER_URL + "/configurations/set-off-update/" + configuration.id)
if r.status_code >= 400:
print(
"Error updating configuration status... Possible reconfiguration on next cycle")
else:
print("Update set off.")
def update_schedules(user):
r = requests.get(
SERVER_URL + "/devices/by-user-with-events/" + user)
devices = r.json()
schedule.clear()
GPIO.cleanup()
for device in devices:
for event in device["events"]:
print(event)
schedule_job(e.Event(
device["pin"], event["days"], e.Time(event["time"]["hour"], event["time"]["minute"]), e.Repetition(event["repetition"]["times"], event["repetition"]["date"], event["repetition"]["current"]), event["duration"]))
def setup():
GPIO.setmode(GPIO.BCM)
GPIO.setwarnings(False)
for pin in pins:
GPIO.setup(pin, GPIO.OUT, initial=GPIO.LOW)
GPIO.cleanup()
def main():
setup()
user = ""
if not os.path.isfile("./config.txt"):
print("No configuration found... Initializing configuration")
user = initial_setup()
else:
print("Validating user...")
user = get_user()
print("Initializing routine...")
# Initialize separate thread to run scheduling jobs
thread = threading.Thread(None, run_scheduling, "Schedule")
thread.start()
print("Schedule running.")
while True:
configuration = get_configuration(user)
if configuration.update:
print("Updating schedule...")
update_schedules(user)
set_update_off(configuration)
time.sleep(1)
thread.join()
if __name__ == "__main__":
main()
| 29.168142
| 226
| 0.618174
| 890
| 6,592
| 4.460674
| 0.222472
| 0.038539
| 0.037783
| 0.028212
| 0.347607
| 0.189924
| 0.165743
| 0.153149
| 0.147103
| 0.139547
| 0
| 0.023057
| 0.236802
| 6,592
| 225
| 227
| 29.297778
| 0.76605
| 0.028823
| 0
| 0.103659
| 0
| 0
| 0.135063
| 0.013131
| 0
| 0
| 0
| 0
| 0
| 1
| 0.079268
| false
| 0.02439
| 0.067073
| 0
| 0.176829
| 0.103659
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb40b16f030cc0fc491e5ff712cd9ba3b6fe9c3
| 1,640
|
py
|
Python
|
src/graphql_sqlalchemy/graphql_types.py
|
gzzo/graphql-sqlalchemy
|
54a30d0b2fe2d5a1eb3668f0f7bc6ec3cb366ec4
|
[
"MIT"
] | 12
|
2020-06-11T18:17:46.000Z
|
2021-11-23T04:23:59.000Z
|
src/graphql_sqlalchemy/graphql_types.py
|
gzzo/graphql-sqlalchemy
|
54a30d0b2fe2d5a1eb3668f0f7bc6ec3cb366ec4
|
[
"MIT"
] | 9
|
2020-06-03T21:34:50.000Z
|
2021-05-23T16:48:01.000Z
|
src/graphql_sqlalchemy/graphql_types.py
|
gzzo/graphql-sqlalchemy
|
54a30d0b2fe2d5a1eb3668f0f7bc6ec3cb366ec4
|
[
"MIT"
] | 2
|
2020-07-02T09:59:30.000Z
|
2021-04-13T19:28:48.000Z
|
from typing import Dict, Union
from graphql import (
GraphQLBoolean,
GraphQLFloat,
GraphQLInputField,
GraphQLInt,
GraphQLList,
GraphQLNonNull,
GraphQLScalarType,
GraphQLString,
)
from sqlalchemy import ARRAY, Boolean, Float, Integer
from sqlalchemy.dialects.postgresql import ARRAY as PGARRAY
from sqlalchemy.types import TypeEngine
def get_graphql_type_from_column(column_type: TypeEngine) -> Union[GraphQLScalarType, GraphQLList]:
if isinstance(column_type, Integer):
return GraphQLInt
if isinstance(column_type, Float):
return GraphQLFloat
if isinstance(column_type, Boolean):
return GraphQLBoolean
if isinstance(column_type, (ARRAY, PGARRAY)):
return GraphQLList(get_graphql_type_from_column(column_type.item_type))
return GraphQLString
def get_base_comparison_fields(graphql_type: Union[GraphQLScalarType, GraphQLList]) -> Dict[str, GraphQLInputField]:
return {
"_eq": GraphQLInputField(graphql_type),
"_neq": GraphQLInputField(graphql_type),
"_in": GraphQLInputField(GraphQLList(GraphQLNonNull(graphql_type))),
"_nin": GraphQLInputField(GraphQLList(GraphQLNonNull(graphql_type))),
"_lt": GraphQLInputField(graphql_type),
"_gt": GraphQLInputField(graphql_type),
"_gte": GraphQLInputField(graphql_type),
"_lte": GraphQLInputField(graphql_type),
"_is_null": GraphQLInputField(GraphQLBoolean),
}
def get_string_comparison_fields() -> Dict[str, GraphQLInputField]:
return {"_like": GraphQLInputField(GraphQLString), "_nlike": GraphQLInputField(GraphQLString)}
| 32.8
| 116
| 0.739024
| 157
| 1,640
| 7.452229
| 0.318471
| 0.103419
| 0.14359
| 0.075214
| 0.148718
| 0.05812
| 0.05812
| 0
| 0
| 0
| 0
| 0
| 0.171341
| 1,640
| 49
| 117
| 33.469388
| 0.860927
| 0
| 0
| 0
| 0
| 0
| 0.028659
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.078947
| false
| 0
| 0.131579
| 0.052632
| 0.394737
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb410c38e7b086fc006f0a9169efd98fc6fc76d
| 3,223
|
py
|
Python
|
Knapsack.py
|
byterubpay/mininero1
|
ea6b8017cdbab82011d7f329e7726cc52d1ef431
|
[
"BSD-3-Clause"
] | 182
|
2016-02-05T18:33:09.000Z
|
2022-03-23T12:31:54.000Z
|
Knapsack.py
|
byterubpay/mininero1
|
ea6b8017cdbab82011d7f329e7726cc52d1ef431
|
[
"BSD-3-Clause"
] | 81
|
2016-09-04T14:00:24.000Z
|
2022-03-28T17:22:52.000Z
|
Knapsack.py
|
byterubpay/mininero1
|
ea6b8017cdbab82011d7f329e7726cc52d1ef431
|
[
"BSD-3-Clause"
] | 63
|
2016-02-05T19:38:06.000Z
|
2022-03-07T06:07:46.000Z
|
import Crypto.Random.random as rand
import itertools
import math #for log
import sys
def decomposition(i):
#from stack exchange, don't think it's uniform
while i > 0:
n = rand.randint(1, i)
yield n
i -= n
def Decomposition(i):
while True:
l = list(decomposition(i))
if len(set(l)) == len(l):
return l
def decomposition2(n, s, d, k):
#home-brewed, returns no duplicates, includes the number d
s = s - 1
n = n
while True:
a = [d]
nn = n
#a.append(d)
for i in range(0, s):
a.append(rand.randint(0, n))
a.sort()
#print("a", a)
b = []
c = []
while len(a) > 0:
t = a.pop()
#print(t, a)
if t >= d:
b.append(nn - t)
else:
c.append(nn - t)
nn = t
c.append(nn)
tot = b[:] + c[:]
#print("b", b)
if sum(set(tot)) == n and len(c) > int(k):
return sorted(c), sorted(b)
def decomposition3(n, s, d, k):
#a combination of both methods, designed to get some smaller values
send, change = decomposition2(n, s, d, k)
for i in send:
if i > n / s:
send.remove(i)
send = send + list(Decomposition(i))
for i in change:
if i > n / (s - 1):
change.remove(i)
change = change + list(Decomposition(i))
return send, change
def divv(l, m):
return [a / float(m) for a in l]
def frexp10(x):
exp = int(math.log10(x))
return x / 10**exp, exp
def decideAmounts(totalInputs, toSend, Partitions, k, fuzz):
#fuzz is an optional amount to fuzz the transaction by
#so if you start with a big obvious number like 2000, it might be fuzzed by up to "fuzz" amount
fz = rand.randint(0, int(fuzz * 1000) ) / 1000.0
toSend += fz
g, ii =frexp10(totalInputs)
ii = 10 ** (-1 * min(ii - 2, 0))
print("ii", ii)
M = 10 ** (int(math.log(2 ** Partitions) / math.log(10))) * ii
#M = 10 ** M
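# Worked example: with totalInputs = 12.5 and Partitions = 7, frexp10 gives an
# exponent of 1, so ii = 10 and M = 10**2 * 10 = 1000, i.e. amounts are
# decomposed as integers in thousandths before being divided back out by divv.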
print("multiplier:", M)
totalInputs = int(totalInputs * M)
toSend = int(toSend * M)
change = totalInputs - toSend
send_amounts, change_amounts = decomposition3(totalInputs, Partitions, toSend, k)
all_amounts = send_amounts[:] + change_amounts[:]
rand.shuffle(all_amounts)
print("")
print("change amounts:", divv(change_amounts, M))
print("send amounts:", divv(send_amounts, M))
print("now from the following, how much is sent?")
print("all amounts:", sorted(divv(all_amounts, M)))
print("possible sent amounts:")
amounts = []
for L in range(0, len(all_amounts)+1):
for subset in itertools.combinations(all_amounts, L):
amounts.append(sum(subset))
print("number of possible sent amounts:")
print(len(amounts))
print("2^N:", 2 ** len(all_amounts))
print("number of possible sent amounts duplicates removed:")
print(len(list(set(amounts))))
if len(sys.argv) > 2:
kk = 2
parts = 7
kk = rand.randint(1, int(parts / 4)) #how many sends to demand
fuzz = 1
decideAmounts(float(sys.argv[1]), float(sys.argv[2]), parts, kk, fuzz)
| 29.036036
| 99
| 0.559727
| 467
| 3,223
| 3.837259
| 0.286938
| 0.039063
| 0.030134
| 0.006696
| 0.055804
| 0.035714
| 0
| 0
| 0
| 0
| 0
| 0.025401
| 0.303754
| 3,223
| 110
| 100
| 29.3
| 0.773173
| 0.125659
| 0
| 0.022989
| 0
| 0
| 0.072345
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.08046
| false
| 0
| 0.045977
| 0.011494
| 0.183908
| 0.149425
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb4f5278643eda7e6d9e305ee74cda8346049cd
| 14,601
|
py
|
Python
|
drought_impact_forecasting/models/model_parts/Conv_Transformer.py
|
rudolfwilliam/satellite_image_forecasting
|
164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc
|
[
"MIT"
] | 4
|
2021-12-16T18:32:01.000Z
|
2021-12-28T15:57:27.000Z
|
drought_impact_forecasting/models/model_parts/Conv_Transformer.py
|
rudolfwilliam/satellite_image_forecasting
|
164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc
|
[
"MIT"
] | null | null | null |
drought_impact_forecasting/models/model_parts/Conv_Transformer.py
|
rudolfwilliam/satellite_image_forecasting
|
164ee7e533e1a8d730a0ee9c0062fd9b32e0bcdc
|
[
"MIT"
] | 2
|
2021-10-05T15:01:47.000Z
|
2021-12-28T15:57:14.000Z
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
from einops import rearrange
from .shared import Conv_Block
from ..utils.utils import zeros, mean_cube, last_frame, ENS
class Residual(nn.Module):
def __init__(self, fn):
super().__init__()
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(x, **kwargs) + x
class PreNorm(nn.Module):
def __init__(self, dim, fn):
super().__init__()
self.norm = nn.LayerNorm(dim)
self.fn = fn
def forward(self, x, **kwargs):
return self.fn(torch.stack([self.norm(x[..., i]) for i in range(x.size()[-1])], dim=-1), **kwargs)
class FeedForward(nn.Module):
def __init__(self, kernel_size, num_hidden, dilation_rate, num_conv_layers):
super().__init__()
self.kernel_size = kernel_size
self.num_hidden = num_hidden
self.num_conv_layers = num_conv_layers
self.dilation_rate = dilation_rate
self.conv = Conv_Block(self.num_hidden, self.num_hidden, kernel_size=self.kernel_size,
dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)
def forward(self, x):
return torch.stack([self.conv(x[..., i]) for i in range(x.size()[-1])], dim=-1)
class ConvAttention(nn.Module):
def __init__(self, num_hidden, kernel_size, enc=True, mask=False):
super(ConvAttention, self).__init__()
self.enc = enc
self.mask = mask
self.kernel_size = kernel_size
self.num_hidden = num_hidden
# important note: shared convolution is intentional here
if self.enc:
# 3 times num_hidden for out_channels due to queries, keys & values
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=self.num_hidden, out_channels=3*self.num_hidden, kernel_size=1, padding="same", padding_mode="reflect")
)
else:
# only 2 times num_hidden for keys & values
self.conv1 = nn.Sequential(
nn.Conv2d(in_channels=self.num_hidden, out_channels=2*self.num_hidden, kernel_size=1, padding="same", padding_mode="reflect")
)
self.conv2 = nn.Sequential(
nn.Conv2d(in_channels=self.num_hidden*2, out_channels=1, kernel_size=self.kernel_size, padding="same", padding_mode="reflect")
)
def forward(self, x, enc_out=None):
# s is num queries, t is num keys/values
b, _, _, _, s = x.shape
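# x has shape (batch, channels, height, width, time). In encoder mode each frame
# x[..., i] is passed through the shared 1x1 convolution to produce queries, keys
# and values; in decoder mode x already holds the queries and the keys/values are
# computed from enc_out instead.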
if self.enc:
t = s
qkv_set = torch.stack([self.conv1(x[..., i]) for i in range(t)], dim=-1)
Q, K, V = torch.split(qkv_set, self.num_hidden, dim=1)
else:
# x correspond to queries
t = enc_out.size()[-1]
kv_set = torch.stack([self.conv1(enc_out[..., i]) for i in range(t)], dim=-1)
K, V = torch.split(kv_set, self.num_hidden, dim=1)
Q = x
K_rep = torch.stack([K] * s, dim=-2)
V_rep = torch.stack([V] * s, dim=-1)
Q_rep = torch.stack([Q] * t, dim=-1)
# concatenate queries and keys for cross-channel convolution
Q_K = torch.concat((Q_rep, K_rep), dim=1)
if self.mask:
# only feed in 'previous' keys & values for computing softmax
V_out = []
# for each query
for i in range(t):
Q_K_temp = rearrange(Q_K[..., :i+1, i], 'b c h w t -> (b t) c h w')
extr_feat = rearrange(torch.squeeze(self.conv2(Q_K_temp), dim=1), '(b t) h w -> b h w t', b=b, t=i+1)
attn_mask = F.softmax(extr_feat, dim=-1)
# convex combination over values using weights from attention mask, per channel c
V_out.append(torch.stack([torch.sum(torch.mul(attn_mask, V_rep[:, c, :, :, i, :i+1]), dim=-1) for c in range(V_rep.size()[1])], dim=1))
V_out = torch.stack(V_out, dim=-1)
else:
Q_K = rearrange(Q_K, 'b c h w s t -> (b s t) c h w') # no convolution across time dim!
extr_feat = rearrange(torch.squeeze(self.conv2(Q_K), dim=1), '(b s t) h w -> b h w t s', b=b, t=t)
attn_mask = F.softmax(extr_feat, dim=-2)
V_out = torch.stack([torch.sum(torch.mul(attn_mask, V_rep[:, c, ...]), dim=-2) for c in range(V_rep.size()[1])], dim=1)
return V_out
class PositionalEncoding(nn.Module):
def __init__(self, num_hidden, img_width):
# no differentiation should happen with respect to the params in here!
super(PositionalEncoding, self).__init__()
self.num_hidden = num_hidden
self.img_width = img_width
def _get_sinusoid_encoding_table(self, t, device):
''' Sinusoid position encoding table '''
sinusoid_table = torch.stack([self._get_position_angle_vec(pos_i) for pos_i in range(t)], dim=0)
sinusoid_table[:, :, 0::2] = torch.sin(sinusoid_table[:, :, 0::2]) # even dim
sinusoid_table[:, :, 1::2] = torch.cos(sinusoid_table[:, :, 1::2]) # odd dim
return torch.moveaxis(sinusoid_table, 0, -1)
def _get_position_angle_vec(self, position):
return_list = [torch.ones((1,
self.img_width,
self.img_width),
device=torch.device("cuda:0" if torch.cuda.is_available() else "cpu")) *
(position / np.power(10000, 2 * (hid_j // 2) / self.num_hidden[-1])) for hid_j in range(self.num_hidden[-1])]
return torch.stack(return_list, dim=1)
def forward(self, x, t, single=False):
"""Returns entire positional encoding until step T if not single, otherwise only encoding of time step T."""
if not single:
self.register_buffer('pos_table', self._get_sinusoid_encoding_table(t, x.get_device()))
return torch.squeeze(x + self.pos_table.clone().detach(), dim=0)
else:
if t % 2 == 0:
return x + torch.unsqueeze(torch.sin(self._get_position_angle_vec(t)), dim=-1).clone().detach()
else:
return x + torch.unsqueeze(torch.cos(self._get_position_angle_vec(t)), dim=-1).clone().detach()
class Encoder(nn.Module):
def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width):
super().__init__()
self.num_hidden = num_hidden
self.depth = depth
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.kernel_size = kernel_size
self.img_width = img_width
self.layers = nn.ModuleList([])
self.num_hidden = self.num_hidden
for _ in range(self.depth):
self.layers.append(nn.ModuleList([
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
ConvAttention(kernel_size=self.kernel_size, num_hidden=self.num_hidden[-1], enc=True))),
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
FeedForward(kernel_size=self.kernel_size, num_hidden=self.num_hidden[-1],
dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)))
]))
def forward(self, x):
for attn, ff in self.layers:
x = attn(x)
x = ff(x)
return x
class Decoder(nn.Module):
def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels):
super().__init__()
self.layers = nn.ModuleList([])
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.depth = depth
self.kernel_size = kernel_size
self.img_width = img_width
self.num_hidden = num_hidden
self.num_non_pred_feat = non_pred_channels
for _ in range(self.depth):
self.layers.append(nn.ModuleList([
# (masked) query self-attention
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
ConvAttention(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, mask=True))),
# encoder-decoder attention
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
ConvAttention(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, enc=False))),
# feed forward
Residual(PreNorm([self.num_hidden[-1], self.img_width, self.img_width],
FeedForward(num_hidden=self.num_hidden[-1], kernel_size=self.kernel_size, dilation_rate=self.dilation_rate, num_conv_layers=self.num_conv_layers)))
]))
def forward(self, queries, enc_out):
for query_attn, attn, ff in self.layers:
queries = query_attn(queries)
x = attn(queries, enc_out=enc_out)
x = ff(x)
return x
class Conv_Transformer(nn.Module):
"""Standard, single-headed ConvTransformer like in https://arxiv.org/pdf/2011.10185.pdf"""
def __init__(self, num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels):
super(Conv_Transformer, self).__init__()
self.num_hidden = num_hidden
self.depth = depth
self.num_layers_query_feat = num_layers_query_feat
self.dilation_rate = dilation_rate
self.num_conv_layers = num_conv_layers
self.kernel_size = kernel_size
self.img_width = img_width
self.in_channels = in_channels
self.non_pred_channels = non_pred_channels
self.pos_embedding = PositionalEncoding(self.num_hidden, self.img_width)
self.Encoder = Encoder(num_hidden=self.num_hidden, depth=self.depth, dilation_rate=self.dilation_rate,
num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size, img_width=self.img_width)
self.Decoder = Decoder(num_hidden=self.num_hidden, depth=self.depth, dilation_rate=self.dilation_rate,
num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size, img_width=self.img_width, non_pred_channels=self.non_pred_channels)
self.input_feat_gen = Conv_Block(self.in_channels, self.num_hidden[-1], num_conv_layers=self.num_conv_layers, kernel_size=self.kernel_size)
# TODO (optionally): replace this by SFFN
self.back_to_pixel = nn.Sequential(
nn.Conv2d(self.num_hidden[-1], 4, kernel_size=1)
)
def forward(self, frames, n_predictions):
_, _, _, _, T = frames.size()
feature_map = self.feature_embedding(img=frames, network=self.input_feat_gen)
enc_in = self.pos_embedding(feature_map, T)
# encode all input values
enc_out = torch.concat(self.Encoder(enc_in), dim=-1)
out_list = []
queries = self.feature_embedding(img=feature_map[..., -1], network=self.query_feat_gen)
for _ in range(n_predictions):
dec_out = self.Decoder(queries, enc_out)
pred = self.feature_embedding(dec_out)
out_list.append(pred)
queries = torch.concat((queries, pred), dim=-1)
x = torch.stack(out_list, dim=-1)
return x
def feature_embedding(self, img, network):
generator = network
gen_img = []
for i in range(img.shape[-1]):
gen_img.append(generator(img[..., i]))
gen_img = torch.stack(gen_img, dim=-1)
return gen_img
class ENS_Conv_Transformer(Conv_Transformer):
"""ConvTransformer that employs delta model and can read in non-pred future features, hence taylored to the ENS challenge."""
def __init__(self, num_hidden, output_dim, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels, baseline):
super(ENS_Conv_Transformer, self).__init__(num_hidden, depth, dilation_rate, num_conv_layers, kernel_size, img_width, non_pred_channels, num_layers_query_feat, in_channels - 1)
# remove cloud mask
self.in_channels = self.in_channels - 1
self.baseline = baseline
self.output_dim = output_dim
def forward(self, input_tensor, non_pred_feat=None, prediction_count=1):
baseline = eval(self.baseline + "(input_tensor[:, 0:5, :, :, :], 4)")
b, _, width, height, T = input_tensor.size()
pred_deltas = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())
preds = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())
baselines = torch.zeros((b, self.output_dim, height, width, prediction_count), device = self._get_device())
# remove cloud mask channel for feature embedding
feature_map = torch.concat((input_tensor[:, :4, ...], input_tensor[:, 5:, ...]), dim=1)
features = self.feature_embedding(img=feature_map, network=self.input_feat_gen)
enc_in = torch.stack([self.pos_embedding(features[i, ...], T) for i in range(b)], dim=0)
enc_out = self.Encoder(enc_in)
# first query stems from last input frame
queries = features[..., -1:]
baselines[..., 0] = baseline
pred_deltas[..., 0] = self.back_to_pixel(self.Decoder(queries, enc_out)[..., 0])
preds[..., 0] = pred_deltas[..., 0] + baselines[..., 0]
for t in range(1, prediction_count):
if self.baseline == "mean_cube":
baselines[..., t] = (preds[..., t - 1] + (baselines[..., t - 1] * (T + t)))/(T + t + 1)
if self.baseline == "zeros":
pass
else:
baselines[..., t] = preds[..., t - 1]
# concatenate with non-pred features & feature embedding & do positional encoding
query = self.pos_embedding(self.feature_embedding(torch.concat((preds[..., t-1:t], non_pred_feat[..., t-1:t]), dim=1), network=self.input_feat_gen), t, single=True)
queries = torch.concat((queries, query), dim=-1)
pred_deltas[..., :t] = torch.stack([self.back_to_pixel(self.Decoder(queries, enc_out)[..., i]) for i in range(t)], dim=-1)
preds[..., t] = pred_deltas[..., t] + baselines[..., t]
return preds, pred_deltas, baselines
def _get_device(self):
return next(self.parameters()).device
| 48.996644
| 184
| 0.617423
| 2,013
| 14,601
| 4.217586
| 0.123199
| 0.060424
| 0.061249
| 0.023086
| 0.520612
| 0.44841
| 0.41967
| 0.39258
| 0.382097
| 0.359128
| 0
| 0.012005
| 0.258338
| 14,601
| 298
| 185
| 48.996644
| 0.772001
| 0.083487
| 0
| 0.282609
| 0
| 0
| 0.014618
| 0
| 0
| 0
| 0
| 0.003356
| 0
| 1
| 0.095652
| false
| 0.004348
| 0.030435
| 0.017391
| 0.230435
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb7e53b2c17e731b27a68b654287de75f6d7775
| 1,042
|
py
|
Python
|
src/precon/commands.py
|
Albert-91/precon
|
aaded1d6a5f743b3539ea46b19a37a7bf9930e05
|
[
"MIT"
] | null | null | null |
src/precon/commands.py
|
Albert-91/precon
|
aaded1d6a5f743b3539ea46b19a37a7bf9930e05
|
[
"MIT"
] | null | null | null |
src/precon/commands.py
|
Albert-91/precon
|
aaded1d6a5f743b3539ea46b19a37a7bf9930e05
|
[
"MIT"
] | null | null | null |
import asyncio
import click
from precon.devices_handlers.distance_sensor import show_distance as show_distance_func
from precon.remote_control import steer_vehicle, Screen
try:
import RPi.GPIO as GPIO
except (RuntimeError, ModuleNotFoundError):
import fake_rpi
GPIO = fake_rpi.RPi.GPIO
@click.command(name="rc")
def remote_control() -> None:
loop = asyncio.get_event_loop()
try:
with Screen() as screen:
loop.run_until_complete(steer_vehicle(screen))
except KeyboardInterrupt:
print("Finishing remote control...")
except Exception as e:
print("Raised unexpected error: %s" % e)
finally:
GPIO.cleanup()
@click.command(name="show-distance")
def show_distance() -> None:
loop = asyncio.get_event_loop()
try:
loop.run_until_complete(show_distance_func())
except KeyboardInterrupt:
print("Finishing measuring distance...")
except Exception as e:
print("Raised unexpected error: %s" % e)
finally:
GPIO.cleanup()
| 25.414634
| 87
| 0.690019
| 127
| 1,042
| 5.488189
| 0.377953
| 0.086083
| 0.045911
| 0.05165
| 0.269727
| 0.269727
| 0.269727
| 0.183644
| 0.183644
| 0.183644
| 0
| 0
| 0.213052
| 1,042
| 40
| 88
| 26.05
| 0.85
| 0
| 0
| 0.46875
| 0
| 0
| 0.121881
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0
| 0.1875
| 0
| 0.25
| 0.125
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb813fdb41b3152ecad7b90bfbabd5c02323b45
| 57,607
|
py
|
Python
|
midway.py
|
sjtichenor/midway-ford
|
43bf8770f2edd483d7c27dede8b9ac1fb8f10152
|
[
"MIT"
] | null | null | null |
midway.py
|
sjtichenor/midway-ford
|
43bf8770f2edd483d7c27dede8b9ac1fb8f10152
|
[
"MIT"
] | null | null | null |
midway.py
|
sjtichenor/midway-ford
|
43bf8770f2edd483d7c27dede8b9ac1fb8f10152
|
[
"MIT"
] | null | null | null |
import csv
import string
import ftplib
import math
import time
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from selenium.webdriver.common.by import By
from selenium.webdriver.support.ui import WebDriverWait
from selenium.webdriver.support import expected_conditions as EC
import sqlite3
from lxml import html
import requests
import sys
import midwords
import facebook
import hd_images
import adwords_feeds
import sheets
import random
import sales_specials
import scrape
from pprint import pprint
from pyvirtualdisplay import Display
import locale
locale.setlocale(locale.LC_ALL, 'en_US.utf8')
# Misc stuff
def isNumber(s):
try:
float(s)
return True
except ValueError:
return False
def start_chromedriver():
display = Display(visible=0, size=(800, 800))
display.start()
path_to_chromedriver = 'chromedriver'
browser = webdriver.Chrome(executable_path=path_to_chromedriver)
return browser
# FMC Dealer Scrapes
def randomInterval(): #returns random float roughly between 1.5 and 2.75
return 1.75+1*random.random()-.25*random.random()
def switchDefaultSearch(browser) : # Switch between MyLot/States
#Switch default search back to Dealership Proximity
print('Switching default search...')
browser.get('https://www.vlplus.dealerconnection.com/Search?&searchType=quicksearch')
time.sleep(3)
browser.find_element_by_xpath('//a[@id="ActivateSettings"]').click()
time.sleep(3)
browser.find_element_by_xpath('//a[text()="Search Settings"]').click()
time.sleep(3)
# Check what default is currently set to
tree = html.fromstring(browser.page_source)
currentSetting = tree.xpath('//option[@selected]/text()')
print('Setting Before:', currentSetting)
if 'My Lot' in currentSetting:
print('Switching default search from My Lot to Proximity')
browser.find_element_by_xpath('//select[@id="searchSettingsDefaultSearchMode"]').click()
time.sleep(2)
browser.find_element_by_xpath('//option[@value="6"]').click()
time.sleep(2)
elif 'States' in currentSetting :
print('Switching default search from States to My Lot')
browser.find_element_by_xpath('//select[@id="searchSettingsDefaultSearchMode"]').click()
time.sleep(2)
browser.find_element_by_xpath('//option[@value="1"]').click()
time.sleep(2)
currentSetting = tree.xpath('//option[@selected]/text()')
#print('Setting After:', currentSetting) This doesn't work..
browser.find_element_by_xpath('//a[@id="saveSearchSettings"]').click()
time.sleep(2)
browser.get('https://www.vlplus.dealerconnection.com/Search?&searchType=quicksearch')
time.sleep(2)
print('Finished switching default search...')
return browser
def getVinList() :
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
vinList = []
c.execute('SELECT vin FROM masterInventory where invType = ?', ('New',))
vinTupleList = c.fetchall()
for vinTuple in vinTupleList :
vin = vinTuple[0]
vinList.append(vin)
numVehicles = len(vinList)
conn.commit()
conn.close()
return vinList
def fmcLogin(browser) : #Logs into fmcdealer and returns browser
# Fire up ChomeDriver
# path_to_chromedriver = '/Users/spencertichenor/PycharmProjects/midway/chromedriver'
# browser = webdriver.Chrome(executable_path = path_to_chromedriver)
# Log into FMC Dealer
url = 'https://fmcdealer.com'
browser.get(url)
username = browser.find_element_by_id('DEALER-WSLXloginUserIdInput')
password = browser.find_element_by_id('DEALER-WSLXloginPasswordInput')
username.send_keys('t-spen29')
password.send_keys('Tichenor5')
browser.find_element_by_xpath('//div[@id="DEALER-WSLXloginWSLSubmitButton"]/input').click()
time.sleep(5)
return browser
def navigateToVincent(browser, vin) :
print('\nNavigating to Vincent page for VIN: ' + vin + '...\n\n')
#print('\nSearching for rebate info for vehicle ' + str(k+1) + '/' + str(len(vinList)) + '...')
#print('\n\tVIN: ' + vin + '\n')
browser.get('https://www.vlplus.dealerconnection.com/Search?&searchType=quicksearch')
time.sleep(3)
try :
vinField = browser.find_element_by_id('txtVIN')
vinField.send_keys(vin)
browser.find_element_by_xpath('//input[@value="Search"]').click()
time.sleep(2)
except :
print('VIN FIELD ERROR:')
print(sys.exc_info()[0])
#errorList.append(vin)
#pass this was pass but i think it should be return
return browser
source = browser.page_source
if 'Please broaden your search.' not in source : # Check if vehicle was not found in dealership proximity search
# Click on Vincent button
tree = html.fromstring(source)
vincentUrl = tree.xpath('//a[@title="Smart Vincent"]/@href')
try :
vincentUrl = vincentUrl[0]
browser.get(vincentUrl)
time.sleep(4)
except :
print('Vincent Url Error:')
print(sys.exc_info()[0])
#errorList.append(vin)
#pass
return browser
source = browser.page_source
tree = html.fromstring(source)
if 'Please click the "Close" button to continue with the Sales Process.' in source : # Check for recall warning
browser.find_element_by_xpath('//input[@value="Close"]').click()
time.sleep(2)
if 'value="Certificate Inquiry"' not in source : # Check if vehicle already sold
# Enter ZIP code and click next
try :
zipField = browser.find_element_by_xpath('//div/input[@name="customerZip"]')
zipField.send_keys('55113')
browser.find_element_by_id('primaryButtonId').click()
time.sleep(2)
except :
print('ZIP FIELD ERROR:')
print(sys.exc_info()[0])
#errorList.append(vin)
pass
# Get rebate info
#rebateInfo = scrapeRebateInfo(browser)
else :
#soldList.append(vin)
print('\tIt looks like this vehicle has already been sold.\n\n')
else : # Vehicle not found in Dealership Proximity search
print('\tVehicle not found after searching Dealership Proximity.')
#Switch default search to My Lot
browser = switchDefaultSearch(browser)
try :
vinField = browser.find_element_by_id('txtVIN')
vinField.send_keys(vin)
browser.find_element_by_xpath('//input[@value="Search"]').click()
time.sleep(2)
except :
#errorList.append(vin)
print('VIN FIELD ERROR:')
print(sys.exc_info()[0])
#switchToProximity(browser)
return browser
# Click on Vincent button
source = browser.page_source
tree = html.fromstring(source)
vincentUrl = tree.xpath('//a[@title="Smart Vincent"]/@href')
try :
vincentUrl = vincentUrl[0]
browser.get(vincentUrl)
time.sleep(4)
except :
#errorList.append(vin)
print('Vincent Url Error:')
print(sys.exc_info()[0])
#switchToProximity(browser)
#return browser
source = browser.page_source
tree = html.fromstring(source)
if 'Please click the "Close" button to continue with the Sales Process.' in source : # Check for recall warning
browser.find_element_by_xpath('//input[@value="Close"]').click()
time.sleep(2)
if 'value="Certificate Inquiry"' not in source : # Check if vehicle already sold
# Enter ZIP code and click next
try :
zipField = browser.find_element_by_xpath('//div/input[@name="customerZip"]')
zipField.send_keys('55113')
browser.find_element_by_id('primaryButtonId').click()
time.sleep(2)
except :
#errorList.append(vin)
print('ZIP FIELD ERROR:')
print(sys.exc_info()[0])
#switchToProximity(browser)
#return browser
# Get rebate info
#rebateInfo = scrapeRebateInfo(browser)
else :
#soldList.append(vin)
print('\tIt looks like this vehicle has already been sold.\n\n')
#Switch default search back to Dealership Proximity
#switchToProximity(browser)
#pass
return browser
# print('\nNumber of vehicles appear to have been sold: ' + str(len(soldList)))
# print('Sold List:')
# print(soldList)
# print('\nNumber of vehicles that ran into errors: ' + str(len(errorList)))
# print('Error List:')
# print(errorList)
#print('\n\nFinished getting rebate information.')
def scrapeRebateInfo(page_source) : #input the Vincent page source, return tuple with unconditional rebate info
# Get rebate info
#source = browser.page_source
tree = html.fromstring(page_source)
vin = tree.xpath('//dt[.="VIN:"]/following-sibling::dd/text()')
vin = vin[0].replace('\xa0', ' ').replace('\t', '').replace('\n', '')
rowspans = tree.xpath('//table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textC altRow"]/@rowspan | //table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textC "]/@rowspan')
conditions = tree.xpath('//table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr[@class="programTableHeader"]/td[@style="{border-right:none;}"]/text()')
nums = tree.xpath('//table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textL txtCol "]/a/text() | //table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textL txtCol altRow "]/a/text()')
names = tree.xpath('//table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textL txtCol "]/text() | //table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textL txtCol altRow "]/text()')
amounts = tree.xpath('//table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textR "]/text() | //table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textR altRow"]/text()')
expirations = tree.xpath('//table[@summary="This table displays and lets you choose public program bundles."]/tbody/tr/td[@class="textC highlight noWrap"]/text()')
to_db = (vin,)
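# Note (inferred from the fixed width used below): the rebate table this row feeds
# appears to hold 43 columns (the VIN plus 42 rebate fields, i.e. up to seven rebates
# of six fields each); the padding loops further down fill any unused slots with None.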
if rowspans == [] : # No unconditional rebates
print('No rebates found for this vehicle.\n')
print('Updating rebate info...')
while len(to_db) < 43 :
to_db += (None,)
else : # Yah, it has unconditional rebates
# Clean up Condition info
condition = conditions[0]
condition = condition.replace('\n', '').replace('\t', '').replace(' ', '').replace(':C', ': C')
condition = condition[1:]
condition = removeWeirdChars(condition)
if 'Cash Payment' in condition :
print('\tUnconditional Rebates:\n')
for i in range(int(rowspans[0])) :
num = nums[i].replace('\n', '').replace('\t', '').replace(' ', '')
name = names[i*2+1].replace('\n', '').replace('\t', '').replace(' - ', '').replace('s C', 's C').replace(' ', '').replace('"', '')
amount = amounts[i].replace('\n', '').replace('\t', '').replace(' ', '')
expiration = expirations[i].replace('\n', '').replace('\t', '').replace(' ', '')
if 'SIRIUS' in name : # Fix for the extra 6-month Sirius incentive: zero out its amount
amount = '$0'
if ' - ' not in amount and 'Amount Not Available' not in amount : # workaround for an Oct 2016 rebate format and another for a Dec 2016 rebate
print('\t\tProgram: #' + num)
print('\t\tName: ' + name)
print('\t\tAmount: ' + amount)
print('\t\tExpiration: ' + expiration + '\n')
to_db += (num,) + (name,) + (condition,) + (amount,) + (expiration,) + (condition,) #fix double header
while len(to_db) < 43 :
to_db += (None,)
return to_db
def scrapeLeaseInfo(page_source) :
# Connect to database
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
to_db = ()
# Get rebate info
tree = html.fromstring(page_source)
vin = tree.xpath('//dt[.="VIN:"]/following-sibling::dd/text()')
vin = vin[0].replace('\xa0', ' ').replace('\t', '').replace('\n', '')
vehDesc = tree.xpath('//dt[.="Description:"]/following-sibling::dd/text()')
residualTable = tree.xpath('//table[@class="rateTable"]/tbody/tr/td/text() | //table[@class="rateTable"]/thead/tr/th/text()')
#rclRebateRow = tree.xpath('//tr[td[contains(., "RCL Customer Cash")]]/td/text()')
rclFactorsRow = tree.xpath('//tr[td[contains(., "RCL Factors")]]/td/text()')
rclTermLengths = tree.xpath('//tr[td[contains(., "RCL Factors")]]//th/text()')
rclFactors = tree.xpath('//tr[td[contains(., "RCL Factors")]]//td/text()')
rebateCells = tree.xpath('//tr[td[contains(., "LEASE")]]/following-sibling::*/td/text()')
#print('rebateCells:', rebateCells)
#print('length of rebateCells:', len(rebateCells))
if rebateCells != [] :
print('Lease Rebates:')
rebateDict = {}
for i, cell in enumerate(rebateCells) :
if 'Cash' in cell and 'Fast Cash Certificate' not in cell:
rebateName = cell.replace('\t', '').replace('\n', '').replace(' - ', '')
if '$' in rebateCells[i+2] :
rebateAmount = int(rebateCells[i+2].replace('\t', '').replace('\n', '').replace(' ', '').replace('$', '').replace(',', ''))
rebateExpiration = rebateCells[i+3].replace('\t', '').replace('\n', '').replace(' ', '')
elif '$' in rebateCells[i+3] :
rebateAmount = int(rebateCells[i+3].replace('\t', '').replace('\n', '').replace(' ', '').replace('$', '').replace(',', ''))
rebateExpiration = rebateCells[i+4].replace('\t', '').replace('\n', '').replace(' ', '')
rebateDict[rebateName] = [rebateAmount, rebateExpiration]
print('\tRebate Name:', rebateName)
print('\tRebate Amount:', rebateAmount)
print('\tRebate Expiration:', rebateExpiration)
print('\n')
print('rebateDict:', rebateDict)
totalRebates = 0
for rebateName in rebateDict :
totalRebates += rebateDict[rebateName][0]
vehDesc = vehDesc[0].replace('\xa0', ' ').replace('\t', '').replace('\n', '')
rclResiduals = {}
for i, leaseTerm in enumerate(residualTable[0:4]) :
rclResiduals[leaseTerm + ' Month'] = float(residualTable[i+5])/100
#rclRebateName = rclRebateRow[5].replace('\t', '').replace('\n', '').replace(' - ', '')
#rclRebateAmount = rclRebateRow[8].replace('\t', '').replace('\n', '').replace(' ', '').replace('$', '').replace(',', '')
#rclRebateExpiration = rclRebateRow[9].replace('\t', '').replace('\n', '').replace(' ', '')
rclTermLengths = rclTermLengths[:-1]
for i, termLength in enumerate(rclTermLengths) :
rclTermLengths[i] = int(termLength)
rclFactorsExpiration = rclFactorsRow[8].replace('\t', '').replace('\n', '').replace(' ', '')
factors = {}
for e in rclFactors :
if 'Tier' in e :
tierIndex = rclFactors.index(e)
tier = rclFactors[tierIndex]
tierFactors = rclFactors[tierIndex+1:tierIndex+5]
for i, factor in enumerate(tierFactors) :
tierFactors[i] = float(factor)
factors[tier] = tierFactors
print('VIN:', vin)
print('Vehicle Description:', vehDesc)
#print('RCL Rebate Name:', rclRebateName)
print('Total Rebates:', totalRebates)
#print('RCL Rebate Expiration:', rclRebateExpiration)
print('RCL Lengths:', rclTermLengths)
print('RCL Factors: ', factors) #used to be factors but too hard to deal with everything
print('RCL Factors Expiration:', rclFactorsExpiration)
print('RCL Residual:', rclResiduals)
c.execute('SELECT stock, year, model, vehTrim FROM masterInventory WHERE vin = ?', (vin,))
vehInfo = c.fetchall()
vehInfo = vehInfo[0]
print('vehInfo:', vehInfo)
to_db = (vin,) + vehInfo + (str(rebateDict), totalRebates, str(rclTermLengths), str(factors), rclFactorsExpiration, str(rclResiduals))
#to_db = (vin, str(rebateDict), totalRebates, str(rclTermLengths), str(factors), rclFactorsExpiration, str(rclResiduals))
else :
print('No lease info found.')
to_db = (vin, None, None, None, None, None, None, None, None, None, None)
# Close connection to database
conn.commit()
conn.close()
return to_db
def calculateLeasePayment(vin, termLength, mileage, tier) : # outputs monthly payments. input example: ('1FAHP1231', 36, 15000, 'Tier 0-1')
print('Calculating lease payments for VIN: ' + vin)
leaseParameters = getLeaseParameters(vin)
#print('leaseParameters:', leaseParameters)
if leaseParameters[5] is None : # if there are no lease deals
paymentOptions = (None, None, None, None, None, vin)
else :
msrp = leaseParameters[0]
dealerDiscount = leaseParameters[1]
rebateAmount = leaseParameters[2]
termLengths = leaseParameters[3]
interestRates = leaseParameters[4]
residuals = leaseParameters[5]
termLengthIndex = termLengths.index(termLength)
apr = interestRates[tier][termLengthIndex]
apr += 1 # Juicing the apr by 1%
residual = residuals[str(termLength) + ' Month']
# Adjust residual for mileage
residual += (15000 - mileage)/1500 * .01
residual = round(residual, 2)
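# Illustrative numbers: the mileage adjustment above adds one residual point per
# 1,500 miles/year below 15,000 (e.g. 10,500 miles -> +0.03), and moneyFactor below
# is the standard APR/2400 conversion (e.g. 3.6% APR -> 0.0015).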
taxRate = .07125 # base sales tax rate; any additional local taxes still need to be added
aquisitionFee = 645 # hardcoded acquisition fee; should eventually come from the program data
moneyFactor = apr/2400
salesTax = round(msrp * taxRate, 2) # estimated tax; unclear whether it belongs in the cap cost, so it is zeroed out below
salesTax = 0
signAndDrive = 0 - aquisitionFee - salesTax
downPayments = [signAndDrive, 0, 1000, 2000, 3000]
print('MSRP:', msrp)
print('Dealer Discount:', dealerDiscount)
print('Rebate Amount:', rebateAmount)
print('Term Length:', str(termLength) + ' Month')
print('APR:', apr)
print('Money Factor:', moneyFactor)
print('Residual:', residual)
print('\n\n')
paymentOptions = ()
for downPayment in downPayments :
sellingPrice = msrp - dealerDiscount - rebateAmount
#taxableAmount = sellingPrice - residualValue - downPayment + rentCharge # not accurate
#salesTax = msrp * taxRate
#salesTax = 0
grossCapCost = msrp - dealerDiscount + aquisitionFee + salesTax
capCostReduction = rebateAmount + downPayment
netCapCost = round(grossCapCost - capCostReduction, 2)
residualValue = round(msrp * residual, 2)
depreciation = round(netCapCost - residualValue, 2)
basePayment = round(depreciation/termLength, 2)
rentPayment = round((netCapCost + residualValue) * moneyFactor, 2)
rentCharge = rentPayment*termLength
totalPayment = round(basePayment + rentPayment, 2)
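# Worked example (hypothetical figures): MSRP 30000, dealer discount 2000, rebates 3000,
# acquisition fee 645, 0 down, residual 0.60, 36 months, money factor 0.0015 ->
# grossCapCost 28645, netCapCost 25645, residualValue 18000, depreciation 7645,
# basePayment ~212.36, rentPayment ~65.47, totalPayment ~277.83 before tax.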
print('Down Payment:', downPayment)
print('\n')
print('Gross Cap. Cost:', grossCapCost)
print('Cap. Cost Reduction:', capCostReduction)
print('Net Cap. Cost:', netCapCost)
print('Residual Value:', residualValue)
print('Depreciation:', depreciation)
print('Base Payment:', basePayment)
print('Rent Payment:', rentPayment)
print('Total Monthly Payment:', totalPayment)
print('\n\n\n')
paymentOptions += (totalPayment,)
paymentOptions += (vin,)
#print('Payment Options:', paymentOptions)
return paymentOptions
def scrapeFMC(): # Gets rebate and lease info from FMC Dealer
vinList = getVinList()
#vinList = ['3FA6P0VP1HR195216', '3FA6P0H77HR187150']
#path_to_chromedriver = 'chromedriver'
#browser = webdriver.Chrome(executable_path=path_to_chromedriver)
browser = start_chromedriver()
browser = fmcLogin(browser)
errorList = []
for i, vin in enumerate(vinList) :
print('Vehicle ' + str(i+1) + '/' + str(len(vinList)) + ':\n')
browser = navigateToVincent(browser, vin)
try :
to_db = scrapeRebateInfo(browser.page_source)
updateRebateTable(to_db)
to_db = scrapeLeaseInfo(browser.page_source)
updateLeaseTable(to_db)
#to_db = calculateLeasePayment(vin, 36, 10500, 'Tier 0-1')
#updateLeaseTable(to_db)
except Exception as e:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(e).__name__, e.args)
message += '\nError on line {}'.format(sys.exc_info()[-1].tb_lineno)
print(message)
errorList.append(vin)
continue
print('Error List:', errorList)
print('Number of Errors:', len(errorList))
doubleErrorList = []
for i, vin in enumerate(errorList) : # Re-run all VINs that had errors
print('Vehicle ' + str(i+1) + '/' + str(len(errorList)) + ':\n')
browser = navigateToVincent(browser, vin)
try :
to_db = scrapeRebateInfo(browser.page_source)
updateRebateTable(to_db)
to_db = scrapeLeaseInfo(browser.page_source)
updateLeaseTable(to_db)
except Exception as e:
template = "An exception of type {0} occurred. Arguments:\n{1!r}"
message = template.format(type(e).__name__, e.args)
message += '\nError on line {}'.format(sys.exc_info()[-1].tb_lineno)
print(message)
doubleErrorList.append(vin)
continue
print('Double Error List:', doubleErrorList)
print('Number of Double Errors:', len(doubleErrorList))
print(20*'\n')
def updateVLPlusInventoryTable():
print('Scraping Vehicle Locator..')
# Open connection to database
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
# Delete old data
query = 'DELETE FROM VLPlusInventory'
c.execute(query)
# all the xpath that we're gonna need
vin_list_xpath = '//tr[contains(@class, "vehiclerow")]/@vin'
msrp_list_xpath = '//td[contains(@class, "price")]/a[@class="pdfWindowSticker"]/span/text()'
invoice_list_xpath = '//tr[contains(@class, "vehiclerow")]/td[11]/a/span/text()'
pep_list_xpath = '//tr[contains(@class, "vehiclerow")]/td[7]/span[3]/text()'
order_type_list_xpath = '//a[@onclick="showOrderTypeInfo();"]/span/text()'
engine_list_xpath = '//tr[contains(@class, "vehiclerow")]/td[8]/span[1]/text()'
status_list_xpath = '//tr[contains(@class, "vehiclerow")]/td[1]/@class'
# Log into FMC Dealer
#path_to_chromedriver = 'chromedriver'
#browser = webdriver.Chrome(executable_path=path_to_chromedriver)
browser = start_chromedriver()
browser = fmcLogin(browser)
wait = WebDriverWait(browser, 10)
browser.get('https://www.vlplus.dealerconnection.com/InvMgt/')
time.sleep(randomInterval() * 2)
source = browser.page_source
tree = html.fromstring(source)
vehicle_count = tree.xpath('//th[@class="resultcount"]/text()')
print(vehicle_count)
vehicle_count = vehicle_count[1].split(' ')
vehicle_count_index = vehicle_count.index('vehicles') - 1
vehicle_count = vehicle_count[vehicle_count_index]
vehicle_count = int(vehicle_count)
page_count = math.ceil(vehicle_count/25)
print('Total pages:', page_count)
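# Assumes VL Plus shows 25 vehicles per results page (hence the ceil above);
# e.g. 112 vehicles -> ceil(112/25) = 5 pages.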
for j in range(0, page_count-1): # note: this never scrapes the final page; range(page_count) plus a guard on the last next-page click would cover it
tree = html.fromstring(browser.page_source)
vin_list = tree.xpath(vin_list_xpath)
ugly_msrp_list = tree.xpath(msrp_list_xpath)
ugly_invoice_list = tree.xpath(invoice_list_xpath)
ugly_pep_list = tree.xpath(pep_list_xpath)
ugly_order_type_list = tree.xpath(order_type_list_xpath)
ugly_engine_list = tree.xpath(engine_list_xpath)
ugly_status_list = tree.xpath(status_list_xpath)
# Clean up PEP Codes
msrp_list = []
invoice_list = []
pep_list = []
order_type_list = []
engine_list = []
status_list = []
for k in range(0, len(vin_list)):
msrp_list.append(ugly_msrp_list[k].replace('$', '').replace(',', ''))
if msrp_list[k] != 'n/a':
msrp_list[k] = int(msrp_list[k])
else:
msrp_list[k] = ''
invoice_list.append(ugly_invoice_list[k].replace('$', '').replace(',', ''))
if invoice_list[k] != 'n/a':
invoice_list[k] = int(invoice_list[k])
else:
invoice_list[k] = ''
for pep_code in ugly_pep_list:
pep_list.append(pep_code)
for order_type in ugly_order_type_list:
order_type_list.append(order_type)
for engine in ugly_engine_list:
engine = engine.split('<br>')[0].replace(' ', '').replace('\n', '')
if 'L ' in engine and 'SPD' not in engine and 'SPEED' not in engine:
engine_list.append(engine)
for status in ugly_status_list:
if 'transit' in status:
status_list.append('In Transit')
elif 'plant' in status:
status_list.append('In Plant')
else:
status_list.append('In Stock')
if len(msrp_list) != len(invoice_list):
print('len msrp != invoice')
raise ValueError
if len(pep_list) != len(msrp_list):
print('len pep != msrp')
print(msrp_list)
print(ugly_pep_list)
raise ValueError
print('msrp_list len: ', len(msrp_list))
print('msrp_list: ', msrp_list)
print('invoice_list: ', invoice_list)
print('pep_list: ', pep_list)
print('order_type_list: ', order_type_list)
print('engine_list: ', engine_list)
print('status_list: ', status_list)
to_db = []
for k, vin in enumerate(vin_list):
print('VIN: ', vin)
print('msrp: ', msrp_list[k])
print('invoice: ', invoice_list[k])
print('pep: ', pep_list[k])
print('order_type: ', order_type_list[k])
print('engine: ', engine_list[k], '\n')
if msrp_list[k] < invoice_list[k]:
raise ValueError
to_db.append((vin, msrp_list[k], invoice_list[k], pep_list[k], order_type_list[k], engine_list[k], status_list[k]))
query = 'INSERT OR REPLACE INTO VLPlusInventory (vin, msrp, invoice, pepCode, orderType, engine, status) VALUES (?, ?, ?, ?, ?, ?, ?)'
c.executemany(query, to_db)
conn.commit()
time.sleep(randomInterval())
next_page_xpath = '//a[@page="{}"]'.format(str(j+2))
next_page_link = wait.until(EC.element_to_be_clickable((By.XPATH, next_page_xpath)))
next_page_link.click()
#browser.find_element_by_xpath(next_page_xpath).click()
time.sleep(randomInterval()*2)
conn.close()
def updateMasterInventoryStockStatus():
# Open connection to database
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
# Get all vin in master inv
query = 'SELECT vin FROM masterInventory'
c.execute(query)
master_results = c.fetchall()
master_vin_list = []
for r in master_results:
master_vin_list.append(r[0])
# Get all retail veh in vlplus inv
query = 'SELECT vin, status FROM VLPlusInventory WHERE orderType = ? OR orderType = ?'
to_db = ('1', '2')
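# Assumption: orderType values '1' and '2' correspond to retail/stock orders in
# VL Plus; other order types (e.g. fleet) are intentionally excluded here.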
c.execute(query, to_db)
vlplus_results = c.fetchall()
for r in vlplus_results:
vin = r[0]
vlpus_status = r[1]
print('\n', vin, ':\n\n')
if vin in master_vin_list:
query = 'SELECT status, dateInStock FROM masterInventory WHERE vin = ?'
to_db = (vin,)
c.execute(query, to_db)
result = c.fetchall()
master_status = result[0][0]
date_in_stock = result[0][1]
print(master_status)
if date_in_stock and master_status == 'In Stock':
print('Stock status already set')
continue
elif date_in_stock and master_status != 'In Stock':
print('Updating stock status')
query = 'UPDATE masterInventory SET status = ? WHERE vin = ?'
to_db = ('In Stock', vin)
c.execute(query, to_db)
else:
print('Adding veh to master')
query = 'INSERT OR REPLACE INTO masterInventory (vin, status, invType) VALUES (?, ?, ?)'
to_db = (vin, vlpus_status, 'New')
c.execute(query, to_db)
conn.commit()
conn.close()
# Data stuff
def get_incoming_homenet_file(): # Logs into Homenet FTP server and downloads inventory file
print('Getting CSV file from Homenet feed...')
#autouplinkFilePath = 'spencertichenor.com/home/sjtichenor/public_ftp/incoming/RosevilleMidwayFord' + YEAR + MO + DAY
ftp = ftplib.FTP('spencertichenor.com')
ftp.login(user='ftpbot@spencertichenor.com', passwd='M4lonePovolny')
homenetFileName = 'homenet_feed.csv'
localFilePath = 'data/local_homenet_file.csv'
localFile = open(localFilePath, 'wb')
ftp.retrbinary('RETR ' + homenetFileName, localFile.write, 1024)
print('CSV file from Homenet feed saved at: data/local_homenet_file.csv')
ftp.quit()
localFile.close()
def update_incoming_homenet_table(): # Gets data from local_homenet_file.csv then updates homenetInventory and masterInventory tables
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
print('Updating homenetInventory table with data sent from Homenet FTP feed...')
with open('data/local_homenet_file.csv', 'r') as homenetFile:
# csv.DictReader uses first line in file for column headings by default
dr = csv.DictReader(homenetFile) # comma is default delimiter
to_db = []
homenetVinList = []
## Clean out weird characters
valid_chars = string.ascii_letters + string.digits + ' ' + ':' + '-' + ',' + '&' + '$' + '/' + '.' + '_' + '!'
for i in dr:
for key in i.keys():
s = i[key]
clean = ''.join(c for c in s if c in valid_chars)
i[key] = clean
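# e.g. a field like 'Fiesta(TM) SE 1.6L' with a real trademark symbol becomes
# 'Fiesta SE 1.6L'; anything outside valid_chars is silently dropped.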
#print(key + ': ' + i[key])
#print('\n' + 50*'*' + '\n')
to_db.append((
i['VIN'],
i['Stock'],
i['Type'],
i['Year'],
i['Make'],
i['Model'],
i['Trim'],
i['Body'],
i['MSRP'],
i['SellingPrice'],
i['InternetPrice'],
i['Invoice'],
i['BookValue'],
i['Certified'],
i['ModelNumber'],
i['Doors'],
i['ExteriorColor'],
i['InteriorColor'],
i['EngineCylinders'],
i['EngineDisplacement'],
i['Transmission'],
i['Miles'],
i['DateInStock'],
i['Description'],
i['Options'],
i['Categorized Options'],
i['ImageList'],
i['Style Description'],
i['Drive type'],
i['Wheelbase Code'],
i['Engine Description'],
i['Market Class'],
i['Factory_Codes']
))
homenetVinList.append(i['VIN']) #used later to delete vehicles that aren't in stock anymore
query = ("""
INSERT OR REPLACE INTO homenetInventory (vin, stock, invType, year, make, model, vehTrim, cabStyle, intMSRP, intPrice, intInternetPrice, intInvoice, intGeneralLedger, cpo, modelNumber, doors, exteriorColor, interiorColor, engineCylinders, engineDisplacement, transmission, miles, dateInStock, description, options, optionsCategorized, imageUrls, style, drive, wheelbase, engine, marketClass, factCodes)
VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)
""")
c.executemany(query, to_db)
#c.executemany("INSERT OR REPLACE INTO masterInventory (vin, stock, invType, year, make, model, vehTrim, cabStyle, intMSRP, intPrice, intInternetPrice, intInvoice, intGeneralLedger, cpo, modelNumber, doors, exteriorColor, interiorColor, engineCylinders, engineDisplacement, transmission, miles, dateInStock, description, options, optionsCategorized, imageUrls, style, drive, wheelbase, engine, marketClass, factCodes) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)", to_db)
# the insert above was redundant with updateMasterTable(), so it stays commented out
# Delete vehicles that aren't in stock anymore from Homenet table
currentVinList = []
c.execute('SELECT vin FROM homenetInventory')
tupleVinList = c.fetchall()
for tupleVin in tupleVinList: # Convert tuples to strings in order to compare later
vin = tupleVin[0]
currentVinList.append(vin)
for vin in currentVinList:
if vin not in homenetVinList:
c.execute('DELETE FROM homenetInventory WHERE vin = ?', (vin,))
print('Deleted VIN ' + vin + ' from Homenet Inventory Table.')
conn.commit()
print('Finished updating homenetInventory table.\n')
# Update masterInventory table
print('Updating masterInventory table with data from homenetInventory table...')
query = 'INSERT OR REPLACE INTO masterInventory (vin, stock, invType, year, make, model, vehTrim, cabStyle, intMSRP, intPrice, intInternetPrice, intInvoice, intGeneralLedger, cpo, modelNumber, doors, exteriorColor, interiorColor, engineCylinders, engineDisplacement, transmission, miles, dateInStock, description, options, optionsCategorized, imageUrls, style, drive, wheelbase, engine, marketClass, factCodes) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)'
c.executemany(query, to_db)
c.execute('SELECT vin from masterInventory')
masterVinTupleList = c.fetchall()
for vinTuple in masterVinTupleList:
vin = vinTuple[0]
if vin not in homenetVinList:
c.execute('DELETE FROM masterInventory WHERE vin = ?', (vin,))
print('Deleted VIN ' + vin + ' from Master Inventory Table.')
conn.commit()
conn.close()
def updateMasterTable() :
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
c.execute('SELECT * FROM homenetInventory')
vehTupleList = c.fetchall()
to_db = vehTupleList
print(to_db)
print(len(to_db))
for i in to_db:
print(i)
print(len(i))
c.executemany("INSERT OR REPLACE INTO masterInventory (vin, stock, invType, year, make, model, vehTrim, bodyStyle, intMSRP, intPrice, intInternetPrice, intInvoice, intGeneralLedger, cpo, modelNumber, doors, exteriorColor, interiorColor, engineCylinders, engineDisplacement, transmission, miles, dateinStock, description, options, optionsCategorized, imageUrls) VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?);", to_db)
conn.commit()
# Delete vehicles that are no longer in stock from masterInventory
homenetVinList = []
c.execute('SELECT vin from homenetInventory')
homenetVinTupleList = c.fetchall()
for homenetVinTuple in homenetVinTupleList :
homenetVin = homenetVinTuple[0]
homenetVinList.append(homenetVin)
c.execute('SELECT vin from masterInventory')
masterVinTupleList = c.fetchall()
for vinTuple in masterVinTupleList :
vin = vinTuple[0]
if vin not in homenetVinList :
c.execute('DELETE FROM masterInventory WHERE vin = ?', (vin,))
print('Deleted VIN ' + vin + ' from Master Inventory Table.')
conn.commit()
conn.close()
def removeOldVins(table): # removes VINs that are no longer in masterInventory from the supplied table (table name must come from trusted code; SQLite placeholders can't bind identifiers)
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
masterVinList = []
c.execute('SELECT vin FROM masterInventory')
masterVinTupleList = c.fetchall()
for masterVinTuple in masterVinTupleList :
vin = masterVinTuple[0]
masterVinList.append(vin)
c.execute('SELECT vin FROM {}'.format(table)) # table name interpolated directly; placeholders only bind values, not identifiers
tableVinTupleList = c.fetchall()
for tableVinTuple in tableVinTupleList :
vin = tableVinTuple[0]
if vin not in masterVinList :
c.execute('DELETE FROM {} WHERE vin = ?'.format(table), (vin,))
print('\t' + vin + ' deleted from ' + table + ' table.')
conn.commit()
conn.close()
def compute_highlights(): # Gets masterInventory 'options' field for each veh then finds highlights then adds them to highlights column separated by commas
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
c.execute('SELECT vin, options, year, invType, description, cpo, engine, drive, stock, make, model, marketClass FROM masterInventory')
optionsTupleList = c.fetchall()
for optionsTuple in optionsTupleList:
highlightList = []
highlightStr = ''
vin = optionsTuple[0]
options = optionsTuple[1].lower()
year = optionsTuple[2]
invType = optionsTuple[3]
description = optionsTuple[4].lower()
cpo = optionsTuple[5]
engine = optionsTuple[6]
drive = optionsTuple[7]
stock = optionsTuple[8]
make = optionsTuple[9]
model = optionsTuple[10]
marketClass = optionsTuple[11]
# Get coolest options
if cpo == 'True':
highlightList.append('Certified Pre-Owned')
highlightList.append('100,000-Mile Warranty')
#if year == 2017 and invType == 'New' :
#highlightList.append('Apple CarPlay')
#highlightList.append('Android Auto')
# Highlight Idicators - List of dictionaries where the key is the highlight name and the value is a list of indicator phrases
indicatorList = [
{'One-Owner': ['one owner', 'one-owner']},
{'Low Miles': ['low mile']},
{'Remote Start': ['remote start', 'remote engine start', 'remote auto start']},
{'Technology Package': ['technology package', 'technology pkg']},
{'Cold Weather Package': ['cold weather package']},
{'Appearance Package': ['appearance package']},
{'Moonroof': ['vista roof', 'moonroof', 'glass roof', 'panoramic roof']},
{'Rear Camera': ['rear view camera', 'back-up camera', 'rear-view camera']},
{'Rear Camera w/ Hitch Assist': ['rear view camera w/dynamic hitch assist']},
{'Heated Seats': ['heated leather', 'heated front seats', 'heated bucket']},
{'Heated/Cooled Seats': ['heated & cooled', 'heated and cooled', 'heated/cooled']},
{'Heated Steering Wheel': ['heated steering wheel']},
{'Heated Mirrors': ['heated mirrors']},
{'Tow Package': ['tow package', 'Towing', 'Trailer Hitch']},
{'Trailer Brake Controller': ['trailer brake controller']},
{'Premium Audio System': ['premium audio system', 'premium 9 speaker']},
{'Leather Interior': ['leather seats', 'leather-trimmed', 'leather trimmed']},
{'Bluetooth': ['bluetooth']},
{'USB Connectivity': ['usb']},
{'Apple CarPlay': ['apple carplay']},
{'Android Auto': ['android auto']},
{'Snow Plow Package': ['snow plow package']},
{'Lane-Keeping System': ['lane-keeping system']},
{'Rain-Sensing Wipers': ['rain-sensing wipers']},
{'Park Assist System': ['park assist system']},
{'Sirius': ['sirius', 'satellite radio']},
{'Power Liftgate': ['pwr liftgate', 'power liftgate']},
{'Remote Tailgate': ['remote tailgate']},
{'Push Button Start': ['push button start']},
{'Navigation': ['navigation']},
{'Bedliner': ['bedliner']},
{'Extended Range Fuel Tank': ['extended range']},
{'2nd Row Bucket Seats': ['2nd row bucket seats']},
{'3rd Row Seat': ['3rd row seat', '3rd seat']},
{'Touchscreen': ['touchscreen', 'touch-screen', 'myford touch', 'sync 3']},
{'Keyless Entry': ['keyless', 'keypad entry']},
{'Cruise Control': ['cruise control']},
{'Auto Start-Stop Technology': ['auto start-stop technology']},
{'LED Box Lighting': ['led box lighting']},
]
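# Each entry maps one highlight to the phrases that imply it; in the matching loop
# below, e.g. a description containing 'panoramic roof' adds the 'Moonroof' highlight,
# and the break keeps the same highlight from being appended more than once.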
for i in indicatorList:
highlight = list(i.keys())[0]
phraseList = list(i.values())[0]
for phrase in phraseList:
if phrase in options or phrase in description:
highlightList.append(highlight)
break
highlightList.append(engine)
highlightList.append(drive)
# Remove redundant highlights
redundantList = [
['Heated Seats', 'Heated/Cooled Seats'],
['Rear Camera', 'Rear Camera w/ Hitch Assist'],
['USB Connectivity', 'Bluetooth'],
['Bluetooth', 'Apple CarPlay'],
['Tow Package', 'Trailer Brake Controller']
]
for i in redundantList:
if i[0] in highlightList and i[1] in highlightList:
highlightList.remove(i[0])
for highlight in highlightList:
highlightStr += highlight + ','
if len(highlightStr) > 0: # Get rid of unnecessary comma on end of string
highlightStr = highlightStr[:-1]
# Set Body Style (not really a highlight) - switched to the explicit if-chain below because vans were getting marked as cars when iterating through the old dict, whose order wasn't guaranteed
# indicatorDict = {
# 'Car': ['Car'],
# 'Truck': ['Truck'],
# 'Van': ['Van', 'van'],
# 'SUV': ['Sport Utility Vehicles']
# }
# bodyStyles = indicatorDict.keys()
# for bodyStyle in bodyStyles :
# for indicator in indicatorDict[bodyStyle] :
# if indicator in marketClass :
# style = bodyStyle
if 'Car' in marketClass: # has to come first so cargo van gets listed as Van
style = 'Car'
if 'Truck' in marketClass:
style = 'Truck'
if 'Van' in marketClass or 'van' in marketClass :
style = 'Van'
if 'Sport Utility Vehicles' in marketClass :
style = 'SUV'
# Clean up Model
model = model.replace(' Commercial Cutaway', '').replace(' Sport Fleet', '').replace(' Cutaway', '')
# Clean up Engine
engine = engine.replace(' L', 'L')
print('Vehicle: ' + stock + ' ' + make + ' ' + model)
print('Highlights:', highlightList)
print('BodyStyle:', style)
print('\n')
# Set Status to In Stock
status = 'In Stock'
# Update database
c.execute('UPDATE masterInventory SET highlights = ?, bodyStyle = ?, model = ?, engine = ?, status = ? WHERE vin = ?', (highlightStr, style, model, engine, status, vin,))
conn.commit()
conn.close()
def calculate_pricing():
print('Calculating max discount for each vehicle...\n')
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
# Set dealer discount and total discount
query = ('SELECT vin, intMSRP, intInternetPrice, intTotalRebates, totalConditionalRebates '
'FROM masterInventory '
'WHERE invType = "New" AND intMSRP != 0')
c.execute(query)
results = c.fetchall()
for r in results:
print('r:', r)
vin = r[0]
msrp = r[1]
price_before_rebates = r[2]
unconditional_rebates = r[3]
conditional_rebates = r[4]
dealer_discount = msrp - price_before_rebates
if unconditional_rebates:
best_discount = dealer_discount + unconditional_rebates + conditional_rebates
else:
best_discount = dealer_discount
# Print results
print('\t\tVIN:', vin)
print('\t\tMSRP:', msrp)
print('\t\tPrice before rebates:', price_before_rebates)
print('\t\tDealer Discount:', dealer_discount)
print('\t\tUnconditional Rebates:', unconditional_rebates)
print('\t\tConditional Rebates:', conditional_rebates)
print('\t\tBest Discount:', best_discount, '\n\n')
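# Illustrative numbers: an MSRP of 40000 with an internet price of 37500 gives a
# dealer_discount of 2500; with 1500 in unconditional and 500 in conditional rebates
# the best_discount becomes 4500.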
# Update database
query = 'UPDATE masterInventory SET intTotalDiscount = ? WHERE vin = ?'
to_db = (best_discount, vin)
c.execute(query, to_db)
conn.commit()
conn.close()
print('Finished calculating max discount for each vehicle.\n')
def create_outgoing_homenet_table():
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
query = ("""
CREATE TABLE IF NOT EXISTS outgoingHomenet
(VIN TEXT UNIQUE, comment1 TEXT, misc_price1 INTEGER, comment2 TEXT, misc_price2 INTEGER, comment3 TEXT, misc_price3 INTEGER, comment5 TEXT)
""")
c.execute(query)
conn.commit()
conn.close()
def update_outgoing_homenet_table():
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
c.execute('DELETE FROM outgoingHomenet')
to_db = []
c.execute('SELECT vin, highlights, intTotalRebates, totalConditionalRebates FROM masterInventory')
results = c.fetchall()
for r in results:
vin = r[0]
highlights = r[1]
unconditional_rebates = r[2]
conditional_rebates = r[3]
if not unconditional_rebates:
unconditional_rebates = 0
if not conditional_rebates:
conditional_rebates = 0
to_db.append((vin, highlights, 0, None, unconditional_rebates, None, conditional_rebates, ''))
print('\n\nVIN:', vin)
print('Highlights:', highlights)
print('Unconditional Rebates:', unconditional_rebates)
print('Conditional Rebates:', conditional_rebates)
query = ("""
INSERT OR REPLACE INTO outgoingHomenet (vin, comment1, misc_price1, comment2, misc_price2, comment3, misc_price3, comment5)
VALUES (?, ?, ?, ?, ?, ?, ?, ?)
""")
c.executemany(query, to_db)
conn.commit()
conn.close()
def update_outgoing_homenet_file():
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
c.execute('SELECT vin, comment1, misc_price1, comment2, misc_price2, comment3, misc_price3, comment5 FROM outgoingHomenet')
with open('data/homenet-incentive-feed.csv', 'w') as csv_file:
csv_writer = csv.writer(csv_file, dialect='excel')
csv_writer.writerow([i[0] for i in c.description]) # write headers
csv_writer.writerows(c)
conn.commit()
conn.close()
def upload_outgoing_homenet_file():
print('\nUploading inventory to FTP server for Homenet...')
file_path = 'data/homenet-incentive-feed.csv'
file_name = file_path.split('/')
file_name = file_name[-1]
print('Uploading ' + file_name + ' to FTP server...\n')
file = open(file_path, 'rb')
ftp = ftplib.FTP('iol.homenetinc.com')
ftp.login('hndatafeed', 'gx8m6')
ftp.storbinary('STOR ' + file_name, file, 1024)
file.close()
ftp.quit()
print('Successfully uploaded ' + file_name + ' to homenet folder on FTP server.\n')
def send_feeds_from_homenet():
print('Navigating to Homenet.com to send out feeds to cars.com, CarGurus, etc...')
# Fire up ChromeDriver
browser = start_chromedriver()
wait = WebDriverWait(browser, 10)
# Log into Homenet
#url = 'https://www.homenetiol.com/marketplace/overview'
url = 'https://www.homenetiol.com/login?RedirectUrl=%2fmarketplace%2foverview'
browser.get(url)
username = browser.find_element_by_xpath('//input[@class="username text-value"]')
password = browser.find_element_by_xpath('//input[@class="password text-value"]')
username.send_keys('spencer@rosevillemidwayford.com')
password.send_keys('G3nericwords')
wait.until(EC.element_to_be_clickable((By.XPATH, '//a[@class="login-action button"]'))).click()
wait.until(EC.element_to_be_clickable((By.XPATH, '//a[@class="run-all-button button"]'))).click()
time.sleep(10)
print('Finished sending out feeds.')
def vacuum_db():
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
c.execute("VACUUM")
conn.close()
def figureManagerSpecials():
# Open connection to database
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
url = 'http://www.rosevillemidwayford.com/new-car-sales-roseville-mn'
page = requests.get(url)
tree = html.fromstring(page.content)
stockResults = tree.xpath('//span[contains(@class, "spec-value-stocknumber")]/text()')
specialStockList = []
for specialStock in stockResults :
specialStock = specialStock.replace('#', '')
specialStockList.append(specialStock)
print(specialStockList)
c.execute('SELECT stock FROM masterInventory')
results = c.fetchall()
for r in results:
stock = r[0]
if stock in specialStockList :
print('looks like stock #' + stock + ' is a special!')
query = 'UPDATE masterInventory SET managerSpecial = ? WHERE stock = ?'
to_db = ('True', stock)
c.execute(query, to_db)
else :
print('looks like stock #' + stock + ' is NOT a special!')
query = 'UPDATE masterInventory SET managerSpecial = ? WHERE stock = ?'
to_db = ('False', stock)
c.execute(query, to_db)
conn.commit()
conn.close()
def figureLeaseSpecials():
# Open connection to database
conn = sqlite3.connect('data/inventory.db')
c = conn.cursor()
lease_specials = []
c.execute('SELECT DISTINCT year FROM masterInventory')
year_results = c.fetchall()
for y in year_results:
year = y[0]
# print(year)
query = 'SELECT DISTINCT model FROM masterInventory WHERE year = ? AND leasePayment != ?'
to_db = (year, '')
c.execute(query, to_db)
model_results = c.fetchall()
for m in model_results:
model = m[0]
query = 'SELECT min(leasePayment) FROM masterInventory WHERE year = ? AND model = ?'
to_db = (year, model)
c.execute(query, to_db)
payment_results = c.fetchall()
minPayment = payment_results[0][0] # camelCase name kept because the rest of this function refers to minPayment
query = 'SELECT vin, stock, vehTrim, intMSRP, intPrice, leaseRebateExpiration FROM masterInventory WHERE year = ? AND model = ? AND leasePayment = ?'
to_db = (year, model, minPayment)
c.execute(query, to_db)
veh_results = c.fetchall()
v = veh_results[0] # Just get first vehicle even if there are many
print(v)
vin = v[0]
stock = v[1]
vehTrim = v[2]
msrp = v[3]
price = v[4]
term = 36
residual = None # not returned by the SELECT above; a residual column would need to be added to that query
downPayment = None # likewise not selected here
totalLeaseRebates = None # likewise not selected here
dueAtSigning = None # likewise not selected here
expiration = v[5] # leaseRebateExpiration is the last column returned by the SELECT
# Get data from masterInventory table for rest of required info
c.execute('SELECT bodyStyle, imageUrls, imageUrlsHD, vdp_url, drive FROM masterInventory WHERE vin = ?', (vin,)) # add option codes to this later
master_results = c.fetchall()
if not master_results:
continue
bodyStyle = master_results[0][0] # just take the first matching record; could later prefer one that has a photo
imageUrls = master_results[0][1]
imageUrlsHD = master_results[0][2]
vdp = master_results[0][3]
drive = master_results[0][4]
# option_codes = masterResults[0][4]
# Set image to HD version if available
if imageUrlsHD:
imageUrl = imageUrlsHD
elif imageUrls:
imageUrl = imageUrls.split(',')[0]
else:
continue
minPayment = locale.currency(minPayment, grouping=True).replace('.00', '')
#downPayment = locale.currency(downPayment, grouping=True).replace('.00', '')
#dueAtSigning = locale.currency(dueAtSigning, grouping=True).replace('.00', '')
msrp = locale.currency(msrp, grouping=True).replace('.00', '')
price = locale.currency(price, grouping=True).replace('.00', '')
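# locale.currency(28500, grouping=True) renders as '$28,500.00' under a US locale
# (assumed to be configured elsewhere in the program); the '.00' suffix is stripped
# above to keep the specials copy clean.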
# offer = '<p>' + minPayment + '/month with ' + downPayment + ' down payment.<br><br>Just ' + dueAtSigning + ' due at signing.<br><br>Based on MSRP of ' + msrp + '.</p>'
# title = minPayment + '/month with {} down.'.format(downPayment)
# description = 'Lease term of {} months. Based on MSRP of {} and selling price of {}. Requires {} due at signing.'.format(term, msrp, price, dueAtSigning)
# disclaimer = 'Must take new retail delivery from dealer stock by {}. Requires {} due at signing. Based on MSRP of {} and selling price of {}. See Subject to credit approval. Assumes 10,500 miles/year and Tier 0-1 credit. Tax, title, and license not included. Some restrictions apply. See sales representative for details.'.format(expiration, minPayment, msrp, price)
lease_specials.append({
'vin': vin,
'stock': stock,
'year': year,
'model': model,
'vehTrim': vehTrim,
# 'title': title,
# 'description': description,
'expiration': expiration,
'monthlyPayment': minPayment,
'dueAtSigning': dueAtSigning,
'vdp': vdp,
'imageUrl': imageUrl,
'bodyStyle': bodyStyle,
'msrp': msrp,
'price': price,
# 'disclaimer': disclaimer,
'drive': drive,
# 'option_codes': option_codes
})
print('\nFresh Specials:')
for s in lease_specials:
print('\n')
# print('\n\n', s, '\n')
for k in s.keys():
print(k + ': ' + str(s[k]))
print('\n\n')
# Close connection to database
conn.close()
return lease_specials
def wait_for_next_run(minutes_to_wait):
print('Finished running program. Waiting {} minutes to rerun.'.format(minutes_to_wait))
minutes_to_wait = int(minutes_to_wait)
for i in range(minutes_to_wait, 1, -1):
time.sleep(60)
print('Waiting {} minutes until next run.'.format(i))
def main():
while True:
get_incoming_homenet_file()
update_incoming_homenet_table()
scrape.scrape_cdk()
calculate_pricing()
compute_highlights()
create_outgoing_homenet_table()
update_outgoing_homenet_table()
update_outgoing_homenet_file()
upload_outgoing_homenet_file()
send_feeds_from_homenet()
sales_specials.main()
midwords.main()
hd_images.main()
facebook.main()
#adwords_feeds.main()
sheets.main()
# maybe add a check: if any dealer discounts are negative (and the model isn't a Raptor), re-run
vacuum_db()
wait_for_next_run(30)
if __name__ == '__main__':
main()
| 39.894044
| 537
| 0.591282
| 6,201
| 57,607
| 5.404128
| 0.156588
| 0.006923
| 0.012354
| 0.013727
| 0.369341
| 0.334696
| 0.29835
| 0.272657
| 0.250843
| 0.226015
| 0
| 0.010216
| 0.277813
| 57,607
| 1,443
| 538
| 39.921691
| 0.795279
| 0.142795
| 0
| 0.2867
| 0
| 0.016749
| 0.276741
| 0.045615
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0.005911
| 0.024631
| 0.000985
| 0.06798
| 0.141872
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb871009f40d73e438998df7547b42738178c54
| 3,932
|
py
|
Python
|
monolithe/generators/sdkgenerator.py
|
edwinfeener/monolithe
|
0f024b2ec7d4c5a2229612280e5e559bf2667ba5
|
[
"BSD-3-Clause"
] | 18
|
2015-06-24T18:35:20.000Z
|
2022-01-19T19:04:00.000Z
|
monolithe/generators/sdkgenerator.py
|
edwinfeener/monolithe
|
0f024b2ec7d4c5a2229612280e5e559bf2667ba5
|
[
"BSD-3-Clause"
] | 63
|
2015-11-03T18:57:12.000Z
|
2020-09-30T02:54:49.000Z
|
monolithe/generators/sdkgenerator.py
|
edwinfeener/monolithe
|
0f024b2ec7d4c5a2229612280e5e559bf2667ba5
|
[
"BSD-3-Clause"
] | 38
|
2015-10-23T19:04:44.000Z
|
2021-06-04T08:13:33.000Z
|
# -*- coding: utf-8 -*-
#
# Copyright (c) 2015, Alcatel-Lucent Inc
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import unicode_literals
import os
import shutil
from monolithe.lib import Printer
from monolithe.generators.lib import Generator
from monolithe.generators.managers import MainManager, CLIManager, VanillaManager
from .sdkapiversiongenerator import SDKAPIVersionGenerator
class SDKGenerator(Generator):
def cleanup(self):
output = self.config.get_option("output", "transformer")
language = self.config.language
overrides_path = "%s/%s/__overrides" % (output, language)
if os.path.exists(overrides_path):
shutil.rmtree(overrides_path)
attrs_defaults_path = "%s/%s/__attributes_defaults" % (output, language)
if os.path.exists(attrs_defaults_path):
shutil.rmtree(attrs_defaults_path)
code_header_path = "%s/%s/__code_header" % (output, language)
if os.path.exists(code_header_path):
os.remove(code_header_path)
def generate(self, specification_info):
user_vanilla = self.config.get_option("user_vanilla", "transformer")
output = self.config.get_option("output", "transformer")
name = self.config.get_option("name", "transformer")
lang = self.config.language
if not os.path.exists(os.path.join(output, lang)):
os.makedirs(os.path.join(output, lang))
vanilla_manager = VanillaManager(monolithe_config=self.config)
vanilla_manager.execute(output_path="%s/%s" % (output, lang))
self.install_user_vanilla(user_vanilla_path=user_vanilla, output_path="%s/%s" % (output, lang))
version_generator = SDKAPIVersionGenerator(self.config)
apiversions = []
for info in specification_info:
Printer.log("transforming specifications into %s for version %s..." % (lang, info["api"]["version"]))
apiversions.append(info["api"]["version"])
version_generator.generate(specification_info=specification_info)
Printer.log("assembling...")
manager = MainManager(monolithe_config=self.config)
manager.execute(apiversions=apiversions)
cli_manager = CLIManager(monolithe_config=self.config)
cli_manager.execute()
self.cleanup()
Printer.success("%s generation complete and available in \"%s/%s\"" % (name, output, self.config.language))
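# Flow summary (sketch of what the code above does): generate() copies the vanilla SDK
# skeleton and any user overrides into <output>/<language>, runs SDKAPIVersionGenerator
# for every specification/API version, assembles the package via MainManager, generates
# the CLI via CLIManager, and finally cleans up the __overrides, __attributes_defaults
# and __code_header scratch paths.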
| 44.179775
| 115
| 0.721261
| 490
| 3,932
| 5.677551
| 0.385714
| 0.03954
| 0.010784
| 0.027318
| 0.156722
| 0.12509
| 0.07908
| 0.048886
| 0.048886
| 0.048886
| 0
| 0.001573
| 0.19176
| 3,932
| 88
| 116
| 44.681818
| 0.87382
| 0.392675
| 0
| 0.046512
| 0
| 0
| 0.115205
| 0.011436
| 0
| 0
| 0
| 0
| 0
| 1
| 0.046512
| false
| 0
| 0.162791
| 0
| 0.232558
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cb99804e820098ccccda4d6284924e807ceb66e
| 1,787
|
py
|
Python
|
rllab-taewoo/rllab/plotter/plotter.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
rllab-taewoo/rllab/plotter/plotter.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
rllab-taewoo/rllab/plotter/plotter.py
|
kyuhoJeong11/GrewRL
|
a514698df8d38df34de0bd1667d99927f0aa3885
|
[
"MIT"
] | null | null | null |
import atexit
import sys
if sys.version_info[0] == 2:
from Queue import Empty
else:
from queue import Empty
from multiprocessing import Process, Queue
from rllab.sampler.utils import rollout
import numpy as np
__all__ = [
'init_worker',
'init_plot',
'update_plot'
]
process = None
queue = None
def _worker_start():
env = None
policy = None
max_length = None
try:
while True:
msgs = {}
# Only fetch the last message of each type
while True:
try:
msg = queue.get_nowait()
msgs[msg[0]] = msg[1:]
except Empty:
break
if 'stop' in msgs:
break
elif 'update' in msgs:
env, policy = msgs['update']
# env.start_viewer()
elif 'demo' in msgs:
param_values, max_length = msgs['demo']
policy.set_param_values(param_values)
rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
else:
if max_length:
rollout(env, policy, max_path_length=max_length, animated=True, speedup=5)
except KeyboardInterrupt:
pass
def _shutdown_worker():
if process:
queue.put(['stop'])
queue.close()
process.join()
def init_worker():
print("####################init_worker")
global process, queue
queue = Queue()
process = Process(target=_worker_start)
process.start()
atexit.register(_shutdown_worker)
def init_plot(env, policy):
queue.put(['update', env, policy])
def update_plot(policy, max_length=np.inf):
queue.put(['demo', policy.get_param_values(), max_length])
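# Typical flow (sketch based on the functions above): call init_worker() once at
# startup to spawn the background process, init_plot(env, policy) after the
# environment and policy are constructed, then update_plot(policy) after each
# training iteration to replay a rollout with the latest parameters.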
| 24.479452
| 94
| 0.564633
| 206
| 1,787
| 4.708738
| 0.359223
| 0.064948
| 0.030928
| 0.041237
| 0.119588
| 0.119588
| 0.119588
| 0.119588
| 0.119588
| 0.119588
| 0
| 0.005013
| 0.330162
| 1,787
| 72
| 95
| 24.819444
| 0.805347
| 0.033016
| 0
| 0.172414
| 0
| 0
| 0.057971
| 0.017971
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086207
| false
| 0.017241
| 0.12069
| 0
| 0.206897
| 0.017241
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cbebb0ca1313739b0fe47f6d54aaa9f17675ecf
| 1,949
|
py
|
Python
|
djangito/backends.py
|
mechanicbuddy/djangito
|
07c08a83c57577cbf945bba461219bc0ef2a7695
|
[
"Apache-2.0"
] | null | null | null |
djangito/backends.py
|
mechanicbuddy/djangito
|
07c08a83c57577cbf945bba461219bc0ef2a7695
|
[
"Apache-2.0"
] | null | null | null |
djangito/backends.py
|
mechanicbuddy/djangito
|
07c08a83c57577cbf945bba461219bc0ef2a7695
|
[
"Apache-2.0"
] | null | null | null |
import base64
import json
import jwt
import requests
from django.conf import settings
from django.contrib.auth import get_user_model
from django.contrib.auth.backends import ModelBackend
USER_MODEL = get_user_model()
class ALBAuth(ModelBackend):
def authenticate(self, request, **kwargs):
if request:
self.encoded_jwt = request.META.get('HTTP_X_AMZN_OIDC_DATA')
if self.encoded_jwt:
self.payload = self.decode_alb_jwt()
return self.get_or_create_for_alb()
def decode_alb_jwt(self):
# Step 1: Get the key id from JWT headers (the kid field)
jwt_headers = self.encoded_jwt.split('.')[0]
decoded_jwt_headers = base64.b64decode(jwt_headers)
decoded_jwt_headers = decoded_jwt_headers.decode("utf-8")
decoded_json = json.loads(decoded_jwt_headers)
kid = decoded_json['kid']
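# Illustrative: the decoded header segment of the ALB's x-amzn-oidc-data JWT is a
# small JSON object that includes the signing key id, roughly {"alg": "ES256",
# "kid": "..."}; the kid selects which regional public key to fetch in Step 2.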
# Step 2: Get the public key from regional endpoint
url = f'https://public-keys.auth.elb.us-east-1.amazonaws.com/{kid}'
req = requests.get(url)
pub_key = req.text
# Step 3: Get the payload
return jwt.decode(
self.encoded_jwt,
pub_key,
algorithms=['ES256']
)
def get_or_create_for_alb(self):
user_info = {'username': self.payload['sub'][:150]}
if 'given_name' in self.payload:
user_info['first_name'] = self.payload['given_name'][:30]
elif 'name' in self.payload:
user_info['first_name'] = self.payload['name'][:30]
if 'family_name' in self.payload:
user_info['last_name'] = self.payload['family_name'][:30]
self.user, created = USER_MODEL.objects.get_or_create(
email=self.payload['email'],
defaults=user_info
)
if created:
self.setup_user_profile()
return self.user
def setup_user_profile(self):
pass
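# Usage note (illustrative, not part of the original module): enable this backend by
# adding 'djangito.backends.ALBAuth' to AUTHENTICATION_BACKENDS in settings.py and
# pairing it with middleware that calls authenticate(request) for requests carrying
# the x-amzn-oidc-data header set by the AWS Application Load Balancer.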
| 29.984615
| 75
| 0.626988
| 255
| 1,949
| 4.564706
| 0.356863
| 0.085052
| 0.04811
| 0.043814
| 0.165808
| 0.098797
| 0.07732
| 0.07732
| 0.07732
| 0.07732
| 0
| 0.01683
| 0.268343
| 1,949
| 64
| 76
| 30.453125
| 0.799439
| 0.066188
| 0
| 0
| 0
| 0.021739
| 0.103524
| 0.011564
| 0
| 0
| 0
| 0
| 0
| 1
| 0.086957
| false
| 0.021739
| 0.152174
| 0
| 0.326087
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cbf5ae6b77e5700645e93821c03cc92778db151
| 11,306
|
py
|
Python
|
data_profiler/labelers/regex_model.py
|
gme5078/data-profiler
|
602cc5e4f4463f9b807000abf3893815918d0723
|
[
"Apache-2.0"
] | null | null | null |
data_profiler/labelers/regex_model.py
|
gme5078/data-profiler
|
602cc5e4f4463f9b807000abf3893815918d0723
|
[
"Apache-2.0"
] | null | null | null |
data_profiler/labelers/regex_model.py
|
gme5078/data-profiler
|
602cc5e4f4463f9b807000abf3893815918d0723
|
[
"Apache-2.0"
] | null | null | null |
import json
import os
import sys
import re
import copy
import numpy as np
from data_profiler.labelers.base_model import BaseModel
from data_profiler.labelers.base_model import AutoSubRegistrationMeta
_file_dir = os.path.dirname(os.path.abspath(__file__))
sys.path.append(_file_dir)
class RegexModel(BaseModel, metaclass=AutoSubRegistrationMeta):
def __init__(self, label_mapping=None, parameters=None):
"""
Regex Model Initializer.
Example regex_patterns:
regex_patterns = {
"LABEL_1": [
"LABEL_1_pattern_1",
"LABEL_1_pattern_2",
...
],
"LABEL_2": [
"LABEL_2_pattern_1",
"LABEL_2_pattern_2",
...
],
...
}
Example encapsulators:
encapsulators = {
'start': r'(?<![\w.\$\%\-])',
'end': r'(?:(?=(\b|[ ]))|(?=[^\w\%\$]([^\w]|$))|$)',
}
:param label_mapping: maps labels to their encoded integers
:type label_mapping: dict
:param parameters: Contains all the appropriate parameters for the model.
Possible parameters are:
max_length, max_num_chars, dim_embed
:type parameters: dict
:return: None
"""
# parameter initialization
if not parameters:
parameters = {}
parameters.setdefault('regex_patterns', {})
parameters.setdefault('encapsulators', {'start': '', 'end': ''})
parameters.setdefault('ignore_case', True)
parameters.setdefault('default_label', 'BACKGROUND')
self._epoch_id = 0
# initialize class
self.set_label_mapping(label_mapping)
self._validate_parameters(parameters)
self._parameters = parameters
def _validate_parameters(self, parameters):
"""
Validate the parameters sent in. Raise error if invalid parameters are
present.
:param parameters: parameter dict containing the following parameters:
regex_patterns: patterns associated with each label_mapping
Example regex_patterns:
regex_patterns = {
"LABEL_1": [
"LABEL_1_pattern_1",
"LABEL_1_pattern_2",
...
],
"LABEL_2": [
"LABEL_2_pattern_1",
"LABEL_2_pattern_2",
...
],
...
}
encapsulators: regex to add to start and end of each regex
(used to capture entities inside of text).
Example encapsulators:
encapsulators = {
'start': r'(?<![\w.\$\%\-])',
'end': r'(?:(?=(\b|[ ]))|(?=[^\w\%\$]([^\w]|$))|$)',
}
ignore_case: whether or not to set the regex ignore case flag
default_label: default label to assign when no regex found
:type parameters: dict
:return: None
"""
_retype = type(re.compile('pattern for py 3.6 & 3.7'))
errors = []
list_of_necessary_params = ['encapsulators', 'regex_patterns',
'ignore_case', 'default_label']
# Make sure the necessary parameters are present and valid.
for param in parameters:
value = parameters[param]
if param == 'encapsulators' and (
not isinstance(value, dict)
or 'start' not in value
or 'end' not in value):
errors.append(
"`{}` must be a dict with keys 'start' and 'end'".format(
param
))
elif param == 'regex_patterns':
if not isinstance(value, dict):
errors.append('`{}` must be a dict of regex pattern lists.'.
format(param))
continue
for key in value:
if key not in self.label_mapping:
errors.append(
"`{}` was a regex pattern not found in the "
"label_mapping".format(key))
elif not isinstance(value[key], list):
errors.append(
"`{}` must be a list of regex patterns, i.e."
"[pattern_1, pattern_2, ...]".format(key))
else:
for i in range(len(value[key])):
if not isinstance(value[key][i], (_retype, str)):
errors.append(
"`{}`, pattern `{}' was not a valid regex "
"pattern (re.Pattern, str)".format(key, i))
elif isinstance(value[key][i], str):
try:
re.compile(value[key][i])
except re.error as e:
errors.append(
"`{}`, pattern {} was not a valid regex"
" pattern: {}".format(key, i, str(e)))
elif param == 'ignore_case' \
and not isinstance(parameters[param], bool):
errors.append("`{}` must be a bool.".format(param))
elif param == 'default_label' \
and not isinstance(parameters[param], str):
errors.append("`{}` must be a string.".format(param))
elif param not in list_of_necessary_params:
errors.append("`{}` is not an accepted parameter.".format(
param))
if errors:
raise ValueError('\n'.join(errors))
def _construct_model(self):
pass
def _reconstruct_model(self):
pass
def _need_to_reconstruct_model(self):
pass
def reset_weights(self):
pass
def predict(self, data, batch_size=None, show_confidences=False,
verbose=True):
"""
        Applies the regex patterns (from the regex_patterns parameter) to each
        input string and creates predictions for all matching patterns. Each
        pattern has an associated entity, and each character within the string
        is given a True or False identification for each entity. Any character
        not identified by ANY of the regex patterns is considered a background
        character and is assigned the default_label value.
        :param data: list of strings to predict upon
        :type data: iterator
        :param batch_size: not used by this model; accepted only for API
            compatibility
        :type batch_size: N/A
        :param show_confidences: whether user wants prediction confidences
        :type show_confidences: bool
        :param verbose: Flag to determine whether to print status or not
        :type verbose: bool
        :return: char level predictions and confidences
        :rtype: dict
"""
start_pattern = ''
end_pattern = ''
regex_patterns = self._parameters['regex_patterns']
default_ind = self.label_mapping[self._parameters['default_label']]
encapsulators = self._parameters['encapsulators']
re_flags = re.IGNORECASE if self._parameters['ignore_case'] else 0
if encapsulators:
start_pattern = encapsulators['start']
end_pattern = encapsulators['end']
pre_compiled_patterns = copy.deepcopy(regex_patterns)
for entity_label, entity_patterns in pre_compiled_patterns.items():
for i in range(len(entity_patterns)):
pattern = (start_pattern
+ pre_compiled_patterns[entity_label][i]
+ end_pattern)
pre_compiled_patterns[entity_label][i] = re.compile(
pattern, flags=re_flags)
# Construct array initial regex predictions where background is
# predicted.
predictions = [np.empty((0,))] * 100
i = 0
for i, input_string in enumerate(data):
# Double array size
if len(predictions) <= i:
predictions.extend([np.empty((0,))] * len(predictions))
pred = np.zeros((len(input_string), self.num_labels), dtype=int)
pred[:, default_ind] = 1
for entity_label, entity_patterns in pre_compiled_patterns.items():
entity_id = self.label_mapping[entity_label]
for re_pattern in entity_patterns:
for each_find in re_pattern.finditer(input_string):
indices = each_find.span(0)
pred[indices[0]:indices[1], default_ind] = 0
pred[indices[0]:indices[1], entity_id] = 1
if verbose:
sys.stdout.flush()
sys.stdout.write(
"\rData Samples Processed: {:d} ".format(i))
predictions[i] = pred
if verbose:
print()
# Trim array size to number of samples
if len(predictions) > i+1:
del predictions[i+1:]
if show_confidences:
conf = copy.deepcopy(predictions)
for i in range(len(conf)):
conf[i] = conf[i] / \
np.linalg.norm(conf[i], axis=1, ord=1, keepdims=True)
return {"pred": predictions, 'conf': conf}
return {"pred": predictions}
@classmethod
def load_from_disk(cls, dirpath):
"""
        Loads the whole model from disk.
        :param dirpath: directory path from which to load the model
        :type dirpath: str
        :return: the loaded model
"""
# load parameters
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'r') as fp:
parameters = json.load(fp)
# load label_mapping
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'r') as fp:
label_mapping = json.load(fp)
loaded_model = cls(label_mapping, parameters)
return loaded_model
def save_to_disk(self, dirpath):
"""
Saves whole model to disk with weights.
:param dirpath: directory path where you want to save the model to
:type dirpath: str
:return: None
"""
if not os.path.isdir(dirpath):
os.makedirs(dirpath)
model_param_dirpath = os.path.join(dirpath, "model_parameters.json")
with open(model_param_dirpath, 'w') as fp:
json.dump(self._parameters, fp)
labels_dirpath = os.path.join(dirpath, "label_mapping.json")
with open(labels_dirpath, 'w') as fp:
json.dump(self.label_mapping, fp)
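# --- Usage sketch (hypothetical) --------------------------------------------
# The class name `RegexModel` below is an assumption based on the methods
# above, not confirmed by this file; adjust it to the real class name.
# label_mapping maps each entity label to a column index in the per-character
# prediction matrix.
label_mapping = {'BACKGROUND': 0, 'SSN': 1}
parameters = {'regex_patterns': {'SSN': [r'\d{3}-\d{2}-\d{4}']}}
model = RegexModel(label_mapping, parameters)
output = model.predict(['my ssn is 123-45-6789'],
                       show_confidences=True, verbose=False)
print(output['pred'][0].shape)  # (num_chars, num_labels), one-hot per character
model.save_to_disk('./regex_model')
reloaded = RegexModel.load_from_disk('./regex_model')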
| 38.719178
| 81
| 0.523527
| 1,161
| 11,306
| 4.936262
| 0.222222
| 0.033502
| 0.013959
| 0.015704
| 0.258768
| 0.203106
| 0.187751
| 0.154947
| 0.154947
| 0.139941
| 0
| 0.006589
| 0.38254
| 11,306
| 291
| 82
| 38.852234
| 0.814353
| 0.285335
| 0
| 0.112583
| 0
| 0
| 0.108749
| 0.005724
| 0
| 0
| 0
| 0
| 0
| 1
| 0.059603
| false
| 0.02649
| 0.05298
| 0
| 0.139073
| 0.006623
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cbf73995d2a6d71959f99c6cb216fdecd75b4e3
| 1,693
|
py
|
Python
|
taller_estructuras_de_control_selectivas/ejercicio_13.py
|
JMosqueraM/algoritmos_y_programacion
|
30dc179b976f1db24401b110496250fbcb98938e
|
[
"MIT"
] | null | null | null |
taller_estructuras_de_control_selectivas/ejercicio_13.py
|
JMosqueraM/algoritmos_y_programacion
|
30dc179b976f1db24401b110496250fbcb98938e
|
[
"MIT"
] | null | null | null |
taller_estructuras_de_control_selectivas/ejercicio_13.py
|
JMosqueraM/algoritmos_y_programacion
|
30dc179b976f1db24401b110496250fbcb98938e
|
[
"MIT"
] | null | null | null |
# Write a program that receives a person's date of birth and, as output,
# prints the name of the corresponding zodiac sign along with the
# person's age.
def zodiaco(DD, MM):
if (((DD >= 22) and (MM == 11)) or ((DD <=21) and (MM == 12))):
return("Sagitario")
if (((DD >= 22) and (MM == 12)) or ((DD <=20) and (MM == 1))):
return("Capricornio")
if (((DD >= 21) and (MM == 1)) or ((DD <=19) and (MM == 2))):
return("Acuario")
if (((DD >= 20) and (MM == 2)) or ((DD <=19) and (MM == 3))):
return("Piscis")
if (((DD >= 21) and (MM == 3)) or ((DD <=20) and (MM == 4))):
return("Aries")
if (((DD >= 21) and (MM == 4)) or ((DD <=21) and (MM == 5))):
return("Tauro")
if (((DD >= 22) and (MM == 5)) or ((DD <=21) and (MM == 6))):
return("Geminis")
if (((DD >= 22) and (MM == 6)) or ((DD <=22) and (MM == 7))):
return("Cancer")
if (((DD >= 23) and (MM == 7)) or ((DD <=23) and (MM == 8))):
return("Leo")
if (((DD >= 24) and (MM == 8)) or ((DD <=22) and (MM == 9))):
return("Virgo")
if (((DD >= 23) and (MM == 9)) or ((DD <=22) and (MM == 10))):
return("Libra")
if (((DD >= 23) and (MM == 10)) or ((DD <=21) and (MM == 11))):
return("Escorpion")
fecha_str = input("Ingrese la fecha de nacimiento (DD/MM/AAAA): ")
fecha = fecha_str.split("/")
fecha_int = []
for elemento in fecha:
fecha_int.append(int(elemento))
dia = fecha_int[0]
mes = fecha_int[1]
ano = fecha_int[2]
signo = zodiaco(dia, mes)
print(f"Siendo que su fecha de nacimiento es {fecha_str}, su signo zodiacal corresponde a {signo} y tiene {abs(ano - 2021)} años")
| 33.196078
| 130
| 0.512109
| 265
| 1,693
| 3.241509
| 0.324528
| 0.139697
| 0.057043
| 0.073341
| 0.268917
| 0
| 0
| 0
| 0
| 0
| 0
| 0.067783
| 0.259303
| 1,693
| 51
| 130
| 33.196078
| 0.617225
| 0.09746
| 0
| 0
| 0
| 0.028571
| 0.16
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028571
| false
| 0
| 0
| 0
| 0.028571
| 0.028571
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cc05adcc568b6fb2373878d0e0ebc62065ed391
| 5,110
|
py
|
Python
|
assignment3/crawler/spiders/benchmark_spider.py
|
vhazali/cs5331
|
3b3618aaa17199ebcd3c01bc6c25ddbdbe4f3d0f
|
[
"MIT"
] | 8
|
2020-02-22T12:47:12.000Z
|
2021-12-03T11:39:19.000Z
|
assignment3/crawler/spiders/benchmark_spider.py
|
vhazali/cs5331
|
3b3618aaa17199ebcd3c01bc6c25ddbdbe4f3d0f
|
[
"MIT"
] | null | null | null |
assignment3/crawler/spiders/benchmark_spider.py
|
vhazali/cs5331
|
3b3618aaa17199ebcd3c01bc6c25ddbdbe4f3d0f
|
[
"MIT"
] | 4
|
2018-08-15T12:58:36.000Z
|
2021-12-29T07:06:29.000Z
|
import re, scrapy
from crawler.items import *
class BenchmarkSpider(scrapy.Spider):
drop_params = True
# Spider name, for use with the scrapy crawl command
name = "benchmarks"
# Constants to get url parts
FULL, PROTOCOL, USER, PASSWORD, SUBDOMAIN, DOMAIN, TOP_LEVEL_DOMAIN, PORT_NUM, PATH, PAGE, GET_PARAMS, HASHTAGS = range(12)
# List of start urls to start crawling
start_urls = [
# 'https://app1.com',
# 'https://app2.com',
# 'https://app3.com',
# 'https://app4.com',
# 'https://app5.com',
# 'https://app6.com',
# 'https://app7.com',
# 'https://app8.com',
# 'https://app9.com',
# 'https://app10.com',
# 'https://app11.com',
'http://ec2-54-255-215-139.ap-southeast-1.compute.amazonaws.com/'
]
allowed_domains = [
"app1.com",
"app2.com",
"app3.com",
"app4.com",
"app5.com",
"app6.com",
"app7.com",
"app8.com",
"app9.com",
"app10.com",
"app11.com",
"app12.com",
"app13.com",
"app14.com",
"app15.com",
"app16.com",
"app17.com",
"app18.com",
"app19.com",
"app20.com",
"app21.com"
]
# Set to keep track of visited urls
visited_urls = set(start_urls)
"""
Uses Regex to split up url into components. Groups and what they are:
0 : the full url
1 : Protocol
2 : User
3 : Password
4 : Subdomain
5 : Domain
6 : Top level domain (.com .net etc)
7 : Port number
8 : Path
9 : Page
10: Get parameters
11: Hashtags
"""
def splitUrlIntoParts(self, url, index):
pattern = '(?:([^\:]*)\:\/\/)?(?:([^\:\@]*)(?:\:([^\@]*))?\@)?(?:([^\/\:]*)\.(?=[^\.\/\:]*\.[^\.\/\:]*))?([^\.\/\:]*)(?:\.([^\/\.\:#]*))?(?:\:([0-9]*))?(\/[^\?#]*(?=.*?\/)\/)?([^\?#]*)?(?:\?([^#]*))?(?:#(.*))?'
match = re.search(pattern, url)
if match:
if match.group(index):
return match.group(index)
return ''
def populateURLItem(self, item, url):
item['url'] = url
item['protocol'] = self.splitUrlIntoParts(url, self.PROTOCOL)
item['domain'] = self.splitUrlIntoParts(url, self.DOMAIN)
item['path'] = self.splitUrlIntoParts(url, self.PATH)
item['page'] = self.splitUrlIntoParts(url, self.PAGE)
item['get_params'] = self.splitUrlIntoParts(url, self.GET_PARAMS)
def getUrlWithoutParams(self, url):
# Pattern looks out for a question mark that marks start of params
# Assumption is that url is already valid
pattern = '([^? ]+).*'
match = re.search(pattern, url)
if match:
if match.group(1):
return match.group(1)
else:
return ''
def isVisited(self, url):
if self.drop_params:
truncated_url = self.getUrlWithoutParams(url)
return truncated_url in self.visited_urls
else :
return url in self.visited_urls
def markAsVisited(self, url):
if self.drop_params:
truncated_url = self.getUrlWithoutParams(url)
self.visited_urls.add(truncated_url)
else:
self.visited_urls.add(url)
# The default method that's called by scrapy for each url in the start_url list
def parse(self, response):
# Get URL item
item = URLItem()
# Get parts of URL item
self.populateURLItem(item, response.url)
yield item
# Look for Forms
# Assumption: forms will have id attribute
# We will be using this id and url to uniquely identify each form
forms = response.css('form')
for form in forms:
formItem = FormItem()
formItem['url'] = response.url
form_id = form.css('::attr(id)').extract_first()
if form_id is None:
form_id = ''
formItem['id_attr'] = form_id
yield formItem
inputs = form.css('input')
for a in inputs:
inputItem = InputItem()
inputItem['url'] = response.url
inputItem['form_id'] = form_id
inputItem['complete'] = a.extract()
inputItem['type_attr'] = a.css('::attr(type)').extract_first()
yield inputItem
# Get url to visit next
links = response.css('a::attr(href)').extract()
for next_page in links:
# Check that url exist
if next_page is not None:
                # Handle odd cases where an href has the form scheme:///domain
next_page = next_page.replace("///", "//", 1)
next_page = response.urljoin(next_page)
# Check that url is not visited yet
if not self.isVisited(next_page):
self.markAsVisited(next_page)
yield scrapy.Request(next_page, callback=self.parse)
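# --- Running the spider programmatically (sketch) ----------------------------
# Equivalent to `scrapy crawl benchmarks`; the module path is assumed from the
# repository layout (crawler/spiders/benchmark_spider.py) and may differ.
from scrapy.crawler import CrawlerProcess
from crawler.spiders.benchmark_spider import BenchmarkSpider

process = CrawlerProcess(settings={'LOG_LEVEL': 'INFO'})
process.crawl(BenchmarkSpider)
process.start()  # blocks until the crawl finishes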
| 33.181818
| 218
| 0.520939
| 567
| 5,110
| 4.622575
| 0.342152
| 0.030523
| 0.045784
| 0.053415
| 0.093857
| 0.078596
| 0.078596
| 0.078596
| 0.078596
| 0.078596
| 0
| 0.023419
| 0.331507
| 5,110
| 154
| 219
| 33.181818
| 0.743852
| 0.164384
| 0
| 0.135417
| 0
| 0.010417
| 0.15
| 0.051031
| 0
| 0
| 0
| 0
| 0
| 1
| 0.0625
| false
| 0.010417
| 0.020833
| 0
| 0.208333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cc2858f2edc96485c6ae59c62afeb79423f9cef
| 1,180
|
py
|
Python
|
ryu/gui/views/router_address_delete.py
|
isams1/Thesis
|
dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3
|
[
"Apache-2.0"
] | 3
|
2019-04-23T11:11:46.000Z
|
2020-11-04T20:14:17.000Z
|
ryu/gui/views/router_address_delete.py
|
isams1/Thesis
|
dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3
|
[
"Apache-2.0"
] | null | null | null |
ryu/gui/views/router_address_delete.py
|
isams1/Thesis
|
dfe03ce60169bd4e5b2eb6f1068a1c89fc9d9fd3
|
[
"Apache-2.0"
] | 3
|
2019-10-03T09:31:42.000Z
|
2021-05-15T04:41:12.000Z
|
import re
import logging
import httplib
import view_base
from models import rt_proxy
LOG = logging.getLogger('ryu.gui')
class RtAddrDel(view_base.ViewBase):
def __init__(self, host, port, dpid, address_id, status=None):
super(RtAddrDel, self).__init__()
self.host = host
self.port = port
self.dpid = dpid
self.address_id = address_id
self.status = status
def run(self):
LOG.debug('Router Address Delete Rule running')
if not self.status:
# set rule
return self._delete_address()
def _delete_address(self):
address = '%s:%s' % (self.host, self.port)
res = {'host': self.host,
'port': self.port,
'status': None}
address_no = {}
address_no['address_id'] = self.address_id
status = rt_proxy.delete_router_address(address, address_no, self.dpid)
if status[0]['command_result']:
command_result = status[0]['command_result']
res['status'] = command_result
else:
res['status'] = status
return self.json_response(res)
| 26.818182
| 79
| 0.594915
| 142
| 1,180
| 4.725352
| 0.330986
| 0.067064
| 0.035768
| 0.059613
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.002404
| 0.294915
| 1,180
| 44
| 80
| 26.818182
| 0.804087
| 0.00678
| 0
| 0.060606
| 0
| 0
| 0.099061
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.151515
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cc5fd243fb313db4d6da43f6b24a969983fb154
| 964
|
py
|
Python
|
Python-Files/model_conversion/convert_to_tflite.py
|
jcgeo9/ML-For-Fish-Recognition
|
0b5faba77d0b2c5452950637f047882c80fa6fb7
|
[
"Apache-2.0"
] | null | null | null |
Python-Files/model_conversion/convert_to_tflite.py
|
jcgeo9/ML-For-Fish-Recognition
|
0b5faba77d0b2c5452950637f047882c80fa6fb7
|
[
"Apache-2.0"
] | null | null | null |
Python-Files/model_conversion/convert_to_tflite.py
|
jcgeo9/ML-For-Fish-Recognition
|
0b5faba77d0b2c5452950637f047882c80fa6fb7
|
[
"Apache-2.0"
] | null | null | null |
# =============================================================================
# Created By : Giannis Kostas Georgiou
# Project : Machine Learning for Fish Recognition (Individual Project)
# =============================================================================
# Description : File to convert saved models to .tflite instances.
# To be used after the desired models are trained and saved
# How to use : Replace the variables in CAPS according to the needs of the dataset
# =============================================================================
import tensorflow as tf
model_path='PATH TO SAVED MODEL'
tflite_model_name='NAME OF THE NEWLY CREATED TFLITE MODEL'
#convert the model by loading the saved model to the converter
converter = tf.lite.TFLiteConverter.from_saved_model(model_path)
tflite_model = converter.convert()
#save the tflite model
with open(tflite_model_name+'.tflite', 'wb') as f:
f.write(tflite_model)
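# --- Sanity check of the converted model (sketch) ----------------------------
# Loads the .tflite file back with the TFLite interpreter and runs a dummy
# input through it; shapes and dtypes are taken from the model itself.
import numpy as np

interpreter = tf.lite.Interpreter(model_path=tflite_model_name + '.tflite')
interpreter.allocate_tensors()
input_details = interpreter.get_input_details()
output_details = interpreter.get_output_details()
dummy = np.zeros(input_details[0]['shape'], dtype=input_details[0]['dtype'])
interpreter.set_tensor(input_details[0]['index'], dummy)
interpreter.invoke()
print(interpreter.get_tensor(output_details[0]['index']).shape)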
| 43.818182
| 79
| 0.572614
| 109
| 964
| 4.972477
| 0.53211
| 0.121771
| 0.055351
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.139004
| 964
| 21
| 80
| 45.904762
| 0.653012
| 0.672199
| 0
| 0
| 0
| 0
| 0.216393
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.142857
| 0
| 0.142857
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cc943b894a8b8ca43a398705ed5a7c52cece87e
| 492
|
py
|
Python
|
src/listIntersect/inter.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | null | null | null |
src/listIntersect/inter.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | null | null | null |
src/listIntersect/inter.py
|
rajitbanerjee/leetcode
|
720fcdd88d371e2d6592ceec8370a6760a77bb89
|
[
"CC0-1.0"
] | 1
|
2021-04-28T18:17:55.000Z
|
2021-04-28T18:17:55.000Z
|
# Definition for singly-linked list.
class ListNode:
def __init__(self, x):
self.val = x
self.next = None
class Solution:
def getIntersectionNode(self, headA: ListNode, headB: ListNode) -> ListNode:
seen = set()
curr = headA
while curr:
seen.add(curr)
curr = curr.next
curr = headB
while curr:
if curr in seen:
return curr
curr = curr.next
return None
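# --- Example usage (illustrative) --------------------------------------------
# Two lists sharing the tail 8 -> 4 -> 5; the shared *node* (not just an equal
# value) is what gets returned.
common = ListNode(8)
common.next = ListNode(4)
common.next.next = ListNode(5)

head_a = ListNode(4)
head_a.next = ListNode(1)
head_a.next.next = common

head_b = ListNode(5)
head_b.next = ListNode(6)
head_b.next.next = ListNode(1)
head_b.next.next.next = common

assert Solution().getIntersectionNode(head_a, head_b) is common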
| 21.391304
| 80
| 0.530488
| 54
| 492
| 4.759259
| 0.481481
| 0.124514
| 0.093385
| 0.124514
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.394309
| 492
| 22
| 81
| 22.363636
| 0.862416
| 0.069106
| 0
| 0.235294
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.117647
| false
| 0
| 0
| 0
| 0.352941
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ccaa4cf179ca9984d4a2effe3502e46bd80d7d5
| 1,214
|
py
|
Python
|
photon_stream_production/tests/test_drs_run_assignment.py
|
fact-project/photon_stream_production
|
ca2f946976c9a9717cfcd9364f2361ef385b45aa
|
[
"MIT"
] | null | null | null |
photon_stream_production/tests/test_drs_run_assignment.py
|
fact-project/photon_stream_production
|
ca2f946976c9a9717cfcd9364f2361ef385b45aa
|
[
"MIT"
] | 2
|
2019-01-17T12:11:27.000Z
|
2019-02-27T14:51:05.000Z
|
photon_stream_production/tests/test_drs_run_assignment.py
|
fact-project/photon_stream_production
|
ca2f946976c9a9717cfcd9364f2361ef385b45aa
|
[
"MIT"
] | null | null | null |
import numpy as np
import photon_stream as ps
import photon_stream_production as psp
import pkg_resources
import os
runinfo_path = pkg_resources.resource_filename(
'photon_stream_production',
os.path.join('tests', 'resources', 'runinfo_20161115_to_20170103.csv')
)
drs_fRunID_for_obs_run = psp.drs_run._drs_fRunID_for_obs_run
def test_drs_run_assignment():
ri = psp.runinfo.read(runinfo_path)
ro = psp.drs_run.assign_drs_runs(ri)
ri = ri[(ri.fNight > 20161229) & (ri.fNight <= 20170102)]
ro = ro[(ro.fNight > 20161229) & (ro.fNight <= 20170102)]
for i, row in ri.iterrows():
assert row.fNight == ro.loc[i, 'fNight']
assert row.fRunID == ro.loc[i, 'fRunID']
if row.fRunTypeKey == psp.runinfo.OBSERVATION_RUN_TYPE_KEY:
first_method_drs_run_id = drs_fRunID_for_obs_run(
runinfo=ri,
fNight=row.fNight,
fRunID=row.fRunID
)
second_method_drs_run_id = ro.loc[i, 'DrsRunID']
if np.isnan(first_method_drs_run_id):
assert np.isnan(second_method_drs_run_id)
else:
assert first_method_drs_run_id == second_method_drs_run_id
| 30.35
| 74
| 0.660626
| 171
| 1,214
| 4.339181
| 0.327485
| 0.072776
| 0.097035
| 0.113208
| 0.230458
| 0
| 0
| 0
| 0
| 0
| 0
| 0.052174
| 0.242175
| 1,214
| 39
| 75
| 31.128205
| 0.754348
| 0
| 0
| 0
| 0
| 0
| 0.074135
| 0.046129
| 0
| 0
| 0
| 0
| 0.137931
| 1
| 0.034483
| false
| 0
| 0.172414
| 0
| 0.206897
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ccca623d7f5e702eea65074c02fe6486e238208
| 10,450
|
py
|
Python
|
autoscaler/azure.py
|
gabrieladt/kops-ec2-autoscaler
|
8b90fa23caaacf9cf0a4310b65667769906af777
|
[
"MIT"
] | null | null | null |
autoscaler/azure.py
|
gabrieladt/kops-ec2-autoscaler
|
8b90fa23caaacf9cf0a4310b65667769906af777
|
[
"MIT"
] | null | null | null |
autoscaler/azure.py
|
gabrieladt/kops-ec2-autoscaler
|
8b90fa23caaacf9cf0a4310b65667769906af777
|
[
"MIT"
] | 1
|
2019-07-08T07:06:27.000Z
|
2019-07-08T07:06:27.000Z
|
import http
import logging
from typing import List, Tuple, MutableMapping
from datetime import datetime
import re
from requests.packages.urllib3 import Retry
import autoscaler.utils as utils
from autoscaler.autoscaling_groups import AutoScalingGroup
from autoscaler.azure_api import AzureApi, AzureScaleSet, AzureScaleSetInstance
from autoscaler.utils import TransformingFuture, AllCompletedFuture, CompletedFuture
logger = logging.getLogger(__name__)
_RETRY_TIME_LIMIT = 30
class AzureBoundedRetry(Retry):
"""
XXX: Azure sometimes sends us a Retry-After: 1200, even when we still have quota, causing our client to appear to hang.
Ignore them and just retry after 30secs
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
@staticmethod
def from_retry(retry):
new_retry = AzureBoundedRetry()
new_retry.total = retry.total
new_retry.connect = retry.connect
new_retry.read = retry.read
new_retry.backoff_factor = retry.backoff_factor
new_retry.BACKOFF_MAX = retry.BACKOFF_MAX
new_retry.status_forcelist = retry.status_forcelist
new_retry.method_whitelist = retry.method_whitelist
return new_retry
def get_retry_after(self, response):
retry_after = super().get_retry_after(response)
if response.status != http.HTTPStatus.TOO_MANY_REQUESTS or retry_after <= _RETRY_TIME_LIMIT:
return retry_after
headers = {}
for header in ['Retry-After',
'x-ms-ratelimit-remaining-subscription-reads',
'x-ms-ratelimit-remaining-subscription-writes',
'x-ms-ratelimit-remaining-tenant-reads',
'x-ms-ratelimit-remaining-tenant-writes',
'x-ms-ratelimit-remaining-subscription-resource-requests',
'x-ms-ratelimit-remaining-subscription-resource-entities-read',
'x-ms-ratelimit-remaining-tenant-resource-requests',
'x-ms-ratelimit-remaining-tenant-resource-entities-read']:
value = response.getheader(header)
if value is not None:
headers[header] = value
logger.warn("Azure request throttled: {}".format(headers))
return _RETRY_TIME_LIMIT
class AzureGroups(object):
def __init__(self, resource_groups, slow_scale_classes, client: AzureApi):
self.resource_groups = resource_groups
self.slow_scale_classes = slow_scale_classes
self.client = client
def get_all_groups(self, kube_nodes):
groups = []
if self.client:
for resource_group in self.resource_groups:
scale_sets_by_type = {}
for scale_set in self.client.list_scale_sets(resource_group.name):
scale_sets_by_type.setdefault((scale_set.location, scale_set.instance_type), []).append(scale_set)
for key, scale_sets in scale_sets_by_type.items():
location, instance_type = key
slow_scale = _get_azure_class(instance_type) in self.slow_scale_classes
groups.append(AzureVirtualScaleSet(location, resource_group.name, self.client, instance_type, slow_scale, scale_sets, kube_nodes))
return groups
_CLASS_PAT = re.compile(r'\w+_(?P<class>[A-Z]+).+')
def _get_azure_class(type_):
m = _CLASS_PAT.match(type_)
return m.group('class')
_SCALE_SET_SIZE_LIMIT = 100
# Appears as an unbounded scale set. Currently, Azure Scale Sets have a limit of 100 hosts.
class AzureVirtualScaleSet(AutoScalingGroup):
provider = 'azure'
def __init__(self, region, resource_group, client: AzureApi, instance_type, slow_scale: bool, scale_sets: List[AzureScaleSet], kube_nodes):
self.client = client
self.instance_type = instance_type
self.tags = {}
self.name = 'virtual_scale_set_' + instance_type + '_' + region + '_' + resource_group
self.scale_sets = dict((scale_set.name, scale_set) for scale_set in scale_sets)
self.desired_capacity = sum(scale_set.capacity for scale_set in scale_sets)
self.region = region
self.resource_group = resource_group
self.selectors = dict(self.tags)
# HACK: for matching node selectors
self.selectors['azure/type'] = self.instance_type
self.selectors['azure/class'] = _get_azure_class(self.instance_type)
self.slow_scale = slow_scale
self.min_size = 0
self.max_size = 10000
self.is_spot = False
self.vm_id_to_instance: MutableMapping[str, Tuple[str, AzureScaleSetInstance]] = {}
self.instances = {}
self.timeout_until = None
self.timeout_reason = None
self._global_priority = None
self.no_schedule_taints = {}
for scale_set in scale_sets:
if scale_set.timeout_until is not None:
if self.timeout_until is None or self.timeout_until < scale_set.timeout_until:
self.timeout_until = scale_set.timeout_until
self.timeout_reason = scale_set.name + ": " + scale_set.timeout_reason
if scale_set.priority is not None:
if self._global_priority is None:
self._global_priority = scale_set.priority
else:
self._global_priority = min(scale_set.priority, self._global_priority)
if not self.no_schedule_taints:
self.no_schedule_taints = scale_set.no_schedule_taints
if scale_set.capacity == 0:
continue
for instance in self.client.list_scale_set_instances(scale_set):
self.vm_id_to_instance[instance.vm_id] = (scale_set.name, instance)
self.instances[instance.vm_id] = AzureInstance(instance.vm_id, self.instance_type, instance.launch_time, self.tags)
self.nodes = [node for node in kube_nodes if node.instance_id in self.vm_id_to_instance]
self.unschedulable_nodes = [n for n in self.nodes if n.unschedulable]
self._id = (self.region, self.name)
def is_timed_out(self):
if self.timeout_until and datetime.now(self.timeout_until.tzinfo) < self.timeout_until:
logger.warn("{} is timed out until {} because {}".format(self._id, self.timeout_until, self.timeout_reason))
return True
return False
@property
def global_priority(self):
if self._global_priority is None:
return super().global_priority
return self._global_priority
def get_azure_instances(self):
return self.instances.values()
@property
def instance_ids(self):
return self.vm_id_to_instance.keys()
def set_desired_capacity(self, new_desired_capacity):
"""
sets the desired capacity of the underlying ASG directly.
note that this is for internal control.
for scaling purposes, please use scale() instead.
"""
scale_out = new_desired_capacity - self.desired_capacity
assert scale_out >= 0
if scale_out == 0:
return CompletedFuture(False)
futures = []
for scale_set in sorted(self.scale_sets.values(), key=lambda x: (x.priority, x.name)):
if scale_set.capacity < _SCALE_SET_SIZE_LIMIT:
if self.slow_scale:
new_group_capacity = scale_set.capacity + 1
else:
new_group_capacity = min(_SCALE_SET_SIZE_LIMIT, scale_set.capacity + scale_out)
scale_out -= (new_group_capacity - scale_set.capacity)
if scale_set.provisioning_state == 'Updating':
logger.warn("Update of {} already in progress".format(scale_set.name))
continue
if scale_set.provisioning_state == 'Failed':
logger.error("{} failed provisioning. Skipping it for scaling.".format(scale_set.name))
continue
# Update our cached version
self.scale_sets[scale_set.name].capacity = new_group_capacity
futures.append(self.client.update_scale_set(scale_set, new_group_capacity))
logger.info("Scaling Azure Scale Set {} to {}".format(scale_set.name, new_group_capacity))
if scale_out == 0:
break
if scale_out > 0:
logger.error("Not enough scale sets to reach desired capacity {} for {}".format(new_desired_capacity, self))
self.desired_capacity = new_desired_capacity - scale_out
logger.info("ASG: {} new_desired_capacity: {}".format(self, new_desired_capacity))
return TransformingFuture(True, AllCompletedFuture(futures))
def terminate_instances(self, vm_ids):
vm_ids = list(vm_ids)
instances = {}
for vm_id in vm_ids:
scale_set_name, instance = self.vm_id_to_instance[vm_id]
# Update our cached copy of the Scale Set
self.scale_sets[scale_set_name].capacity -= 1
instances.setdefault(scale_set_name, []).append(instance)
logger.info('Terminated instances %s', vm_ids)
futures = []
for scale_set_name, scale_set_instances in instances.items():
futures.append(self.client.terminate_scale_set_instances(self.scale_sets[scale_set_name], scale_set_instances))
return AllCompletedFuture(futures)
def scale_nodes_in(self, nodes):
"""
scale down asg by terminating the given node.
returns a future indicating when the request completes.
"""
for node in nodes:
self.nodes.remove(node)
return self.terminate_instances(node.instance_id for node in nodes)
def __str__(self):
return 'AzureVirtualScaleSet({name}, {selectors_hash})'.format(name=self.name, selectors_hash=utils.selectors_to_hash(self.selectors))
def __repr__(self):
return str(self)
class AzureInstance(object):
provider = 'azure'
def __init__(self, instance_id, instance_type, launch_time, tags):
self.id = instance_id
self.instance_type = instance_type
self.launch_time = launch_time
self.tags = tags
def __str__(self):
return 'AzureInstance({}, {})'.format(self.id, self.instance_type)
def __repr__(self):
return str(self)
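# --- Illustration of the instance-class extraction (example value) -----------
# For a VM size string such as 'Standard_DS2_v2', _CLASS_PAT captures the run
# of uppercase letters after the underscore, so the extracted "class" is 'DS'.
assert _get_azure_class('Standard_DS2_v2') == 'DS'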
| 40.980392
| 150
| 0.655598
| 1,272
| 10,450
| 5.099057
| 0.187893
| 0.062905
| 0.022202
| 0.025902
| 0.198736
| 0.106383
| 0.03099
| 0.012797
| 0.012797
| 0
| 0
| 0.003619
| 0.259522
| 10,450
| 255
| 151
| 40.980392
| 0.834583
| 0.057416
| 0
| 0.134409
| 0
| 0
| 0.085981
| 0.046321
| 0
| 0
| 0
| 0
| 0.005376
| 1
| 0.102151
| false
| 0
| 0.053763
| 0.032258
| 0.290323
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ccedf375dff61d5b7747bbbaf81aa8a41e6f3f6
| 1,780
|
py
|
Python
|
Python2/tareas/tarea_7.py
|
eveiramirez/python_class
|
7a3830cc92dc842b853b243c6b01e06993faa97e
|
[
"MIT"
] | null | null | null |
Python2/tareas/tarea_7.py
|
eveiramirez/python_class
|
7a3830cc92dc842b853b243c6b01e06993faa97e
|
[
"MIT"
] | null | null | null |
Python2/tareas/tarea_7.py
|
eveiramirez/python_class
|
7a3830cc92dc842b853b243c6b01e06993faa97e
|
[
"MIT"
] | 3
|
2021-04-09T19:12:15.000Z
|
2021-08-24T18:24:58.000Z
|
"""
NAME
tarea_7.py
VERSION
[1.0]
AUTHOR
Ignacio Emmanuel Ramirez Bernabe
CONTACT
iramirez@lcg.unam.mx
GITHUB
https://github.com/eveiramirez/python_class/blob/master/Python2/tareas/tarea_7.py
DESCRIPTION
    This program contains structured arrays for the arrays
    created in exercise 1, which are:
        Production
        Costs
        Costs per g/L
CATEGORY
Numpy
"""
import numpy as np
# Create array with the production of each gene for each temperature
production = np.array([("Gen1", 5, 3), ("Gen2", 11, 7),
("Gen3", 4, 9), ("Gen4", 2, 6)],
dtype=[("name", (np.str_, 10)),
("production_cond1", np.int32),
("production_cond2", np.int32)])
# Create array with the induction costs
costs = np.array([("Gen1", 3.5), ("Gen2", 5), ("Gen3", 7),
("Gen4", 4.3)], dtype=[("name", (np.str_, 10)),
("cost", np.float64)])
# Create array with the costs per g/L for condition 1
pc_cond1 = production["production_cond1"]/costs["cost"]
# Create array with the costs per g/L for condition 2
pc_cond2 = production["production_cond2"]/costs["cost"]
# Create a list with the costs per g/L for each gene, stored as a
# tuple
gene_list = []
for gene in range(0, 4):
gene_list.append((f"Gen{gene+1}", pc_cond1[gene], pc_cond2[gene]))
# Create array with the costs per g/L
prod_costs = np.array(gene_list, dtype=[("name", (np.str_, 10)),
("pc_cond1", np.float64),
("pc_cond2", np.float64)])
# Print the array of costs per g/L
print(prod_costs)
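# --- Field access on the structured arrays (illustrative) --------------------
# Columns are selected by field name, rows by integer index.
print(prod_costs["name"])       # array of gene names
print(prod_costs["pc_cond1"])   # cost per g/L under condition 1
print(prod_costs[0])            # full record for Gen1: (name, pc_cond1, pc_cond2)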
| 29.180328
| 89
| 0.567978
| 237
| 1,780
| 4.177215
| 0.409283
| 0.054545
| 0.060606
| 0.066667
| 0.19596
| 0.111111
| 0.111111
| 0.089899
| 0.062626
| 0
| 0
| 0.048426
| 0.303933
| 1,780
| 60
| 90
| 29.666667
| 0.750605
| 0.45618
| 0
| 0
| 0
| 0
| 0.154574
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.055556
| 0
| 0.055556
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ccf03aa9b400d7a3b6f76334d043ce47040c33d
| 11,857
|
py
|
Python
|
iguanas/pipeline/_base_pipeline.py
|
paypal/Iguanas
|
166ea81b7d370eb4281a27aa449719ed1d38a74a
|
[
"Apache-2.0"
] | 20
|
2021-12-22T14:15:03.000Z
|
2022-03-31T22:46:42.000Z
|
iguanas/pipeline/_base_pipeline.py
|
paypal/Iguanas
|
166ea81b7d370eb4281a27aa449719ed1d38a74a
|
[
"Apache-2.0"
] | 12
|
2022-01-18T16:55:56.000Z
|
2022-03-10T11:39:39.000Z
|
iguanas/pipeline/_base_pipeline.py
|
paypal/Iguanas
|
166ea81b7d370eb4281a27aa449719ed1d38a74a
|
[
"Apache-2.0"
] | 5
|
2021-12-25T07:28:29.000Z
|
2022-02-23T09:40:03.000Z
|
"""
Base pipeline class. Main rule generator classes inherit from this one.
"""
from copy import deepcopy
from typing import List, Tuple, Union, Dict
from iguanas.pipeline.class_accessor import ClassAccessor
from iguanas.utils.typing import PandasDataFrameType, PandasSeriesType
import iguanas.utils.utils as utils
from iguanas.exceptions import DataFrameSizeError
class _BasePipeline:
"""
Base pipeline class. Main pipeline classes inherit from this one.
Parameters
----------
steps : List[Tuple[str, object]]
The steps to be applied as part of the pipeline.
verbose : int, optional
Controls the verbosity - the higher, the more messages. >0 : gives
the overall progress of the training of the pipeline; >1 : shows the
current step being trained.
Attributes
----------
steps_ : List[Tuple[str, object]]
The steps corresponding to the fitted pipeline.
rules : Rules
The Rules object containing the rules produced from fitting the
pipeline.
"""
def __init__(self,
steps: List[Tuple[str, object]],
verbose: int) -> None:
self.steps = steps
self.verbose = verbose
self.steps_ = None
self.rules = None
def get_params(self) -> dict:
"""
Returns the parameters of each step in the pipeline.
Returns
-------
dict
The parameters of each step in the pipeline.
"""
pipeline_params = {}
steps_ = self.steps if self.steps_ is None else self.steps_
for step_tag, step in steps_:
step_param_dict = deepcopy(step.__dict__)
pipeline_params[step_tag] = step_param_dict
# If step inherits from _BasePipeline, call its get_params to get
# the parameters each class in the pipeline
if issubclass(step.__class__, _BasePipeline):
step_param_dict = step.get_params()
pipeline_params.update(step_param_dict)
return pipeline_params
def _update_kwargs(self,
params: dict) -> None:
"""
Updates the given parameters of the given steps in the pipeline.
Parameters
----------
params : dict
A dictionary where each key corresponds to the tag used for the
pipeline step. Each value should be a dictionary of the parameters
(keys) and their new values (values).
"""
for step_tag, step in self.steps:
# If step inherits from _BasePipeline, call its _update_kwargs
if issubclass(step.__class__, _BasePipeline):
step._update_kwargs(params)
if step_tag in params.keys():
# If a parameter in `params` is not in the keyword arguments
# of the class (excl when kwargs is present), raise exception
for param in params[step_tag].keys():
if param not in step.__dict__.keys() and 'kwargs' not in step.__dict__.keys():
raise ValueError(
f'Parameter `{param}` not found in keyword arguments for class in step `{step_tag}`'
)
step.__dict__.update(params[step_tag])
def _pipeline_fit(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict],
y: Union[PandasSeriesType, dict],
sample_weight: Union[PandasSeriesType, dict]) -> None:
"""
Runs the following before applying the `fit` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
y : Union[PandasSeriesType, dict]
The binary target column or dictionary of binary target columns
for each pipeline step.
sample_weight : Union[PandasSeriesType, dict], optional
Row-wise weights or dictionary of row-wise weights for each
pipeline step. Defaults to None.
"""
step = self._check_accessor(step)
X, y, sample_weight = [
utils.return_dataset_if_dict(
step_tag=step_tag, df=df
) for df in (X, y, sample_weight)
]
step.fit(X, y, sample_weight)
def _pipeline_transform(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict]) -> PandasDataFrameType:
"""
Runs the following before applying the `transform` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
step = self._check_accessor(step)
X = utils.return_dataset_if_dict(step_tag=step_tag, df=X)
X = step.transform(X)
self._exception_if_no_cols_in_X(X, step_tag)
return X
def _pipeline_predict(self,
step: object,
X: Union[PandasDataFrameType, dict]) -> PandasSeriesType:
"""
Runs the following before applying the `predict` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
Parameters
----------
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
Returns
-------
PandasSeriesType
The prediction of the final step.
"""
step = self._check_accessor(step)
return step.predict(X)
def _pipeline_fit_transform(self,
step_tag: str,
step: object,
X: Union[PandasDataFrameType, dict],
y: Union[PandasSeriesType, dict],
sample_weight: Union[PandasSeriesType, dict]) -> PandasDataFrameType:
"""
Runs the following before applying the `fit_transform` method of `step`:
1. Checks the parameters of `step` for `ClassAccessor` objects. If a
`ClassAccessor` object is found, the parameter in `step` is updated
with the class attribute denoted by the `ClassAccessor` object.
2. Checks if `X`, `y` or `sample_weight` are dictionaries. If so,
then the dataset aligned to `step_tag` is extracted.
Parameters
----------
step_tag : str
The tag corresponding to the step.
step : object
The step in the pipeline.
X : Union[PandasDataFrameType, dict]
The dataset or dictionary of datasets for each pipeline step.
y : Union[PandasSeriesType, dict]
The binary target column or dictionary of binary target columns
for each pipeline step.
sample_weight : Union[PandasSeriesType, dict], optional
Row-wise weights or dictionary of row-wise weights for each
pipeline step. Defaults to None.
Returns
-------
PandasDataFrameType
The transformed dataset.
"""
step = self._check_accessor(step)
X, y, sample_weight = [
utils.return_dataset_if_dict(
step_tag=step_tag, df=df
) for df in (X, y, sample_weight)
]
X = step.fit_transform(X, y, sample_weight)
self._exception_if_no_cols_in_X(X, step_tag)
return X
def _check_accessor(self,
step: object) -> object:
"""
        Checks whether any of the parameters in the given `step` is of type
        ClassAccessor. If so, it runs the ClassAccessor's `get` method,
        which extracts the given attribute from the given step in the pipeline,
        and injects it into the parameter.
"""
def _check_accessor_iterable(iterable: Union[list, tuple],
pipeline_params: Dict[str, dict]) -> None:
"""
            Iterates through an iterable - if an element is another iterable,
            _check_accessor_iterable is called again. If the element is a
            ClassAccessor, its `get` method is called (which extracts the given
            attribute from the given step in the pipeline) - this attribute is
            then assigned in place of the original element.
"""
for idx, value in enumerate(iterable):
if isinstance(value, (list, tuple)):
_check_accessor_iterable(value, pipeline_params)
elif isinstance(value, ClassAccessor):
try:
iterable[idx] = value.get(pipeline_params)
except TypeError:
raise TypeError(
'`ClassAccessor` object must be within a mutable iterable.'
)
step_param_dict = step.__dict__
for param, value in step_param_dict.items():
# If parameter value is an instantiated class, but not a
# ClassAccessor, call _check_accessor again
if hasattr(value, '__dict__') and value.__dict__ and not isinstance(value, ClassAccessor):
self._check_accessor(value)
# If parameter value is a list or tuple, call
# _check_accessor_iterable
elif isinstance(value, (list, tuple)):
pipeline_params = self.get_params()
_check_accessor_iterable(value, pipeline_params)
# If the parameter value is a ClassAccessor, call its get method
elif isinstance(value, ClassAccessor):
pipeline_params = self.get_params()
step.__dict__[param] = value.get(pipeline_params)
return step
@staticmethod
def _exception_if_no_cols_in_X(X: PandasDataFrameType,
step_tag: str) -> Union[None, DataFrameSizeError]:
"""Raises an exception if `X` has no columns."""
if X.shape[1] == 0:
raise DataFrameSizeError(
f'`X` has been reduced to zero columns after the `{step_tag}` step in the pipeline.'
)
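# --- Minimal sketch of the injection pattern behind _check_accessor ----------
# The class below is a stand-in, not the iguanas API: a placeholder records a
# (step_tag, attribute) pair and is later resolved against the pipeline's
# collected parameters, which is the idea _check_accessor implements.
class _AccessorSketch:
    def __init__(self, step_tag, attribute):
        self.step_tag, self.attribute = step_tag, attribute

    def get(self, pipeline_params):
        # Pull the named attribute out of the tagged step's parameter dict.
        return pipeline_params[self.step_tag][self.attribute]

pipeline_params = {'generator': {'rules': ['Rule_1', 'Rule_2']}}
placeholder = _AccessorSketch('generator', 'rules')
print(placeholder.get(pipeline_params))  # ['Rule_1', 'Rule_2']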
| 40.606164
| 112
| 0.580417
| 1,326
| 11,857
| 5.042232
| 0.151584
| 0.027221
| 0.021388
| 0.022884
| 0.547861
| 0.515256
| 0.481454
| 0.452139
| 0.425815
| 0.425815
| 0
| 0.001425
| 0.348908
| 11,857
| 291
| 113
| 40.745704
| 0.864637
| 0.452222
| 0
| 0.318182
| 0
| 0
| 0.042534
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.090909
| false
| 0
| 0.054545
| 0
| 0.2
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd008f314f201433a589af299e0dc00308ca8c5
| 6,306
|
py
|
Python
|
test_activity_merger.py
|
AlexanderMakarov/activitywatch-ets
|
36e5ac92c7834b9515a54c5d633ae5e45d6928bc
|
[
"MIT"
] | null | null | null |
test_activity_merger.py
|
AlexanderMakarov/activitywatch-ets
|
36e5ac92c7834b9515a54c5d633ae5e45d6928bc
|
[
"MIT"
] | null | null | null |
test_activity_merger.py
|
AlexanderMakarov/activitywatch-ets
|
36e5ac92c7834b9515a54c5d633ae5e45d6928bc
|
[
"MIT"
] | null | null | null |
import unittest
import datetime
from parameterized import parameterized
from activity_merger import Interval
from aw_core.models import Event
from typing import List, Tuple
def _build_datetime(seed: int) -> datetime.datetime:
return datetime.datetime(2000, 1, seed, seed, 0, 0).astimezone(datetime.timezone.utc)
def _build_timedelta(seed: int) -> datetime.timedelta:
return _build_datetime(seed + 1) - _build_datetime(1)
def build_intervals_linked_list(data: List[Tuple[int, bool, int]]) -> Interval:
"""
Builds intervals linked list from the list of tuples. Doesn't check parameters.
:param data: List of tuples (day of start, flag to return `Interval` from the function, duration).
:return: Chosen interval.
"""
result = None
previous = None
for (seed, is_target, duration) in data:
if not previous:
previous = Interval(_build_datetime(seed), _build_datetime(seed + duration))
else:
tmp = Interval(_build_datetime(seed), _build_datetime(seed + duration), previous)
previous.next = tmp
previous = tmp
if is_target:
            assert result is None, f"Wrong parameters - interval '{seed}' is marked as the target but another target was already set."
result = previous
return result
class TestInterval(unittest.TestCase):
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(5, True, 1),
(6, False, 1)
]),
5
),
(
"Exact Interval right before",
build_intervals_linked_list([
(5, False, 1),
(6, True, 1),
(7, False, 1)
]),
5
),
(
"Exact Interval right after",
build_intervals_linked_list([
(3, False, 1),
(4, True, 1),
(5, False, 1)
]),
5
),
(
"Exact Interval far after",
build_intervals_linked_list([
(3, True, 1),
(4, False, 1),
(5, False, 1),
(6, False, 1),
]),
5
),
(
"Exact Interval far before",
build_intervals_linked_list([
(4, False, 1),
(5, False, 1),
(6, False, 1),
(7, True, 1),
]),
5
),
])
def test_find_closest_by_start(self, test_name, interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), False)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Simple the only interval",
build_intervals_linked_list([
(1, True, 1)
]),
1
),
(
"The same interval",
build_intervals_linked_list([
(1, False, 1),
(4, True, 1),
(6, False, 1),
]),
4
),
(
"Exact Interval right before",
build_intervals_linked_list([
(4, False, 1),
(6, True, 1),
(7, False, 1),
]),
4
),
(
"Exact Interval right after",
build_intervals_linked_list([
(1, False, 1),
(2, True, 1),
(4, False, 1),
]),
4
),
(
"Exact Interval far after",
build_intervals_linked_list([
(2, True, 1),
(3, False, 1),
(4, False, 1),
(5, False, 1),
]),
4
),
(
"Exact Interval far before",
build_intervals_linked_list([
(3, False, 1),
(4, False, 1),
(6, False, 1),
(7, True, 1),
]),
4
),
])
def test_find_closest_by_end(self, test_name, interval: Interval, expected_start_seed):
target = _build_datetime(5)
actual: Interval = interval.find_closest(target, datetime.timedelta(0), True)
expected = _build_datetime(expected_start_seed)
self.assertEqual(actual.start_time, expected, f"'{test_name}' case failed.")
@parameterized.expand([
(
"Event at middle",
build_intervals_linked_list([
(3, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(3, True, 2),
(5, False, 1),
(6, False, 2),
]),
),
(
"Event start equal interval start",
build_intervals_linked_list([
(5, True, 5),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(5, True, 1),
(6, False, 4),
]),
),
(
"Event end equal interval end",
build_intervals_linked_list([
(4, True, 2),
]),
Event(1, _build_datetime(5), _build_timedelta(1)),
build_intervals_linked_list([
(4, True, 1),
(5, False, 1),
]),
),
])
def test_separate_new_at_middle(self, test_name: str, interval: Interval, event: Event,
expected_interval_offset_2_num_4: Interval):
actual: Interval = interval.separate_new_at_middle(event, datetime.timedelta(0))
self.assertListEqual(actual.get_range(-2, 4), expected_interval_offset_2_num_4.get_range(-2, 4),
f"'{test_name}' case failed.")
if __name__ == '__main__':
unittest.main()
| 30.172249
| 112
| 0.482873
| 629
| 6,306
| 4.602544
| 0.176471
| 0.053886
| 0.131261
| 0.157513
| 0.606563
| 0.572712
| 0.504663
| 0.493264
| 0.302591
| 0.288083
| 0
| 0.036193
| 0.4085
| 6,306
| 208
| 113
| 30.317308
| 0.739946
| 0.03235
| 0
| 0.594737
| 0
| 0
| 0.085719
| 0
| 0
| 0
| 0
| 0
| 0.021053
| 1
| 0.031579
| false
| 0
| 0.031579
| 0.010526
| 0.084211
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd0df6aa8a1e2d70124b017898c86056e7b29dd
| 4,526
|
py
|
Python
|
pommerman/agents/player_agent.py
|
alekseynp/playground
|
523cc924fe9fd269a8eb3e29c45ace1c5c85b12c
|
[
"Apache-2.0"
] | 8
|
2019-06-11T16:08:25.000Z
|
2020-10-28T09:03:53.000Z
|
pommerman/agents/player_agent.py
|
alekseynp/playground
|
523cc924fe9fd269a8eb3e29c45ace1c5c85b12c
|
[
"Apache-2.0"
] | 1
|
2019-06-21T03:57:35.000Z
|
2019-06-21T03:57:35.000Z
|
pommerman/agents/player_agent.py
|
alekseynp/playground
|
523cc924fe9fd269a8eb3e29c45ace1c5c85b12c
|
[
"Apache-2.0"
] | 1
|
2018-03-21T15:21:52.000Z
|
2018-03-21T15:21:52.000Z
|
"""
NOTE:
There are a few minor complications to fluid human control which make this
code a little more involved than trivial.
1. Key press-release cycles can be, and often are, faster than one tick of
the game/simulation, but the player still wants that cycle to count, i.e.
to lay a bomb!
2. When holding down a key, the player expects that action to be repeated,
at least after a slight delay.
3. But when holding a key down (say, move left) and simultaneously doing a
quick press-release cycle (put a bomb), we want the held-down key to keep
being executed, but the cycle should have happened in-between.
The way we solve this problem is by separating key-state and actions-to-do.
We hold the actions that need be executed in a queue (`self._action_q`) and
a state for all considered keys.
1. When a key is pressed down, we note the time and mark it as down.
2. If it is released quickly thereafter, before a game tick could happen,
we add its action into the queue. This often happens when putting bombs.
3. If it's still pressed down as we enter a game tick, we do some math to see
if it's time for a "repeat" event and, if so, push an action to the queue.
4. Just work off one item from the queue each tick.
This way, the input is "natural" and things like dropping a bomb while doing
a diagonal walk from one end to the other "just work".
"""
from time import time
from . import BaseAgent
from .. import characters
REPEAT_DELAY = 0.2 # seconds
REPEAT_INTERVAL = 0.1
class Keystate:
def __init__(self):
self.keydown_time = time()
self.last_repeat_time = None
self.fired = False
def should_fire(self):
if self.last_repeat_time is None:
# The first repetition:
if time() - self.keydown_time > REPEAT_DELAY:
return True
else:
# A repetition after the first:
if time() - self.last_repeat_time > REPEAT_INTERVAL:
return True
# No repetition yet
return False
def mark_fired(self):
self.last_repeat_time = time()
self.fired = True
class PlayerAgent(BaseAgent):
"""The Player Agent that lets the user control a character."""
def __init__(self, character=characters.Bomber, agent_control='arrows'):
super(PlayerAgent, self).__init__(character)
##
# @NOTE: DO NOT move this import outside the constructor. It will
# not work in headless environments like a Docker container
# and prevents Pommerman from running.
#
from pyglet.window import key
CONTROLS = {
'arrows': {
key.UP: 1,
key.DOWN: 2,
key.LEFT: 3,
key.RIGHT: 4,
key.SPACE: 5,
key.M: 6 # In Pommerman, this will freeze the game.
},
'wasd': {
key.W: 1,
key.S: 2,
key.A: 3,
key.D: 4,
key.E: 5,
key.Q: 6 # In Pommerman, this will freeze the game.
}
}
assert agent_control in CONTROLS, "Unknown control: {}".format(
agent_control)
self._key2act = CONTROLS[agent_control]
self._action_q = []
self._keystate = {}
def act(self, obs, action_space):
# Go through the keys and fire for those that needs repetition (because they're held down)
for k, state in self._keystate.items():
if state.should_fire():
self._action_q.append(k)
state.mark_fired()
act = 0
if self._action_q: # Work off the keys that are queued.
act = self._key2act[self._action_q.pop(0)]
return act
@staticmethod
def has_user_input():
return True
def on_key_press(self, k, mod):
# Ignore if we're not handling the key. Avoids "shadowing" ticks in
# multiplayer mode.
if k in self._key2act:
self._keystate[k] = Keystate()
def on_key_release(self, k, mod):
# We only need to act on keys for which we did something in the
# `key_press` event, and ignore any other key releases.
if k in self._keystate:
# Only mark this as a "press" upon release if it was a quick one,
# i.e. not held down and executed already
if not self._keystate[k].fired:
self._action_q.append(k)
del self._keystate[k]
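# --- Self-contained simulation of the key-repeat logic (sketch) --------------
# No pyglet or Pommerman required: a held-down key fires once after
# REPEAT_DELAY and then every REPEAT_INTERVAL, mirroring Keystate.should_fire.
from time import sleep, time

REPEAT_DELAY, REPEAT_INTERVAL = 0.2, 0.1
keydown_time, last_repeat_time = time(), None
action_q = []
for tick in range(6):
    sleep(0.1)  # pretend each game tick takes ~100 ms
    since = time() - (last_repeat_time if last_repeat_time else keydown_time)
    if since > (REPEAT_INTERVAL if last_repeat_time else REPEAT_DELAY):
        action_q.append('LEFT')  # the held-down key repeats into the queue
        last_repeat_time = time()
    print(f"tick {tick}: queue={action_q}")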
| 33.776119
| 98
| 0.614671
| 658
| 4,526
| 4.132219
| 0.355623
| 0.022067
| 0.024274
| 0.02648
| 0.053696
| 0.024274
| 0.024274
| 0.024274
| 0
| 0
| 0
| 0.009003
| 0.312859
| 4,526
| 133
| 99
| 34.030075
| 0.865273
| 0.480115
| 0
| 0.073529
| 0
| 0
| 0.01508
| 0
| 0
| 0
| 0
| 0
| 0.014706
| 1
| 0.117647
| false
| 0
| 0.058824
| 0.014706
| 0.279412
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd0fd2e907a405a13689ee31a56a04909e02b9c
| 555
|
py
|
Python
|
spanglish/tests/fixtures/models/language.py
|
omaraljazairy/FedalAPI
|
2be0a19bb2629be9e2a0477f99477e4bfbd8901e
|
[
"MIT"
] | null | null | null |
spanglish/tests/fixtures/models/language.py
|
omaraljazairy/FedalAPI
|
2be0a19bb2629be9e2a0477f99477e4bfbd8901e
|
[
"MIT"
] | null | null | null |
spanglish/tests/fixtures/models/language.py
|
omaraljazairy/FedalAPI
|
2be0a19bb2629be9e2a0477f99477e4bfbd8901e
|
[
"MIT"
] | null | null | null |
""" fixtures that return an sql statement with a list of values to be inserted."""
def load_language():
""" return the sql and values of the insert queuery."""
sql = """
INSERT INTO Spanglish_Test.Language
(
`name`, `iso-639-1`
)
VALUES (%s, %s)
"""
values = [
(
'English', 'EN'
),
(
'Spanish', 'ES'
),
(
'Dutch', 'NL'
)
]
return {
'sql': sql,
'values': values
}
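# --- Consuming the fixture with a DB-API cursor (sketch) ---------------------
# Connection details are placeholders; any driver exposing executemany
# (mysql.connector is shown here) works the same way.
import mysql.connector

conn = mysql.connector.connect(host='localhost', user='test', password='test')
cursor = conn.cursor()
fixture = load_language()
cursor.executemany(fixture['sql'], fixture['values'])
conn.commit()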
| 19.137931
| 82
| 0.405405
| 51
| 555
| 4.372549
| 0.686275
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.013378
| 0.461261
| 555
| 28
| 83
| 19.821429
| 0.732441
| 0.223423
| 0
| 0.086957
| 0
| 0
| 0.41866
| 0.055024
| 0
| 0
| 0
| 0
| 0
| 1
| 0.043478
| false
| 0
| 0
| 0
| 0.086957
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd1aa4b57039ede6d30d90d5b70dc7281d0f585
| 9,693
|
py
|
Python
|
main-hs2.py
|
tradewartracker/phase-one-product-hs2
|
38dd328a8211695c31f09a34832535dc2c82a5c2
|
[
"MIT"
] | null | null | null |
main-hs2.py
|
tradewartracker/phase-one-product-hs2
|
38dd328a8211695c31f09a34832535dc2c82a5c2
|
[
"MIT"
] | null | null | null |
main-hs2.py
|
tradewartracker/phase-one-product-hs2
|
38dd328a8211695c31f09a34832535dc2c82a5c2
|
[
"MIT"
] | null | null | null |
import datetime as dt
from os.path import dirname, join
import numpy as np
import pandas as pd
import pyarrow as pa
import pyarrow.parquet as pq
from bokeh.io import curdoc
from bokeh.layouts import column, gridplot, row
from bokeh.models import ColumnDataSource, DataRange1d, Select, HoverTool, Panel, Tabs, LinearColorMapper, Range1d
from bokeh.models import NumeralTickFormatter, Title, Label, Paragraph, Div, CustomJSHover, BoxAnnotation
from bokeh.models import ColorBar
from bokeh.palettes import brewer, Spectral6
from bokeh.plotting import figure
from bokeh.embed import server_document
from bokeh.transform import factor_cmap
#################################################################################
# This just loads in the data...
# A lot of this was built off this "crossfilter demo"
# https://github.com/bokeh/bokeh/blob/branch-2.3/examples/app/crossfilter/main.py
start_date = dt.datetime(2017,7,1)
end_date = dt.datetime(2022,1,1)
background = "#ffffff"
file = "./data"+ "/data.parquet"
df = pq.read_table(file).to_pandas()
df.sort_index(inplace=True)
options = df.index.unique(0).to_list()
#print(options)
product = "HS CODE 72, IRON AND STEEL"
level = "US Dollars"
#################################################################################
#These are functions used in the plot...
def growth_trade(foo):
    # Takes a dataframe and computes the year-over-year % change in china_exports (12-month shift).
return 100*((foo["china_exports"]/foo["china_exports"].shift(12)) - 1)
def cum_trade(foo):
outdf = pd.DataFrame([])
outdf["cuml_trade_2017"] = foo["china_exports"].loc["2017"].cumsum()
outdf.index = pd.date_range(start="2020-01-01", end="2020-12-01", freq = "MS")
outdf["cuml_trade_2020"] = foo["china_exports"].loc["2020"].cumsum()
return outdf
#################################################################################
# Then this makes the simple plots:
def make_plot():
height = int(1.15*533)
width = int(1.15*750)
foo = df.loc[product_select.value]
#foo = df.query("@a < a")
    # `product_select` below is a Select widget whose value is one of the entries
    # in `options`; `.value` grabs the option currently selected.
x = foo.index
if level_select.value == 'US Dollars':
y = foo['china_exports']
if level_select.value == 'Year over Year % Change':
y = growth_trade(foo)
if level_select.value == "Cumulative Purchases 2020 vs 2017":
cuml = cum_trade(foo)
x = cuml.index
y2017 = cuml["cuml_trade_2017"]
y2020 = cuml["cuml_trade_2020"]
title = "US Exports to China of " + product_select.value.title().upper()
if level_select.value != "Cumulative Purchases 2020 vs 2017":
# This is standard bokeh stuff so far
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan, xwheel_zoom", title = title,
x_range = (start_date,end_date) )
plot.line(x = x,
y = y, line_width=3.5, line_alpha=0.75, line_color = "slategray")
if level_select.value == "Cumulative Purchases 2020 vs 2017":
plot = figure(x_axis_type="datetime", plot_height = height, plot_width=width, toolbar_location = 'below',
tools = "box_zoom, reset, pan", title = title,
x_range = (dt.datetime(2020,1,1),dt.datetime(2021,2,1)) )
plot.line(x = x,
y = y2017, line_width=3.5, line_alpha=0.5, line_color = "red", line_dash = "dashed"
, legend_label= "2017")
plot.line(x = x,
y = y2020, line_width=3.5, line_alpha=0.75, line_color = "darkblue"
, legend_label= "2020")
plot.legend.title = 'Cumulative Purchases'
plot.legend.location = "top_left"
plot.legend.title_text_font_style = "bold"
# fixed attributes
plot.xaxis.axis_label = None
plot.yaxis.axis_label = ""
plot.axis.axis_label_text_font_style = "bold"
plot.grid.grid_line_alpha = 0.3
TIMETOOLTIPS = """
<div style="background-color:#F5F5F5; opacity: 0.95; border: 15px 15px 15px 15px;">
<div style = "text-align:left;">"""
if level_select.value == 'Year over Year % Change':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{0}%</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == 'US Dollars':
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> $data_x{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'$data_x': 'datetime'}))
if level_select.value == "Cumulative Purchases 2020 vs 2017":
#################################################################################
singlesource2020 = ColumnDataSource({
'xs': x.values,
'ys': y2020.values,
"dates": np.array(x),
})
c2020 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2020, color = "crimson",alpha=0.0)
singlesource2017 = ColumnDataSource({
'xs': x.values,
'ys': y2017.values,
"dates": np.array(pd.date_range(start="2017-01-01", end="2017-12-01", freq = "MS")),
})
c2017 = plot.circle(x="xs", y="ys", size=35,
source = singlesource2017, color = "darkblue",alpha=0.0)
TIMETOOLTIPS = TIMETOOLTIPS + """
<span style="font-size: 13px; font-weight: bold"> @dates{%b %Y}: $data_y{$0.0a}</span>
</div>
</div>
"""
plot.add_tools(HoverTool(tooltips = TIMETOOLTIPS, line_policy='nearest', formatters={'@dates': 'datetime'}, renderers = [c2017,c2020]))
if level_select.value == 'Year over Year % Change':
if y.max() > 1500:
plot.y_range.end = 1500
plot.title.text_font_size = '13pt'
plot.background_fill_color = background
plot.background_fill_alpha = 0.75
plot.border_fill_color = background
tradewar_box = BoxAnnotation(left=dt.datetime(2018,7,1), right=dt.datetime(2019,10,11), fill_color='red', fill_alpha=0.1)
plot.add_layout(tradewar_box)
tradewar_box = BoxAnnotation(left=dt.datetime(2020,1,1), right=dt.datetime(2021,12,31), fill_color='blue', fill_alpha=0.1)
plot.add_layout(tradewar_box)
#p.yaxis.axis_label =
plot.yaxis.axis_label_text_font_style = 'bold'
plot.yaxis.axis_label_text_font_size = "13px"
plot.sizing_mode= "scale_both"
if level_select.value != 'Year over Year % Change':
plot.yaxis.formatter = NumeralTickFormatter(format="($0. a)")
plot.yaxis.axis_label = "US Dollars"
if level_select.value == 'Year over Year % Change':
plot.yaxis.axis_label = level_select.value
plot.max_height = height
plot.max_width = width
plot.min_height = int(0.25*height)
plot.min_width = int(0.25*width)
return plot
def update_plot(attrname, old, new):
layout.children[0] = make_plot()
# Replaces the first child of the layout with a freshly built plot. The layout is a
# row whose first entry ([0]) is the plot and whose second entry is the controls column.
level_select = Select(value=level, title='Transformations', options=['US Dollars', 'Year over Year % Change', "Cumulative Purchases 2020 vs 2017"])
level_select.on_change('value', update_plot)
#print(sorted(options))
product_select = Select(value=product, title='Product', options=sorted(options), width=400)
# This is the key thing that creates the selection object
product_select.on_change('value', update_plot)
# On selection, the value changes and update_plot re-renders the figure
div0 = Div(text = """Categories are at both the HS2 and HS4 level. Only Phase One covered products as defined in Annex 6-1 of The Agreement within that HS Code are shown. Red marks the period of Section 301 tariffs and retaliation. Blue is period of agreement.\n
\n
\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
div1 = Div(text = """Transformations: US Dollars, year over year growth rate and cumulative purchases in 2017 vs 2020.\n The later transformation cumulates Chinese purchases over each month in 2017 and 2020 and compares each. Because 2017 is the benchmark year for The Agreement, this measure provides a sense, for each product category, China's progress towards meeting their purchase commitments.\n
""", width=400, background = background, style={"justify-content": "space-between", "display": "flex"} )
controls = column(product_select, div0, level_select, div1)
height = int(1.95*533)
width = int(1.95*675)
layout = row(make_plot(), controls, sizing_mode = "scale_height", max_height = height, max_width = width,
min_height = int(0.25*height), min_width = int(0.25*width))
curdoc().add_root(layout)
curdoc().title = "us-china-products"
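As a hedged aside: a Bokeh server script like the one above is normally launched with `bokeh serve` and can be embedded in another page via `server_document` (already imported at the top). The app directory name and port below are assumptions, not taken from the original.
# Hypothetical launch: bokeh serve us_china_app --port 5006
from bokeh.embed import server_document
script_tag = server_document("http://localhost:5006/us_china_app")  # assumed app name/port
print(script_tag)  # returns a <script> tag to paste into an HTML template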
| 37.280769
| 400
| 0.613123
| 1,263
| 9,693
| 4.585115
| 0.285827
| 0.030392
| 0.033155
| 0.034191
| 0.336211
| 0.305129
| 0.256605
| 0.242618
| 0.223968
| 0.172854
| 0
| 0.050317
| 0.235221
| 9,693
| 259
| 401
| 37.42471
| 0.730878
| 0.094089
| 0
| 0.251656
| 0
| 0.039735
| 0.270389
| 0.008772
| 0.006623
| 0
| 0
| 0
| 0
| 1
| 0.02649
| false
| 0
| 0.099338
| 0.006623
| 0.145695
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd3b57ef203189fa0937ba41bdb1a37dbdad462
| 2,223
|
py
|
Python
|
aiohttp_middlewares/https.py
|
alxpy/aiohttp-middlewares
|
377740d21cdaf3142523eb81b0cee4c6dd01f6b5
|
[
"BSD-3-Clause"
] | 34
|
2017-05-14T11:31:41.000Z
|
2022-03-24T06:07:31.000Z
|
aiohttp_middlewares/https.py
|
alxpy/aiohttp-middlewares
|
377740d21cdaf3142523eb81b0cee4c6dd01f6b5
|
[
"BSD-3-Clause"
] | 77
|
2017-10-20T19:40:59.000Z
|
2022-03-01T05:07:36.000Z
|
aiohttp_middlewares/https.py
|
alxpy/aiohttp-middlewares
|
377740d21cdaf3142523eb81b0cee4c6dd01f6b5
|
[
"BSD-3-Clause"
] | 2
|
2019-11-06T12:45:33.000Z
|
2021-11-24T14:55:28.000Z
|
"""
================
HTTPS Middleware
================
Change the scheme of the current request when the aiohttp application is
deployed behind a reverse proxy with HTTPS enabled.
Usage
=====
.. code-block:: python
from aiohttp import web
from aiohttp_middlewares import https_middleware
# Basic usage
app = web.Application(middlewares=[https_middleware()])
# Specify custom headers to match, not `X-Forwarded-Proto: https`
app = web.Application(
middlewares=https_middleware({"Forwarded": "https"})
)
"""
import logging
from aiohttp import web
from aiohttp.web_middlewares import _Handler, _Middleware
from .annotations import DictStrStr
DEFAULT_MATCH_HEADERS = {"X-Forwarded-Proto": "https"}
logger = logging.getLogger(__name__)
def https_middleware(match_headers: DictStrStr = None) -> _Middleware:
"""
Change the scheme of the current request when the aiohttp application is
deployed behind a reverse proxy with HTTPS enabled.
This middleware is needed when your aiohttp app is deployed behind
nginx with HTTPS enabled, after aiohttp discontinued the
``secure_proxy_ssl_header`` keyword argument in
https://github.com/aio-libs/aiohttp/pull/2299.
:param match_headers:
Dict of header(s) from the reverse proxy that specify aiohttp runs behind
HTTPS. By default:
.. code-block:: python
{"X-Forwarded-Proto": "https"}
"""
@web.middleware
async def middleware(
request: web.Request, handler: _Handler
) -> web.StreamResponse:
"""Change scheme of current request when HTTPS headers matched."""
headers = DEFAULT_MATCH_HEADERS
if match_headers is not None:
headers = match_headers
matched = any(
request.headers.get(key) == value for key, value in headers.items()
)
if matched:
logger.debug(
"Substitute request URL scheme to https",
extra={
"headers": headers,
"request_headers": dict(request.headers),
},
)
request = request.clone(scheme="https")
return await handler(request)
return middleware
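A minimal sketch of the middleware in a runnable app, following the usage shown in the module docstring above; the /scheme route and port are illustrative.
from aiohttp import web
from aiohttp_middlewares import https_middleware

async def scheme_handler(request: web.Request) -> web.Response:
    # Reports "https" when the proxy sends X-Forwarded-Proto: https
    return web.Response(text=request.scheme)

app = web.Application(middlewares=[https_middleware()])
app.add_routes([web.get("/scheme", scheme_handler)])

if __name__ == "__main__":
    web.run_app(app, port=8080)  # port is an arbitrary choice for the sketch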
| 25.848837
| 79
| 0.645524
| 244
| 2,223
| 5.77459
| 0.360656
| 0.0511
| 0.038325
| 0.042583
| 0.251242
| 0.251242
| 0.146203
| 0.146203
| 0.146203
| 0.146203
| 0
| 0.002427
| 0.258659
| 2,223
| 85
| 80
| 26.152941
| 0.852549
| 0.469186
| 0
| 0
| 0
| 0
| 0.082936
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.035714
| false
| 0
| 0.142857
| 0
| 0.25
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd5217ab9022ac6fb992de8575b10b6f886806f
| 1,452
|
py
|
Python
|
backtest.py
|
YangTaoCN/IntroNeuralNetworks
|
45b0311f85c9cdd9d3f0806e0059201e2655697f
|
[
"MIT"
] | null | null | null |
backtest.py
|
YangTaoCN/IntroNeuralNetworks
|
45b0311f85c9cdd9d3f0806e0059201e2655697f
|
[
"MIT"
] | null | null | null |
backtest.py
|
YangTaoCN/IntroNeuralNetworks
|
45b0311f85c9cdd9d3f0806e0059201e2655697f
|
[
"MIT"
] | null | null | null |
import pandas_datareader.data as pdr
import yfinance as fix
import numpy as np
fix.pdr_override()
def back_test(strategy, seq_len, ticker, start_date, end_date, dim):
"""
A simple back test for a given date period
:param strategy: the chosen strategy. Note to have already formed the model, and fitted with training data.
:param seq_len: length of the days used for prediction
:param ticker: company ticker
:param start_date: starting date
:type start_date: "YYYY-mm-dd"
:param end_date: ending date
:type end_date: "YYYY-mm-dd"
:param dim: dimension required for strategy: 3dim for LSTM and 2dim for MLP
:type dim: tuple
:return: Percentage errors array that gives the errors for every test in the given date range
"""
data = pdr.get_data_yahoo(ticker, start_date, end_date)
stock_data = data["Adj Close"]
errors = []
for i in range((len(stock_data) // 10) * 10 - seq_len - 1):
# stock_data is a Series, so a single positional index is used (the original ", 1" would raise an IndexingError)
x = np.array(stock_data.iloc[i: i + seq_len]).reshape(dim) / 200
y = np.array(stock_data.iloc[i + seq_len + 1]) / 200
predict = strategy.predict(x)
while predict == 0:
predict = strategy.predict(x)
error = (predict - y) / 100
errors.append(error)
total_error = np.array(errors)
print(f"Average error = {total_error.mean()}")
# If you want to see the full error list then print the following statement
# print(errors)
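A hedged usage sketch: with the `return total_error` above, the back test can be driven as below. `my_lstm` is a hypothetical, already-fitted Keras model whose input shape matches `dim`.
# my_lstm is assumed to be built and fitted elsewhere with input shape (1, 10, 1)
errors = back_test(my_lstm, seq_len=10, ticker="AAPL",
                   start_date="2019-01-01", end_date="2020-01-01",
                   dim=(1, 10, 1))
print(errors.mean(), errors.std())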
| 40.333333
| 111
| 0.669421
| 223
| 1,452
| 4.251121
| 0.452915
| 0.031646
| 0.022152
| 0.037975
| 0.126582
| 0.044304
| 0
| 0
| 0
| 0
| 0
| 0.018018
| 0.235537
| 1,452
| 35
| 112
| 41.485714
| 0.836036
| 0.43595
| 0
| 0.111111
| 0
| 0
| 0.059055
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.055556
| false
| 0
| 0.166667
| 0
| 0.222222
| 0.055556
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd64e7eef2ac9aae41c0784aa1ab81588c6d2ef
| 2,278
|
py
|
Python
|
src/tespy/components/subsystems.py
|
jbueck/tespy
|
dd7a2633ce12f33b4936ae902f4fe5df29191690
|
[
"MIT"
] | null | null | null |
src/tespy/components/subsystems.py
|
jbueck/tespy
|
dd7a2633ce12f33b4936ae902f4fe5df29191690
|
[
"MIT"
] | null | null | null |
src/tespy/components/subsystems.py
|
jbueck/tespy
|
dd7a2633ce12f33b4936ae902f4fe5df29191690
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8
"""Module for custom component groups.
It is possible to create subsystems of component groups in tespy. The subsystem
class is the base class for custom subsystems.
This file is part of project TESPy (github.com/oemof/tespy). It's copyrighted
by the contributors recorded in the version control history of the file,
available from its original location tespy/components/subsystems.py
SPDX-License-Identifier: MIT
"""
import logging
# %%
class subsystem:
r"""
Class subsystem is the base class of all TESPy subsystems.
Parameters
----------
label : str
The label of the subsystem.
Example
-------
Basic example for setting up a tespy.components.subsystems.subsystem
object. This example does not run a tespy calculation!
>>> from tespy.components import subsystem
>>> mysub = subsystem('mySubsystem')
>>> type(mysub)
<class 'tespy.components.subsystems.subsystem'>
>>> mysub.get_attr('label')
'mySubsystem'
"""
def __init__(self, label):
if not isinstance(label, str):
msg = 'Subsystem label must be of type str!'
logging.error(msg)
raise ValueError(msg)
elif len([x for x in [';', ', ', '.'] if x in label]) > 0:
msg = 'Can\'t use ' + str([';', ', ', '.']) + ' in label.'
logging.error(msg)
raise ValueError(msg)
else:
self.label = label
self.comps = {}
self.conns = {}
self.create_comps()
self.create_conns()
def get_attr(self, key):
r"""
Get the value of a subsystem's attribute.
Parameters
----------
key : str
The attribute you want to retrieve.
Returns
-------
out :
Value of specified attribute.
"""
if key in self.__dict__:
return self.__dict__[key]
else:
msg = 'Subsystem ' + self.label + ' has no attribute ' + key + '.'
logging.error(msg)
raise KeyError(msg)
def create_comps(self):
"""Create the subsystem's components."""
return
def create_conns(self):
"""Create the subsystem's connections."""
return
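A minimal sketch of a custom subsystem built on this base class; the component and connection bodies are left as placeholders, since the real tespy network wiring is outside this module.
from tespy.components import subsystem

class my_subsystem(subsystem):
    """Illustrative subclass; fills the dicts created by the base __init__."""
    def create_comps(self):
        self.comps = {}   # would hold tespy component objects keyed by name
    def create_conns(self):
        self.conns = {}   # would hold tespy connection objects keyed by name

sub = my_subsystem('mySub')
print(sub.get_attr('label'))  # 'mySub'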
| 25.311111
| 79
| 0.579017
| 266
| 2,278
| 4.890977
| 0.394737
| 0.036895
| 0.057648
| 0.046118
| 0.086088
| 0.05073
| 0
| 0
| 0
| 0
| 0
| 0.001267
| 0.306848
| 2,278
| 89
| 80
| 25.595506
| 0.822673
| 0.498244
| 0
| 0.3
| 0
| 0
| 0.093071
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.133333
| false
| 0
| 0.033333
| 0
| 0.3
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd7fdf07b75be54fc81ee90365afd1023ab4167
| 7,940
|
py
|
Python
|
fairscale/optim/oss.py
|
blefaudeux/fairscale
|
aa5850107a37c7d5644b6079516e7ae1079ff5e8
|
[
"BSD-3-Clause"
] | 1
|
2020-07-23T22:30:36.000Z
|
2020-07-23T22:30:36.000Z
|
fairscale/optim/oss.py
|
blefaudeux/fairscale
|
aa5850107a37c7d5644b6079516e7ae1079ff5e8
|
[
"BSD-3-Clause"
] | null | null | null |
fairscale/optim/oss.py
|
blefaudeux/fairscale
|
aa5850107a37c7d5644b6079516e7ae1079ff5e8
|
[
"BSD-3-Clause"
] | null | null | null |
# Copyright (c) Facebook, Inc. and its affiliates. All rights reserved.
#
# This source code is licensed under the BSD license found in the
# LICENSE file in the root directory of this source tree.
import copy
import logging
from typing import TYPE_CHECKING, Any, Callable, List, Optional, Type
import torch
import torch.distributed as dist
from torch.optim import SGD, Optimizer
from .utils import broadcast_object, recursive_copy_to_device
if TYPE_CHECKING:
from torch.optim.optimizer import _params_t
else:
_params_t = Any
class OSS(Optimizer):
"""Wraps an arbitrary :class:`optim.Optimizer <torch.optim.Optimizer>`
optimizer and shards its state as described by ZeRO_.
::
opt = OSS(params, optim=torch.optim.Adam, lr=0.01)
.. _ZeRO: https://arxiv.org/abs/1910.02054
OSS shards the wrapped optimizer's state across the ranks of the given process
group: each rank only builds optimizer state for the parameters assigned to it,
and after every step the updated parameters are broadcast from the owning rank
to all other ranks.
Args:
params (list of tensors):
parameters to be optimized
Keyword Args:
optim (torch.nn.Optimizer):
optimizer to shard (default: SGD)
group (group):
torch.distributed group (default: group.WORLD)
"""
optim: Optimizer
in_super_constructor: bool
def __init__(
self,
params: _params_t,
optim: Type[Optimizer] = SGD,
group: Any = dist.group.WORLD,
**defaults: Any
):
self.in_super_constructor = True
super().__init__(params, defaults)
self.in_super_constructor = False
self.group = group
self.rank = dist.get_rank(group)
param_groups = self.partition_parameters()
self.optim = optim(param_groups[self.rank], **defaults)
# Optional consolidated optimizer state
self._global_state_dict = []
def partition_parameters(self) -> List[List[dict]]:
"""Partitions parameters across distributed ranks.
Returns a list of param_groups (which is a list of dict) where each
element of the list contains the param_groups for a rank. Element 0
corresponds to rank 0, etc. We need all the ranks for the broadcast
inside step().
"""
world_size = dist.get_world_size(self.group)
param_groups: List[List] = [list() for _ in range(world_size)]
sizes = [0] * world_size
for param_group in self.param_groups:
param_lists: List[List] = [list() for _ in range(world_size)]
for param in param_group["params"]:
# Add this param to rank with smallest size.
rank = sizes.index(min(sizes))
param_lists[rank].append(param)
sizes[rank] += param.numel()
for rank, params in enumerate(param_lists):
if len(params) > 0:
pg = copy.copy(param_group)
pg["params"] = params
param_groups[rank].append(pg)
return param_groups
def step(self, closure: Optional[Callable[[], float]] = None) -> Optional[float]:
loss = self.optim.step(closure=closure)
for rank, param_groups in enumerate(self.partition_parameters()):
for param_group in param_groups:
for param in param_group["params"]:
dist.broadcast(param, rank, group=self.group)
return loss
def state_dict(self) -> dict:
""" Gets this rank's state_dict. """
return self.optim.state_dict()
def _collect_state_dict(self) -> List[dict]:
"""
Collect all the state shards
"""
empty_buffer = torch.empty([1], dtype=torch.uint8)
global_optim_state = []
local_state = self.state_dict()
if len(local_state["state"]) == 0:
return []
for rank in range(dist.get_world_size(group=self.group)):
if rank == self.rank:
logging.info("Saving self state")
global_optim_state.append(
recursive_copy_to_device(
local_state, non_blocking=True, device=torch.device("cpu")
)
)
# Sync with other replicas
broadcast_object(empty_buffer, src_rank=rank)
else:
# Reuse the param_groups from this rank, these are shared across replicas
logging.info("Receiving state from rank %s ", rank)
replica_state = {
"state": broadcast_object(empty_buffer, src_rank=rank),
"param_groups": local_state["param_groups"],
}
# Fetch from the other replicas
global_optim_state.append(
recursive_copy_to_device(
replica_state, non_blocking=True, device=torch.device("cpu")
)
)
logging.info("State from rank %s received", rank)
return global_optim_state
def _broadcast_state_dict(self) -> None:
"""
Broadcast this rank's state shard, discard others
"""
empty_buffer = torch.empty([1], dtype=torch.uint8)
local_state = self.state_dict()
if len(local_state["state"]) == 0:
return
for rank in range(dist.get_world_size(group=self.group)):
if rank == self.rank:
# Send the state to the reference replica
logging.info(
"Sending the sharded SGD state to the reference replica from rank %s",
rank,
)
broadcast_object(local_state["state"], src_rank=rank)
else:
# Discard this tensor/rank, broadcast necessary for syncing
logging.info("Discarding broadcast from rank %s", rank)
broadcast_object(empty_buffer, src_rank=rank)
def consolidate_state_dict(self, recipient_rank: int = 0) -> List[dict]:
""" Update the consolidated state_dict list, one per rank.
This needs to be called on all replicas """
if self.rank == recipient_rank:
# Pull the sharded state from all the other replicas
# Store all the states in order, rank by rank
logging.info("Pulling the sharded SGD state from all replicas")
self._global_state_dict = self._collect_state_dict()
else:
# Acknowledge broadcasts, and send this rank's shard when needed
self._broadcast_state_dict()
@property
def global_state_dict(self):
"""
Return the last known global optimizer state, which consist of a list of the shards.
NOTE: This is limited to the replica which was responsible for the consolidation.
The state may also not be up to date, depending on when `consolidate_state_dict` was last called
"""
assert (
len(self._global_state_dict) > 0
), "The optimizer state is not materialized, please call consolidate_state_dict on every replica beforehand"
return self._global_state_dict
def load_state_dict(self, state_dict: dict) -> None:
""" Loads this rank's state_dict. """
self.optim.load_state_dict(state_dict)
def add_param_group(self, param_group: dict) -> None:
super().add_param_group(param_group)
if not self.in_super_constructor:
param_groups = self.partition_parameters()[self.rank]
if len(param_groups) == len(self.optim.param_groups) + 1:
self.optim.add_param_group(param_groups[-1])
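A hedged sketch of OSS in a data-parallel training loop, following the usage line in the class docstring; the model, data loader and process-group setup are assumptions.
import torch
import torch.distributed as dist
from fairscale.optim.oss import OSS

# Assumes dist.init_process_group(...) has been called and `model`/`loader` exist.
optimizer = OSS(model.parameters(), optim=torch.optim.Adam, lr=1e-3)

for batch, target in loader:
    optimizer.zero_grad()
    loss = torch.nn.functional.mse_loss(model(batch), target)
    loss.backward()
    optimizer.step()  # runs the local shard, then broadcasts updated params

optimizer.consolidate_state_dict(recipient_rank=0)
if dist.get_rank() == 0:
    torch.save(optimizer.global_state_dict, "optim_state.pt")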
| 37.990431
| 116
| 0.614987
| 973
| 7,940
| 4.848921
| 0.258993
| 0.043875
| 0.022043
| 0.016109
| 0.197117
| 0.169139
| 0.133319
| 0.109792
| 0.045782
| 0.045782
| 0
| 0.004699
| 0.303149
| 7,940
| 208
| 117
| 38.173077
| 0.848003
| 0.288287
| 0
| 0.188034
| 0
| 0
| 0.072434
| 0.004076
| 0
| 0
| 0
| 0
| 0.008547
| 1
| 0.08547
| false
| 0
| 0.068376
| 0
| 0.239316
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd93bcec91ffc966a787c6dda07671b2cad8b23
| 603
|
py
|
Python
|
homepage/urls.py
|
r0kym/SNI-backend
|
5fdc25df21846fadb313d439acba73782a6248c3
|
[
"MIT"
] | 1
|
2021-06-03T22:07:24.000Z
|
2021-06-03T22:07:24.000Z
|
homepage/urls.py
|
r0kym/SNI-backend
|
5fdc25df21846fadb313d439acba73782a6248c3
|
[
"MIT"
] | 1
|
2020-07-19T11:10:22.000Z
|
2020-07-19T11:10:22.000Z
|
homepage/urls.py
|
r0kym/SNI-backend
|
5fdc25df21846fadb313d439acba73782a6248c3
|
[
"MIT"
] | 2
|
2020-07-02T12:05:03.000Z
|
2020-07-02T18:34:39.000Z
|
"""
URLconf of the homepage
"""
from django.urls import path, include
from . import views
urlpatterns = [
path('', views.home, name='home'),
path('auth', views.auth, name='auth'),
path('auth/public', views.auth_public, name='auth-public'),
path('auth/full', views.auth_full, name='auth-full'),
path('auth/invite', views.auth_invite, name='auth-invite'),
path('callback/sni', views.sni_callback, name='sni_callback'),
path('logout', views.logout, name='logout'),
path('403', views.no_perm, name='no-permission'),
path('404', views.not_found, name='not-found'),
]
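For context, a minimal sketch of what a couple of the referenced views might look like; the real `homepage/views.py` is not part of this file, so everything below is illustrative.
# homepage/views.py (hypothetical sketch)
from django.http import HttpResponse

def home(request):
    return HttpResponse("SNI frontend homepage")

def no_perm(request):
    return HttpResponse("Forbidden", status=403)

def not_found(request):
    return HttpResponse("Not found", status=404)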
| 27.409091
| 66
| 0.656716
| 83
| 603
| 4.686747
| 0.337349
| 0.082262
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.011628
| 0.144279
| 603
| 21
| 67
| 28.714286
| 0.742248
| 0.038143
| 0
| 0
| 0
| 0
| 0.241259
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.153846
| 0
| 0.153846
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd940fc315fde5b1737f292edb3bdacd8fa4aa7
| 3,058
|
py
|
Python
|
srcflib/email/__init__.py
|
mas90/srcf-python
|
09ce45c65d2ddbec2cdfc559a7b5983398dbdfa0
|
[
"MIT"
] | null | null | null |
srcflib/email/__init__.py
|
mas90/srcf-python
|
09ce45c65d2ddbec2cdfc559a7b5983398dbdfa0
|
[
"MIT"
] | null | null | null |
srcflib/email/__init__.py
|
mas90/srcf-python
|
09ce45c65d2ddbec2cdfc559a7b5983398dbdfa0
|
[
"MIT"
] | null | null | null |
"""
Notification email machinery, for tasks to send credentials and instructions to users.
Email templates placed inside the `templates` directory of this module should:
- extend from `layout`
- provide `subject` and `body` blocks
"""
from enum import Enum
import os.path
from jinja2 import Environment, FileSystemLoader
from sqlalchemy.orm import Session as SQLASession
from srcf.database import Member, Society
from srcf.mail import send_mail
from ..plumbing import Owner, owner_desc, owner_name, owner_website
ENV = Environment(loader=FileSystemLoader(os.path.join(os.path.dirname(__file__), "templates")),
trim_blocks=True, lstrip_blocks=True)
ENV.filters.update({"is_member": lambda mem: isinstance(mem, Member),
"is_society": lambda soc: isinstance(soc, Society),
"owner_name": owner_name,
"owner_desc": owner_desc,
"owner_website": owner_website})
CURRENT_WRAPPER = None
class Layout(Enum):
"""
Base layout template to be inherited by an email-specific template.
"""
SUBJECT = "/common/subject.j2"
"""
Subject line of the email.
"""
BODY = "/common/body.j2"
"""
Main content of the email.
"""
class EmailWrapper:
"""
Context manager for email sending, used to augment emails with additional metadata.
"""
def __init__(self, subject: str = None, body: str = None, context: dict = None):
self._layouts = {Layout.SUBJECT: subject,
Layout.BODY: body}
self._context = context
def render(self, template: str, layout: Layout, target: Owner, context: dict = None):
"""
Render an email template with Jinja using the provided context.
"""
context = dict(context or (), layout=layout.value, target=target)
out = ENV.get_template(template).render(context)
custom = self._layouts.get(layout)
if custom:
if self._context:
context.update(self._context)
out = custom.format(out, **context)
if layout == Layout.SUBJECT:
out = " ".join(out.split())
return out
def __enter__(self):
global CURRENT_WRAPPER
if CURRENT_WRAPPER:
raise RuntimeError("Another context is already active")
CURRENT_WRAPPER = self
def __exit__(self, exception_type, exception_value, traceback):
global CURRENT_WRAPPER
CURRENT_WRAPPER = None
DEFAULT_WRAPPER = EmailWrapper(subject="[SRCF] {}")
def send(target: Owner, template: str, context: dict = None, session: SQLASession = None):
"""
Render and send an email to the target member or society.
"""
wrapper = CURRENT_WRAPPER or DEFAULT_WRAPPER
subject = wrapper.render(template, Layout.SUBJECT, target, context)
body = wrapper.render(template, Layout.BODY, target, context)
recipient = (owner_desc(target, True), target.email)
send_mail(recipient, subject, body, copy_sysadmins=False, session=session)
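A hedged usage sketch of the wrapper and `send` helper above; the member object, template name and context key are assumptions.
# `member` would be an srcf.database.Member instance fetched elsewhere.
wrapper = EmailWrapper(subject="[SRCF maintenance] {}")
with wrapper:
    send(member, "maintenance.j2", context={"window": "Saturday 02:00-04:00"})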
| 30.888889
| 96
| 0.657292
| 358
| 3,058
| 5.47486
| 0.351955
| 0.05
| 0.021429
| 0.027551
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.001299
| 0.244604
| 3,058
| 98
| 97
| 31.204082
| 0.847186
| 0.16416
| 0
| 0.081633
| 0
| 0
| 0.057179
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.102041
| false
| 0
| 0.142857
| 0
| 0.346939
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd9cb84780ce4068a648d1e9469d9570121c655
| 5,852
|
py
|
Python
|
src/train_nn.py
|
anirudhbhashyam/911-Calls-Seattle-Predictions
|
8c975ab6c6a85d514ad74388778e1b635ed3e63d
|
[
"MIT"
] | null | null | null |
src/train_nn.py
|
anirudhbhashyam/911-Calls-Seattle-Predictions
|
8c975ab6c6a85d514ad74388778e1b635ed3e63d
|
[
"MIT"
] | null | null | null |
src/train_nn.py
|
anirudhbhashyam/911-Calls-Seattle-Predictions
|
8c975ab6c6a85d514ad74388778e1b635ed3e63d
|
[
"MIT"
] | null | null | null |
import os
from typing import Union
import tensorflow as tf
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.model_selection import train_test_split, KFold
import utility as ut
from variables import *
# Read the data.
train_data = pd.read_csv(os.path.join(DATA_PATH, ".".join([DATA_TRAIN, DATA_EXT])), header = 0)
# Get the labels.
Y = train_data.pop(LABEL)
sample_weights = np.ones(Y.shape[0])
for i in range(10, 24):
sample_weights[train_data["_".join(("hour", str(i)))] == 1] = 1.5
# -- For classification -- #
# CLASSES = np.unique(Y)
# N_CLASSES = len(CLASSES)
# Y = Y.replace(dict(zip(CLASSES, range(0, len(CLASSES)))))
# Data shape parameters.
N_FEATURES = train_data.shape[1]
N_SAMPLES = train_data.shape[0]
# Split the training data.
X_train, X_val, Y_train, Y_val = train_test_split(train_data, Y, shuffle = True, random_state = 7919)
def build_and_compile(input_: tuple = (WB_SIZE, N_FEATURES),
loss_func: str = "mae") -> tf.keras.Model:
"""
Build and compile a TensorFlow LSTM network.
Parameters
----------
input_ :
Shape of the training data. Should specify
`(batch_size or window_size, n_features)`
loss_func :
Loss function to use for training.
Returns
-------
`tf.keras.Model` :
A compiled TensorFlow model.
"""
# Sequential Keras model.
model = tf.keras.models.Sequential([
tf.keras.layers.LSTM(50, input_shape = input_, return_sequences = True),
tf.keras.layers.LSTM(50, return_sequences = False),
tf.keras.layers.GaussianNoise(1.0),
tf.keras.layers.Dense(1024, activation = "relu"),
tf.keras.layers.Dropout(0.7),
tf.keras.layers.Dense(128, activation = "relu"),
tf.keras.layers.Dropout(0.2),
tf.keras.layers.Dense(64, activation = "relu"),
tf.keras.layers.GaussianNoise(0.2),
# tf.keras.layers.Dense(32, activation = "relu"),
# tf.keras.layers.GaussianNoise(0.7),
tf.keras.layers.Dense(1, activation = "relu")
])
# Compile the model.
model.compile(
loss = loss_func,
optimizer = "adam"
)
return model
def train(model: tf.keras.Model,
train_data: np.ndarray,
train_labels: np.ndarray,
val_data: np.ndarray,
val_labels: np.ndarray,
epochs: int = 200,
sample_weights: np.array = None,
cross_val = False) -> pd.DataFrame:
"""
Trains the TensorFlow `model`.
Parameters
----------
model :
A TensorFlow compiled model.
train_data :
The data to be trained. Shape must be consistent with what is passed during model compilation.
train_labels :
The ground truth predictions.
val_data :
The data to be used as validation.
val_labels :
The ground truth validation predictions.
epochs :
Total number of epochs to train.
sample_weights :
Weights for `train_data` to use during training.
Returns
-------
pd.DataFrame:
Training information.
"""
# Check for overfitting.
early_stopping = tf.keras.callbacks.EarlyStopping(
monitor = "val_loss",
min_delta = 0.001,
patience = 100,
restore_best_weights = False)
history = model.fit(
train_data.reshape(-1, WB_SIZE, N_FEATURES),
train_labels,
sample_weight = sample_weights,
validation_data = (val_data.reshape(-1, WB_SIZE, N_FEATURES), val_labels),
verbose = 1,
epochs = epochs,
callbacks = early_stopping)
return pd.DataFrame(history.history)
# def cross_validate(train_data: pd.DataFrame,
# train_labels: pd.DataFrame,
# epochs: int = 50,
# sample_weights: np.array = None,
# folds: int = 2) -> pd.DataFrame:
# splits = KFold(n_splits = folds, shuffle = True)
# print("Starting cross validation.")
# accuracy = list()
# val_loss = list()
# models = list()
# for i, (train_index, test_index) in enumerate(splits.split(train_data, train_labels)):
# print(f"Iteration {i}\n")
# X_train, X_val, Y_train, Y_val = train_data[train_index], train_data[test_index], train_data[train_index], train_labels[test_index]
# model = build_and_compile((WB_SIZE, N_FEATURES), "mae")
# history_df = train(model, X_train, Y_train, epochs)
# # train_stats(history_df, i)
# scores = model.evaluate(X_val.reshape(-1, WB_SIZE, N_FEATURES), Y_val)
# print(f"Validation loss: {scores}\n")
# #of {scores[0]} {model.metrics_names[1]} of {scores[1] * 100:.2f}%")
# # accuracy.append(scores[1] * 100)
# val_loss.append(scores)
# models.append(model)
# return models[np.argmin(val_loss)]
def train_stats(history_df: pd.DataFrame, it: int = None) -> None:
"""
Produces training statistics once training has run its course.
Parameters
----------
history_df :
The history as returned by Keras `fit` method.
it :
To be used with cross validation. Specifies the name of the learning curve based on the cross validation iteration `it`.
Returns
-------
`None`
"""
# Learning curve.
plt.rcParams["figure.dpi"] = 160
history_df.loc[:, ["loss", "val_loss"]].plot()
plt.title("Model Loss")
plt.ylabel("Loss")
plt.xlabel("Epoch")
name = TRAIN_FIG_SAVE_NAME
if it is not None:
name = "_".join([name, str(it)])
plt.savefig(os.path.join(TRAIN_FIG_SAVE_PATH, ".".join([name, FIG_EXT])))
# Stats
print(f"Minimum validation loss: {history_df['val_loss'].min()}")
# plt.plot(f"Accuracy: {history_df['train_accuracy']}")
# plt.plot(f"Validation Accuracy: {history_df['val_accuracy']}")
return None
def main():
model = build_and_compile((WB_SIZE, N_FEATURES))
# model = cross_validate(np.array(train_data), np.array(Y))
history_df = train(model, np.array(X_train), np.array(Y_train), np.array(X_val), np.array(Y_val))
# train_stats(history_df)
# Save trained model (better to use checkpoints).
model.save(os.path.join(NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME))
if __name__ == "__main__":
main()
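A hedged sketch of reloading the saved model for inference; `NN_MODEL_SAVE_PATH`, `NN_MODEL_SAVE_NAME` and `WB_SIZE` come from the `variables` module imported above, and the zero-filled window is only a placeholder input.
import os
import numpy as np
import tensorflow as tf
from variables import NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME, WB_SIZE

model = tf.keras.models.load_model(os.path.join(NN_MODEL_SAVE_PATH, NN_MODEL_SAVE_NAME))
window = np.zeros((1, WB_SIZE, model.input_shape[-1]))  # placeholder feature window
print(model.predict(window))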
| 27.866667
| 135
| 0.681647
| 840
| 5,852
| 4.564286
| 0.266667
| 0.039906
| 0.040689
| 0.023474
| 0.163276
| 0.11085
| 0.085029
| 0.031299
| 0.013041
| 0
| 0
| 0.015196
| 0.179084
| 5,852
| 209
| 136
| 28
| 0.782889
| 0.49607
| 0
| 0
| 0
| 0
| 0.048939
| 0.010267
| 0
| 0
| 0
| 0
| 0
| 1
| 0.052632
| false
| 0
| 0.118421
| 0
| 0.210526
| 0.013158
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cd9fdc42b14ec8f2d6ab3af8d353bbdb853608c
| 1,971
|
py
|
Python
|
pdserver/objects.py
|
Gustavo6046/polydung
|
e8626c67b0f59e00a2400b5a5c644e3f6b925e00
|
[
"MIT"
] | null | null | null |
pdserver/objects.py
|
Gustavo6046/polydung
|
e8626c67b0f59e00a2400b5a5c644e3f6b925e00
|
[
"MIT"
] | null | null | null |
pdserver/objects.py
|
Gustavo6046/polydung
|
e8626c67b0f59e00a2400b5a5c644e3f6b925e00
|
[
"MIT"
] | null | null | null |
import base64
import random
import string
import netbyte
import numpy as np
try:
import simplejson as json
except ImportError:
import json
kinds = {}
class PDObject(object):
def __init__(self, game, kind, id, pos, properties):
self.game = game
self.kind = kind
self.id = id or ''.join([random.choice(string.ascii_letters + string.digits + "#$%*") for _ in range(100)])
self.pos = np.array(pos)
self.properties = properties
self.game.handle_object_creation(self)
def __getitem__(self, key): # a shortcut for Netbyte
return self.properties[key]
def __setitem__(self, key, value): # not only a shortcut for Netbyte
self.properties[key] = value
self.game.update_object(self)
def __call__(self, key, **kwargs):
nbe = netbyte.Netbyte()
nbe['self'] = self
nbe['game'] = self.game
for k, v in kwargs.items():
nbe[k] = v
nbe.execute_instructions(*self.kind.functions[key])
def tick(self, timedelta):
self('tick', timedelta=timedelta)
def serialize(self):
return json.dumps({
"kind": self.kind.name,
'id': self.id,
'pos': self.pos.tolist(),
"properties": self.properties
})
@classmethod
def deserialize(cls, game, js):
data = json.loads(js)
return cls(game, kinds[data['kind']], data['id'], data['pos'], data['properties'])
class PDClass(object):
def __init__(self, game, name, functions=()):
self.functions = dict(functions)
self.name = name
kinds[name] = self
nbe = netbyte.Netbyte()
def serializable(self):
return {
'name': self.name,
'functions': {k: nbe.dump(v, name="{}.{}".format(self.name, k)) for k, v in self.functions.items()}
}
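An illustrative sketch of wiring these classes together; the `FakeGame` stub stands in for the real game object (not part of this module) and only implements the two hooks PDObject calls.
class FakeGame:
    def handle_object_creation(self, obj):
        print("created", obj.kind.name, obj.id[:8])
    def update_object(self, obj):
        print("updated", obj.id[:8])

game = FakeGame()
crate = PDClass(game, "crate", functions={})
obj = PDObject(game, crate, None, [0.0, 0.0], {"health": 10})
obj["health"] = 9          # triggers game.update_object via __setitem__
print(obj.serialize())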
| 27.375
| 115
| 0.559614
| 228
| 1,971
| 4.723684
| 0.324561
| 0.044568
| 0.024141
| 0.031569
| 0.038997
| 0
| 0
| 0
| 0
| 0
| 0
| 0.003687
| 0.312024
| 1,971
| 72
| 116
| 27.375
| 0.79056
| 0.027397
| 0
| 0.037037
| 0
| 0
| 0.037598
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.166667
| false
| 0
| 0.148148
| 0.055556
| 0.425926
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cdc48fef2a5dcb4bffb7cadff760f5a6da8ed72
| 2,486
|
py
|
Python
|
preprocessor/base.py
|
shayanthrn/AGAIN-VC
|
41934f710d117d524b4a0bfdee7e9b845a56d422
|
[
"MIT"
] | 3
|
2022-02-21T09:40:00.000Z
|
2022-02-27T13:52:19.000Z
|
preprocessor/base.py
|
shayanthrn/AGAIN-VC
|
41934f710d117d524b4a0bfdee7e9b845a56d422
|
[
"MIT"
] | null | null | null |
preprocessor/base.py
|
shayanthrn/AGAIN-VC
|
41934f710d117d524b4a0bfdee7e9b845a56d422
|
[
"MIT"
] | 1
|
2022-02-21T09:40:02.000Z
|
2022-02-21T09:40:02.000Z
|
import os
import logging
import numpy as np
from tqdm import tqdm
from functools import partial
from multiprocessing.pool import ThreadPool
import pyworld as pw
import resemblyzer  # needed for the 'resemblyzer' feature branch below
from util.dsp import Dsp
logger = logging.getLogger(__name__)
def preprocess_one(input_items, module, output_path=''):
input_path, basename = input_items
y = module.load_wav(input_path)
if module.config.dtype == 'wav':
ret = y
elif module.config.dtype == 'melspectrogram':
ret = module.wav2mel(y)
elif module.config.dtype == 'f0':
f0, sp, ap = pw.wav2world(y.astype(np.float64), module.config.sample_rate)
ret = f0
if (f0 == 0).all():
logger.warn(f'f0 returns all zeros: {input_path}')
elif module.config.dtype == 's3prl_spec':
ret = module.wav2s3prl_spec(y)
if ret is None:
logger.warn(f'S3PRL spectrogram returns NoneType: {input_path}')
elif module.config.dtype == 'resemblyzer':
y = resemblyzer.preprocess_wav(input_path)
ret = module.wav2resemblyzer(y)
else:
logger.warn(f'Not implement feature type {module.config.dtype}')
if output_path == '':
return ret
else:
if type(ret) is np.ndarray:
np.save(os.path.join(output_path, f'{basename}.npy'), ret)
else:
logger.warn(f'Feature {module.config.dtype} is not saved: {input_path}.')
return 1
class BasePreproceccor():
def __init__(self, config):
self.dsp_modules = {}
for feat in config.feat_to_preprocess:
self.dsp_modules[feat] = Dsp(config.feat[feat])
def preprocess(self, input_path, output_path, feat, njobs):
file_dict = self.gen_file_dict(input_path)
logger.info(f'Starting to preprocess from {input_path}.')
self.preprocess_from_file_dict(file_dict=file_dict, output_path=output_path, feat=feat, njobs=njobs)
logger.info(f'Saving processed file to {output_path}.')
return
def preprocess_from_file_dict(self, file_dict, output_path, feat, njobs):
os.makedirs(os.path.join(output_path, feat), exist_ok=True)
module = self.dsp_modules[feat]
task = partial(preprocess_one, module=module, output_path=os.path.join(output_path, feat))
with ThreadPool(njobs) as pool:
_ = list(tqdm(pool.imap(task, file_dict.items()), total=len(file_dict), desc=f'Preprocessing '))
def gen_file_dict(self, input_path):
raise NotImplementedError
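A hedged sketch of a concrete subclass; how `gen_file_dict` should really be built is not shown in this file, so the directory walk below is an assumption (keys are audio paths, values are basenames, matching how `preprocess_one` unpacks each item).
import glob
import os

class WavFolderPreprocessor(BasePreproceccor):
    def gen_file_dict(self, input_path):
        paths = glob.glob(os.path.join(input_path, '**', '*.wav'), recursive=True)
        return {p: os.path.splitext(os.path.basename(p))[0] for p in paths}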
| 37.666667
| 108
| 0.666935
| 338
| 2,486
| 4.724852
| 0.310651
| 0.068879
| 0.074515
| 0.052599
| 0.107702
| 0.067627
| 0
| 0
| 0
| 0
| 0
| 0.008264
| 0.221239
| 2,486
| 65
| 109
| 38.246154
| 0.816632
| 0
| 0
| 0.052632
| 0
| 0
| 0.134755
| 0.016895
| 0
| 0
| 0
| 0
| 0
| 1
| 0.087719
| false
| 0
| 0.140351
| 0
| 0.298246
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cde121b7cc2a3e5e4fa33ad8b2f5852ba028e54
| 2,970
|
py
|
Python
|
test/core/s3_table_test_base.py
|
adidas/m3d-api
|
755d676452e4b10075fa65f9acfdbf30a6ee828e
|
[
"Apache-2.0"
] | 24
|
2019-09-26T13:15:14.000Z
|
2021-11-10T11:10:04.000Z
|
test/core/s3_table_test_base.py
|
adidas/m3d-api
|
755d676452e4b10075fa65f9acfdbf30a6ee828e
|
[
"Apache-2.0"
] | null | null | null |
test/core/s3_table_test_base.py
|
adidas/m3d-api
|
755d676452e4b10075fa65f9acfdbf30a6ee828e
|
[
"Apache-2.0"
] | 11
|
2019-09-26T13:27:10.000Z
|
2020-11-04T03:13:20.000Z
|
import os
from test.core.emr_system_unit_test_base import EMRSystemUnitTestBase
from test.core.tconx_helper import TconxHelper
class S3TableTestBase(EMRSystemUnitTestBase):
default_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test101.json"
multi_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test102.json"
single_partition_tconx = \
"test/resources/s3_table_test_base/tconx-bdp-emr_test-dev-bi_test103.json"
def env_setup(
self,
tmpdir,
destination_system,
destination_database,
destination_environment,
destination_table
):
"""
This function builds on top of EMRSystemUnitTestBase.env_setup() and adds a test-specific tconx file.
:param tmpdir: test case specific temporary directory where configuration files will be created.
:param destination_system: destination system code
:param destination_database: destination database code
:param destination_environment: destination environment code
:param destination_table: destination table code
:return: Function will return several parameters:
m3d_config_path: path of the test-specific config.json. Should be passed to M3D API calls.
scon_emr_path: path of the test-specific scon_emr
tconx_path: path of the test-specific tconx
m3d_config_dict: contents of test-specific config.json as dict
scon_emr_dict: contents of test-specific scon_emr as dict
"""
m3d_config_file, scon_emr_file, m3d_config_dict, scon_emr_dict = \
super(S3TableTestBase, self).env_setup(
tmpdir,
destination_system,
destination_database,
destination_environment
)
# tconx specific part
tconx_file = TconxHelper.setup_tconx_from_file(
m3d_config_dict["tags"]["config"],
destination_system,
destination_database,
destination_environment,
destination_table,
S3TableTestBase.default_tconx
)
return m3d_config_file, scon_emr_file, tconx_file, \
m3d_config_dict, scon_emr_dict
@staticmethod
def assert_one_hql_sent(dump_dir, expected_hql):
generated_files = map(lambda f: os.path.join(dump_dir, f), os.listdir(dump_dir))
hql_files = list(filter(lambda f: os.path.isfile(f) and f.endswith(".hql"), generated_files))
assert len(hql_files) == 1
hql_file = hql_files[0]
with open(hql_file, 'r') as hql_f:
generated_hql = hql_f.read()
generated_hql_processed = generated_hql.strip().lower()
expected_hql_processed = expected_hql.strip().lower()
assert generated_hql_processed == expected_hql_processed
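A hedged sketch of a test built on this base class; the pytest `tmpdir` fixture and the destination codes are assumptions, chosen only to illustrate the call shape of `env_setup`.
import os

class TestS3Table(S3TableTestBase):
    def test_env_setup_writes_tconx(self, tmpdir):
        # destination system/database/environment/table codes are illustrative
        m3d_config, scon_emr, tconx, m3d_dict, scon_dict = self.env_setup(
            tmpdir, "emr_test", "bi_test101", "dev", "test101")
        assert os.path.isfile(tconx)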
| 37.594937
| 108
| 0.662626
| 347
| 2,970
| 5.368876
| 0.302594
| 0.030059
| 0.037574
| 0.032206
| 0.3489
| 0.2657
| 0.242083
| 0.17445
| 0.095008
| 0.095008
| 0
| 0.011553
| 0.27138
| 2,970
| 78
| 109
| 38.076923
| 0.849353
| 0.285522
| 0
| 0.217391
| 0
| 0
| 0.114925
| 0.107463
| 0
| 0
| 0
| 0
| 0.065217
| 1
| 0.043478
| false
| 0
| 0.065217
| 0
| 0.217391
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ce01d2d1af3efb76606596d816ab61448b4bddc
| 2,911
|
bzl
|
Python
|
sqlc/private/sqlc_toolchain.bzl
|
dmayle/rules_sqlc
|
c465542827a086994e9427e2c792bbc4355c3e70
|
[
"Apache-2.0"
] | 2
|
2020-12-09T16:01:14.000Z
|
2021-02-15T09:24:27.000Z
|
sqlc/private/sqlc_toolchain.bzl
|
dmayle/rules_sqlc
|
c465542827a086994e9427e2c792bbc4355c3e70
|
[
"Apache-2.0"
] | 2
|
2020-12-08T16:46:25.000Z
|
2020-12-09T16:17:55.000Z
|
sqlc/private/sqlc_toolchain.bzl
|
dmayle/rules_sqlc
|
c465542827a086994e9427e2c792bbc4355c3e70
|
[
"Apache-2.0"
] | 3
|
2021-07-28T20:39:10.000Z
|
2022-01-26T19:33:28.000Z
|
# Copyright 2020 Plezentek, Inc. All rights reserved
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
load(
"//sqlc/private:providers.bzl",
"SQLCRelease",
)
load(
"//sqlc/private/rules_go/lib:platforms.bzl",
"PLATFORMS",
)
def _sqlc_toolchain_impl(ctx):
release = ctx.attr.release[SQLCRelease]
cross_compile = ctx.attr.goos != release.goos or ctx.attr.goarch != release.goarch
return [platform_common.ToolchainInfo(
name = ctx.label.name,
cross_compile = cross_compile,
default_goos = ctx.attr.goos,
default_goarch = ctx.attr.goarch,
actions = struct(),
flags = struct(),
release = release,
)]
sqlc_toolchain = rule(
_sqlc_toolchain_impl,
attrs = {
"goos": attr.string(
mandatory = True,
doc = "Default target OS",
),
"goarch": attr.string(
mandatory = True,
doc = "Default target architecture",
),
"release": attr.label(
mandatory = True,
providers = [SQLCRelease],
cfg = "exec",
doc = "The SQLC release this toolchain is based on",
),
},
doc = "Defines a SQLC toolchain based on a release",
provides = [platform_common.ToolchainInfo],
)
def declare_toolchains(host, release):
host_goos, _, host_goarch = host.partition("_")
for p in PLATFORMS:
toolchain_name = "sqlc_" + p.name
impl_name = toolchain_name + "-impl"
cgo_constraints = (
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_off",
"@com_plezentek_rules_sqlc//sqlc/toolchain:cgo_on",
)
constraints = [c for c in p.constraints if c not in cgo_constraints]
sqlc_toolchain(
name = impl_name,
goos = p.goos,
goarch = p.goarch,
release = release,
tags = ["manual"],
visibility = ["//visibility:public"],
)
native.toolchain(
name = toolchain_name,
toolchain_type = "@com_plezentek_rules_sqlc//sqlc:toolchain",
exec_compatible_with = [
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goos,
"@com_plezentek_rules_sqlc//sqlc/toolchain:" + host_goarch,
],
target_compatible_with = constraints,
toolchain = ":" + impl_name,
)
| 32.707865
| 86
| 0.61113
| 328
| 2,911
| 5.259146
| 0.390244
| 0.075362
| 0.049275
| 0.06087
| 0.151884
| 0.151884
| 0.132174
| 0
| 0
| 0
| 0
| 0.003857
| 0.28753
| 2,911
| 88
| 87
| 33.079545
| 0.827869
| 0.196153
| 0
| 0.144928
| 0
| 0
| 0.214531
| 0.125107
| 0
| 0
| 0
| 0
| 0
| 1
| 0.028986
| false
| 0
| 0
| 0
| 0.043478
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ce0a4f2656bca31c4698766977c076b08c6dfcd
| 4,041
|
py
|
Python
|
configs/tracker_configs/new_test_20e_cam_1_new_short.py
|
nolanzzz/mtmct
|
8bbbc7ff2fa53ab8af424feaac3cf7424b87fff0
|
[
"MIT"
] | 17
|
2021-09-01T23:13:14.000Z
|
2022-03-28T11:12:37.000Z
|
configs/tracker_configs/new_test_20e_cam_1_new_short.py
|
nolanzzz/MTMCT
|
8bbbc7ff2fa53ab8af424feaac3cf7424b87fff0
|
[
"MIT"
] | 4
|
2022-01-21T05:47:09.000Z
|
2022-03-31T04:44:01.000Z
|
configs/tracker_configs/new_test_20e_cam_1_new_short.py
|
nolanzzz/MTMCT
|
8bbbc7ff2fa53ab8af424feaac3cf7424b87fff0
|
[
"MIT"
] | 6
|
2021-12-16T02:08:43.000Z
|
2022-03-09T06:18:32.000Z
|
root = {
"general" : {
"display_viewer" : False,
#The visible GPUs will be restricted to the numbers listed here. The PyTorch numeration (cuda:0) will start at 0.
#This is a trick to get everything onto the wanted GPUs, because just setting cuda:4 in the function calls will
#not work for mmdetection. There would still be things on gpu cuda:0.
"cuda_visible_devices" : "1",
"save_track_results" : True
},
"data" : {
# To increase speed while developing, a specific interval of all frames can be set.
"selection_interval" : [0,10000],
"source" : {
"base_folder" : "/u40/zhanr110/MTA_ext_short/test",
# "base_folder" : "/Users/nolanzhang/Projects/mtmct/data/MTA_ext_short/test",
"cam_ids" : [1]
}
},
"detector" : {
# "mmdetection_config" : "detectors/mmdetection/configs/faster_rcnn_r50_fpn_1x_gta.py",
"mmdetection_config" : "detectors/mmdetection/configs/mta/faster_rcnn_r50_mta.py",
# "mmdetection_checkpoint_file" : "work_dirs/detector/faster_rcnn_gta22.07_epoch_5.pth",
"mmdetection_checkpoint_file" : "detectors/mmdetection/work_dirs/GtaDataset_30e/epoch_20.pth",
"device" : "cuda:0",
#Remove all detections with a confidence less than min_confidence
"min_confidence" : 0.8,
},
"feature_extractor" : {
"feature_extractor_name" : "abd_net_extractor"
,"reid_strong_extractor": {
"reid_strong_baseline_config": "feature_extractors/reid_strong_baseline/configs/softmax_triplet.yml",
"checkpoint_file": "work_dirs/feature_extractor/strong_reid_baseline/resnet50_model_reid_GTA_softmax_triplet.pth",
"device": "cuda:0,1"
,"visible_device" : "0,1"}
,"abd_net_extractor" : dict(abd_dan=['cam', 'pam'], abd_dan_no_head=False, abd_dim=1024, abd_np=2, adam_beta1=0.9,
adam_beta2=0.999, arch='resnet50', branches=['global', 'abd'], compatibility=False, criterion='htri',
cuhk03_classic_split=False, cuhk03_labeled=False, dan_dan=[], dan_dan_no_head=False, dan_dim=1024,
data_augment=['crop,random-erase'], day_only=False, dropout=0.5, eval_freq=5, evaluate=False,
fixbase=False, fixbase_epoch=10, flip_eval=False, gamma=0.1, global_dim=1024,
global_max_pooling=False, gpu_devices='1', height=384, htri_only=False, label_smooth=True,
lambda_htri=0.1, lambda_xent=1, lr=0.0003, margin=1.2, max_epoch=80, min_height=-1,
momentum=0.9, night_only=False, np_dim=1024, np_max_pooling=False, np_np=2, np_with_global=False,
num_instances=4, of_beta=1e-06, of_position=['before', 'after', 'cam', 'pam', 'intermediate'],
of_start_epoch=23, open_layers=['classifier'], optim='adam', ow_beta=0.001,
pool_tracklet_features='avg', print_freq=10, resume='', rmsprop_alpha=0.99
, load_weights='work_dirs/feature_extractor/abd-net/checkpoint_ep30_non_clean.pth.tar'
# , load_weights='work_dirs/feature_extractor/abd-net/resnet50-19c8e357.pth'
, root='work_dirs/datasets'
, sample_method='evenly'
, save_dir='work_dirs/feature_extractor/abd-net/log/eval-resnet50'
, seed=1, seq_len=15,
sgd_dampening=0, sgd_nesterov=False, shallow_cam=True, source_names=['mta_ext'], split_id=0,
start_epoch=0, start_eval=0, stepsize=[20, 40], target_names=['market1501'],
test_batch_size=100, train_batch_size=64, train_sampler='', use_avai_gpus=False, use_cpu=False,
use_metric_cuhk03=False, use_of=True, use_ow=True, visualize_ranks=False, weight_decay=0.0005,
width=128, workers=4)
},
"tracker" : {
"type" : "DeepSort",
"nn_budget" : 100
}
}
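A hedged sketch of how a pipeline script might consume this config; the module import path mirrors the file location, but the access pattern is an assumption.
import importlib

cfg = importlib.import_module(
    "configs.tracker_configs.new_test_20e_cam_1_new_short").root
print(cfg["general"]["cuda_visible_devices"])   # "1"
print(cfg["detector"]["min_confidence"])        # 0.8
print(cfg["data"]["source"]["cam_ids"])         # [1]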
| 48.686747
| 130
| 0.632022
| 520
| 4,041
| 4.619231
| 0.486538
| 0.023314
| 0.024979
| 0.039967
| 0.083264
| 0.046628
| 0.034138
| 0.034138
| 0
| 0
| 0
| 0.052285
| 0.247464
| 4,041
| 82
| 131
| 49.280488
| 0.737586
| 0.18832
| 0
| 0
| 0
| 0
| 0.287462
| 0.16055
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0
| 0
| 0
| 0.018868
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ce29cc9381fd7dde956750ac0935a544001e2ba
| 22,057
|
py
|
Python
|
ogusa/tax.py
|
hdoupe/OG-USA
|
f7e4d600b7a2993c7d1b53e23bfe29cfccaea770
|
[
"CC0-1.0"
] | null | null | null |
ogusa/tax.py
|
hdoupe/OG-USA
|
f7e4d600b7a2993c7d1b53e23bfe29cfccaea770
|
[
"CC0-1.0"
] | 2
|
2020-09-02T22:58:36.000Z
|
2020-09-03T19:29:46.000Z
|
ogusa/tax.py
|
prrathi/OG-USA
|
2e5c116bb8656ab190a59e431a8d57415fe26b08
|
[
"CC0-1.0"
] | null | null | null |
'''
------------------------------------------------------------------------
Functions for taxes in the steady state and along the transition path.
------------------------------------------------------------------------
'''
# Packages
import numpy as np
from ogusa import utils
'''
------------------------------------------------------------------------
Functions
------------------------------------------------------------------------
'''
def replacement_rate_vals(nssmat, wss, factor_ss, j, p):
'''
Calculates replacement rate values for the social security system.
Args:
nssmat (Numpy array): initial guess at labor supply, size = SxJ
wss (scalar): steady state real wage rate
factor_ss (scalar): scaling factor converting model units to
dollars
j (int): index of lifetime income group
p (OG-USA Specifications object): model parameters
Returns:
theta (Numpy array): social security replacement rate value for
lifetime income group j
'''
if j is not None:
e = p.e[:, j]
else:
e = p.e
# adjust number of calendar years AIME computed from int model periods
equiv_periods = int(round((p.S / 80.0) * p.AIME_num_years)) - 1
if e.ndim == 2:
dim2 = e.shape[1]
else:
dim2 = 1
earnings = (e * (wss * nssmat * factor_ss)).reshape(p.S, dim2)
# keep the highest-earning years over the number of years AIME is computed from
highest_earn =\
(-1.0 * np.sort(-1.0 * earnings[:p.retire[-1], :],
axis=0))[:equiv_periods]
AIME = highest_earn.sum(0) / ((12.0 * (p.S / 80.0)) * equiv_periods)
PIA = np.zeros(dim2)
# Compute level of replacement using AIME brackets and PIA rates
for j in range(dim2):
if AIME[j] < p.AIME_bkt_1:
PIA[j] = p.PIA_rate_bkt_1 * AIME[j]
elif AIME[j] < p.AIME_bkt_2:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (AIME[j] - p.AIME_bkt_1))
else:
PIA[j] = (p.PIA_rate_bkt_1 * p.AIME_bkt_1 +
p.PIA_rate_bkt_2 * (p.AIME_bkt_2 - p.AIME_bkt_1) +
p.PIA_rate_bkt_3 * (AIME[j] - p.AIME_bkt_2))
# Set the maximum monthly replacement rate from SS benefits tables
PIA[PIA > p.PIA_maxpayment] = p.PIA_maxpayment
if p.PIA_minpayment != 0.0:
PIA[PIA < p.PIA_minpayment] = p.PIA_minpayment
theta = (PIA * (12.0 * p.S / 80.0)) / (factor_ss * wss)
return theta
def ETR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the effective tax rate on wealth.
.. math::
T_{j,s,t}^{w} = \frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_w (Numpy array): effective tax rate on wealth, size = SxJ
'''
tau_w = (p_wealth * h_wealth * b) / (h_wealth * b + m_wealth)
return tau_w
def MTR_wealth(b, h_wealth, m_wealth, p_wealth):
r'''
Calculates the marginal tax rate on wealth from the wealth tax.
.. math::
\frac{\partial T_{j,s,t}^{w}}{\partial b_{j,s,t}} = \frac{b_{j,s,t}h^{w}m^{w}p_{w}}{(b_{j,s,t}h^{w} + m^{w})^{2}} + \frac{h^{w}p_{w}b_{j,s,t}}{h^{w}b_{j,s,t} + m^{w}}
Args:
b (Numpy array): savings
h_wealth (scalar): parameter of wealth tax function
p_wealth (scalar): parameter of wealth tax function
m_wealth (scalar): parameter of wealth tax function
Returns:
tau_prime (Numpy array): marginal tax rate on wealth, size = SxJ
'''
tau_prime = ((b * h_wealth * m_wealth * p_wealth) /
((b * h_wealth + m_wealth) ** 2) +
ETR_wealth(b, h_wealth, m_wealth, p_wealth))
return tau_prime
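# A small numeric sketch checking the two wealth-tax functions above; the
# parameter values are illustrative, not calibrated.
# import numpy as np  # already imported at the top of this module
# b = np.array([100.0])
# h_w, m_w, p_w = 0.1, 1.0, 0.025
# print(ETR_wealth(b, h_w, m_w, p_w))   # ~0.0227 = 0.025*0.1*100 / (0.1*100 + 1)
# print(MTR_wealth(b, h_w, m_w, p_w))   # ~0.0248, above the ETR because the rate rises with b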
def ETR_income(r, w, b, n, factor, e, etr_params, p):
'''
Calculates effective personal income tax rate.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): effective tax rate on total income
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
tau = ((phi0 * (income - ((income ** -phi1) + phi2) **
(-1 / phi1))) / income)
elif p.tax_func_type == 'DEP_totalinc':
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
def MTR_income(r, w, b, n, factor, mtr_capital, e, etr_params,
mtr_params, p):
r'''
Generates the marginal tax rate on labor income for households.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
mtr_capital (bool): whether to compute the marginal tax rate on
capital income or labor income
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
tau (Numpy array): marginal tax rate on income source
'''
X = (w * e * n) * factor
Y = (r * b) * factor
X2 = X ** 2
Y2 = Y ** 2
income = X + Y
income2 = income ** 2
if p.tax_func_type == 'GS':
if p.analytical_mtrs:
phi0 = np.squeeze(etr_params[..., 0])
phi1 = np.squeeze(etr_params[..., 1])
phi2 = np.squeeze(etr_params[..., 2])
else:
phi0 = np.squeeze(mtr_params[..., 0])
phi1 = np.squeeze(mtr_params[..., 1])
phi2 = np.squeeze(mtr_params[..., 2])
tau = (phi0*(1 - (income ** (-phi1 - 1) *
((income ** -phi1) + phi2) **
((-1 - phi1) / phi1))))
elif p.tax_func_type == 'DEP_totalinc':
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
max_income = np.squeeze(etr_params[..., 4])
min_income = np.squeeze(etr_params[..., 5])
shift_income = np.squeeze(etr_params[..., 8])
shift = np.squeeze(etr_params[..., 10])
d_etr = ((max_income - min_income) * ((2 * A * income + B) /
((A * income2 + B * income + 1) ** 2)))
etr = (((max_income - min_income) *
((A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income) +
shift_income + shift)
tau = (d_etr * income) + (etr)
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
max_income = np.squeeze(mtr_params[..., 4])
min_income = np.squeeze(mtr_params[..., 5])
shift_income = np.squeeze(mtr_params[..., 8])
shift = np.squeeze(mtr_params[..., 10])
tau_income = (((max_income - min_income) *
(A * income2 + B * income) /
(A * income2 + B * income + 1)) + min_income)
tau = tau_income + shift_income + shift
else: # DEP or linear
if p.analytical_mtrs:
A = np.squeeze(etr_params[..., 0])
B = np.squeeze(etr_params[..., 1])
C = np.squeeze(etr_params[..., 2])
D = np.squeeze(etr_params[..., 3])
max_x = np.squeeze(etr_params[..., 4])
min_x = np.squeeze(etr_params[..., 5])
max_y = np.squeeze(etr_params[..., 6])
min_y = np.squeeze(etr_params[..., 7])
shift_x = np.squeeze(etr_params[..., 8])
shift_y = np.squeeze(etr_params[..., 9])
shift = np.squeeze(etr_params[..., 10])
share = np.squeeze(etr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
etr = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
if mtr_capital:
d_etr = ((1-share) * ((tau_y + shift_y) ** (-share)) *
(max_y - min_y) * ((2 * C * Y + D) /
((C * Y2 + D * Y + 1)
** 2)) *
((tau_x + shift_x) ** share))
tau = d_etr * income + etr
else:
d_etr = (share * ((tau_x + shift_x) ** (share - 1)) *
(max_x - min_x) * ((2 * A * X + B) /
((A * X2 + B * X + 1)
** 2)) *
((tau_y + shift_y) ** (1 - share)))
tau = d_etr * income + etr
else:
A = np.squeeze(mtr_params[..., 0])
B = np.squeeze(mtr_params[..., 1])
C = np.squeeze(mtr_params[..., 2])
D = np.squeeze(mtr_params[..., 3])
max_x = np.squeeze(mtr_params[..., 4])
min_x = np.squeeze(mtr_params[..., 5])
max_y = np.squeeze(mtr_params[..., 6])
min_y = np.squeeze(mtr_params[..., 7])
shift_x = np.squeeze(mtr_params[..., 8])
shift_y = np.squeeze(mtr_params[..., 9])
shift = np.squeeze(mtr_params[..., 10])
share = np.squeeze(mtr_params[..., 11])
tau_x = ((max_x - min_x) * (A * X2 + B * X) /
(A * X2 + B * X + 1) + min_x)
tau_y = ((max_y - min_y) * (C * Y2 + D * Y) /
(C * Y2 + D * Y + 1) + min_y)
tau = (((tau_x + shift_x) ** share) *
((tau_y + shift_y) ** (1 - share))) + shift
return tau
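# Note on the analytical MTR branches above: total tax liability is
# ETR(income) * income, so by the product rule the marginal rate is
# ETR + income * dETR/d(income), which is what `tau = d_etr * income + etr`
# computes (d_etr holds the derivative of the fitted ETR function).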
def get_biz_tax(w, Y, L, K, p, method):
r'''
Finds total business income tax revenue.
.. math::
R_{t}^{b} = \tau_{t}^{b}(Y_{t} - w_{t}L_{t}) - \tau_{t}^{b}\delta_{t}^{\tau}K_{t}^{\tau}
Args:
        w (array_like): real wage rate
        Y (array_like): aggregate output
        L (array_like): aggregate labor demand
        K (array_like): aggregate capital demand
        p (OG-USA Specifications object): model parameters
        method (str): adjusts calculation dimensions based on 'SS' or
            'TPI'
Returns:
business_revenue (array_like): aggregate business tax revenue
'''
if method == 'SS':
delta_tau = p.delta_tau[-1]
tau_b = p.tau_b[-1]
else:
delta_tau = p.delta_tau[:p.T]
tau_b = p.tau_b[:p.T]
business_revenue = tau_b * (Y - w * L) - tau_b * delta_tau * K
return business_revenue
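# Worked example of the formula above (hypothetical values, 'SS' case):
#   with tau_b = 0.21, Y = 2.0, w = 1.0, L = 1.2, delta_tau = 0.05, K = 3.0,
#   business_revenue = 0.21 * (2.0 - 1.0 * 1.2) - 0.21 * 0.05 * 3.0 = 0.1365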
def net_taxes(r, w, b, n, bq, factor, tr, theta, t, j, shift, method,
e, etr_params, p):
'''
Calculate net taxes paid for each household.
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
bq (Numpy array): bequests received
factor (scalar): scaling factor converting model units to
dollars
tr (Numpy array): government transfers to the household
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
net_tax (Numpy array): net taxes paid for each household
'''
T_I = income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p)
pension = pension_amount(w, n, theta, t, j, shift, method, e, p)
T_BQ = bequest_tax_liab(r, b, bq, t, j, method, p)
T_W = wealth_tax_liab(r, b, t, j, method, p)
net_tax = T_I - pension + T_BQ + T_W - tr
return net_tax
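# Sign convention: income, bequest, and wealth taxes add to the household's net
# tax bill, while pension benefits and government transfers (tr) reduce it.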
def income_tax_liab(r, w, b, n, factor, t, j, method, e, etr_params, p):
'''
Calculate income and payroll tax liability for each household
Args:
r (array_like): real interest rate
w (array_like): real wage rate
b (Numpy array): savings
n (Numpy array): labor supply
factor (scalar): scaling factor converting model units to
dollars
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
etr_params (Numpy array): effective tax rate function parameters
p (OG-USA Specifications object): model parameters
Returns:
T_I (Numpy array): total income and payroll taxes paid for each
household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
w = utils.to_timepath_shape(w)
income = r * b + w * e * n
labor_income = w * e * n
T_I = ETR_income(r, w, b, n, factor, e, etr_params, p) * income
if method == 'SS':
T_P = p.tau_payroll[-1] * labor_income
elif method == 'TPI':
length = w.shape[0]
if len(b.shape) == 1:
T_P = p.tau_payroll[t: t + length] * labor_income
elif len(b.shape) == 2:
T_P = (p.tau_payroll[t: t + length].reshape(length, 1) *
labor_income)
else:
T_P = (p.tau_payroll[t:t + length].reshape(length, 1, 1) *
labor_income)
elif method == 'TPI_scalar':
T_P = p.tau_payroll[0] * labor_income
income_payroll_tax_liab = T_I + T_P
return income_payroll_tax_liab
def pension_amount(w, n, theta, t, j, shift, method, e, p):
'''
Calculate public pension benefit amounts for each household.
Args:
w (array_like): real wage rate
n (Numpy array): labor supply
theta (Numpy array): social security replacement rate value for
lifetime income group j
t (int): time period
j (int): index of lifetime income group
shift (bool): whether computing for periods 0--s or 1--(s+1),
=True for 1--(s+1)
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
e (Numpy array): effective labor units
p (OG-USA Specifications object): model parameters
Returns:
pension (Numpy array): pension amount for each household
'''
if j is not None:
if method == 'TPI':
if n.ndim == 2:
w = w.reshape(w.shape[0], 1)
else:
if method == 'TPI':
w = utils.to_timepath_shape(w)
pension = np.zeros_like(n)
if method == 'SS':
# Depending on if we are looking at b_s or b_s+1, the
# entry for retirement will change (it shifts back one).
# The shift boolean makes sure we start replacement rates
# at the correct age.
if shift is False:
pension[p.retire[-1]:] = theta * w
else:
pension[p.retire[-1] - 1:] = theta * w
elif method == 'TPI':
length = w.shape[0]
if not shift:
# retireTPI is different from retire, because in TP income
# we are counting backwards with different length lists.
# This will always be the correct location of retirement,
# depending on the shape of the lists.
retireTPI = (p.retire[t: t + length] - p.S)
else:
retireTPI = (p.retire[t: t + length] - 1 - p.S)
if len(n.shape) == 1:
if not shift:
retireTPI = p.retire[t] - p.S
else:
retireTPI = p.retire[t] - 1 - p.S
pension[retireTPI:] = (
theta[j] * p.replacement_rate_adjust[t] * w[retireTPI:])
elif len(n.shape) == 2:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:] = (
theta * p.replacement_rate_adjust[t + tt] * w[tt])
else:
for tt in range(pension.shape[0]):
pension[tt, retireTPI[tt]:, :] = (
theta.reshape(1, p.J) *
p.replacement_rate_adjust[t + tt] * w[tt])
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
pension = theta * p.replacement_rate_adjust[0] * w
return pension
def wealth_tax_liab(r, b, t, j, method, p):
'''
Calculate wealth tax liability for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_W (Numpy array): wealth tax liability for each household
'''
if j is not None:
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_W = (ETR_wealth(b, p.h_wealth[-1], p.m_wealth[-1],
p.p_wealth[-1]) * b)
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
elif len(b.shape) == 2:
T_W = (ETR_wealth(b, p.h_wealth[t:t + length],
p.m_wealth[t:t + length],
p.p_wealth[t:t + length]) * b)
else:
T_W = (ETR_wealth(
b, p.h_wealth[t:t + length].reshape(length, 1, 1),
p.m_wealth[t:t + length].reshape(length, 1, 1),
p.p_wealth[t:t + length].reshape(length, 1, 1)) * b)
elif method == 'TPI_scalar':
T_W = (ETR_wealth(b, p.h_wealth[0], p.m_wealth[0],
p.p_wealth[0]) * b)
return T_W
def bequest_tax_liab(r, b, bq, t, j, method, p):
'''
Calculate liability due from taxes on bequests for each household.
Args:
r (array_like): real interest rate
b (Numpy array): savings
bq (Numpy array): bequests received
t (int): time period
j (int): index of lifetime income group
method (str): adjusts calculation dimensions based on 'SS' or
'TPI'
p (OG-USA Specifications object): model parameters
Returns:
T_BQ (Numpy array): bequest tax liability for each household
'''
if j is not None:
lambdas = p.lambdas[j]
if method == 'TPI':
if b.ndim == 2:
r = r.reshape(r.shape[0], 1)
else:
lambdas = np.transpose(p.lambdas)
if method == 'TPI':
r = utils.to_timepath_shape(r)
if method == 'SS':
T_BQ = p.tau_bq[-1] * bq
elif method == 'TPI':
length = r.shape[0]
if len(b.shape) == 1:
T_BQ = p.tau_bq[t:t + length] * bq
elif len(b.shape) == 2:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1) * bq / lambdas
else:
T_BQ = p.tau_bq[t:t + length].reshape(length, 1, 1) * bq
elif method == 'TPI_scalar':
# The above methods won't work if scalars are used. This option
# is only called by the SS_TPI_firstdoughnutring function in TPI.
T_BQ = p.tau_bq[0] * bq
return T_BQ
| 36.823038
| 109
| 0.518611
| 3,005
| 22,057
| 3.658902
| 0.092845
| 0.051569
| 0.045839
| 0.068759
| 0.743975
| 0.686403
| 0.618008
| 0.592542
| 0.55698
| 0.538699
| 0
| 0.018618
| 0.340073
| 22,057
| 598
| 110
| 36.884615
| 0.736741
| 0.325792
| 0
| 0.556575
| 0
| 0
| 0.008172
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.033639
| false
| 0
| 0.006116
| 0
| 0.073395
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ce4e6e88e3b37747a733ee2057c09e983742a39
| 478
|
py
|
Python
|
PythonDAdata/3358OS_06_Code/code6/pd_plotting.py
|
shijiale0609/Python_Data_Analysis
|
c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e
|
[
"MIT"
] | 1
|
2020-02-22T18:55:54.000Z
|
2020-02-22T18:55:54.000Z
|
PythonDAdata/3358OS_06_Code/code6/pd_plotting.py
|
shijiale0609/Python_Data_Analysis
|
c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e
|
[
"MIT"
] | null | null | null |
PythonDAdata/3358OS_06_Code/code6/pd_plotting.py
|
shijiale0609/Python_Data_Analysis
|
c18b5ed006c171bbb6fcb6be5f51b2686edc8f7e
|
[
"MIT"
] | 1
|
2020-02-22T18:55:57.000Z
|
2020-02-22T18:55:57.000Z
|
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
df = pd.read_csv('transcount.csv')
df = df.groupby('year').aggregate(np.mean)
gpu = pd.read_csv('gpu_transcount.csv')
gpu = gpu.groupby('year').aggregate(np.mean)
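# Outer-join the CPU and GPU transistor counts on year below; years present in
# only one of the two series get NaN for the other, which is then replaced by 0.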
df = pd.merge(df, gpu, how='outer', left_index=True, right_index=True)
df = df.replace(np.nan, 0)
df.plot()
df.plot(logy=True)
df[df['gpu_trans_count'] > 0].plot(kind='scatter', x='trans_count', y='gpu_trans_count', loglog=True)
plt.show()
| 26.555556
| 101
| 0.717573
| 85
| 478
| 3.917647
| 0.447059
| 0.036036
| 0.054054
| 0.132132
| 0.156156
| 0
| 0
| 0
| 0
| 0
| 0
| 0.004651
| 0.100418
| 478
| 17
| 102
| 28.117647
| 0.769767
| 0
| 0
| 0
| 0
| 0
| 0.194561
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.230769
| 0
| 0.230769
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ce783ade7ec4e76f4c0abea82bc09661b19e042
| 29,965
|
py
|
Python
|
src/dataops/pandas_db.py
|
ShizhuZhang/ontask_b
|
acbf05ff9b18dae0a41c67d1e41774e54a890c40
|
[
"MIT"
] | null | null | null |
src/dataops/pandas_db.py
|
ShizhuZhang/ontask_b
|
acbf05ff9b18dae0a41c67d1e41774e54a890c40
|
[
"MIT"
] | null | null | null |
src/dataops/pandas_db.py
|
ShizhuZhang/ontask_b
|
acbf05ff9b18dae0a41c67d1e41774e54a890c40
|
[
"MIT"
] | null | null | null |
# -*- coding: utf-8 -*-
from __future__ import unicode_literals, print_function
import logging
import os.path
import subprocess
from collections import OrderedDict
from itertools import izip
import numpy as np
import pandas as pd
from django.conf import settings
from django.core.cache import cache
from django.db import connection
from sqlalchemy import create_engine
from dataops.formula_evaluation import evaluate_node_sql
from ontask import fix_pctg_in_name
SITE_ROOT = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
table_prefix = '__ONTASK_WORKFLOW_TABLE_'
df_table_prefix = table_prefix + '{0}'
upload_table_prefix = table_prefix + 'UPLOAD_{0}'
# Query to count the number of rows in a table
query_count_rows = 'SELECT count(*) from "{0}"'
logger = logging.getLogger(__name__)
# Translation between pandas data type names, and those handled in OnTask
pandas_datatype_names = {
'object': 'string',
'int64': 'integer',
'float64': 'double',
'bool': 'boolean',
'datetime64[ns]': 'datetime'
}
# Translation between SQL data type names, and those handled in OnTask
sql_datatype_names = {
'text': 'string',
'bigint': 'integer',
'double precision': 'double',
'boolean': 'boolean',
'timestamp without time zone': 'datetime'
}
# DB Engine to use with Pandas (required by to_sql, from_sql)
engine = None
def create_db_connection(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
return create_engine(database_url, echo=False, paramstyle='format')
def create_db_engine(dialect, driver, username, password, host, dbname):
"""
Function that creates the engine object to connect to the database. The
object is required by the pandas functions to_sql and from_sql
:param dialect: Dialect for the engine (oracle, mysql, postgresql, etc)
:param driver: DBAPI driver (psycopg2, ...)
:param username: Username to connect with the database
:param password: Password to connect with the database
:param host: Host to connect with the database
:param dbname: database name
:return: the engine
"""
# DB engine
database_url = \
'{dialect}{driver}://{user}:{password}@{host}/{database_name}'.format(
dialect=dialect,
driver=driver,
user=username,
password=password,
host=host,
database_name=dbname,
)
engine = create_db_connection(dialect, driver, username, password, host,
dbname)
if settings.DEBUG:
print('Creating engine with ', database_url)
return engine
def destroy_db_engine(db_engine):
"""
Method that disposes of the given engine (to guarantee there are no
    connections available)
:param db_engine: Engine to destroy
:return: Nothing
"""
db_engine.dispose()
def pg_restore_table(filename):
"""
    Function that, given a file produced with pg_dump, uploads its
    content to the existing database
:param filename: File in pg_dump format to restore
:return:
"""
process = subprocess.Popen(['psql',
'-d',
settings.DATABASES['default']['NAME'],
'-q',
'-f',
filename])
process.wait()
def delete_all_tables():
"""
Delete all tables related to existing workflows
:return:
"""
cursor = connection.cursor()
table_list = connection.introspection.get_table_list(cursor)
for tinfo in table_list:
if not tinfo.name.startswith(table_prefix):
continue
cursor.execute('DROP TABLE "{0}";'.format(tinfo.name))
# To make sure the table is dropped.
connection.commit()
return
def is_table_in_db(table_name):
cursor = connection.cursor()
return next(
(True for x in connection.introspection.get_table_list(cursor)
if x.name == table_name),
False
)
def is_wf_table_in_db(workflow):
return is_table_in_db(create_table_name(workflow.id))
def create_table_name(pk):
"""
:param pk: Primary Key of a workflow
:return: The unique table name to use to store a workflow data frame
"""
return df_table_prefix.format(pk)
def create_upload_table_name(pk):
"""
:param pk: Primary key of a workflow
:return: The unique table to use to upload a new data frame
"""
return upload_table_prefix.format(pk)
def load_from_db(pk, columns=None, filter_exp=None):
"""
Load the data frame stored for the workflow with the pk
:param pk: Primary key of the workflow
    :param columns: Optional list of columns to load (all if None is given)
:param filter_exp: JSON expression to filter a subset of rows
:return: data frame
"""
return load_table(create_table_name(pk),
columns=columns,
filter_exp=filter_exp)
def load_table(table_name, columns=None, filter_exp=None):
"""
Load a data frame from the SQL DB.
FUTURE WORK:
Consider to store the dataframes in Redis to reduce load/store time.
The trick is to use a compressed format:
SET: redisConn.set("key", df.to_msgpack(compress='zlib'))
GET: pd.read_msgpack(redisConn.get("key"))
Need to agree on a sensible item name that does not collide with anything
else and a policy to detect a cached dataframe and remove it when the data
changes (difficult to detect? Perhaps df_new.equals(df_current))
If feasible, a write-through system could be easily implemented.
    :param table_name: Table name to read from the db into a data frame
    :param columns: Optional list of columns to load (all if None is given)
    :param filter_exp: JSON expression to filter a subset of rows
    :return: data_frame or None if it does not exist.
"""
if table_name not in connection.introspection.table_names():
return None
if settings.DEBUG:
print('Loading table ', table_name)
if columns or filter_exp:
# A list of columns or a filter exp is given
query, params = get_filter_query(table_name, columns, filter_exp)
result = pd.read_sql_query(query, engine, params=params)
else:
# No view given, so simply get the whole table
result = pd.read_sql(table_name, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
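# A minimal sketch of the Redis write-through cache described in the FUTURE
# WORK note above. It is illustrative only (it assumes a reachable Redis server
# and a pandas version that still ships to_msgpack/read_msgpack) and is not
# wired into load_table/store_table:
#
#   import redis
#   redis_conn = redis.StrictRedis()
#
#   def cache_table(key, data_frame):
#       redis_conn.set(key, data_frame.to_msgpack(compress='zlib'))
#
#   def load_cached_table(key):
#       blob = redis_conn.get(key)
#       return pd.read_msgpack(blob) if blob is not None else None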
def load_query(query):
"""
Load a data frame from the SQL DB running the given query.
:param query: Query to run in the DB
:return: data_frame or None if it does not exist.
"""
if settings.DEBUG:
print('Loading query ', query)
result = pd.read_sql_query(query, engine)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def load_df_from_csvfile(file, skiprows=0, skipfooter=0):
"""
Given a file object, try to read the content as a CSV file and transform
into a data frame. The skiprows and skipfooter are number of lines to skip
from the top and bottom of the file (see read_csv in pandas).
It also tries to convert as many columns as possible to date/time format
(testing the conversion on every string column).
:param filename: File object to read the CSV content
:param skiprows: Number of lines to skip at the top of the document
:param skipfooter: Number of lines to skip at the bottom of the document
:return: Resulting data frame, or an Exception.
"""
data_frame = pd.read_csv(
file,
index_col=False,
infer_datetime_format=True,
quotechar='"',
skiprows=skiprows,
skipfooter=skipfooter
)
# Strip white space from all string columns and try to convert to
# datetime just in case
for x in list(data_frame.columns):
if data_frame[x].dtype.name == 'object':
# Column is a string! Remove the leading and trailing white
# space
data_frame[x] = data_frame[x].str.strip().fillna(data_frame[x])
# Try the datetime conversion
try:
series = pd.to_datetime(data_frame[x],
infer_datetime_format=True)
# Datetime conversion worked! Update the data_frame
data_frame[x] = series
except (ValueError, TypeError):
pass
return data_frame
def load_df_from_sqlconnection(conn_item, pwd=None):
"""
Load a DF from a SQL connection open with the parameters given in conn_item.
:param conn_item: SQLConnection object with the connection parameters.
:return: Data frame or raise an exception.
"""
# Get the connection
db_connection = create_db_connection(conn_item.conn_type,
conn_item.conn_driver,
conn_item.db_user,
pwd,
conn_item.db_host,
conn_item.db_name)
# Try to fetch the data
result = pd.read_sql(conn_item.db_table, db_connection)
# After reading from the DB, turn all None into NaN
result.fillna(value=np.nan, inplace=True)
return result
def store_table(data_frame, table_name):
"""
Store a data frame in the DB
:param data_frame: The data frame to store
:param table_name: The name of the table in the DB
:return: Nothing. Side effect in the DB
"""
with cache.lock(table_name):
        # We overwrite the content and do not create an index
data_frame.to_sql(table_name,
engine,
if_exists='replace',
index=False)
return
def delete_table(pk):
"""Delete the table representing the workflow with the given PK. Due to
the dual use of the database, the command has to be executed directly on
the DB.
"""
try:
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}";'.format(create_table_name(pk)))
connection.commit()
except Exception:
logger.error(
'Error while dropping table {0}'.format(create_table_name(pk))
)
def delete_upload_table(pk):
"""Delete the table used to merge data into the workflow with the given
PK. Due to the dual use of the database, the command has to be executed
directly on the DB.
"""
cursor = connection.cursor()
cursor.execute('DROP TABLE "{0}"'.format(create_upload_table_name(pk)))
connection.commit()
def get_table_column_types(table_name):
"""
:param table_name: Table name
:return: List of pairs (column name, SQL type)
"""
cursor = connection.cursor()
cursor.execute("""select column_name, data_type from
INFORMATION_SCHEMA.COLUMNS where table_name = '{0}'""".format(table_name))
return cursor.fetchall()
def df_column_types_rename(table_name):
"""
    :param table_name: Name of the DB table storing this data frame
:return: List of data type strings translated to the proper values
"""
column_types = get_table_column_types(table_name)
# result = [table_name[x].dtype.name for x in list(table_name.columns)]
# for tname, ntname in pandas_datatype_names.items():
# result[:] = [x if x != tname else ntname for x in result]
return [sql_datatype_names[x] for __, x in
get_table_column_types(table_name)]
def df_drop_column(pk, column_name):
"""
Drop a column from the DB table storing a data frame
:param pk: Workflow primary key to obtain table name
:param column_name: Column name
:return: Drops the column from the corresponding DB table
"""
query = 'ALTER TABLE "{0}" DROP COLUMN "{1}"'.format(
create_table_name(pk),
column_name
)
cursor = connection.cursor()
cursor.execute(query)
def get_subframe(pk, cond_filter, column_names=None):
"""
Execute a select query to extract a subset of the dataframe and turn the
resulting query set into a data frame.
:param pk: Workflow primary key
:param cond_filter: Condition object to filter the data (or None)
    :param column_names: Optional list of columns to select (all if None)
    :return: Data frame with the selected rows and columns
"""
# Get the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Create the DataFrame and set the column names
result = pd.DataFrame.from_records(cursor.fetchall(), coerce_float=True)
result.columns = [c.name for c in cursor.description]
return result
def get_table_cursor(pk, cond_filter, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param cond_filter: Condition object to filter the data (or None)
:param column_names: optional list of columns to select
:return: ([list of column names], QuerySet with the data rows)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}" from "{1}"'.format(
'", "'.join(safe_column_names),
create_table_name(pk)
)
else:
query = 'SELECT * from "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter.formula)
if cond_filter:
# The condition may be empty, in which case, nothing is needed.
query += ' WHERE ' + cond_filter
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor
def get_table_data(pk, cond_filter, column_names=None):
# Get first the cursor
cursor = get_table_cursor(pk, cond_filter, column_names)
# Return the data
return cursor.fetchall()
def execute_select_on_table(pk, fields, values, column_names=None):
"""
Execute a select query in the database with an optional filter obtained
from the jquery QueryBuilder.
:param pk: Primary key of the workflow storing the data
:param fields: List of fields to add to the WHERE clause
:param values: parameters to match the previous fields
:param column_names: optional list of columns to select
:return: QuerySet with the data rows
"""
# Create the query
if column_names:
safe_column_names = ['"' + fix_pctg_in_name(x) + '"'
for x in column_names]
query = 'SELECT {0}'.format(','.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(pk))
# See if the action has a filter or not
cursor = connection.cursor()
if fields:
query += ' WHERE ' + \
' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in fields])
cursor.execute(query, values)
else:
# Execute the query
cursor.execute(query)
# Get the data
return cursor.fetchall()
def get_table_queryset(tablename):
query = 'SELECT * from "{0}";'.format(tablename)
try:
cursor = connection.cursor()
cursor.execute(query)
except Exception:
return None
return cursor.fetchall()
def query_to_dicts(query_string, *query_args):
"""
Run a simple query and produce a generator that returns the results as
a bunch of dictionaries with keys for the column values selected.
"""
cursor = connection.cursor()
cursor.execute(query_string, query_args)
col_names = [desc[0] for desc in cursor.description]
while True:
row = cursor.fetchone()
if row is None:
break
row_dict = OrderedDict(izip(col_names, row))
yield row_dict
return
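# Illustrative use of query_to_dicts (the workflow id below is hypothetical):
#   for row in query_to_dicts('SELECT count(*) AS n FROM "{0}"'.format(
#           create_table_name(17))):
#       print(row['n'])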
def update_row(pk, set_fields, set_values, where_fields, where_values):
"""
Given a primary key, pairs (set_field, set_value), and pairs (where_field,
where_value), it updates the row in the table selected with the
list of (where field = where value) with the values in the assignments in
the list of (set_fields, set_values)
:param pk: Primary key to detect workflow
:param set_fields: List of field names to be updated
:param set_values: List of values to update the fields of the previous list
:param where_fields: List of fields used to filter the row in the table
:param where_values: List of values of the previous fields to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}"'.format(create_table_name(pk))
# Add the SET field = value clauses
query += ' SET ' + ', '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in set_fields])
# And finally add the WHERE clause
query += ' WHERE ' + ' AND '.join(['"{0}" = %s'.format(fix_pctg_in_name(x))
for x in where_fields])
# Concatenate the values as parameters to the query
parameters = set_values + where_values
# Execute the query
cursor = connection.cursor()
cursor.execute(query, parameters)
connection.commit()
def increase_row_integer(pk, set_field, where_field, where_value):
"""
Given a primary key, a field set_field, and a pair (where_field,
where_value), it increases the field in the appropriate row
:param pk: Primary key to detect workflow
:param set_field: name of the field to be increased
:param where_field: Field used to filter the row in the table
:param where_value: Value of the previous field to filter the row
:return: The table in the workflow pointed by PK is modified.
"""
# First part of the query with the table name
query = 'UPDATE "{0}" SET "{1}" = "{1}" + 1 WHERE "{2}" = %s'.format(
create_table_name(pk),
set_field,
where_field
)
# Execute the query
cursor = connection.cursor()
cursor.execute(query, [where_value])
connection.commit()
def get_table_row_by_key(workflow, cond_filter, kv_pair, column_names=None):
"""
Select the set of elements after filtering and with the key=value pair
:param workflow: workflow object to get to the table
:param cond_filter: Condition object to filter the data (or None)
    :param kv_pair: A key=value pair to identify the row. The key is supposed
     to be unique.
:param column_names: Optional list of column names to select
:return: A dictionary with the (column_name, value) data or None if the
row has not been found
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow.id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# See if the action has a filter or not
if cond_filter is not None:
cond_filter, filter_fields = \
evaluate_node_sql(cond_filter.formula)
query += ' AND (' + cond_filter + ')'
fields = fields + filter_fields
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
qs = cursor.fetchall()
# If there is anything different than one element, return None
if len(qs) != 1:
return None
# Get the only element
qs = qs[0]
# ZIP the values to create a dictionary
return OrderedDict(zip(workflow.get_column_names(), qs))
def get_column_stats_from_df(df_column):
"""
Given a data frame with a single column, return a set of statistics
depending on its type.
:param df_column: data frame with a single column
:return: A dictionary with keys depending on the type of column
      {'min': minimum value (integer, double and datetime),
       'q1': Q1 value (0.25) (integer, double),
       'mean': mean value (integer, double),
       'median': median value (integer, double),
       'q3': Q3 value (0.75) (integer, double),
       'max': maximum value (integer, double and datetime),
       'std': standard deviation (integer, double),
       'counts': value counts (integer, double, string, datetime, Boolean),
       'mode': mode (integer, double, string, datetime, Boolean)},
      or None if the column has all its values set to NaN
"""
if len(df_column.loc[df_column.notnull()]) == 0:
# The column has no data
return None
# Dictionary to return
result = {
'min': 0,
'q1': 0,
'mean': 0,
'median': 0,
'q3': 0,
'max': 0,
'std': 0,
'mode': None,
'counts': {},
}
data_type = pandas_datatype_names[df_column.dtype.name]
if data_type == 'integer' or data_type == 'double':
quantiles = df_column.quantile([0, .25, .5, .75, 1])
result['min'] = '{0:g}'.format(quantiles[0])
result['q1'] = '{0:g}'.format(quantiles[.25])
result['mean'] = '{0:g}'.format(df_column.mean())
result['median'] = '{0:g}'.format(quantiles[.5])
result['q3'] = '{0:g}'.format(quantiles[.75])
result['max'] = '{0:g}'.format(quantiles[1])
result['std'] = '{0:g}'.format(df_column.std())
result['counts'] = df_column.value_counts().to_dict()
mode = df_column.mode()
if len(mode) == 0:
mode = '--'
result['mode'] = mode[0]
return result
def get_filter_query(table_name, column_names, filter_exp):
"""
Given a set of columns and a filter expression, return a pair of SQL query
and params to be executed
:param table_name: Table to query
:param column_names: list of columns to consider or None to consider all
:param filter_exp: Text filter expression
:return: (sql query, sql params)
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(table_name)
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if filter_exp:
filter_txt, filter_fields = evaluate_node_sql(filter_exp)
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
if filter_fields:
fields.extend(filter_fields)
return (query, fields)
def search_table_rows(workflow_id,
cv_tuples=None,
any_join=True,
order_col_name=None,
order_asc=True,
column_names=None,
pre_filter=None):
"""
    Select rows where, for every (column, value) pair, the column contains the
    value (as in LIKE %value%). The conditions are combined with OR if any_join
    is True and with AND otherwise, and the result is ordered by the given
    column and direction (if given).
:param workflow_id: workflow object to get to the table
:param cv_tuples: A column, value, type tuple to search the value in the
column
:param any_join: Boolean encoding if values should be combined with OR (or
AND)
:param order_col_name: Order results by this column
:param order_asc: Order results in ascending values (or descending)
:param column_names: Optional list of column names to select
    :param pre_filter: Optional filter condition to pre-filter the query set.
     The query is built with these terms as a requirement ANDed with the
     cv_tuples.
:return: The resulting query set
"""
# Create the query
if column_names:
safe_column_names = [fix_pctg_in_name(x) for x in column_names]
query = 'SELECT "{0}"'.format('", "'.join(safe_column_names))
else:
query = 'SELECT *'
# Add the table
query += ' FROM "{0}"'.format(create_table_name(workflow_id))
# Calculate the first suffix to add to the query
filter_txt = ''
filter_fields = []
if pre_filter:
filter_txt, filter_fields = evaluate_node_sql(pre_filter)
if cv_tuples:
likes = []
tuple_fields = []
for name, value, data_type in cv_tuples:
# Make sure we escape the name and search as text
name = fix_pctg_in_name(name)
mod_name = '(CAST("{0}" AS TEXT) LIKE %s)'.format(name)
# Create the second part of the query setting column LIKE '%value%'
likes.append(mod_name)
tuple_fields.append('%' + value + '%')
# Combine the search subqueries
if any_join:
tuple_txt = '(' + ' OR '.join(likes) + ')'
else:
tuple_txt = '(' + ' AND '.join(likes) + ')'
# Build the query so far appending the filter and/or the cv_tuples
if filter_txt or cv_tuples:
query += ' WHERE '
fields = []
# If there has been a suffix from the filter, add it.
if filter_txt:
query += filter_txt
fields.extend(filter_fields)
# If there is a pre-filter, the suffix needs to be "AND" with the ones
# just calculated
if filter_txt and cv_tuples:
query += ' AND '
if cv_tuples:
query += tuple_txt
fields.extend(tuple_fields)
# Add the order if needed
if order_col_name:
query += ' ORDER BY "{0}"'.format(fix_pctg_in_name(order_col_name))
if not order_asc:
query += ' DESC'
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
# Get the data
return cursor.fetchall()
def delete_table_row_by_key(workflow_id, kv_pair):
"""
Delete the row in the table attached to a workflow with the given key,
value pairs
:param workflow_id: workflow object to get to the table
    :param kv_pair: A key=value pair to identify the row. The key is supposed
     to be unique.
:return: Drops that row from the table in the DB
"""
# Create the query
query = 'DELETE FROM "{0}"'.format(create_table_name(workflow_id))
# Create the second part of the query setting key=value
query += ' WHERE ("{0}" = %s)'.format(fix_pctg_in_name(kv_pair[0]))
fields = [kv_pair[1]]
# Execute the query
cursor = connection.cursor()
cursor.execute(query, fields)
def num_rows(pk, cond_filter=None):
"""
Obtain the number of rows of the table storing workflow with given pk
:param pk: Primary key of the table storing the data frame
:param cond_filter: Condition element to filter the query
:return:
"""
return num_rows_by_name(create_table_name(pk), cond_filter)
def num_rows_by_name(table_name, cond_filter=None):
"""
Given a table name, get its number of rows
:param table_name: Table name
:param cond_filter: Condition element used to filter the query
:return: integer
"""
# Initial query with the table name
query = query_count_rows.format(table_name)
fields = []
if cond_filter is not None:
cond_filter, fields = evaluate_node_sql(cond_filter)
query += ' WHERE ' + cond_filter
cursor = connection.cursor()
cursor.execute(query, fields)
return cursor.fetchone()[0]
def check_wf_df(workflow):
"""
Check the consistency between the information stored in the workflow
and the structure of the underlying dataframe
:param workflow: Workflow object
:return: Boolean stating the result of the check. True: Correct.
"""
# Get the df
df = load_from_db(workflow.id)
# Set values in case there is no df
if df is not None:
dfnrows = df.shape[0]
dfncols = df.shape[1]
df_col_names = list(df.columns)
else:
dfnrows = 0
dfncols = 0
df_col_names = []
# Check 1: Number of rows and columns
if workflow.nrows != dfnrows:
return False
if workflow.ncols != dfncols:
return False
# Identical sets of columns
wf_cols = workflow.columns.all()
if [x.name for x in wf_cols] != df_col_names:
return False
# Identical data types
for n1, n2 in zip(wf_cols, df_col_names):
df_dt = pandas_datatype_names[df[n2].dtype.name]
if n1.data_type == 'boolean' and df_dt == 'string':
# This is the case of a column with Boolean and Nulls
continue
if n1.data_type != df_dt:
return False
return True
| 31.776246
| 86
| 0.64275
| 4,133
| 29,965
| 4.518993
| 0.125575
| 0.026021
| 0.018847
| 0.009049
| 0.435455
| 0.374204
| 0.348557
| 0.326819
| 0.304278
| 0.28163
| 0
| 0.005019
| 0.26858
| 29,965
| 942
| 87
| 31.809979
| 0.847151
| 0.41702
| 0
| 0.325359
| 0
| 0.002392
| 0.077164
| 0.010428
| 0.009569
| 0
| 0
| 0
| 0
| 1
| 0.086124
| false
| 0.019139
| 0.033493
| 0.002392
| 0.205742
| 0.009569
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ce82884bd68028c036284e33b78a44ed716634f
| 3,881
|
py
|
Python
|
ducktape/template.py
|
rancp/ducktape-docs
|
e1a3b1b7e68beedf5f8d29a4e5f196912a20e264
|
[
"Apache-2.0"
] | null | null | null |
ducktape/template.py
|
rancp/ducktape-docs
|
e1a3b1b7e68beedf5f8d29a4e5f196912a20e264
|
[
"Apache-2.0"
] | null | null | null |
ducktape/template.py
|
rancp/ducktape-docs
|
e1a3b1b7e68beedf5f8d29a4e5f196912a20e264
|
[
"Apache-2.0"
] | null | null | null |
# Copyright 2015 Confluent Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from ducktape.utils.util import package_is_installed
from jinja2 import Template, FileSystemLoader, PackageLoader, ChoiceLoader, Environment
import os.path
import inspect
class TemplateRenderer(object):
def render_template(self, template, **kwargs):
"""
Render a template using the context of the current object, optionally with overrides.
:param template: the template to render, a Template or a str
:param kwargs: optional override parameters
:return: the rendered template
"""
if not hasattr(template, 'render'): template = Template(template)
ctx = dict(self.__class__.__dict__)
ctx.update(self.__dict__)
return template.render(ctx, **kwargs)
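    # Illustrative usage (hypothetical subclass): instance attributes become the
    # template context, so
    #   class Greeter(TemplateRenderer):
    #       def __init__(self):
    #           self.name = "world"
    #   Greeter().render_template("Hello {{ name }}!")  # -> "Hello world!"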
@staticmethod
def _package_search_path(module_name):
"""
:param module_name: Name of a module
:return: (package, package_search_path) where package is the package containing the module,
and package_search_path is a path relative to the package in which to search for templates.
"""
module_parts = module_name.split(".")
package = module_parts[0]
# Construct path relative to package under which "templates" would be found
directory = ""
for d in module_parts[1: -1]:
directory = os.path.join(directory, d)
return package, os.path.join(directory, "templates")
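    # For example, _package_search_path("ducktape.tests.my_test") returns
    # ("ducktape", os.path.join("tests", "templates")).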
def render(self, path, **kwargs):
"""
Render a template loaded from a file.
template files referenced in file f should be in a sibling directory of f called "templates".
:param path: path, relative to the search paths, to the template file
:param kwargs: optional override parameters
:return: the rendered template
"""
if not hasattr(self, 'template_loader'):
class_dir = os.path.dirname(inspect.getfile(self.__class__))
module_name = self.__class__.__module__
package, package_search_path = self._package_search_path(module_name)
loaders = []
msg = ""
if os.path.isdir(class_dir):
# FileSystemLoader overrides PackageLoader if the path containing this directory
# is a valid directory. FileSystemLoader throws an error from which ChoiceLoader
# doesn't recover if the directory is invalid
loaders.append(FileSystemLoader(os.path.join(class_dir, 'templates')))
else:
msg += "Will not search in %s for template files since it is not a valid directory. " % class_dir
if package_is_installed(package):
loaders.append(PackageLoader(package, package_search_path))
else:
msg += "Will not search in package %s for template files because it cannot be imported."
if len(loaders) == 0:
# Expect at least one of FileSystemLoader and PackageLoader to be present
raise EnvironmentError(msg)
self.template_loader = ChoiceLoader(loaders)
self.template_env = Environment(loader=self.template_loader, trim_blocks=True, lstrip_blocks=True)
template = self.template_env.get_template(path)
return self.render_template(template, **kwargs)
| 42.184783
| 113
| 0.670188
| 481
| 3,881
| 5.274428
| 0.345114
| 0.02365
| 0.040205
| 0.02838
| 0.096965
| 0.07568
| 0.058337
| 0.058337
| 0.058337
| 0.058337
| 0
| 0.004511
| 0.257408
| 3,881
| 91
| 114
| 42.648352
| 0.875781
| 0.417934
| 0
| 0.051282
| 0
| 0
| 0.092857
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.076923
| false
| 0
| 0.128205
| 0
| 0.307692
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ceb4e48c8b6f66fc03698755dae7d3610a03921
| 1,258
|
py
|
Python
|
handlers/product_add.py
|
MuchkoM/CalorieMatchBot
|
ca26a1f6195079e10dd798ca9e77968438f2aa01
|
[
"MIT"
] | null | null | null |
handlers/product_add.py
|
MuchkoM/CalorieMatchBot
|
ca26a1f6195079e10dd798ca9e77968438f2aa01
|
[
"MIT"
] | null | null | null |
handlers/product_add.py
|
MuchkoM/CalorieMatchBot
|
ca26a1f6195079e10dd798ca9e77968438f2aa01
|
[
"MIT"
] | null | null | null |
from telegram import Update
from telegram.ext import Updater, CallbackContext, ConversationHandler, CommandHandler, MessageHandler, Filters
from db import DBConnector
import re
str_matcher = r"\"(?P<name>.+)\"\s*(?P<fat>\d+)\s*/\s*(?P<protein>\d+)\s*/\s*(?P<carbohydrates>\d+)\s*(?P<kcal>\d+)"
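# Example message matching str_matcher (illustrative): '"Oatmeal" 7/13/68 379'
# -> name='Oatmeal', fat='7', protein='13', carbohydrates='68', kcal='379'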
ADD_1 = 0
def add_0(update: Update, _: CallbackContext):
update.message.reply_text('Enter new product in format\n'
'"name" fat/protein/carbohydrates kcal')
return ADD_1
def add_1(update: Update, context: CallbackContext):
db_connect: DBConnector = context.bot_data['db_connect']
result = re.match(str_matcher, update.message.text)
if result:
db_connect.products.insert(result.groupdict())
update.message.reply_text('Product was added')
else:
        update.message.reply_text('Message has the wrong format')
return ConversationHandler.END
def add_handler(updater: Updater):
"""/product_add - Add product to list known products"""
updater.dispatcher.add_handler(ConversationHandler(
entry_points=[CommandHandler('product_add', add_0)],
states={
ADD_1: [MessageHandler(Filters.text & ~Filters.command, add_1)]
},
fallbacks=[]
))
| 32.25641
| 116
| 0.67806
| 156
| 1,258
| 5.326923
| 0.416667
| 0.024067
| 0.064982
| 0.079422
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.007835
| 0.188394
| 1,258
| 38
| 117
| 33.105263
| 0.806072
| 0.038951
| 0
| 0
| 0
| 0.037037
| 0.177057
| 0.089776
| 0
| 0
| 0
| 0
| 0
| 1
| 0.111111
| false
| 0
| 0.148148
| 0
| 0.333333
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cebb0fff2532d5f8a3a2e41a74346938730be3d
| 1,298
|
py
|
Python
|
python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | 2
|
2017-08-13T14:09:32.000Z
|
2018-07-16T23:39:00.000Z
|
python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | null | null | null |
python-packages/nolearn-0.5/build/lib.linux-x86_64-2.7/nolearn/tests/test_dataset.py
|
rajegannathan/grasp-lift-eeg-cat-dog-solution-updated
|
ee45bee6f96cdb6d91184abc16f41bba1546c943
|
[
"BSD-3-Clause"
] | 2
|
2018-04-02T06:45:11.000Z
|
2018-07-16T23:39:02.000Z
|
from mock import patch
import numpy as np
def test_dataset_simple():
from ..dataset import Dataset
data = object()
target = object()
dataset = Dataset(data, target)
assert dataset.data is data
assert dataset.target is target
@patch('nolearn.dataset.np.load')
def test_dataset_with_filenames(load):
from ..dataset import Dataset
data = 'datafile'
target = 'targetfile'
dataset = Dataset(data, target)
assert load.call_count == 2
assert dataset.target is load.return_value
def test_dataset_train_test_split():
from ..dataset import Dataset
data = np.arange(100)
target = np.array([0] * 50 + [1] * 50)
dataset = Dataset(data, target)
assert dataset.split_indices.classes.tolist() == [0, 1]
assert dataset.split_indices.n_train == 75
assert dataset.split_indices.n_test == 25
X_train, X_test, y_train, y_test = dataset.train_test_split()
assert len(X_train) == len(y_train)
assert len(X_test) == len(y_test)
def test_dataset_scale():
from ..dataset import Dataset
data = np.arange(100).astype('float')
target = np.array([0] * 100)
dataset = Dataset(data, target)
dataset.scale()
assert dataset.data[0] == -1.7148160424389376
assert dataset.data[-1] == 1.7148160424389376
| 24.961538
| 65
| 0.682589
| 179
| 1,298
| 4.793296
| 0.268156
| 0.141026
| 0.065268
| 0.111888
| 0.39627
| 0.177156
| 0.090909
| 0.090909
| 0
| 0
| 0
| 0.05706
| 0.20339
| 1,298
| 51
| 66
| 25.45098
| 0.772727
| 0
| 0
| 0.222222
| 0
| 0
| 0.035439
| 0.01772
| 0
| 0
| 0
| 0
| 0.305556
| 1
| 0.111111
| false
| 0
| 0.166667
| 0
| 0.277778
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cec0b60edcd31e7b741951f8b76edad6144ee56
| 1,345
|
py
|
Python
|
src/Cipher/MultiLevelCaesarDecrypt.py
|
EpicTofuu/Assignment
|
293f99d20e8fa7d688c16a56c48a554bcd3c9e7d
|
[
"Apache-2.0"
] | null | null | null |
src/Cipher/MultiLevelCaesarDecrypt.py
|
EpicTofuu/Assignment
|
293f99d20e8fa7d688c16a56c48a554bcd3c9e7d
|
[
"Apache-2.0"
] | null | null | null |
src/Cipher/MultiLevelCaesarDecrypt.py
|
EpicTofuu/Assignment
|
293f99d20e8fa7d688c16a56c48a554bcd3c9e7d
|
[
"Apache-2.0"
] | null | null | null |
import Cipher.tk
from Cipher.tk import EncryptDecryptCoord, GetChiSquared, Mode
def MultiDecrypt (message, alphabet, usables = 3, lan = "English", transformations = None, lowestchi = 9999, ogMessage = ""):
    # avoid a mutable default argument: a shared list would leak state between calls
    if transformations is None:
        transformations = []
    msg = ""
    prev = (9999, (0, 0)) # (chi, key)
for i in range (len(message)):
for k in range (1, len (alphabet)):
msg = EncryptDecryptCoord(message, (i,k), alphabet, Mode.DECRYPT)
chi = GetChiSquared (msg, lan)
if (round (chi, 3) < round (prev[0], 3)):
prev = (chi, (i,k))
# base case
if (prev[0] >= lowestchi):
v = ogMessage
for tr in transformations:
v = EncryptDecryptCoord (v, tr, alphabet, Mode.DECRYPT)
return (v, lowestchi, transformations)
if (len(transformations) == 0): # only set lowest chi on the first run
lowestchi = prev[0]
ogMessage = message
transformations.append (prev[1])
return MultiDecrypt (EncryptDecryptCoord (message, prev[1], alphabet, Mode.DECRYPT), alphabet, usables, lan, transformations, prev[0], ogMessage)
'''
# testing do write it here
a = " abcdefghijklmnopqrstuvwxyz"
p=[]
for c in a:
p.append (c)
print ("starting...")
print (MultiDecrypt ("dtyktckcxlbd", p))
# original 231
'''
| 32.804878
| 150
| 0.584387
| 149
| 1,345
| 5.275168
| 0.409396
| 0.025445
| 0.072519
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.025105
| 0.289219
| 1,345
| 41
| 151
| 32.804878
| 0.797071
| 0.042379
| 0
| 0
| 0
| 0
| 0.006579
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.047619
| false
| 0
| 0.095238
| 0
| 0.238095
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1ced85b293ca7dbd18aca02752e3ef9bf70663c2
| 4,125
|
py
|
Python
|
alphacoders/__init__.py
|
whoiscc/alphacoders
|
685d1e7e02a7276ae0518114b0c6aab58914aab7
|
[
"MIT"
] | 7
|
2019-09-22T16:16:15.000Z
|
2020-08-27T23:53:07.000Z
|
alphacoders/__init__.py
|
whoiscc/alphacoders
|
685d1e7e02a7276ae0518114b0c6aab58914aab7
|
[
"MIT"
] | 1
|
2020-08-27T23:53:02.000Z
|
2020-08-28T06:10:10.000Z
|
alphacoders/__init__.py
|
whoiscc/alphacoders
|
685d1e7e02a7276ae0518114b0c6aab58914aab7
|
[
"MIT"
] | null | null | null |
#
from aiohttp.client_exceptions import ClientError
from lxml import html
from pathlib import Path
from asyncio import create_task
from functools import wraps
def start_immediately(task):
@wraps(task)
def wrapper(*args, **kwargs):
return create_task(task(*args, **kwargs))
return wrapper
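# With @start_immediately, calling a decorated coroutine function (for example
# download_page(client, url)) schedules it on the running event loop right away
# and returns an asyncio.Task that can be awaited later.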
@start_immediately
async def download_page(client, url):
count = 0
while True:
print(f"(retry = {count}) download url: {url}")
try:
async with client.get(url) as resp:
assert resp.status == 200
return await resp.text()
except ClientError:
pass
finally:
count += 1
@start_immediately
async def download_image(client, url, target_dir, name):
count = 0
while True:
print(f"(retry = {count}) download image: {url} -> {target_dir / name}")
try:
async with client.get(url) as resp:
content = await resp.read()
target_dir.mkdir(exist_ok=True)
(target_dir / name).write_bytes(content)
return
except ClientError:
pass
finally:
count += 1
def download_search(client, keyword, page):
safe_keyword = keyword.replace(" ", "+")
# url = f"https://mobile.alphacoders.com/by-resolution/5?search={safe_keyword}&page={page}"
url = f"https://wall.alphacoders.com/search.php?search={safe_keyword}&page={page}"
return download_page(client, url)
@start_immediately
async def query_image_id(client, keyword=None, page=None, document=None):
if document is None:
assert keyword is not None and page is not None
search = await download_search(client, keyword, page)
document = html.fromstring(search)
a_list = document.xpath('//div[@class="boxgrid"]/a')
href_list = [a.attrib["href"] for a in a_list]
return href_list
def query_page_count(document):
count_string = document.xpath('//ul[@class="pagination"]/li[last() - 1]/a/text()')[
0
]
return int(count_string)
@start_immediately
async def query_image_url(client, detail_path):
url = f"https://wall.alphacoders.com/{detail_path}"
detail = await download_page(client, url)
document = html.fromstring(detail)
image = document.xpath('//div[@class="center img-container-desktop"]/a')[0]
return image.attrib["href"]
@start_immediately
async def download_image_by_id(manager, client, image_id, target_dir):
image_url = await query_image_url(client, image_id)
name = image_url.split("/")[-1]
await download_image(client, image_url, target_dir, name)
manager.complete_count += 1
class SingleTask:
def __init__(self, keyword, limit=None):
self.keyword = keyword
self.limit = limit
self.complete_count = 0
self.triggered = False
async def run(self, client):
assert not self.triggered
self.triggered = True
first_search_doc = html.fromstring(
await download_search(client, self.keyword, 1)
)
page_count = query_page_count(first_search_doc)
download_image_task_list = []
image_count = 0
for page in range(1, page_count + 1):
if page == 1:
partial_list = await query_image_id(client, document=first_search_doc)
else:
partial_list = await query_image_id(
client, keyword=self.keyword, page=page
)
if self.limit is not None:
partial_list = partial_list[: self.limit - image_count]
image_count += len(partial_list)
for image_id in partial_list:
download_image_task_list.append(
download_image_by_id(self, client, image_id, Path(self.keyword))
)
if self.limit is not None and image_count == self.limit:
break
for task in download_image_task_list:
await task
@start_immediately
async def execute_single_task(manager, client):
return await manager.run(client)
| 30.555556
| 95
| 0.629333
| 517
| 4,125
| 4.829787
| 0.234043
| 0.04165
| 0.050461
| 0.057669
| 0.287545
| 0.204245
| 0.082499
| 0.055266
| 0.031237
| 0
| 0
| 0.006319
| 0.27103
| 4,125
| 134
| 96
| 30.783582
| 0.824077
| 0.021576
| 0
| 0.209524
| 0
| 0
| 0.085544
| 0.021076
| 0
| 0
| 0
| 0
| 0.028571
| 1
| 0.047619
| false
| 0.019048
| 0.047619
| 0.009524
| 0.190476
| 0.019048
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cef547e153ff6ac5a327c151e5950b2c7563ac2
| 1,298
|
py
|
Python
|
scripts/data_extract.py
|
amichalski2/WBC-SHAP
|
b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0
|
[
"MIT"
] | null | null | null |
scripts/data_extract.py
|
amichalski2/WBC-SHAP
|
b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0
|
[
"MIT"
] | null | null | null |
scripts/data_extract.py
|
amichalski2/WBC-SHAP
|
b69a4a8746aaf7a8dfacfdb4dbd85b4868d73ad0
|
[
"MIT"
] | null | null | null |
import os
import cv2
import random
import numpy as np
from tensorflow.keras.utils import to_categorical
from scripts.consts import class_dict
def get_data(path, split=0.2):
X, y = [], []
for directory in os.listdir(path):
dirpath = os.path.join(path, directory)
print(directory, len(os.listdir(dirpath)))
for file in os.listdir(dirpath):
filepath = os.path.join(dirpath, file)
img = cv2.imread(filepath, cv2.IMREAD_UNCHANGED)
            if img.shape != (360, 363, 3):
                # dsize is (width, height); pass the interpolation flag by keyword
                img = cv2.resize(img, (363, 360), interpolation=cv2.INTER_CUBIC)
X.append(img)
y.append(class_dict[directory])
data = list(zip(X, y))
random.shuffle(data)
X, y = zip(*data)
num_train = int((1.0 - split) * len(y))
X_train, X_valid = np.array(X[:num_train]).astype(
'float32'), np.array(X[num_train:]).astype('float32')
y_train, y_valid = np.array(
y[:num_train]).reshape(-1, 1), np.array(y[num_train:]).reshape((-1, 1))
X_train = X_train / 255.0
X_valid = X_valid / 255.0
y_train, y_valid = to_categorical(y_train), to_categorical(y_valid)
print(X_train.shape, y_train.shape)
print(X_valid.shape, y_valid.shape)
return X_train, y_train, X_valid, y_valid
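# Illustrative call (hypothetical path; expects <path>/<class_name>/<image files>
# with each class_name being a key of scripts.consts.class_dict):
#   X_train, y_train, X_valid, y_valid = get_data('data/images', split=0.2)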
| 25.45098
| 79
| 0.617874
| 197
| 1,298
| 3.903553
| 0.314721
| 0.052016
| 0.028609
| 0.028609
| 0.140442
| 0.140442
| 0.140442
| 0.06502
| 0
| 0
| 0
| 0.038776
| 0.244992
| 1,298
| 50
| 80
| 25.96
| 0.745918
| 0
| 0
| 0
| 0
| 0
| 0.010786
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.03125
| false
| 0
| 0.1875
| 0
| 0.25
| 0.09375
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cf117501c6990cccaec0505efbf96de4aa8d218
| 299
|
py
|
Python
|
opentimesheet/profiles/tests/test_models.py
|
valerymelou/opentimesheet-server
|
0da97ebb3c3e59962132d1bc5e83e1d727f7331b
|
[
"MIT"
] | null | null | null |
opentimesheet/profiles/tests/test_models.py
|
valerymelou/opentimesheet-server
|
0da97ebb3c3e59962132d1bc5e83e1d727f7331b
|
[
"MIT"
] | 95
|
2021-02-20T21:53:29.000Z
|
2022-01-14T17:24:50.000Z
|
opentimesheet/profiles/tests/test_models.py
|
valerymelou/opentimesheet-server
|
0da97ebb3c3e59962132d1bc5e83e1d727f7331b
|
[
"MIT"
] | null | null | null |
import pytest
from opentimesheet.core.tests import TenantTestCase
@pytest.mark.usefixtures("profile")
class TestProfile(TenantTestCase):
def test__str__(self):
assert (
self.profile.first_name + " " + self.profile.last_name
== self.profile.__str__()
)
| 23
| 66
| 0.665552
| 31
| 299
| 6.096774
| 0.645161
| 0.174603
| 0.15873
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.230769
| 299
| 12
| 67
| 24.916667
| 0.821739
| 0
| 0
| 0
| 0
| 0
| 0.026756
| 0
| 0
| 0
| 0
| 0
| 0.111111
| 1
| 0.111111
| false
| 0
| 0.222222
| 0
| 0.444444
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cf4fd8e02d4b81f15724ee999de5c59e9d28d5e
| 3,210
|
py
|
Python
|
setup.py
|
rohernandezz/coldtype
|
724234fce454699a469d17b6c78ae50fa8138169
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
rohernandezz/coldtype
|
724234fce454699a469d17b6c78ae50fa8138169
|
[
"Apache-2.0"
] | null | null | null |
setup.py
|
rohernandezz/coldtype
|
724234fce454699a469d17b6c78ae50fa8138169
|
[
"Apache-2.0"
] | null | null | null |
import setuptools
long_description = """
# Coldtype
### Programmatic display typography
More info available at: [coldtype.goodhertz.com](https://coldtype.goodhertz.com)
"""
setuptools.setup(
name="coldtype",
version="0.6.6",
author="Rob Stenson / Goodhertz",
author_email="rob@goodhertz.com",
description="Functions for manual vectorized typesetting",
long_description=long_description,
long_description_content_type="text/markdown",
url="https://github.com/goodhertz/coldtype",
#package_dir={"": "coldtype"},
packages=[
"coldtype",
"coldtype.sh",
"coldtype.fx",
"coldtype.img",
"coldtype.time",
"coldtype.midi",
"coldtype.pens",
"coldtype.text",
"coldtype.grid",
"coldtype.color",
"coldtype.capture",
"coldtype.blender",
"coldtype.geometry",
"coldtype.time.nle",
"coldtype.renderer",
"coldtype.webserver",
"coldtype.renderable",
"coldtype.fontgoggles",
"coldtype.interpolation",
"coldtype.renderer.winman",
"coldtype.fontgoggles.font",
"coldtype.fontgoggles.misc",
"coldtype.fontgoggles.compile",
],
include_package_data=True,
package_data={
"": [
"webserver/webviewer.html",
"demo/RecMono-CasualItalic.ttf",
"demo/ColdtypeObviously-VF.ttf",
"demo/MutatorSans.ttf",
"demo/demo.py",
"demo/midi.py",
"demo/blank.py",
"demo/boiler.py",
"renderer/picklejar.py",
"renderer/.coldtype.py"
],
},
entry_points={
'console_scripts': [
'coldtype = coldtype.renderer:main'
],
},
extras_require={
"skia": [
"skia-python>=86.0",
],
"viewer": [
"glfw",
"PyOpenGL",
"PyOpenGL-accelerate",
"skia-python>=86.0",
"skia-pathops", # can this be taken from skia-python?
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
"noise",
"ufo2ft",
"numpy",
],
"webviewer": [
"SimpleWebSocketServer",
"watchdog<2.0.0", # https://github.com/gorakhargosh/watchdog/issues/702
],
"experimental": [
"pynput",
"rtmidi",
"noise",
],
"c": [
"srt",
"noise",
],
"unicode": [
"unicodedata2"
],
"blender": [
"skia-pathops"
],
"notebook": [
"skia-pathops",
"skia-python",
]
},
install_requires=[
"lxml",
"fonttools[ufo]",
"fontPens",
"fontParts",
"more-itertools",
"easing-functions",
"timecode",
"mido",
"defcon",
"freetype-py",
"uharfbuzz>=0.14.0",
"python-bidi"
],
classifiers=[
"Programming Language :: Python :: 3",
"Operating System :: OS Independent",
],
)
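# Editor's note (illustrative, not part of the original setup.py): with the
# extras_require table above, the optional dependency groups would typically be
# selected at install time with pip's extras syntax, e.g.
#   pip install coldtype             # core install_requires only
#   pip install "coldtype[viewer]"   # adds the GLFW / skia viewer dependencies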
| 25.68
| 83
| 0.50405
| 259
| 3,210
| 6.189189
| 0.525097
| 0.03743
| 0.026201
| 0.03743
| 0.093575
| 0.093575
| 0.093575
| 0.093575
| 0.093575
| 0.093575
| 0
| 0.013365
| 0.347352
| 3,210
| 124
| 84
| 25.887097
| 0.75179
| 0.052648
| 0
| 0.193277
| 0
| 0
| 0.456042
| 0.102733
| 0
| 0
| 0
| 0
| 0
| 1
| 0
| false
| 0
| 0.008403
| 0
| 0.008403
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cf68294fceda7fcab026e615d3065d3c8dac6d6
| 5,064
|
py
|
Python
|
GFOLD_problem.py
|
xdedss/SuccessiveConvexification
|
8b330b64a31f546ce92c1e34036c212484cbae5e
|
[
"MIT"
] | null | null | null |
GFOLD_problem.py
|
xdedss/SuccessiveConvexification
|
8b330b64a31f546ce92c1e34036c212484cbae5e
|
[
"MIT"
] | null | null | null |
GFOLD_problem.py
|
xdedss/SuccessiveConvexification
|
8b330b64a31f546ce92c1e34036c212484cbae5e
|
[
"MIT"
] | 1
|
2021-01-18T11:47:19.000Z
|
2021-01-18T11:47:19.000Z
|
# -*- coding: utf-8 -*-
# GFOLD_static_p3p4
min_ = min  # preserve the builtin min (the cvxpy star-import below may shadow it)
from cvxpy import *
import cvxpy_codegen as cpg
from time import time
import numpy as np
import sys
import GFOLD_params
''' As defined in the paper...
PROBLEM 3: Minimum Landing Error (tf roughly solved)
MINIMIZE : norm of landing error vector
SUBJ TO :
0) initial conditions satisfied (position, velocity)
1) final conditions satisfied (altitude, velocity)
2) dynamics always satisfied
3) x stays in cone at all times
4) relaxed convexified mass and thrust constraints
5) thrust pointing constraint
6) sub-surface flight constraint
PROBLEM 4: Minimum Fuel Use
MAXIMIZE : landing mass, opt variables are dynamical and
SUBJ TO :
0) same constraints as p1, plus:
1) landing point must be equal or better than that found by p1
'''
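# Editor's note (added for clarity, not in the original file): the variables declared
# below follow the usual GFOLD convexification -- u = Tc/m (thrust acceleration),
# z = ln(m) (log mass) and s (thrust-magnitude slack) -- so the nonconvex thrust/mass
# limits become the convex bounds enforced in the loop further down, roughly
#   mu_1*(1 - (z - z0) + (z - z0)**2/2) <= s <= mu_2*(1 - (z - z0)),
# with mu_i = r_i * z0_term_inv evaluated at the reference log-mass z0.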
def solve(params, params_super = None, codegen = False, verbose=False):
#super params
if (params_super == None):
params_super = GFOLD_params.SuperParams() # default
N = params_super.N
    # optimization variables
x =Variable(6,N,name='var_x') # state vector (3position,3velocity)
u =Variable(3,N,name='var_u') # u = Tc/mass because Tc[:,n]/m[n] is not allowed by DCP
z= Variable(1,N,name='var_z') # z = ln(mass)
s= Variable(1,N,name='var_s') # thrust slack parameter
# Parameters
x0 = Parameter(6, 1, name="x0")
xf = Parameter(6, 1, name="xf")
z0_term_inv = Parameter(1, N, name="z0_term_inv", sign='positive')
z0_term_log = Parameter(1, N, name="z0_term_log")
g = Parameter(3, 1, name="g_vec")
p_cs_cos = Parameter(1, N, name='p_cs_cos')
sparse_params = Parameter(7, 1, name="sparse_params", sign='positive')
m_wet_log = Parameter(2, 1, name='m_wet_log')
if (not codegen):
x0.value = params.x0.reshape(6, 1)
xf.value = params.xf.reshape(6, 1)
z0_term_inv.value = params.z0_term_inv.reshape(1, N)
z0_term_log.value = params.z0_term_log.reshape(1, N)
g.value = params.g.reshape(3, 1)
p_cs_cos.value = params.p_cs_cos.reshape(1, N)
m_wet_log.value = [params.m_wet_log, 0]
sparse_params.value = np.array([
params.alpha_dt,
params.G_max,
params.V_max,
params.y_gs_cot,
params.r1,
params.r2,
params.tf
]).reshape(7, 1)
alpha_dt, G_max, V_max, y_gs_cot, r1, r2, tf_ = sparse_params
dt = tf_ * (1/N) # Integration dt
# constraints
con = []
con += [x[0:3,0] == x0[0:3]] # initial pos
con += [x[3:6,0] == x0[3:6]] # initial vel
con += [x[0:3,N-1] == xf[0:3]] # final pos
con += [x[3:6,N-1]== xf[3:6]] # final vel
con += [s[0,N-1] == 0] # thrust at the end must be zero
con += [u[:,0] == s[0,0]*np.array([1,0,0])] # thrust direction starts straight
con += [u[:,N-1] == s[0,N-1]*np.array([1,0,0])] # and ends straight
con += [z[0,0] == m_wet_log[0,0]] # convexified (7)
for n in range(0,N-1):
#dynamics
con += [x[3:6,n+1] == x[3:6,n] + (dt*0.5)*((u[:,n]+g[:,0]) + (u[:,n+1]+g[:,0]))]
con += [x[0:3,n+1] == x[0:3,n] + (dt*0.5)*(x[3:6,n+1]+x[3:6,n])]
# glideslope cone
con += [ norm( (x[0:3,n])[1:3] ) - y_gs_cot*(x[0,n]) <= 0 ]
con += [ norm(x[3:6,n]) <= V_max ] # velocity
#con += [norm(u[:,n+1]-u[:,n]) <= dt*T_max/m_dry * 3]
con += [z[0,n+1] == z[0,n] - (alpha_dt*0.5)*(s[0,n] + s[0,n+1])] # mass decreases
con += [norm(u[:,n]) <= s[0,n]] # limit thrust magnitude & also therefore, mass
# Thrust pointing constraint
con += [ u[0,n] >= p_cs_cos[0,n]*s[0,n] ]
if n > 0:
#z0_term = m_wet - alpha * r2 * (n) * dt # see ref [2], eq 34,35,36
#z0 = log(z0_term)
z0 = z0_term_log[0,n]
mu_1 = r1*(z0_term_inv[0,n])
mu_2 = r2*(z0_term_inv[0,n])
            # correction of a spot where the original project deviated from the paper
            # diagram: https://www.desmos.com/calculator/wtcfgnepe1
con += [s[0,n] >= mu_1 * (1 - (z[0,n] - z0) + (z[0,n] - z0)**2 *0.5)] # lower thrust bound
con += [s[0,n] <= mu_2 * (1 - (z[0,n] - z0))] # upper thrust bound
#Objective
objective = Minimize(-z[0,N-1])
problem=Problem(objective, con)
if codegen:
cpg.codegen(problem, codegen_path)
else:
obj_opt = problem.solve(solver=ECOS, verbose=verbose)
return (
obj_opt,
np.array(x.value), # r,v
np.array(u.value), # u (acceleration)
np.exp(np.array(z.value)) # mass
) if type(x.value) != type(None) else (None, None, None, None)
if __name__ == '__main__':
if (len(sys.argv) > 2 and sys.argv[1] == 'codegen'):
codegen_path = sys.argv[2]
solve(None, None, True)
else:
print("invalid input")
print(sys.argv)
| 33.536424
| 102
| 0.540482
| 802
| 5,064
| 3.290524
| 0.253117
| 0.015915
| 0.009094
| 0.009094
| 0.082607
| 0.032967
| 0.006821
| 0.006821
| 0.006821
| 0
| 0
| 0.05501
| 0.296406
| 5,064
| 150
| 103
| 33.76
| 0.685658
| 0.150276
| 0
| 0.02381
| 0
| 0
| 0.035331
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.011905
| false
| 0
| 0.071429
| 0
| 0.095238
| 0.02381
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cf6d2a88472187827c03695f3dd1ceab02478dc
| 6,154
|
py
|
Python
|
Hints.py
|
SarienFates/MMRandomizer
|
7c677140d83e94167fecee35e8c25216a51bdd56
|
[
"MIT"
] | 36
|
2018-08-23T05:01:33.000Z
|
2021-03-17T03:25:20.000Z
|
Hints.py
|
SarienFates/MMRandomizer
|
7c677140d83e94167fecee35e8c25216a51bdd56
|
[
"MIT"
] | 11
|
2018-09-07T21:43:13.000Z
|
2019-02-10T22:40:11.000Z
|
Hints.py
|
SarienFates/MMRandomizer
|
7c677140d83e94167fecee35e8c25216a51bdd56
|
[
"MIT"
] | 7
|
2018-08-22T09:49:12.000Z
|
2020-01-12T20:23:29.000Z
|
import io
import hashlib
import logging
import os
import struct
import random
from HintList import getHint, getHintGroup, Hint
from Utils import local_path
#builds out general hints based on location and whether an item is required or not
def buildGossipHints(world, rom):
stoneAddresses = [0x938e4c, 0x938EA8, 0x938F04, 0x938F60, 0x938FBC, 0x939018, 0x939074, 0x9390D0, 0x93912C, 0x939188,
0x9391E4, 0x939240, 0x93929C, 0x9392F8, 0x939354, 0x9393B0, 0x93940C, 0x939468, 0x9394C4, 0x939520,
0x93957C, 0x9395D8, 0x939634, 0x939690, 0x9396EC, 0x939748, 0x9397A4, 0x939800, 0x93985C, 0x9398B8,
0x939914, 0x939970] #address for gossip stone text boxes, byte limit is 92
    alwaysLocations = getHintGroup('alwaysLocation')#These locations will always have a hint somewhere in the world.
sometimesSpace = (int((len(stoneAddresses) - len(alwaysLocations)*2)/2))
sometimesLocations = getHintGroup('location')#A random selection of these locations will be in the hint pool.
random.shuffle(sometimesLocations)
sometimesLocations = sometimesLocations[0:sometimesSpace]
hintList = alwaysLocations
hintList.extend(alwaysLocations)
hintList.extend(sometimesLocations)
locationData = []
for hint in hintList:
for locationWorld in world.get_locations():
if hint.name == locationWorld.name:
locationData.extend([locationWorld])
#hopefully fixes weird VC error where the last character from a previous text box would sometimes spill over into the next box.
for address in range(stoneAddresses[0], 0x9399D8):
rom.write_byte(address, 0x08)
#shuffles the stone addresses for randomization, always locations will be placed first and twice
random.shuffle(stoneAddresses)
#loops through shuffled locations and addresses and builds hint.
while locationData:
currentLoc = locationData.pop(0)
Block_code = getBytes((getHint(currentLoc.name).text))
if currentLoc.item.type == 'Map' or currentLoc.item.type == 'Compass' or currentLoc.item.type == 'BossKey' or currentLoc.item.type == 'SmallKey':
Block_code.extend(getBytes((getHint(currentLoc.item.type).text)))
else:
Block_code.extend(getBytes((getHint(currentLoc.item.name).text)))
endText(Block_code)
if len(Block_code) > 92:
print('Too many characters in hint')
Block_code = getBytes("I am Error.")
Block_code.extend(getBytes(currentLoc.name))
Block_code.extend(getBytes('&'))
Block_code.extend(getBytes(currentLoc.item.name))
rom.write_bytes(stoneAddresses.pop(0), Block_code)
junkHints = getHintGroup('junkHint')
random.shuffle(junkHints)
while stoneAddresses:
junkHint = junkHints.pop()
Block_code = getBytes(junkHint.text)
endText(Block_code)
rom.write_bytes(stoneAddresses.pop(0), Block_code)
return rom
# builds boss reward text that is displayed at the temple of time altar for child and adult, pull based off of item in a fixed order.
def buildBossRewardHints(world, rom):
bossRewardsSpiritualStones = ['Kokiri Emerald', 'Goron Ruby', 'Zora Sapphire']
bossRewardsMedallions = ['Forest Medallion', 'Fire Medallion', 'Water Medallion', 'Shadow Medallion', 'Spirit Medallion', 'Light Medallion']
# text that appears at altar as a child.
Block_code = []
Block_code = getBytes(getHint('Spiritual Stone Text Start').text)
for reward in bossRewardsSpiritualStones:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Spiritual Stone Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95ED95, Block_code)
# text that appears at altar as an adult.
Block_code = []
for reward in bossRewardsMedallions:
buildBossString(Block_code, reward, world)
Block_code = setRewardColor(Block_code)
Block_code.extend(getBytes(getHint('Medallion Text End').text))
Block_code.extend([0x0B])
endText(Block_code)
rom.write_bytes(0x95DB94, Block_code)
return rom
# pulls text string from hintlist for reward after sending the location to hintlist.
def buildBossString(Block_code, reward, world):
for location in world.get_locations():
if location.item.name == reward:
Block_code.extend([0x08])
Block_code.extend(getBytes(getHint(location.name).text))
return Block_code
# alternates through color set commands in child and adult boss reward hint strings setting the colors at the start of the string to correspond with the reward found at the location.
# skips over color commands at the end of stings to set color back to white.
def setRewardColor(Block_code):
rewardColors = [0x42, 0x41, 0x43, 0x45, 0x46, 0x44]
colorWhite = True
for i, byte in enumerate(Block_code):
if byte == 0x05 and colorWhite:
Block_code[i + 1] = rewardColors.pop(0)
colorWhite = False
elif byte == 0x05 and not colorWhite:
colorWhite = True
return Block_code
#sets the end of text byte in the text box.
def endText(byteArray):
return byteArray.extend([0x02])
# reads array of characters and converts them to an array of bytes.
def getBytes(string):
byteCode = []
for char in string:
if char == '^':
byteCode.extend([0x04])#box break
elif char == '&':
byteCode.extend([0x01])#new line
elif char == '@':
byteCode.extend([0x0F])#print player name
elif char == '#':
byteCode.extend([0x05, 0x40]) #sets color to white
else:
char = char.encode('utf-8')
char = char.hex()
byte = int('0x' + char, 16)
byteCode.extend([byte])
return byteCode
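# --- Editor's sketch (not part of the original file): exercises the encoding helpers
# --- above on a made-up hint string; the text itself is an assumption for illustration.
_example = getBytes("They say that #gossip# is cheap.^&@")
endText(_example)
# '#' emits the color command (0x05 0x40), '^' a box break (0x04), '&' a new line (0x01),
# '@' the player-name marker (0x0F), and endText appends the 0x02 terminator.
print(len(_example), _example[:6])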
| 41.581081
| 183
| 0.666071
| 720
| 6,154
| 5.629167
| 0.35
| 0.084382
| 0.040711
| 0.045398
| 0.188502
| 0.134222
| 0.118431
| 0.096718
| 0.07698
| 0.07698
| 0
| 0.061392
| 0.248294
| 6,154
| 147
| 184
| 41.863946
| 0.814743
| 0.20377
| 0
| 0.203704
| 0
| 0
| 0.063794
| 0
| 0
| 0
| 0.074356
| 0
| 0
| 1
| 0.055556
| false
| 0.009259
| 0.074074
| 0.009259
| 0.185185
| 0.009259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cf8bcf99614d3811354f1616a7df6d5dc7e2771
| 786
|
py
|
Python
|
bokeh/models/tests/test_callbacks.py
|
ndepal/bokeh
|
1b514f28fe40eeb71954eac0c113b2debdb2eda9
|
[
"BSD-3-Clause"
] | 1
|
2018-11-14T19:08:18.000Z
|
2018-11-14T19:08:18.000Z
|
bokeh/models/tests/test_callbacks.py
|
ndepal/bokeh
|
1b514f28fe40eeb71954eac0c113b2debdb2eda9
|
[
"BSD-3-Clause"
] | 1
|
2021-05-09T02:45:17.000Z
|
2021-05-09T02:45:17.000Z
|
bokeh/models/tests/test_callbacks.py
|
ndepal/bokeh
|
1b514f28fe40eeb71954eac0c113b2debdb2eda9
|
[
"BSD-3-Clause"
] | 1
|
2020-06-17T05:47:16.000Z
|
2020-06-17T05:47:16.000Z
|
from pytest import raises
from bokeh.models import CustomJS, Slider
def test_js_callback():
slider = Slider()
cb = CustomJS(code="foo();", args=dict(x=slider))
assert 'foo()' in cb.code
assert cb.args['x'] is slider
cb = CustomJS(code="foo();", args=dict(x=3))
assert 'foo()' in cb.code
    assert cb.args['x'] == 3
with raises(AttributeError): # kwargs not supported
CustomJS(code="foo();", x=slider)
def test_py_callback():
slider = Slider()
foo = None # fool pyflakes
def cb(x=slider):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
assert cb.args['x'] is slider
def cb(x=4):
foo()
cb = CustomJS.from_py_func(cb)
assert 'foo()' in cb.code
    assert cb.args['x'] == 4
| 22.457143
| 56
| 0.605598
| 119
| 786
| 3.932773
| 0.277311
| 0.08547
| 0.094017
| 0.111111
| 0.529915
| 0.529915
| 0.529915
| 0.529915
| 0.405983
| 0.405983
| 0
| 0.006734
| 0.244275
| 786
| 34
| 57
| 23.117647
| 0.781145
| 0.043257
| 0
| 0.48
| 0
| 0
| 0.056075
| 0
| 0
| 0
| 0
| 0
| 0.32
| 1
| 0.16
| false
| 0
| 0.08
| 0
| 0.24
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cf8fa369efacd3241e998562ea192fd61a8484a
| 639
|
py
|
Python
|
tests/test_0150-attributeerrors.py
|
martindurant/awkward-1.0
|
a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_0150-attributeerrors.py
|
martindurant/awkward-1.0
|
a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38
|
[
"BSD-3-Clause"
] | null | null | null |
tests/test_0150-attributeerrors.py
|
martindurant/awkward-1.0
|
a3221ee1bab6551dd01d5dd07a1d2dc24fd02c38
|
[
"BSD-3-Clause"
] | null | null | null |
# BSD 3-Clause License; see https://github.com/jpivarski/awkward-1.0/blob/master/LICENSE
from __future__ import absolute_import
import sys
import pytest
import numpy
import awkward1
class Dummy(awkward1.Record):
@property
def broken(self):
raise AttributeError("I'm broken!")
def test():
behavior = {}
behavior["Dummy"] = Dummy
array = awkward1.Array([{"x": 1}, {"x": 2}, {"x": 3}], behavior=behavior)
array.layout.setparameter("__record__", "Dummy")
with pytest.raises(AttributeError) as err:
array[1].broken
assert str(err.value) == "I'm broken!" # not "no field named 'broken'"
| 23.666667
| 88
| 0.666667
| 84
| 639
| 4.964286
| 0.607143
| 0.009592
| 0.038369
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.019231
| 0.186228
| 639
| 26
| 89
| 24.576923
| 0.782692
| 0.181534
| 0
| 0
| 0
| 0
| 0.086538
| 0
| 0
| 0
| 0
| 0
| 0.058824
| 1
| 0.117647
| false
| 0
| 0.294118
| 0
| 0.470588
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cf9ec2d86459244af14ac26fb2ee812ead9cfcb
| 1,262
|
py
|
Python
|
ievv_opensource/demo/batchframeworkdemo/apps.py
|
appressoas/ievv_opensource
|
63e87827952ddc8f6f86145b79478ef21d6a0990
|
[
"BSD-3-Clause"
] | null | null | null |
ievv_opensource/demo/batchframeworkdemo/apps.py
|
appressoas/ievv_opensource
|
63e87827952ddc8f6f86145b79478ef21d6a0990
|
[
"BSD-3-Clause"
] | 37
|
2015-10-26T09:14:12.000Z
|
2022-02-10T10:35:33.000Z
|
ievv_opensource/demo/batchframeworkdemo/apps.py
|
appressoas/ievv_opensource
|
63e87827952ddc8f6f86145b79478ef21d6a0990
|
[
"BSD-3-Clause"
] | 1
|
2015-11-06T07:56:34.000Z
|
2015-11-06T07:56:34.000Z
|
from django.apps import AppConfig
from ievv_opensource import ievv_batchframework
from ievv_opensource.ievv_batchframework import batchregistry
class HelloWorldAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('Hello world! %r', self.kwargs)
class HelloWorldAsyncAction(ievv_batchframework.Action):
def execute(self):
self.logger.info('\n\n\n\n\n\n\n\nHello world, async! %r\n\n\n\n\n', self.kwargs)
class BatchFrameworkDemoAppConfig(AppConfig):
name = 'ievv_opensource.demo.batchframeworkdemo'
verbose_name = "IEVV Batchframework demo"
def ready(self):
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld',
mode=batchregistry.ActionGroup.MODE_SYNCHRONOUS,
actions=[
HelloWorldAction
]))
batchregistry.Registry.get_instance().add_actiongroup(
batchregistry.ActionGroup(
name='batchframeworkdemo_helloworld_async',
mode=batchregistry.ActionGroup.MODE_ASYNCHRONOUS,
actions=[
HelloWorldAsyncAction
]
)
)
| 33.210526
| 89
| 0.655309
| 116
| 1,262
| 6.982759
| 0.353448
| 0.024691
| 0.02963
| 0.02963
| 0.395062
| 0.388889
| 0.380247
| 0.380247
| 0.380247
| 0.251852
| 0
| 0
| 0.260697
| 1,262
| 37
| 90
| 34.108108
| 0.868167
| 0
| 0
| 0.275862
| 0
| 0.034483
| 0.150555
| 0.098257
| 0
| 0
| 0
| 0
| 0
| 1
| 0.103448
| false
| 0
| 0.103448
| 0
| 0.37931
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
1cfa6189a373dd681dccafae6be7e17e2a430784
| 4,438
|
py
|
Python
|
evaluate.py
|
DeppMeng/DANNet
|
831eb70d44a4a0b6f6f57ca2014521fc64d1906c
|
[
"Apache-2.0"
] | null | null | null |
evaluate.py
|
DeppMeng/DANNet
|
831eb70d44a4a0b6f6f57ca2014521fc64d1906c
|
[
"Apache-2.0"
] | null | null | null |
evaluate.py
|
DeppMeng/DANNet
|
831eb70d44a4a0b6f6f57ca2014521fc64d1906c
|
[
"Apache-2.0"
] | null | null | null |
import os
import torch
import numpy as np
from PIL import Image
import torch.nn as nn
from torch.utils import data
from network import *
from dataset.zurich_night_dataset import zurich_night_DataSet
from configs.test_config import get_arguments
palette = [128, 64, 128, 244, 35, 232, 70, 70, 70, 102, 102, 156, 190, 153, 153, 153, 153, 153, 250, 170, 30,
220, 220, 0, 107, 142, 35, 152, 251, 152, 70, 130, 180, 220, 20, 60, 255, 0, 0, 0, 0, 142, 0, 0, 70,
0, 60, 100, 0, 80, 100, 0, 0, 230, 119, 11, 32]
zero_pad = 256 * 3 - len(palette)
for i in range(zero_pad):
palette.append(0)
def colorize_mask(mask):
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(palette)
return new_mask
def main():
os.environ['CUDA_VISIBLE_DEVICES'] = '0'
device = torch.device("cuda")
args = get_arguments()
if not os.path.exists(args.save):
os.makedirs(args.save)
if args.model == 'PSPNet':
model = PSPNet(num_classes=args.num_classes)
if args.model == 'DeepLab':
model = Deeplab(num_classes=args.num_classes)
if args.model == 'RefineNet':
model = RefineNet(num_classes=args.num_classes, imagenet=False)
saved_state_dict = torch.load(args.restore_from)
model_dict = model.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
model.load_state_dict(saved_state_dict)
lightnet = LightNet()
saved_state_dict = torch.load(args.restore_from_light)
model_dict = lightnet.state_dict()
saved_state_dict = {k: v for k, v in saved_state_dict.items() if k in model_dict}
model_dict.update(saved_state_dict)
lightnet.load_state_dict(saved_state_dict)
model = model.to(device)
lightnet = lightnet.to(device)
model.eval()
lightnet.eval()
testloader = data.DataLoader(zurich_night_DataSet(args.data_dir, args.data_list, set=args.set))
interp = nn.Upsample(size=(1080, 1920), mode='bilinear', align_corners=True)
weights = torch.log(torch.FloatTensor(
[0.36869696, 0.06084986, 0.22824049, 0.00655399, 0.00877272, 0.01227341, 0.00207795, 0.0055127, 0.15928651,
0.01157818, 0.04018982, 0.01218957, 0.00135122, 0.06994545, 0.00267456, 0.00235192, 0.00232904, 0.00098658,
0.00413907])).cuda()
weights = (torch.mean(weights) - weights) / torch.std(weights) * args.std + 1.0
for index, batch in enumerate(testloader):
if index % 10 == 0:
            print('%d processed' % index)
image, name = batch
image = image.to(device)
with torch.no_grad():
r = lightnet(image)
enhancement = image + r
if args.model == 'RefineNet':
output2 = model(enhancement)
else:
_, output2 = model(enhancement)
weights_prob = weights.expand(output2.size()[0], output2.size()[3], output2.size()[2], 19)
weights_prob = weights_prob.transpose(1, 3)
output2 = output2 * weights_prob
output = interp(output2).cpu().data[0].numpy()
output = output.transpose(1,2,0)
output = np.asarray(np.argmax(output, axis=2), dtype=np.uint8)
output_col = colorize_mask(output)
output = Image.fromarray(output)
###### get the enhanced image
mean_std = ([0.485, 0.456, 0.406], [0.229, 0.224, 0.225])
enhancement = enhancement.cpu().data[0].numpy().transpose(1,2,0)
enhancement = enhancement * mean_std[1] + mean_std[0]
enhancement = (enhancement - enhancement.min()) / (enhancement.max()-enhancement.min())
enhancement = enhancement[:, :, ::-1] * 255 # change to BGR
enhancement = Image.fromarray(enhancement.astype(np.uint8))
###### get the light
light = r.cpu().data[0].numpy().transpose(1, 2, 0)
light = (light-light.min()) / (light.max() - light.min())
light = light[:, :, ::-1] * 255 # change to BGR
light = Image.fromarray(light.astype(np.uint8))
name = name[0].split('/')[-1]
output.save('%s/%s' % (args.save, name))
output_col.save('%s/%s_color.png' % (args.save, name.split('.')[0]))
enhancement.save('%s/%s_enhancement.png' % (args.save, name.split('.')[0]))
light.save('%s/%s_light.png' % (args.save, name.split('.')[0]))
if __name__ == '__main__':
main()
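# --- Editor's sketch (not part of the original file): the class-weight transform used
# --- above, reproduced with plain numpy on CPU; the toy frequencies and std=0.05 value
# --- are assumptions for illustration only.
import numpy as np
_freq = np.array([0.6, 0.3, 0.1])          # per-class pixel frequencies (toy values)
_w = np.log(_freq)
_w = (np.mean(_w) - _w) / np.std(_w) * 0.05 + 1.0
print(_w)  # rarer classes receive weights slightly above 1, frequent ones slightly below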
| 37.294118
| 116
| 0.630689
| 624
| 4,438
| 4.350962
| 0.304487
| 0.046409
| 0.051565
| 0.027993
| 0.20442
| 0.18453
| 0.141436
| 0.141436
| 0.069245
| 0.069245
| 0
| 0.113906
| 0.220595
| 4,438
| 118
| 117
| 37.610169
| 0.671003
| 0.014421
| 0
| 0.066667
| 0
| 0
| 0.033035
| 0.004818
| 0
| 0
| 0
| 0
| 0
| 1
| 0.022222
| false
| 0
| 0.1
| 0
| 0.133333
| 0.011111
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e8000d1d11cc25ead163054e538c9037a92ad52f
| 2,090
|
py
|
Python
|
test/pyfrechet_visualize.py
|
compgeomTU/frechetForCurves
|
625bfe32a45d23b194226b4ac7713ded09bd2825
|
[
"MIT"
] | null | null | null |
test/pyfrechet_visualize.py
|
compgeomTU/frechetForCurves
|
625bfe32a45d23b194226b4ac7713ded09bd2825
|
[
"MIT"
] | null | null | null |
test/pyfrechet_visualize.py
|
compgeomTU/frechetForCurves
|
625bfe32a45d23b194226b4ac7713ded09bd2825
|
[
"MIT"
] | null | null | null |
# Author: Will Rodman
# wrodman@tulane.edu
#
# Command line to run program:
# python3 pyfrechet_visualize.py
import sys, os, unittest
sys.path.insert(0, "../")
from pyfrechet.distance import StrongDistance
from pyfrechet.visualize import FreeSpaceDiagram, Trajectories
TEST_DATA = "sp500"
if TEST_DATA == "sp500":
REACHABLE_EPSILON = 5
UNREACHABLE_EPSILON = 1
REVERSE_CURVE = False
elif TEST_DATA == "trajectory":
REACHABLE_EPSILON = 70
UNREACHABLE_EPSILON = 60
REVERSE_CURVE = True
CURVE_1 = f"{TEST_DATA}_data/sample_1.txt"
CURVE_2 = f"{TEST_DATA}_data/sample_2.txt"
class pyfrechet_optimise(unittest.TestCase):
global REACHABLE_EPSILON
global UNREACHABLE_EPSILON
global REVERSE_CURVE
global CURVE_1
global CURVE_2
def test_fail_BinarySearch_instance_argument(self):
class BadClass(): pass
with self.assertRaises(TypeError):
bc = BadClass()
FreeSpaceDiagram(bc)
def test_FreeSpaceDiagram_plot(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd = FreeSpaceDiagram(sd)
fsd.plot()
def test_FreeSpaceDiagram__addEpsilonSlider(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
fsd.addEpsilonSlider(UNREACHABLE_EPSILON, REACHABLE_EPSILON, 1)
fsd.plot()
def test_FreeSpaceDiagram__weighted_cells(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, False)
def test_FreeSpaceDiagram__gridlines(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
fsd = FreeSpaceDiagram(sd)
sd.setFreeSpace(REACHABLE_EPSILON)
fsd.plot(True, True)
def test_Trajectories(self):
sd = StrongDistance.setCurves(CURVE_1, CURVE_2, REVERSE_CURVE)
t = Trajectories(sd)
t.plot()
if __name__ == '__main__':
unittest.main()
| 27.866667
| 71
| 0.704306
| 242
| 2,090
| 5.797521
| 0.31405
| 0.068425
| 0.071276
| 0.10335
| 0.385602
| 0.292231
| 0.292231
| 0.292231
| 0.292231
| 0.292231
| 0
| 0.018811
| 0.211483
| 2,090
| 74
| 72
| 28.243243
| 0.832524
| 0.04689
| 0
| 0.269231
| 0
| 0
| 0.044814
| 0.029204
| 0
| 0
| 0
| 0
| 0.019231
| 1
| 0.115385
| false
| 0.019231
| 0.057692
| 0
| 0.211538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e800c566ae2b10b4aec8821bd3d2096d44b1f7e9
| 6,403
|
py
|
Python
|
py_ser_freeastro/core.py
|
nww2007/py_ser_freeastro
|
5806cf83316f48a6db0abe4a88e4485fc04a1b4d
|
[
"MIT"
] | null | null | null |
py_ser_freeastro/core.py
|
nww2007/py_ser_freeastro
|
5806cf83316f48a6db0abe4a88e4485fc04a1b4d
|
[
"MIT"
] | null | null | null |
py_ser_freeastro/core.py
|
nww2007/py_ser_freeastro
|
5806cf83316f48a6db0abe4a88e4485fc04a1b4d
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python3
# vim:fileencoding=UTF-8
# -*- coding: UTF-8 -*-
"""
Created on 15 June 2019.
@author: Vladimir Nekrasov nww2007@mail.ru
"""
import sys
import struct
import numpy as np
from progress.bar import Bar
import logging
logging.basicConfig(format = u'%(filename)s:%(lineno)d: %(levelname)-8s [%(asctime)s] %(message)s', level = logging.DEBUG, stream=sys.stdout)
# class ser(np.array):
class ser(object):
"""
A set of methods for working with a set of images in the SER format.
"""
def __init__(self, fname):
"""
Download information from file.
"""
# super.__init__()
# luids
self.MONO = 0
self.BAYER_RGGB = 8
self.BAYER_GRBG = 9
self.BAYER_GBRG = 10
self.BAYER_BGGR = 11
self.BAYER_CYYM = 16
self.BAYER_YCMY = 17
self.BAYER_YMCY = 18
self.BAYER_MYYC = 19
self.RGB = 100
self.BGR = 101
self.fname = fname
with open(self.fname, 'rb') as fd:
# Download information from the header.
self.header = fd.read(178)
self.parse_header()
# Download images.
self.frames = np.zeros((self.framecount, self.imageheight, self.imagewidth))
bar = Bar('Downloading', max=self.framecount)
for frame in range(self.framecount):
# for frame in range(1):
bar.next()
t_frame = fd.read(self.imageheight * self.imagewidth * self.pixeldepthperplane//8)
for line in range(self.imageheight):
for pixel in range(self.imagewidth):
index = (line * self.imagewidth + pixel) * 2
self.frames[frame][line][pixel] = struct.unpack('<H', t_frame[index:index+2])[0]
bar.finish()
# Download the trailer
self.trailer = fd.read(self.framecount * 8)
self.parse_trailer()
def parse_header(self):
"""
Parse the title.
"""
self.fileid = self.header[0:14]
self.luid = struct.unpack('<i', self.header[14:18])[0]
self.colorid = struct.unpack('<i', self.header[18:22])[0]
self.littleendian_FALSE = 0
self.littleendian_TRUE = 1
self.littleendian = struct.unpack('<i', self.header[22:26])[0]
self.imagewidth = struct.unpack('<i', self.header[26:30])[0]
self.imageheight = struct.unpack('<i', self.header[30:34])[0]
self.pixeldepthperplane = struct.unpack('<i', self.header[34:38])[0]
self.framecount = struct.unpack('<i', self.header[38:42])[0]
self.observer = self.header[42:82]
self.telescope = self.header[82:122]
self.datetime = struct.unpack('<q', self.header[122:130])[0]
self.datetime_utc = struct.unpack('<q', self.header[130:138])[0]
# logging.info('{0}x{1}'.format(self.imagewidth, self.imageheight))
def parse_trailer(self):
"""
Parse the trailer
"""
for i in range(0, self.framecount*8, 8):
tuli = (struct.unpack('<Q', self.trailer[i:i+8])[0])
def main(argv):
logging.info('%s started.\n' % argv[0])
fn = './images/ASICAP_2019-05-10_01_43_36_523.SER'
frames = ser(fn)
# logging.debug(type(frames))
# logging.debug(type(object))
# # darks_fn = './images/ASICAP_2019-05-10_02_12_00_621.SER'
# # offsets_fn = './images/ASICAP_2019-05-10_02_30_47_294.SER'
#
# # frames = ser.ser()
# # frames.read(darks_fn)
# # frames.read(lights_fn)
# # ser_fr = serialise_frames(frames)
# # logging.debug('std1={}'.format(ser_fr.std()))
# # hist_fr = get_hist(ser_fr)
# # plt.plot(hist_fr)
# # plt.grid()
# # plt.show()
#
# fnames = [
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_34_52_584.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_36_05_343.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_34_373.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_47_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_37_58_784.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_06_703.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_17_476.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_27_330.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_36_623.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_39_48_239.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_20_816.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_32_118.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_47_796.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_40_59_999.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_10_321.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_41_41_276.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_07_956.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_19_287.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_31_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_42_43_981.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_07_152.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_43_36_180.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_01_167.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_33_214.SER',
# '/home/nww/ASICAP/tmp/ASICAP_2019-05-25_15_44_58_952.SER',
# ]
#
# print('{};{};{};{};{}'.format('File', 'Temperature', 'Exposure', 'Gain', 'std'))
# for fn in fnames:
# print('{}'.format(fn), flush=True, file=sys.stderr)
# frames = ser.ser()
# frames.read(fn)
# ser_fr = serialise_frames(frames)
#
# config = configparser.ConfigParser()
# config.read(fn + '.txt')
#
# print('{};{};{};{};{}'.format(fn, config['ZWO ASI120MC']['temperature'], config['ZWO ASI120MC']['exposure'], config['ZWO ASI120MC']['gain'], ser_fr.std()))
logging.info('%s finished.\n' % argv[0])
return 0
if __name__ == '__main__':
sys.exit(main(sys.argv))
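# --- Editor's sketch (not part of the original file): the little-endian field decoding
# --- used by parse_header() and the frame loop above, shown on hand-made bytes; the
# --- numeric values are arbitrary examples.
import struct
_hdr_field = struct.pack('<i', 1936)   # e.g. a 32-bit ImageWidth header field
_pixel = struct.pack('<H', 512)        # one 16-bit pixel sample
print(struct.unpack('<i', _hdr_field)[0], struct.unpack('<H', _pixel)[0])  # 1936 512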
| 38.341317
| 165
| 0.580353
| 895
| 6,403
| 3.943017
| 0.264804
| 0.079343
| 0.095211
| 0.113347
| 0.386795
| 0.31567
| 0.277132
| 0.263531
| 0.263531
| 0.254463
| 0
| 0.127188
| 0.259566
| 6,403
| 166
| 166
| 38.572289
| 0.617169
| 0.509449
| 0
| 0
| 0
| 0.016393
| 0.060209
| 0.022536
| 0
| 0
| 0
| 0
| 0
| 1
| 0.065574
| false
| 0
| 0.081967
| 0
| 0.180328
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e801eb5a0287658d171711d53067b93a3c272ccf
| 10,915
|
py
|
Python
|
sgdml_dataset_generation/readers/fchk.py
|
humeniuka/sGDML_dataset_generation
|
a99f792b6aac7ff869ebcd1bd7a7226ca81f43ee
|
[
"MIT"
] | null | null | null |
sgdml_dataset_generation/readers/fchk.py
|
humeniuka/sGDML_dataset_generation
|
a99f792b6aac7ff869ebcd1bd7a7226ca81f43ee
|
[
"MIT"
] | null | null | null |
sgdml_dataset_generation/readers/fchk.py
|
humeniuka/sGDML_dataset_generation
|
a99f792b6aac7ff869ebcd1bd7a7226ca81f43ee
|
[
"MIT"
] | null | null | null |
#!/usr/bin/env python
# -*- coding: utf-8 -*-
__all__ = ["FormattedCheckpointFile"]
# # Imports
import numpy as np
import scipy.linalg as sla
from collections import OrderedDict
import re
import logging
# # Local Imports
from sgdml_dataset_generation import units
from sgdml_dataset_generation.units import hbar
# # Logging
logger = logging.getLogger(__name__)
logging.basicConfig(format="[%(module)-12s] %(message)s", level=logging.INFO)
class FormattedCheckpointFile(object):
"""
reads all fields from formatted checkpoint files produced by the quantum chemistry
programs Gaussian 16 and QChem.
Parameters
----------
f : File
file handle opened for reading a formatted checkpoint file
The user has to ensure the file handle is opened and closed at the end.
The fields of the checkpoint file can be accessed by their names (see example below).
Array fields are stored as 1D numpy arrays of float (R) or integer (I) type.
Example
-------
>>> with open("freq.fchk") as f:
>>> fchk = FormattedCheckpointFile(f)
>>> print(fchk["Number of atoms"])
"""
def __init__(self, f):
self.filename = f.name
self.data = OrderedDict()
        # accumulate all lines belonging to the same field (without newlines)
acc = ""
dtype = None
for line_number, line in enumerate(f.readlines()):
# count lines starting from 1
line_number += 1
# The name of a field starts in the first column and with a capital letter
if re.match(r"^[A-Z].*", line):
if len(acc) > 0 and not dtype is None:
# All lines belonging to the previous field must have been read,
# so we convert it to a numpy array.
try:
if dtype == str:
self.data[field] = acc
else:
# numerical types
array = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(array) == count
self.data[field] = array
except (ValueError,AssertionError) as err:
logger.warning(f"A problem occurred reading field `{field}` in line {line_number:10} in {f.name} .")
logger.warning(err)
self.data[field] = np.zeros(count, dtype=dtype)
# reset accumulator
acc = ""
try:
if len(line) < 43:
# skip title and method
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}`")
continue
# First 43 columns are reserved for the field name
field = line[0:43].strip()
logger.debug(f"field `{field}` encountered")
# Colum 43 contains a character indicating the data type:
# I -> integer
# R -> real
type_char = line[43]
if type_char == "I":
dtype = int
elif type_char == "R":
dtype = float
elif type_char == "C":
dtype = str
else:
dtype = None
# skip lines without I or R data type markers
logger.debug(f"skipping line {line_number:10} in {f.name}: `{line.strip()}` .")
continue
# If column 47-48 contain the string "N=", we are dealing with an array
# and the last integer indicates the number of elements
if line[47:49] == "N=":
count = int(line[49:])
else:
# scalar value
self.data[field] = dtype(line[49:])
except Exception as err:
logger.error(f"An error occurred while reading line {line_number:10} in {f.name} .")
raise err
else:
acc += " " + line
# read last field
if len(acc) > 0:
self.data[field] = np.fromstring(acc, dtype=dtype, sep=" ")
assert len(self.data[field]) == count
def __getitem__(self, key):
"""
access data fields by their names
Parameters
----------
key : str
name of field that should be retrieved (e.g. 'Number of atoms')
Returns
-------
field : float, int or ndarray
a KeyError is raised if the field is not present in the formatted checkpoint file
"""
return self.data[key]
def keys(self):
"""
list names of all fields present in the formatted checkpoint file
Returns
-------
keys : list of str
field names
"""
return self.data.keys()
def harmonic_approximation(self):
"""
extract the position, gradient and Hessian of the potential energy in cartesian coordinates
The potential is expanded to second order around the current position x0:
E(x) = E(x0) + grad(E)^T.(x-x0) + 1/2 (x-x0)^T . hess(E) . (x-x0)
A frequency calculation has to be present in the formatted checkpoint file.
The frequency calculation should be performed in a separate Gaussian 16 job using the
following route line for the ground state calculation:
#P functional/basis Freq NoSymm IOp(7/32=5)
and the following route line for an excited state frequency calculation:
#P functional/basis TD=(Nstates=2, Root=1, NAC) Freq NoSymm IOp(7/32=5)
Returns
-------
pos : ndarray (3*nat,)
cartesian coordinates x0
energy : ndarray (1,)
total energy E(x0) of state of interest (in Hartree)
grad : ndarray (3*nat,)
cartesian gradient dE/dx(x0) (in Hartree/bohr)
hess : ndarray (3*nat,3*nat)
cartesian force constants d^2E/(dxdx)(x0) (in Hartree/bohr^2)
"""
try:
nat = self.data["Number of atoms"]
# total energy of state of interest
energy = np.array(self.data["Total Energy"])
# geometry
pos = self.data["Current cartesian coordinates"]
# cartesian gradient
grad = self.data["Cartesian Gradient"]
# Only the lower triangular part of the Hessian is stored.
hess = np.zeros((3*nat,3*nat))
row, col = np.tril_indices(3*nat)
hess[row,col] = self.data["Cartesian Force Constants"]
# Hessian is symmetric, H^T = H
hess[col,row] = hess[row,col]
except KeyError as err:
logger.error(f"A required field could not be found in formatted checkpoint file {self.filename} .")
raise err
return pos, energy, grad, hess
def nonadiabatic_coupling(self):
"""
extract non-adiabatic coupling vector between ground and excited state (Root=I), if present.
Only Gaussian 16 saves the NAC vector in the checkpoint file, while QChem writes it to the output file.
Returns
-------
nac : ndarray (3*nat,)
1st order derivative coupling <0|d/dx|I>
"""
try:
nac = self.data["Nonadiabatic coupling"]
except KeyError as err:
logger.error(f"The field `Nonadiabatic coupling` could not be found in the formatted checkpoint file {self.filename} .")
raise err
if (nac == 0.0).all():
logger.warning(f"All components of non-adiabatic coupling vector in {self.filename} are zero.")
return nac
def vibrational_groundstate(self, zero_threshold=100.0):
"""
The vibrational ground state belonging to the harmonic potential is given by
            psi_0(x) = (det(Gamma_0) / pi^N)^{1/4} * exp{ -1/2 (x-x_0)^T Gamma_0 (x-x_0) }
provided that x0 is the minimum. This function computes the width parameter matrix
Gamma_0 from the Hessian at the minimum.
Optional
--------
zero_threshold : float > 0
threshold for considering normal mode frequencies as zero (in cm-1)
Returns
-------
x0 : ndarray (3*nat,)
center of Gaussian, in cartesian coordinates (bohr)
Gamma0 : ndarray (3*nat,3*nat)
symmetric, positive semi-definite matrix of width parameters (bohr^{-2})
en_zpt : float
zero-point energy (Hartree)
"""
x0, energy, grad, hess = self.harmonic_approximation()
mass = self.masses()
# diagonals of M^{1/2} and M^{-1/2}
msq = np.sqrt(mass)
imsq = 1.0/msq
# mass-weighted Hessian H
hess_mwc = np.einsum('i,ij,j->ij', imsq, hess, imsq)
# diagonalize symmetric H = V.diag(w).V^T
w2,V = sla.eigh(hess_mwc)
# vibrational energies
w = np.sqrt(w2)
# zero-point energy
en_zpt = 0.5 * hbar * np.sum(w)
logger.info("Normal mode frequencies (cm-1)")
logger.info(w*units.hartree_to_wavenumbers)
if not (w * units.hartree_to_wavenumbers > zero_threshold).all():
logger.warning("At a minimum all frequencies should be positive, found imaginary ones.")
# select non-zero vibrational modes
non_zero = (w * units.hartree_to_wavenumbers) > zero_threshold
# number of non singular dimensions
num_non_zero = np.count_nonzero( non_zero )
dim = x0.shape[0]
logger.info(f"number of zero modes : {dim - num_non_zero}")
# L = hbar^{-1/2} M^{1/2} V w^{1/2}
L = hbar**(-1/2) * np.einsum('i,ij,j->ij', msq, V[:,non_zero], np.sqrt(w[non_zero]))
# Gamma_0 = L . L^T
Gamma_0 = np.einsum('ij,kj->ik', L, L)
return x0, Gamma_0, en_zpt
def masses(self):
"""
atomic masses in a.u.
Returns
-------
masses : ndarray (3*nat,)
masses for each cartesian coordinate in multiples of electron mass
"""
mass = self.data["Real atomic weights"] * units.amu_to_aumass
mass = np.repeat(mass, 3)
return mass
def atomic_numbers(self):
"""
atomic numbers
Returns
-------
numbers : ndarray(nat,)
atomic number for each atom
"""
return self.data["Atomic numbers"]
| 38.298246
| 132
| 0.536143
| 1,316
| 10,915
| 4.396657
| 0.287994
| 0.023505
| 0.013308
| 0.011061
| 0.144314
| 0.109402
| 0.080539
| 0.033529
| 0.02074
| 0.02074
| 0
| 0.019153
| 0.368575
| 10,915
| 284
| 133
| 38.433099
| 0.820371
| 0.40055
| 0
| 0.166667
| 0
| 0.017544
| 0.163988
| 0.003974
| 0
| 0
| 0
| 0
| 0.026316
| 1
| 0.070175
| false
| 0
| 0.061404
| 0
| 0.201754
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e803805e9e6f0689766fc36aba000586323cb80c
| 3,097
|
py
|
Python
|
fuzzywuzzy/process.py
|
rhasspy/fuzzywuzzy
|
e5b486c756b392481ec8e1382eedce280e56fd69
|
[
"MIT"
] | 3
|
2015-10-18T18:00:08.000Z
|
2021-03-23T06:42:02.000Z
|
fuzzywuzzy/process.py
|
rhasspy/fuzzywuzzy
|
e5b486c756b392481ec8e1382eedce280e56fd69
|
[
"MIT"
] | 20
|
2015-01-16T18:46:53.000Z
|
2016-02-18T22:01:00.000Z
|
process.py
|
agile-geoscience/fuzzylas
|
0298292878d7fb6c0a788a1a2a21b543f49432bd
|
[
"Apache-2.0"
] | 10
|
2015-08-11T10:12:56.000Z
|
2022-02-20T14:45:50.000Z
|
#!/usr/bin/env python
# encoding: utf-8
"""
process.py
Copyright (c) 2011 Adam Cohen
Permission is hereby granted, free of charge, to any person obtaining
a copy of this software and associated documentation files (the
"Software"), to deal in the Software without restriction, including
without limitation the rights to use, copy, modify, merge, publish,
distribute, sublicense, and/or sell copies of the Software, and to
permit persons to whom the Software is furnished to do so, subject to
the following conditions:
The above copyright notice and this permission notice shall be
included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND,
EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF
MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND
NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE
LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION
OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION
WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
"""
from fuzz import *
import sys, os
import utils
#######################################
# Find Best Matchs In List Of Choices #
#######################################
def extract(query, choices, processor=None, scorer=None, limit=5):
# choices = a list of objects we are attempting to extract values from
# query = an object representing the thing we want to find
# scorer f(OBJ, QUERY) --> INT. We will return the objects with the highest score
# by default, we use score.WRatio() and both OBJ and QUERY should be strings
# processor f(OBJ_A) --> OBJ_B, where the output is an input to scorer
# for example, "processor = lambda x: x[0]" would return the first element in a collection x (of, say, strings)
# this would then be used in the scoring collection
if choices is None or len(choices) == 0:
return []
# default, turn whatever the choice is into a string
if processor is None:
processor = lambda x: utils.asciidammit(x)
# default: wratio
if scorer is None:
scorer = WRatio
sl = list()
for choice in choices:
processed = processor(choice)
score = scorer(query, processed)
tuple = (choice, score)
sl.append(tuple)
sl.sort(key=lambda i: -1*i[1])
return sl[:limit]
##########################
# Find Single Best Match #
##########################
def extractOne(query, choices, processor=None, scorer=None, score_cutoff=0):
# convenience method which returns the single best choice
# optional parameter: score_cutoff.
# If the best choice has a score of less than score_cutoff
# we will return none (intuition: not a good enough match)
best_list = extract(query, choices, processor, scorer, limit=1)
if len(best_list) > 0:
best = best_list[0]
if best[1] > score_cutoff:
return best
else:
return None
else:
return None
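# --- Editor's sketch (not part of the original file): how extract()/extractOne() above
# --- would typically be called; the query and choices are made-up example strings.
# choices = ["new york jets", "new york giants", "dallas cowboys"]
# print(extract("new york", choices, limit=2))            # two best (choice, score) pairs
# print(extractOne("cowboys", choices, score_cutoff=50))  # best match or None below cutoff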
| 35.193182
| 119
| 0.668389
| 440
| 3,097
| 4.684091
| 0.438636
| 0.042698
| 0.030568
| 0.027171
| 0.033964
| 0.033964
| 0
| 0
| 0
| 0
| 0
| 0.006271
| 0.22764
| 3,097
| 87
| 120
| 35.597701
| 0.855351
| 0.635777
| 0
| 0.142857
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.071429
| false
| 0
| 0.107143
| 0
| 0.357143
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e803dd122dcc777403d95c61024e9847e1e285a3
| 2,063
|
py
|
Python
|
day03/day03.py
|
robfalck/AoC2017
|
fa19f3fb42d979b60888a1954bea571c9d4ee735
|
[
"Apache-2.0"
] | null | null | null |
day03/day03.py
|
robfalck/AoC2017
|
fa19f3fb42d979b60888a1954bea571c9d4ee735
|
[
"Apache-2.0"
] | null | null | null |
day03/day03.py
|
robfalck/AoC2017
|
fa19f3fb42d979b60888a1954bea571c9d4ee735
|
[
"Apache-2.0"
] | null | null | null |
from __future__ import print_function, division, absolute_import
import numpy as np
INPUT = 265149
def part1(number):
skip = 2
d = 1
row = None
col = None
for shell_idx in range(1, 10000):
size = shell_idx * 2 + 1
a = d + skip
b = a + skip
c = b + skip
d = c + skip
skip = skip + 2
if a <= number <= b:
# top
col = -(size // 2) + (b - number)
row = size // 2
elif b <= number <= c:
# left
row = size // 2 - (c - number)
col = -(size // 2)
elif c <= number <= d:
# bottom
row = -(size // 2)
col = row + (number - c)
elif number < a:
# right
col = size // 2
row = col - (a - number)
if row is not None and col is not None:
manh_dist = abs(row) + abs(col)
return manh_dist
def part2(number):
"""
A brute-force approach to part 2.
"""
map = np.zeros((11, 11), dtype=int)
row = 5
col = 5
map[row, col] = 1
heading = 'RIGHT'
dcol = 1
drow = 0
nsteps = 70
for i in range(nsteps):
row += drow
col += dcol
sum_at_next = map[row-1:row+2, col-1:col+2].sum()
map[row, col] = sum_at_next
if sum_at_next > number:
return sum_at_next
# Determine if we need to change heading
if heading == 'RIGHT' and map[row-1, col] == 0:
heading = 'UP'
drow = -1
dcol = 0
elif heading == 'UP' and map[row, col-1] == 0:
heading = 'LEFT'
drow = 0
dcol = -1
elif heading == 'LEFT' and map[row+1, col] == 0:
heading = 'DOWN'
drow = 1
dcol = 0
elif heading == 'DOWN' and map[row, col+1] == 0:
heading = 'RIGHT'
drow = 0
dcol = 1
if __name__ == '__main__':
print(part1(number=INPUT))
print(part2(number=INPUT))
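# --- Editor's sketch (not part of the original file): quick self-checks using the
# --- commonly quoted Advent of Code 2017 day-3 examples, treated here as assumed
# --- reference values rather than outputs verified against this implementation.
# assert part1(12) == 3
# assert part1(23) == 2
# assert part1(1024) == 31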
| 21.946809
| 64
| 0.448376
| 268
| 2,063
| 3.354478
| 0.279851
| 0.046719
| 0.040044
| 0.03337
| 0.140156
| 0.140156
| 0.093437
| 0
| 0
| 0
| 0
| 0.050644
| 0.435288
| 2,063
| 93
| 65
| 22.182796
| 0.72103
| 0.046049
| 0
| 0.138462
| 0
| 0
| 0.022074
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.030769
| false
| 0
| 0.030769
| 0
| 0.092308
| 0.046154
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e805511a04665665499fc07f9e4ac3855c825235
| 1,801
|
py
|
Python
|
ahrs/common/geometry.py
|
jaluebbe/ahrs
|
4b4a33b1006e0d455a71ac8379a2697202361758
|
[
"MIT"
] | 184
|
2019-09-06T07:58:52.000Z
|
2022-03-31T04:27:09.000Z
|
ahrs/common/geometry.py
|
geoKinga/ahrs
|
87f9210cfcf6c545d86ae8588a93f012020164ee
|
[
"MIT"
] | 48
|
2019-11-13T15:42:46.000Z
|
2022-03-31T23:53:53.000Z
|
ahrs/common/geometry.py
|
geoKinga/ahrs
|
87f9210cfcf6c545d86ae8588a93f012020164ee
|
[
"MIT"
] | 34
|
2019-12-19T16:22:00.000Z
|
2022-03-14T09:51:50.000Z
|
# -*- coding: utf-8 -*-
"""
Geometrical functions
---------------------
References
----------
.. [W1] Wikipedia: https://de.wikipedia.org/wiki/Ellipse#Ellipsengleichung_(Parameterform)
.. [WAE] Wolfram Alpha: Ellipse. (http://mathworld.wolfram.com/Ellipse.html)
"""
import numpy as np
from typing import Union
def circle(center: Union[list, np.ndarray], radius: float = 1.0, num_points: int = 20) -> np.ndarray:
"""
Build a circle with the given characteristics.
Parameters
----------
    center : array-like
        2D Coordinates of center.
    radius : float
Radius of the circle.
num_points : int
Number of points to build.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the circle.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
x = center[0] + radius*np.cos(R)
y = center[1] + radius*np.sin(R)
return np.array([x, y]).transpose()
def ellipse(center: Union[list, np.ndarray], phi: float, axes: Union[list, np.ndarray], num_points: int = 20) -> np.ndarray:
"""
Build an ellipse with the given characteristics.
Parameters
----------
center : array-like
2D Coordinates of center.
phi : float
Angle, in radians, of the major axis w.r.t. the X-axis
axes : array-like
Lengths of major and minor axes, respectively.
num_points : int
Number of points. Defaults to 20.
Returns
-------
points : numpy.ndarray
N-by-2 array with the coordinates of the ellipse.
"""
R = np.linspace(0.0, 2.0*np.pi, num_points+1)
a, b = axes
x = center[0] + a*np.cos(R)*np.cos(phi) - b*np.sin(R)*np.sin(phi)
y = center[1] + a*np.cos(R)*np.sin(phi) + b*np.sin(R)*np.cos(phi)
return np.array([x, y]).transpose()
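# --- Editor's sketch (not part of the original file): calling the helpers above with
# --- made-up parameters to show the returned point-array shapes.
_pts_circle = circle([0.0, 0.0], radius=2.0, num_points=8)
_pts_ellipse = ellipse([1.0, -1.0], phi=np.pi/6, axes=[3.0, 1.0], num_points=8)
print(_pts_circle.shape, _pts_ellipse.shape)  # (9, 2) (9, 2): num_points+1 rows, x/y columns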
| 27.287879
| 124
| 0.599667
| 264
| 1,801
| 4.064394
| 0.325758
| 0.050326
| 0.044734
| 0.050326
| 0.514445
| 0.38397
| 0.212488
| 0.160298
| 0.160298
| 0.160298
| 0
| 0.020319
| 0.23487
| 1,801
| 65
| 125
| 27.707692
| 0.758345
| 0.533037
| 0
| 0.307692
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.153846
| false
| 0
| 0.153846
| 0
| 0.461538
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e8072be9245c30cff262ca9e32de353f2b9c7c66
| 4,435
|
py
|
Python
|
htdocs/plotting/auto/scripts100/p116.py
|
jamayfieldjr/iem
|
275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a
|
[
"MIT"
] | 1
|
2019-10-07T17:01:24.000Z
|
2019-10-07T17:01:24.000Z
|
htdocs/plotting/auto/scripts100/p116.py
|
jamayfieldjr/iem
|
275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a
|
[
"MIT"
] | null | null | null |
htdocs/plotting/auto/scripts100/p116.py
|
jamayfieldjr/iem
|
275b77a65f3b12e26e6cbdb230786b9c7d2b9c9a
|
[
"MIT"
] | null | null | null |
"""Monthly HDD/CDD Totals."""
import datetime
from pandas.io.sql import read_sql
from pyiem.plot.use_agg import plt
from pyiem.util import get_dbconn, get_autoplot_context
from pyiem.exceptions import NoDataFound
PDICT = {'cdd': 'Cooling Degree Days',
'hdd': 'Heating Degree Days'}
def get_description():
""" Return a dict describing how to call this plotter """
desc = dict()
desc['data'] = True
desc['report'] = True
desc['description'] = """This chart presents monthly cooling degree days
or heating degree days for a 20 year period of your choice. The 20 year
limit is for plot usability only, the data download has all available
years contained."""
y20 = datetime.date.today().year - 19
desc['arguments'] = [
dict(type='station', name='station', default='IATDSM',
label='Select Station', network='IACLIMATE'),
dict(type='select', options=PDICT, default='cdd', name='var',
label='Select Variable'),
dict(type='year', name='syear', default=y20,
label='For plotting, year to start 20 years of plot'),
]
return desc
def plotter(fdict):
""" Go """
import seaborn as sns
ctx = get_autoplot_context(fdict, get_description())
pgconn = get_dbconn('coop')
station = ctx['station']
varname = ctx['var']
table = "alldata_%s" % (station[:2], )
df = read_sql("""
SELECT year, month, sum(precip) as sum_precip,
avg(high) as avg_high,
avg(low) as avg_low,
sum(cdd(high,low,60)) as cdd60,
sum(cdd(high,low,65)) as cdd65,
sum(hdd(high,low,60)) as hdd60,
sum(hdd(high,low,65)) as hdd65,
sum(case when precip >= 0.01 then 1 else 0 end) as rain_days,
sum(case when snow >= 0.1 then 1 else 0 end) as snow_days
from """+table+""" WHERE station = %s GROUP by year, month
""", pgconn, params=(station,), index_col=None)
if df.empty:
raise NoDataFound("No Data Found.")
df['monthdate'] = df[['year', 'month']].apply(
lambda x: datetime.date(x[0], x[1], 1), axis=1)
df.set_index('monthdate', inplace=True)
res = """\
# IEM Climodat https://mesonet.agron.iastate.edu/climodat/
# Report Generated: %s
# Climate Record: %s -> %s
# Site Information: [%s] %s
# Contact Information: Daryl Herzmann akrherz@iastate.edu 515.294.5978
""" % (datetime.date.today().strftime("%d %b %Y"),
ctx['_nt'].sts[station]['archive_begin'].date(),
datetime.date.today(), station, ctx['_nt'].sts[station]['name'])
res += """# THESE ARE THE MONTHLY %s (base=65) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (PDICT[varname].upper(), station)
second = """# THESE ARE THE MONTHLY %s (base=60) FOR STATION %s
YEAR JAN FEB MAR APR MAY JUN JUL AUG SEP \
OCT NOV DEC
""" % (
PDICT[varname].upper(), station)
minyear = df['year'].min()
maxyear = df['year'].max()
for yr in range(minyear, maxyear + 1):
res += ("%4i" % (yr,))
second += "%4i" % (yr,)
for mo in range(1, 13):
ts = datetime.date(yr, mo, 1)
if ts not in df.index:
res += ("%7s" % ("M",))
second += "%7s" % ("M",)
continue
row = df.loc[ts]
res += ("%7.0f" % (row[varname+"65"],))
second += "%7.0f" % (row[varname+"60"],)
res += ("\n")
second += "\n"
res += ("MEAN")
second += "MEAN"
for mo in range(1, 13):
df2 = df[df['month'] == mo]
res += ("%7.0f" % (df2[varname+"65"].mean(), ))
second += "%7.0f" % (df2[varname+"60"].mean(), )
res += ("\n")
second += "\n"
res += second
y1 = int(fdict.get('syear', 1990))
fig, ax = plt.subplots(1, 1, figsize=(8., 6.))
fig.text(0.5, 0.95, "[%s] %s (%s-%s)" % (
station, ctx['_nt'].sts[station]['name'], y1, y1 + 20), ha='center',
fontsize=16)
ax.set_title(r"%s base=60$^\circ$F" % (PDICT[varname], ))
filtered = df[(df['year'] >= y1) & (df['year'] <= (y1 + 20))]
df2 = filtered[
['month', 'year', varname + '60']
].pivot('year', 'month', varname + '60')
sns.heatmap(df2, annot=True, fmt=".0f", linewidths=.5, ax=ax)
return fig, df, res
if __name__ == '__main__':
plotter(dict(syear=1990))
| 34.92126
| 76
| 0.555355
| 606
| 4,435
| 4.011551
| 0.384488
| 0.024681
| 0.020979
| 0.018511
| 0.138215
| 0.126697
| 0.061703
| 0.061703
| 0.061703
| 0.061703
| 0
| 0.038426
| 0.266516
| 4,435
| 126
| 77
| 35.198413
| 0.708884
| 0.017587
| 0
| 0.09434
| 0
| 0
| 0.378889
| 0.019359
| 0
| 0
| 0
| 0
| 0
| 1
| 0.018868
| false
| 0
| 0.056604
| 0
| 0.09434
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e807b65f64a7deff0b619d067868e319ecf01061
| 12,502
|
py
|
Python
|
examples/horovod/ray_torch_shuffle.py
|
krfricke/ray_shuffling_data_loader
|
b238871d45218c655cd0fcd78b8bf2a3940087f9
|
[
"Apache-2.0"
] | 16
|
2021-05-13T08:03:03.000Z
|
2021-09-30T00:20:01.000Z
|
examples/horovod/ray_torch_shuffle.py
|
krfricke/ray_shuffling_data_loader
|
b238871d45218c655cd0fcd78b8bf2a3940087f9
|
[
"Apache-2.0"
] | 12
|
2021-05-04T22:18:01.000Z
|
2021-07-14T12:10:40.000Z
|
examples/horovod/ray_torch_shuffle.py
|
krfricke/ray_shuffling_data_loader
|
b238871d45218c655cd0fcd78b8bf2a3940087f9
|
[
"Apache-2.0"
] | 5
|
2021-05-18T02:57:50.000Z
|
2021-07-01T11:23:05.000Z
|
import os
import pickle
import time
import timeit
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import numpy as np
import torch
import tempfile
import horovod.torch as hvd
from horovod.ray import RayExecutor
from ray_shuffling_data_loader.torch_dataset import (TorchShufflingDataset)
from ray_shuffling_data_loader.data_generation import (generate_data,
DATA_SPEC)
import argparse
DEFAULT_DATA_DIR = "s3://shuffling-data-loader-benchmarks/data/"
numpy_to_torch_dtype = {
np.bool_: torch.bool,  # np.bool was deprecated/removed in newer NumPy; np.bool_ is the scalar type
np.uint8: torch.uint8,
np.int8: torch.int8,
np.int16: torch.int16,
np.int32: torch.int32,
np.int64: torch.int64,
np.float16: torch.float16,
np.float32: torch.float32,
np.float64: torch.float64,
np.complex64: torch.complex64,
np.complex128: torch.complex128
}
# Training settings
parser = argparse.ArgumentParser(description="PyTorch MNIST Example")
parser.add_argument(
"--batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for training (default: 250000)")
parser.add_argument(
"--test-batch-size",
type=int,
default=250000,
metavar="N",
help="input batch size for testing (default: 250000)")
parser.add_argument(
"--epochs",
type=int,
default=10,
metavar="N",
help="number of epochs to train (default: 10)")
parser.add_argument(
"--lr",
type=float,
default=0.01,
metavar="LR",
help="learning rate (default: 0.01)")
parser.add_argument(
"--momentum",
type=float,
default=0.5,
metavar="M",
help="SGD momentum (default: 0.5)")
parser.add_argument(
"--no-cuda",
action="store_true",
default=False,
help="disables CUDA training")
parser.add_argument(
"--seed",
type=int,
default=42,
metavar="S",
help="random seed (default: 42)")
parser.add_argument(
"--log-interval",
type=int,
default=10,
metavar="N",
help=("how many batches to wait before logging training "
"status"))
parser.add_argument(
"--fp16-allreduce",
action="store_true",
default=False,
help="use fp16 compression during allreduce")
parser.add_argument(
"--use-adasum",
action="store_true",
default=False,
help="use adasum algorithm to do reduction")
parser.add_argument(
"--gradient-predivide-factor",
type=float,
default=1.0,
help=("apply gradient predivide factor in optimizer "
"(default: 1.0)"))
parser.add_argument("--num-workers", type=int, default=None)
parser.add_argument("--num-hosts", type=int, default=None)
parser.add_argument("--num-workers-per-host", type=int, default=None)
parser.add_argument("--cpus-per-worker", type=int, default=1)
parser.add_argument("--mock-train-step-time", type=float, default=1.0)
# Synthetic training data generation settings.
parser.add_argument("--cache-files", action="store_true", default=False)
parser.add_argument("--num-rows", type=int, default=2 * (10**7))
parser.add_argument("--num-files", type=int, default=25)
parser.add_argument("--max-row-group-skew", type=float, default=0.0)
parser.add_argument("--num-row-groups-per-file", type=int, default=5)
parser.add_argument("--data-dir", type=str, default=DEFAULT_DATA_DIR)
# Shuffling data loader settings.
parser.add_argument("--num-reducers", type=int, default=32)
parser.add_argument("--max-concurrent-epochs", type=int, default=2)
parser.add_argument("--address", default="auto")
class Net(nn.Module):
def __init__(self):
super(Net, self).__init__()
self.conv1 = nn.Conv2d(1, 10, kernel_size=5)
self.conv2 = nn.Conv2d(10, 20, kernel_size=5)
self.conv2_drop = nn.Dropout2d()
self.fc1 = nn.Linear(320, 50)
self.fc2 = nn.Linear(50, 10)
def forward(self, x):
x = F.relu(F.max_pool2d(self.conv1(x), 2))
x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2))
x = x.view(-1, 320)
x = F.relu(self.fc1(x))
x = F.dropout(x, training=self.training)
x = self.fc2(x)
return F.log_softmax(x, dim=1)  # explicit dim; the implicit dim is deprecated in newer PyTorch
def train_main(args, filenames):
# Horovod: initialize library.
hvd.init()
torch.manual_seed(args.seed)
if torch.cuda.is_available() and not args.no_cuda:
# Horovod: pin GPU to local rank.
torch.cuda.set_device(hvd.local_rank())
torch.cuda.manual_seed(args.seed)
# Horovod: limit # of CPU threads to be used per worker.
torch.set_num_threads(1)
rank = hvd.rank()
train_dataset = create_dataset(
filenames,
batch_size=args.batch_size,
rank=rank,
num_epochs=args.epochs,
world_size=hvd.size(),
num_reducers=args.num_reducers,
max_concurrent_epochs=args.max_concurrent_epochs)
model = Net()
# By default, Adasum doesn't need scaling up the learning rate.
lr_scaler = hvd.size() if not args.use_adasum else 1
if torch.cuda.is_available() and not args.no_cuda:
# Move model to GPU.
model.cuda()
# If using GPU Adasum allreduce, scale learning rate by local_size.
if args.use_adasum and hvd.nccl_built():
lr_scaler = hvd.local_size()
# Horovod: scale learning rate by lr_scaler.
optimizer = optim.SGD(
model.parameters(), lr=args.lr * lr_scaler, momentum=args.momentum)
# Horovod: broadcast parameters & optimizer state.
hvd.broadcast_parameters(model.state_dict(), root_rank=0)
hvd.broadcast_optimizer_state(optimizer, root_rank=0)
# Horovod: (optional) compression algorithm.
compression = (hvd.Compression.fp16
if args.fp16_allreduce else hvd.Compression.none)
# Horovod: wrap optimizer with DistributedOptimizer.
optimizer = hvd.DistributedOptimizer(
optimizer,
named_parameters=model.named_parameters(),
compression=compression,
op=hvd.Adasum if args.use_adasum else hvd.Average,
gradient_predivide_factor=args.gradient_predivide_factor)
def _train(epoch):
model.train()
# Horovod: set epoch to sampler for shuffling.
train_dataset.set_epoch(epoch)
start_epoch = timeit.default_timer()
last_batch_time = start_epoch
batch_wait_times = []
for batch_idx, (data, target) in enumerate(train_dataset):
batch_wait_times.append(timeit.default_timer() - last_batch_time)
if torch.cuda.is_available() and not args.no_cuda:
if isinstance(data, list):
data = [t.cuda() for t in data]
target = target.cuda()
optimizer.zero_grad()
# output = model(data)
if batch_idx % args.log_interval == 0:
print(
f"Processing batch {batch_idx} in epoch {epoch} on worker "
f"{rank}.")
time.sleep(args.mock_train_step_time)
# TODO(Clark): Add worker synchronization barrier here.
# loss = F.nll_loss(output, target)
# loss.backward()
# optimizer.step()
last_batch_time = timeit.default_timer()
epoch_duration = timeit.default_timer() - start_epoch
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nEpoch {epoch}, worker {rank} stats over "
f"{len(batch_wait_times)} steps: {epoch_duration:.3f}")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
return batch_wait_times
print(f"Starting training on worker {rank}.")
batch_wait_times = []
for epoch in range(args.epochs):
batch_wait_times.extend(_train(epoch))
batch_wait_times.pop(0)
print(f"Done training on worker {rank}.")
avg_batch_wait_time = np.mean(batch_wait_times)
std_batch_wait_time = np.std(batch_wait_times)
max_batch_wait_time = np.max(batch_wait_times)
min_batch_wait_time = np.min(batch_wait_times)
print(f"\nWorker {rank} training stats over {args.epochs} epochs:")
print(f"Mean batch wait time: {avg_batch_wait_time:.3f}s +- "
f"{std_batch_wait_time}")
print(f"Max batch wait time: {max_batch_wait_time:.3f}s")
print(f"Min batch wait time: {min_batch_wait_time:.3f}s")
# TODO(Clark): Add logic to the dataset abstraction so we don't have to do
# this.
if rank == 0:
print("Waiting in rank 0 worker to let other workers consume queue...")
time.sleep(10)
print("Done waiting in rank 0 worker.")
def create_dataset(filenames, *, batch_size, rank, num_epochs, world_size,
num_reducers, max_concurrent_epochs):
print(f"Creating Torch shuffling dataset for worker {rank} with "
f"{batch_size} batch size, {num_epochs} epochs, {num_reducers} "
f"reducers, and {world_size} trainers.")
feature_columns = list(DATA_SPEC.keys())
feature_types = [
numpy_to_torch_dtype[dtype] for _, _, dtype in DATA_SPEC.values()
]
label_column = feature_columns.pop()
label_type = feature_types.pop()
return TorchShufflingDataset(
filenames,
num_epochs,
world_size,
batch_size,
rank,
num_reducers=num_reducers,
max_concurrent_epochs=max_concurrent_epochs,
feature_columns=feature_columns,
feature_types=feature_types,
label_column=label_column,
label_type=label_type)
if __name__ == "__main__":
args = parser.parse_args()
from ray_shuffling_data_loader.stats import human_readable_size
import ray
print("Connecting to Ray cluster...")
ray.init(address=args.address)
num_rows = args.num_rows
num_files = args.num_files
num_row_groups_per_file = args.num_row_groups_per_file
max_row_group_skew = args.max_row_group_skew
data_dir = args.data_dir
cache_path = os.path.join(tempfile.gettempdir(), "data_cache")
filenames = None
if args.cache_files and os.path.exists(cache_path):
try:
with open(cache_path, "rb") as f:
filenames, num_bytes = pickle.load(f)
except Exception as exc:
print(f"Cache load failed - {exc}")
if not filenames:
print(f"Generating {num_rows} rows over {num_files} files, with "
f"{num_row_groups_per_file} row groups per file and at most "
f"{100 * max_row_group_skew:.1f}% row group skew.")
filenames, num_bytes = generate_data(num_rows, num_files,
num_row_groups_per_file,
max_row_group_skew, data_dir)
if args.cache_files:
with open(cache_path, "wb") as f:
pickle.dump((filenames, num_bytes), f)
print(f"Generated {len(filenames)} files containing {num_rows} rows "
f"with {num_row_groups_per_file} row groups per file, totalling "
f"{human_readable_size(num_bytes)}.")
print("Create Ray executor")
worker_kwargs = {}
num_workers = args.num_workers
num_hosts = args.num_hosts
num_workers_per_host = args.num_workers_per_host
if num_workers is not None:
if num_hosts is not None:
raise ValueError(
"Only one of --num-workers and --num-hosts should be used.")
worker_kwargs["num_workers"] = num_workers
elif num_hosts is not None:
worker_kwargs["num_hosts"] = num_hosts
if num_workers_per_host is None:
raise ValueError("When giving --num-hosts, --num-workers-per-host "
"must also be given.")
worker_kwargs["num_workers_per_host"] = num_workers_per_host
cpus_per_worker = args.cpus_per_worker
settings = RayExecutor.create_settings(timeout_s=30)
executor = RayExecutor(
settings,
use_gpu=True,
gpus_per_worker=1,
cpus_per_worker=cpus_per_worker,
**worker_kwargs)
executor.start()
executor.run(train_main, args=[args, filenames])
executor.shutdown()
print("Done consuming batches.")
| 35.925287
| 79
| 0.654375
| 1,689
| 12,502
| 4.622262
| 0.201303
| 0.042654
| 0.054438
| 0.016396
| 0.256052
| 0.192135
| 0.170104
| 0.130652
| 0.120917
| 0.103241
| 0
| 0.017533
| 0.233563
| 12,502
| 347
| 80
| 36.028818
| 0.797224
| 0.06431
| 0
| 0.191126
| 0
| 0
| 0.201456
| 0.043854
| 0
| 0
| 0
| 0.002882
| 0
| 1
| 0.017065
| false
| 0
| 0.05802
| 0
| 0.088737
| 0.068259
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e80814f0c20b1ee061421e9446a7dafff2db52a5
| 13,051
|
py
|
Python
|
tests/test_main/test_base/tests.py
|
PitonX60/django-firebird
|
407bd5916a8ae37184d06adb3b943d6bb4f7076f
|
[
"BSD-3-Clause"
] | 51
|
2015-01-13T00:16:36.000Z
|
2022-01-28T12:18:22.000Z
|
tests/test_main/test_base/tests.py
|
PitonX60/django-firebird
|
407bd5916a8ae37184d06adb3b943d6bb4f7076f
|
[
"BSD-3-Clause"
] | 84
|
2015-01-28T19:08:22.000Z
|
2022-03-26T02:04:03.000Z
|
tests/test_main/test_base/tests.py
|
PitonX60/django-firebird
|
407bd5916a8ae37184d06adb3b943d6bb4f7076f
|
[
"BSD-3-Clause"
] | 47
|
2015-01-13T00:53:27.000Z
|
2021-08-03T04:20:31.000Z
|
# -*- coding: utf-8 -*-
from datetime import datetime, timedelta
from django.conf import settings
from django.db import connection, DatabaseError
from django.db.models import F, DateField, DateTimeField, IntegerField, TimeField, CASCADE
from django.db.models.fields.related import ForeignKey
from django.db.models.functions import (
Extract, ExtractDay, ExtractHour, ExtractMinute, ExtractMonth,
ExtractSecond, ExtractWeek, ExtractWeekDay, ExtractYear, Trunc, TruncDate,
TruncDay, TruncHour, TruncMinute, TruncMonth, TruncSecond, TruncTime,
TruncYear,
)
from django.test import TestCase, TransactionTestCase, override_settings
from django.utils import timezone
from .models import BigS, FieldsTest, Foo, Bar, DTModel
def microsecond_support(value):
return value if connection.features.supports_microsecond_precision else value.replace(microsecond=0)
def truncate_to(value, kind, tzinfo=None):
# Convert to target timezone before truncation
if tzinfo is not None:
value = value.astimezone(tzinfo)
def truncate(value, kind):
if kind == 'second':
return value.replace(microsecond=0)
if kind == 'minute':
return value.replace(second=0, microsecond=0)
if kind == 'hour':
return value.replace(minute=0, second=0, microsecond=0)
if kind == 'day':
if isinstance(value, datetime):
return value.replace(hour=0, minute=0, second=0, microsecond=0)
return value
if kind == 'month':
if isinstance(value, datetime):
return value.replace(day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(day=1)
# otherwise, truncate to year
if isinstance(value, datetime):
return value.replace(month=1, day=1, hour=0, minute=0, second=0, microsecond=0)
return value.replace(month=1, day=1)
value = truncate(value, kind)
if tzinfo is not None:
# If there was a daylight saving transition, then reset the timezone.
value = timezone.make_aware(value.replace(tzinfo=None), tzinfo)
return value
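# Illustrative example of truncate_to (not part of the original tests), assuming a naive datetime:
#   truncate_to(datetime(2015, 6, 15, 14, 30, 50), 'month') -> datetime(2015, 6, 1, 0, 0)
#   truncate_to(datetime(2015, 6, 15, 14, 30, 50), 'hour')  -> datetime(2015, 6, 15, 14, 0)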
class FirebirdTest(TestCase):
def setUp(self):
pass
def test_server_version(self):
version = connection.server_version
self.assertNotEqual(version, '')
def test_firebird_version(self):
version = connection.ops.firebird_version
self.assertNotEqual(version, [])
class DatabaseOperationsTest(TestCase):
def setUp(self):
self.ops = connection.ops
def test_get_sequence_name(self):
sq_name = self.ops.get_sequence_name('TEST')
self.assertEqual(sq_name, '"TEST_SQ"')
def test_drop_sequence_sql(self):
sql = self.ops.drop_sequence_sql('TEST')
self.assertEqual(sql, 'DROP SEQUENCE "TEST_SQ"')
def test_date_extract_sql(self):
sql = self.ops.date_extract_sql('week_day', 'DATE_FIELD')
value = "EXTRACT(WEEKDAY FROM DATE_FIELD) + 1"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('year', 'DATE_FIELD')
value = "EXTRACT(YEAR FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('month', 'DATE_FIELD')
value = "EXTRACT(MONTH FROM DATE_FIELD)"
self.assertEqual(sql, value)
sql = self.ops.date_extract_sql('day', 'DATE_FIELD')
value = "EXTRACT(DAY FROM DATE_FIELD)"
self.assertEqual(sql, value)
def test_datetime_trunc_sql(self):
sql = self.ops.datetime_trunc_sql('year', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-01-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('month', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-01 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('day', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' 00:00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('hour', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':00:00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('minute', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':00' AS TIMESTAMP)"
self.assertEqual(sql, value)
sql = self.ops.datetime_trunc_sql('second', 'DATE_FIELD', None)
value = "CAST(EXTRACT(year FROM DATE_FIELD)||'-'||EXTRACT(month FROM DATE_FIELD)||'-'||EXTRACT(day FROM DATE_FIELD)||' '||EXTRACT(hour FROM DATE_FIELD)||':'||EXTRACT(minute FROM DATE_FIELD)||':'||TRUNC(EXTRACT(second FROM DATE_FIELD)) AS TIMESTAMP)"
self.assertEqual(sql, value)
def test_time_trunc_sql(self):
sql = self.ops.time_trunc_sql('hour', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':00:00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('minute', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':00' AS TIME)"
self.assertEqual(sql, out)
sql = self.ops.time_trunc_sql('second', 'TIME_FIELD')
out = "CAST(EXTRACT(hour FROM TIME_FIELD) || ':' || EXTRACT(minute FROM TIME_FIELD) || ':' || TRUNC(EXTRACT(second FROM TIME_FIELD)) AS TIME)"
self.assertEqual(sql, out)
class DatabaseSchemaTests(TransactionTestCase):
def test_no_index_for_foreignkey(self):
"""
FirebirdSQL already creates indexes automatically for foreign keys. (#70).
"""
index_sql = connection.schema_editor()._model_indexes_sql(Bar)
self.assertEqual(index_sql, [])
def test_fk_index_creation(self):
new_field = ForeignKey(Foo, on_delete=CASCADE)
new_field.set_attributes_from_name(None)
with connection.schema_editor() as editor:
editor.add_field(
Bar,
new_field
)
# Return only the indexes that were not automatically created by the FK
indexes = editor._get_field_indexes(Bar, new_field)
self.assertEqual(indexes, [])
def test_fk_remove_issue70(self):
with connection.schema_editor() as editor:
editor.remove_field(
Bar,
Bar._meta.get_field("a")
)
self.assertRaises(DatabaseError)
class SlugFieldTests(TestCase):
def test_slugfield_max_length(self):
"""
Make sure SlugField honors max_length (#9706)
"""
bs = BigS.objects.create(s='slug' * 50)
bs = BigS.objects.get(pk=bs.pk)
self.assertEqual(bs.s, 'slug' * 50)
class DateFieldTests(TestCase):
def tests_date_interval(self):
obj = FieldsTest()
obj.pub_date = datetime.now()
obj.mod_date = obj.pub_date + timedelta(days=3)
obj.save()
objs = FieldsTest.objects.filter(mod_date__gte=F('pub_date') + timedelta(days=3)).all()
self.assertEqual(len(objs), 1)
@override_settings(USE_TZ=False)
class DateFunctionTests(TestCase):
def create_model(self, start_datetime, end_datetime):
return DTModel.objects.create(
name=start_datetime.isoformat(),
start_datetime=start_datetime, end_datetime=end_datetime,
start_date=start_datetime.date(), end_date=end_datetime.date(),
start_time=start_datetime.time(), end_time=end_datetime.time(),
duration=(end_datetime - start_datetime),
)
def test_trunc_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
msg = 'output_field must be either DateField, TimeField, or DateTimeField'
with self.assertRaisesMessage(ValueError, msg):
list(DTModel.objects.annotate(truncated=Trunc('start_datetime', 'year', output_field=IntegerField())))
with self.assertRaisesMessage(AssertionError, "'name' isn't a DateField, TimeField, or DateTimeField."):
list(DTModel.objects.annotate(truncated=Trunc('name', 'year', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'second')))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'month')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_date', 'month', output_field=DateTimeField())))
with self.assertRaisesMessage(ValueError, "Cannot truncate TimeField 'start_time' to DateTimeField"):
list(DTModel.objects.annotate(truncated=Trunc('start_time', 'second', output_field=DateTimeField())))
def test_datetime_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_datetime', kind, output_field=DateTimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime, kind)),
(truncate_to(end_datetime, kind))
],
lambda m: (m.truncated)
)
def test_date_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_date', kind, output_field=DateField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.date(), kind)),
(truncate_to(end_datetime.date(), kind))
],
lambda m: (m.truncated)
)
def test_time_kind(kind):
self.assertQuerysetEqual(
DTModel.objects.annotate(
truncated=Trunc('start_time', kind, output_field=TimeField())
).order_by('start_datetime'),
[
(truncate_to(start_datetime.time(), kind)),
(truncate_to(end_datetime.time(), kind))
],
lambda m: (m.truncated)
)
test_date_kind('year')
test_date_kind('month')
test_date_kind('day')
test_time_kind('hour')
test_time_kind('minute')
test_time_kind('second')
test_datetime_kind('year')
test_datetime_kind('month')
test_datetime_kind('day')
test_datetime_kind('hour')
test_datetime_kind('minute')
test_datetime_kind('second')
qs = DTModel.objects.filter(start_datetime__date=Trunc('start_datetime', 'day', output_field=DateField()))
self.assertEqual(qs.count(), 2)
def test_trunc_time_func(self):
start_datetime = microsecond_support(datetime(2015, 6, 15, 14, 30, 50, 321000))
end_datetime = microsecond_support(datetime(2016, 6, 15, 14, 10, 50, 123000))
if settings.USE_TZ:
start_datetime = timezone.make_aware(start_datetime, is_dst=False)
end_datetime = timezone.make_aware(end_datetime, is_dst=False)
self.create_model(start_datetime, end_datetime)
self.create_model(end_datetime, start_datetime)
self.assertQuerysetEqual(
DTModel.objects.annotate(extracted=TruncTime('start_datetime')).order_by('start_datetime'),
[
(start_datetime.time()),
(end_datetime.time()),
],
lambda m: (m.extracted)
)
self.assertEqual(DTModel.objects.filter(start_datetime__time=TruncTime('start_datetime')).count(), 2)
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date')))
with self.assertRaisesMessage(ValueError, "Cannot truncate DateField 'start_date' to TimeField"):
list(DTModel.objects.annotate(truncated=TruncTime('start_date', output_field=DateField())))
| 42.930921
| 257
| 0.64815
| 1,528
| 13,051
| 5.338351
| 0.152487
| 0.038617
| 0.039843
| 0.034326
| 0.549344
| 0.490622
| 0.46267
| 0.408851
| 0.393037
| 0.382248
| 0
| 0.014994
| 0.233469
| 13,051
| 303
| 258
| 43.072607
| 0.80038
| 0.026588
| 0
| 0.252137
| 0
| 0.029915
| 0.184067
| 0.031139
| 0
| 0
| 0
| 0
| 0.153846
| 1
| 0.098291
| false
| 0.004274
| 0.038462
| 0.008547
| 0.213675
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e80f9dcf1fbaf95d64db548a68a4a0e2a7c25dc9
| 3,993
|
py
|
Python
|
tests/test_liif.py
|
Yshuo-Li/mmediting-test
|
ff8349a183b3d266495a53be0c8ad8e342e8b461
|
[
"Apache-2.0"
] | 2
|
2021-05-16T14:49:23.000Z
|
2022-03-28T01:16:44.000Z
|
tests/test_liif.py
|
Yshuo-Li/mmediting-test
|
ff8349a183b3d266495a53be0c8ad8e342e8b461
|
[
"Apache-2.0"
] | null | null | null |
tests/test_liif.py
|
Yshuo-Li/mmediting-test
|
ff8349a183b3d266495a53be0c8ad8e342e8b461
|
[
"Apache-2.0"
] | 2
|
2021-04-22T12:10:14.000Z
|
2021-05-19T02:09:48.000Z
|
import numpy as np
import torch
import torch.nn as nn
from mmcv.runner import obj_from_dict
from mmcv.utils.config import Config
from mmedit.models import build_model
from mmedit.models.losses import L1Loss
from mmedit.models.registry import COMPONENTS
@COMPONENTS.register_module()
class BP(nn.Module):
"""A simple BP network for testing LIIF.
Args:
in_dim (int): Input dimension.
out_dim (int): Output dimension.
"""
def __init__(self, in_dim, out_dim):
super().__init__()
self.layer = nn.Linear(in_dim, out_dim)
def forward(self, x):
shape = x.shape[:-1]
x = self.layer(x.view(-1, x.shape[-1]))
return x.view(*shape, -1)
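# Shape sketch (illustrative, not part of the original test):
#   BP(in_dim=8, out_dim=3)(torch.rand(1, 128 * 64, 8)) has shape (1, 128 * 64, 3);
#   the leading dimensions are preserved and only the last (feature) dimension changes.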
def test_liif():
model_cfg = dict(
type='LIIF',
generator=dict(
type='EDSR',
in_channels=3,
out_channels=3,
mid_channels=8,
num_blocks=1),
imnet=dict(type='BP', in_dim=8, out_dim=3),
local_ensemble=True,
feat_unfold=True,
cell_decode=True,
rgb_mean=(0.4488, 0.4371, 0.4040),
rgb_std=(1., 1., 1.),
eval_bsize=30000,
pixel_loss=dict(type='L1Loss', loss_weight=1.0, reduction='mean'))
scale_max = 4
train_cfg = None
test_cfg = Config(dict(metrics=['PSNR', 'SSIM'], crop_border=scale_max))
# build restorer
restorer = build_model(model_cfg, train_cfg=train_cfg, test_cfg=test_cfg)
# test attributes
assert restorer.__class__.__name__ == 'LIIF'
assert isinstance(restorer.imnet, BP)
assert isinstance(restorer.pixel_loss, L1Loss)
# prepare data
inputs = torch.rand(1, 3, 22, 11)
targets = torch.rand(1, 128 * 64, 3)
coord = torch.rand(1, 128 * 64, 2)
cell = torch.rand(1, 128 * 64, 2)
data_batch = {'lq': inputs, 'gt': targets, 'coord': coord, 'cell': cell}
# prepare optimizer
optim_cfg = dict(type='Adam', lr=1e-4, betas=(0.9, 0.999))
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
# test train_step and forward_test (cpu)
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# test train_step and forward_test (gpu)
if torch.cuda.is_available():
restorer = restorer.cuda()
data_batch = {
'lq': inputs.cuda(),
'gt': targets.cuda(),
'coord': coord.cuda(),
'cell': cell.cuda()
}
# train_step
optimizer = obj_from_dict(optim_cfg, torch.optim,
dict(params=restorer.parameters()))
outputs = restorer.train_step(data_batch, optimizer)
assert isinstance(outputs, dict)
assert isinstance(outputs['log_vars'], dict)
assert isinstance(outputs['log_vars']['loss_pix'], float)
assert outputs['num_samples'] == 1
assert outputs['results']['lq'].shape == data_batch['lq'].shape
assert outputs['results']['gt'].shape == data_batch['gt'].shape
assert torch.is_tensor(outputs['results']['output'])
assert outputs['results']['output'].size() == (1, 128 * 64, 3)
# val_step
result = restorer.val_step(data_batch, meta=[{'gt_path': ''}])
assert isinstance(result, dict)
assert isinstance(result['eval_result'], dict)
assert result['eval_result'].keys() == set({'PSNR', 'SSIM'})
assert isinstance(result['eval_result']['PSNR'], np.float64)
assert isinstance(result['eval_result']['SSIM'], np.float64)
| 34.721739
| 77
| 0.615577
| 516
| 3,993
| 4.579457
| 0.284884
| 0.081253
| 0.0584
| 0.045705
| 0.419382
| 0.372408
| 0.336014
| 0.336014
| 0.336014
| 0.336014
| 0
| 0.030931
| 0.238918
| 3,993
| 114
| 78
| 35.026316
| 0.746627
| 0.069371
| 0
| 0.26506
| 0
| 0
| 0.080033
| 0
| 0
| 0
| 0
| 0
| 0.289157
| 1
| 0.036145
| false
| 0
| 0.096386
| 0
| 0.156627
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e80fc2194a7c4cdddb23cee4ca11cad0caafde7b
| 3,739
|
py
|
Python
|
database/signals.py
|
ccraddock/beiwe-backend-cc
|
b37c2604800aafcf81c93bc14673ada6aed17a39
|
[
"BSD-3-Clause"
] | null | null | null |
database/signals.py
|
ccraddock/beiwe-backend-cc
|
b37c2604800aafcf81c93bc14673ada6aed17a39
|
[
"BSD-3-Clause"
] | null | null | null |
database/signals.py
|
ccraddock/beiwe-backend-cc
|
b37c2604800aafcf81c93bc14673ada6aed17a39
|
[
"BSD-3-Clause"
] | null | null | null |
from django.utils import timezone
from django.core.exceptions import ObjectDoesNotExist
from django.db.models.signals import post_save, pre_save
from django.dispatch import receiver
from database.study_models import DeviceSettings, Study, Survey, SurveyArchive
@receiver(post_save, sender=Study)
def populate_study_device_settings(sender, **kwargs):
"""
Ensure that every newly created Study object has a DeviceSettings object. This essentially
makes the OneToOneField have null=False in both directions.
"""
my_study = kwargs['instance']
if kwargs['created'] and not hasattr(my_study, 'device_settings'):
# If my_study has just been created and doesn't have a DeviceSettings
# attached to it, create one with the default parameters.
DeviceSettings.objects.create(study=my_study)
@receiver(pre_save, sender=Survey)
def create_survey_archive(sender, **kwargs):
"""
Ensure that every time a Survey is edited, a SurveyArchive (SA) is stored which holds the
current contents of the Survey before saving, as well as a pair of timestamps marking the
time range over which the SA applies.
"""
# The Survey instance being passed has the updated contents of the Survey. To get
# the preexisting contents of the Survey, make a database call using the passed
# instance's primary key. If we get an ObjectDoesNotExist error short-circuit because
# that means it is the initial save operation.
my_survey_plus_updates = kwargs['instance']
try:
my_survey = Survey.objects.get(pk=my_survey_plus_updates.pk)
except ObjectDoesNotExist:
return
# All fields present in AbstractSurvey, plus the study foreign key which is
# separately present in Survey and SurveyArchive.
survey_fields = [f.name for f in super(Survey, my_survey)._meta.fields]
survey_fields.append('study_id')
# Prepare a new archive containing the archive-specific information
new_archive = SurveyArchive(survey=my_survey, archive_start=my_survey.last_updated)
try:
# Get the most recent archive for this Survey, to check whether the Survey has been edited
last_archive = my_survey.archives.latest('archive_end')
except SurveyArchive.DoesNotExist:
survey_dirty = True # If there is no previous archive, we automatically make a new one
else:
survey_dirty = False
for shared_field in survey_fields:
# Update all of the shared fields in the archive to have the original survey's values
if shared_field == 'name':
setattr(new_archive, shared_field, '{0} {1}'.format(getattr(my_survey, shared_field), timezone.now().isoformat()))
else:
setattr(new_archive, shared_field, getattr(my_survey, shared_field))
if not survey_dirty and getattr(my_survey, shared_field) != getattr(last_archive, shared_field):
# If the survey has been edited since the last archive was made, mark the survey as
# dirty. This tells us that we have to make a new archive object.
survey_dirty = True
if survey_dirty:
# If the survey has been edited, save the new archive. This automatically sets the
# archive_end field to be the current time.
new_archive.save()
else:
# If the survey has not been edited, we don't save the new archive. Update the
# previous archive to extend to the current time. Note that object.update saves the
# object, unlike QuerySet.update. See base_models.AbstractModel for details.
last_archive.update(archive_end=timezone.now())
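# Illustrative effect of the pre_save hook above (hypothetical example, not in the original module):
#   editing any tracked Survey field and calling survey.save() either stores a new
#   SurveyArchive (when a shared field changed) or simply extends the latest archive's
#   archive_end to timezone.now() (when nothing changed).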
| 46.7375
| 127
| 0.70099
| 510
| 3,739
| 5.021569
| 0.347059
| 0.031238
| 0.018743
| 0.022257
| 0.100742
| 0.018743
| 0
| 0
| 0
| 0
| 0
| 0.000701
| 0.237229
| 3,739
| 79
| 128
| 47.329114
| 0.897265
| 0.457074
| 0
| 0.189189
| 0
| 0
| 0.035998
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.054054
| false
| 0
| 0.135135
| 0
| 0.216216
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e810f0787ee0e3fe022cdf8b169ed014bf9c5752
| 673
|
py
|
Python
|
docs/examples/notify/notify_skeleton.py
|
Blakstar26/npyscreen
|
d47f9c78dc9fea6f66aaef60403e748bb89e52f7
|
[
"BSD-2-Clause"
] | null | null | null |
docs/examples/notify/notify_skeleton.py
|
Blakstar26/npyscreen
|
d47f9c78dc9fea6f66aaef60403e748bb89e52f7
|
[
"BSD-2-Clause"
] | null | null | null |
docs/examples/notify/notify_skeleton.py
|
Blakstar26/npyscreen
|
d47f9c78dc9fea6f66aaef60403e748bb89e52f7
|
[
"BSD-2-Clause"
] | null | null | null |
import npyscreen
class NotifyBaseExample(npyscreen.Form):
def create(self):
key_of_choice = 'p'
what_to_display = 'Press {} for popup \n Press escape key to quit'.format(key_of_choice)
self.how_exited_handers[npyscreen.wgwidget.EXITED_ESCAPE] = self.exit_application
self.add(npyscreen.FixedText, value=what_to_display)
def exit_application(self):
self.parentApp.setNextForm(None)
self.editing = False
class MyApplication(npyscreen.NPSAppManaged):
def onStart(self):
self.addForm('MAIN', NotifyBaseExample, name='To be improved upon')
if __name__ == '__main__':
TestApp = MyApplication().run()
| 29.26087
| 96
| 0.707281
| 81
| 673
| 5.617284
| 0.592593
| 0.021978
| 0.048352
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0.190193
| 673
| 23
| 97
| 29.26087
| 0.834862
| 0
| 0
| 0
| 0
| 0
| 0.115727
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.2
| false
| 0
| 0.066667
| 0
| 0.4
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e8112e27cfea3db2efc0a5a799487bb00e1650b2
| 5,656
|
py
|
Python
|
practicioner_bundle/ch15-neural_style/pyimagesearch/nn/conv/minigooglenet.py
|
romanroson/pis_code
|
1221c39c23bec62ba419f9a324f88b0d8e5e4b5b
|
[
"MIT"
] | 1
|
2020-06-07T04:02:16.000Z
|
2020-06-07T04:02:16.000Z
|
practicioner_bundle/ch15-neural_style/pyimagesearch/nn/conv/minigooglenet.py
|
romanroson/pis_code
|
1221c39c23bec62ba419f9a324f88b0d8e5e4b5b
|
[
"MIT"
] | null | null | null |
practicioner_bundle/ch15-neural_style/pyimagesearch/nn/conv/minigooglenet.py
|
romanroson/pis_code
|
1221c39c23bec62ba419f9a324f88b0d8e5e4b5b
|
[
"MIT"
] | 2
|
2020-03-25T10:51:54.000Z
|
2020-09-18T09:36:44.000Z
|
# -*- coding: utf-8 -*-
"""Implementation of MiniGoogLeNet architecture.
This implementation is based on the original implementation of GoogLeNet.
The original authors placed BN before the Activation layer; the two should be switched.
"""
from keras.layers.normalization import BatchNormalization
from keras.layers.convolutional import Conv2D
from keras.layers.convolutional import AveragePooling2D
from keras.layers.convolutional import MaxPooling2D
from keras.layers.core import Activation
from keras.layers.core import Dropout
from keras.layers.core import Dense
from keras.layers import Flatten
from keras.layers import Input
from keras.models import Model
from keras.layers import concatenate
from keras import backend as K
class MiniGoogLeNet:
"""Implementation of MiniGoogLeNet architecture
"""
@staticmethod
def conv_module(x, filter_num, filter_x_size, filter_y_size, stride, chanel_dim, padding="same"):
"""Define conv layer
Arguments:
x {Tensor} -- input layer to the function
filter_num {int} -- number of filters our CONV layer is going to learn
filter_x_size {int} -- x-size of each of the filter_num filters that will be learned
filter_y_size {int} -- y-size of each of the filter_num filters that will be learned
stride {int} -- stride of the CONV layer
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Keyword Arguments:
padding {str} -- type of padding to be applied to the CONV layer (default: {"same"})
Returns:
Tensor -- convolutional module
"""
# define a CONV => BN => RELU pattern
x = Conv2D(filter_num, (filter_x_size, filter_y_size), strides=stride, padding=padding)(x)
x = BatchNormalization(axis=chanel_dim)(x)
x = Activation("relu")(x)
# return the block
return x
@staticmethod
def inception_module(x, numK1x1, numK3x3, chanel_dim): # pylint: disable=invalid-name
"""Define inception module
Arguments:
x {Tensor} -- input layer
numK1x1 {int} -- number of 1x1 filters
numK3x3 {int} -- number of 3x3 filters
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- inception module
"""
# define two CONV modules, then concatenate across the channel dimension
conv_1x1 = MiniGoogLeNet.conv_module(x, numK1x1, 1, 1, (1, 1), chanel_dim)
conv_3x3 = MiniGoogLeNet.conv_module(x, numK3x3, 3, 3, (1, 1), chanel_dim)
x = concatenate([conv_1x1, conv_3x3], axis=chanel_dim)
# return the block
return x
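# Channel-count sketch (illustrative): inception_module(x, 32, 48, chanel_dim) concatenates
# 32 feature maps from the 1x1 branch with 48 from the 3x3 branch, giving 80 output channels.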
@staticmethod
def downsample_module(x, filter_num, chanel_dim):
"""Define downsample module
Arguments:
x {Tensor} -- input layer
filter_num {int} -- number of filters our CONV layer is going to learn
chanel_dim {int} -- channel dimension, derived from “channels last” or “channels first”
Returns:
Tensor -- downsample module
"""
# define the CONV module and POOL, then concatenate across the channel dimensions
conv_3x3 = MiniGoogLeNet.conv_module(x, filter_num, 3, 3, (2, 2), chanel_dim, padding="valid")
pool = MaxPooling2D((3, 3), strides=(2, 2))(x)
x = concatenate([conv_3x3, pool], axis=chanel_dim)
# return the block
return x
@staticmethod
def build(width, height, depth, classes):
"""Build MiniGoogLeNet architecture
Arguments:
width {int} -- [description]
height {int} -- [description]
depth {int} -- [description]
classes {int} -- [description]
Returns:
obj -- MiniGoogLeNet model
"""
# initialize the input shape to be "channels last" and the channels dimension itself
input_shape = (height, width, depth)
chanel_dim = -1
# if we are using "channels first", update the input shape and channels dimension
if K.image_data_format() == "channels_first":
input_shape = (depth, height, width)
chanel_dim = 1
# define the model input and first CONV module
inputs = Input(shape=input_shape)
x = MiniGoogLeNet.conv_module(inputs, 96, 3, 3, (1, 1), chanel_dim)
# two Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 32, 32, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 32, 48, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 80, chanel_dim)
# four Inception modules followed by a downsample module
x = MiniGoogLeNet.inception_module(x, 112, 48, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 96, 64, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 80, 80, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 48, 96, chanel_dim)
x = MiniGoogLeNet.downsample_module(x, 96, chanel_dim)
# two Inception modules followed by global POOL and dropout
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = MiniGoogLeNet.inception_module(x, 176, 160, chanel_dim)
x = AveragePooling2D((7, 7))(x)
x = Dropout(0.5)(x)
# softmax classifier
x = Flatten()(x)
x = Dense(classes)(x)
x = Activation("softmax")(x)
# create the model
model = Model(inputs, x, name="googlenet")
# return the constructed network architecture
return model
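# Illustrative usage (hypothetical, assumes CIFAR-10-sized inputs; not part of the original module):
#   model = MiniGoogLeNet.build(width=32, height=32, depth=3, classes=10)
#   model.compile(optimizer="sgd", loss="categorical_crossentropy", metrics=["accuracy"])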
| 39.277778
| 102
| 0.647454
| 703
| 5,656
| 5.103841
| 0.221906
| 0.062709
| 0.041806
| 0.06466
| 0.440635
| 0.360089
| 0.319119
| 0.229376
| 0.212096
| 0.212096
| 0
| 0.025042
| 0.265736
| 5,656
| 143
| 103
| 39.552448
| 0.838912
| 0.423798
| 0
| 0.157895
| 0
| 0
| 0.014691
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.070175
| false
| 0
| 0.210526
| 0
| 0.368421
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e811afcf8e2f5fbd26eb4879dec2b68ca870f0fa
| 1,475
|
py
|
Python
|
03/triangle.py
|
machinelearningdeveloper/aoc_2016
|
e2c2f7909b09c2ad27f87e05a80f2b2feee6a3a2
|
[
"MIT"
] | null | null | null |
03/triangle.py
|
machinelearningdeveloper/aoc_2016
|
e2c2f7909b09c2ad27f87e05a80f2b2feee6a3a2
|
[
"MIT"
] | null | null | null |
03/triangle.py
|
machinelearningdeveloper/aoc_2016
|
e2c2f7909b09c2ad27f87e05a80f2b2feee6a3a2
|
[
"MIT"
] | null | null | null |
"""Test whether putative triangles, specified as triples of side lengths,
in fact are possible."""
def load_triangles(filename):
"""Load triangles from filename."""
triangles = []
with open(filename) as f:
for line in f:
if line.strip():
triangles.append(tuple([int(side) for side in line.split()]))
return triangles
def load_triangles_from_cols(filename):
"""Instead of loading one triangle per line,
load one-third each of three triangles per line."""
xs = []
ys = []
zs = []
with open(filename) as f:
for line in f:
if line.strip():
x, y, z = [int(side) for side in line.split()]
xs.append(x)
ys.append(y)
zs.append(z)
return ([(xs[i], xs[i+1], xs[i+2]) for i in range(0, len(xs), 3)]
+ [(ys[i], ys[i+1], ys[i+2]) for i in range(0, len(ys), 3)]
+ [(zs[i], zs[i+1], zs[i+2]) for i in range(0, len(zs), 3)])
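# Illustrative example (not part of the original script): for input lines
#   101 301 501
#   102 302 502
#   103 303 503
# the column-wise reading yields (101, 102, 103), (301, 302, 303) and (501, 502, 503).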
def is_possible(*sides):
"""The sum of the lengths of every pair of sides in a, b, c
must be larger than the length of the remaining side,
or the putative triangle is impossible."""
for a in [0, 1]:
for b in range(a + 1, 3):
if a == 0:
c = 1 if b == 2 else 2
elif a == 1:
c = 0
if sum([sides[a], sides[b]]) <= sides[c]:
return False
return True
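# Example (illustrative): is_possible(3, 4, 5) -> True, while is_possible(1, 2, 10) -> False
# because 1 + 2 <= 10 violates the triangle inequality.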
| 31.382979
| 76
| 0.51661
| 225
| 1,475
| 3.364444
| 0.324444
| 0.036988
| 0.019815
| 0.023778
| 0.239102
| 0.239102
| 0.239102
| 0.173052
| 0.10568
| 0.10568
| 0
| 0.022845
| 0.347119
| 1,475
| 46
| 77
| 32.065217
| 0.76324
| 0.24678
| 0
| 0.193548
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 1
| 0.096774
| false
| 0
| 0
| 0
| 0.225806
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e812c612130c7c8d3bd444f6ef7c0936cdb5b8cc
| 9,594
|
py
|
Python
|
backend/api/tests/mixins/credit_trade_relationship.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 18
|
2017-05-10T21:55:11.000Z
|
2021-03-01T16:41:32.000Z
|
backend/api/tests/mixins/credit_trade_relationship.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 1,167
|
2017-03-04T00:18:43.000Z
|
2022-03-03T22:31:51.000Z
|
backend/api/tests/mixins/credit_trade_relationship.py
|
amichard/tfrs
|
ed3973016cc5c2ae48999d550a23b41a5ddad807
|
[
"Apache-2.0"
] | 48
|
2017-03-09T17:19:39.000Z
|
2022-02-24T16:38:17.000Z
|
# -*- coding: utf-8 -*-
# pylint: disable=no-member,invalid-name,duplicate-code
"""
REST API Documentation for the NRS TFRS Credit Trading Application
The Transportation Fuels Reporting System is being designed to streamline
compliance reporting for transportation fuel suppliers in accordance with
the Renewable & Low Carbon Fuel Requirements Regulation.
OpenAPI spec version: v1
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import datetime
import json
import logging
from typing import Callable
from collections import namedtuple, defaultdict
from enum import Enum
from api.models.CreditTrade import CreditTrade
from api.models.CreditTradeStatus import CreditTradeStatus
class CreditTradeRelationshipMixin(object):
"""
Mixin to provide user mapping for related parties to credit transactions
"""
class UserRelationship(Enum):
"""
Enumerates the ways in which a client (user) can be related to a
credit trade
"""
INITIATOR = 1
RESPONDENT = 2
THIRD_PARTY = 3
GOVERNMENT_ANALYST = 4
GOVERNMENT_DIRECTOR = 5
user_map = {
UserRelationship.INITIATOR: 'fs_user_1',
UserRelationship.RESPONDENT: 'fs_user_2',
UserRelationship.THIRD_PARTY: 'fs_user_3',
UserRelationship.GOVERNMENT_ANALYST: 'gov_analyst',
UserRelationship.GOVERNMENT_DIRECTOR: 'gov_director'
}
class CreditTradeFlowHooksMixin(object):
ChangeRecord = namedtuple('ChangeRecord', [
'trade_id',
'requesting_username',
'relationship',
'expected_to_be_successful',
'data_before_request',
'data_after_request',
'response_code'
])
PreChangeRecord = namedtuple('PreChangeRecord', [
'trade_id',
'current_status',
'rescinded',
'status_change'
])
StatusChange = namedtuple('StatusChange', [
'relationship',
'status',
'rescinded'
])
def _sensible_status_changes(self, current_status, rescinded):
"""
Return a list of valid potential status changes for a given starting
state
"""
status_changes = defaultdict(lambda: [])
status_changes[('Draft', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Submitted', False),
self.StatusChange(self.UserRelationship.INITIATOR,
'Cancelled', False)
]
status_changes[('Submitted', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Submitted', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Accepted', False),
self.StatusChange(self.UserRelationship.RESPONDENT,
'Refused', False)
]
status_changes[('Accepted', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Accepted', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Accepted', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,
'Recommended', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_ANALYST,
'Not Recommended', False)
]
status_changes[('Recommended', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Recommended', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Recommended', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Approved', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Declined', False)
]
status_changes[('Not Recommended', False)] = [
self.StatusChange(self.UserRelationship.INITIATOR,
'Not Recommended', True), # rescind
self.StatusChange(self.UserRelationship.RESPONDENT,
'Not Recommended', True), # rescind
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Approved', False),
self.StatusChange(self.UserRelationship.GOVERNMENT_DIRECTOR,
'Declined', False)
]
return status_changes[(current_status, rescinded)]
def _path_builder(self, node, path=[], valid_paths=[]):
"""
Recursively build an array of valid paths through the status tree
"""
s = self._sensible_status_changes(node.status, node.rescinded)
is_leaf = not s
path = path + [node]
if is_leaf:
valid_paths.append(path) # end of the line
for branch in s:
self._path_builder(branch, path, valid_paths)
return valid_paths
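# Illustrative path produced by _path_builder starting from StatusChange(INITIATOR, 'Draft', False):
#   Draft -> Submitted -> Accepted -> Recommended -> Approved
# Each path element is a StatusChange(relationship, status, rescinded) namedtuple.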
def check_credit_trade_workflow(
self,
before_change_callback: Callable[[PreChangeRecord], None] = lambda x: None,
after_change_callback: Callable[[ChangeRecord], None] = lambda x: None,
path_end_callback: Callable[[], None] = lambda: None,
modify_request_payload: Callable[[dict], None] = lambda x: None
):
"""
Evaluate all normal status paths through the application via
REST API as appropriate users
with callbacks for tests:
before_change_callback called just before a status change.
Initial status and trade_id may be None
after_change_callback called after a change
data_before_request can be None if this was a creation
path_end_callback called when this pathway is done
(another will begin unless this was the last)
"""
initiating_org = self.users[
self.user_map[
self.UserRelationship.INITIATOR
]].organization
responding_org = self.users[
self.user_map[
self.UserRelationship.RESPONDENT
]].organization
payload = {
'fairMarketValuePerCredit': 1,
'initiator': initiating_org.id,
'numberOfCredits': 1,
'respondent': responding_org.id,
'tradeEffectiveDate': datetime.datetime.today().strftime('%Y-%m-%d'),
'type': self.credit_trade_types['sell'].id,
'zeroReason': None
}
valid_paths = (self._path_builder(
self.StatusChange(self.UserRelationship.INITIATOR, 'Draft', False)
))
for path in valid_paths:
logging.debug('evaluating path: {}'.format(
'\n'.join(
[
'{} sets status to {} and rescinded to {}'.format(
c.relationship, c.status, c.rescinded) for c in path
]
)))
trade_id = None
response_data = None
for node in path:
before_change_callback(self.PreChangeRecord(
trade_id,
CreditTrade.objects.filter(
id=trade_id
).first().status.status if trade_id else None,
CreditTrade.objects.filter(
id=trade_id
).first().is_rescinded if trade_id else None,
node
))
payload['status'] = CreditTradeStatus.objects.get_by_natural_key(node.status).id
payload['is_rescinded'] = node.rescinded
modify_request_payload(payload)
if not trade_id:
response = self.clients[self.user_map[node.relationship]].post(
'/api/credit_trades',
content_type='application/json',
data=json.dumps(payload)
)
else:
response = self.clients[self.user_map[node.relationship]].put(
'/api/credit_trades/{}'.format(trade_id),
content_type='application/json',
data=json.dumps(payload)
)
previous_response_data = response_data
response_data = json.loads(response.content.decode('utf-8'))
trade_id = response_data['id'] if 'id' in response_data else trade_id
after_change_callback(self.ChangeRecord(
trade_id,
self.user_map[node.relationship],
node.relationship,
True,
previous_response_data,
response_data,
response.status_code
))
path_end_callback()
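# Illustrative (hypothetical) use from a test case that mixes in this class:
#   def test_workflow(self):
#       def after(change):  # change is a ChangeRecord namedtuple
#           self.assertLess(change.response_code, 400)
#       self.check_credit_trade_workflow(after_change_callback=after)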
| 36.340909
| 96
| 0.575985
| 897
| 9,594
| 6.012263
| 0.296544
| 0.07417
| 0.066753
| 0.120156
| 0.308734
| 0.287224
| 0.239941
| 0.158353
| 0.057853
| 0.057853
| 0
| 0.002698
| 0.343235
| 9,594
| 263
| 97
| 36.479087
| 0.853198
| 0.17636
| 0
| 0.291429
| 0
| 0
| 0.098927
| 0.00916
| 0
| 0
| 0
| 0
| 0
| 1
| 0.017143
| false
| 0
| 0.045714
| 0
| 0.114286
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| null | 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
| 0
|
1
| 0
|
e814b12d587f4fdbbd8d27aa48d2d332f3fb169b
| 14,345
|
py
|
Python
|
mybot.py
|
johnnyboiii3020/matchmaking-bot
|
c36df430fd8b3292f34fb2e156e65d9914e0e497
|
[
"MIT"
] | null | null | null |
mybot.py
|
johnnyboiii3020/matchmaking-bot
|
c36df430fd8b3292f34fb2e156e65d9914e0e497
|
[
"MIT"
] | null | null | null |
mybot.py
|
johnnyboiii3020/matchmaking-bot
|
c36df430fd8b3292f34fb2e156e65d9914e0e497
|
[
"MIT"
] | null | null | null |
import discord
import json
import random
import os
from discord.ext import commands
TOKEN = ""
client = commands.Bot(command_prefix = '--')
os.chdir(r'D:\Programming\Projects\Discord bot\jsonFiles')
SoloCounter = 30
SolominCounter = 10
Queueiter = 1
T_Queueiter = 1
TeamCounter = 50
TeamminCounter = 20
extensions = [
"cogs.Matchmaking",
"cogs.Moderator"
]
@client.event
async def on_ready():
botInfo = await client.application_info()
oauthlink = discord.utils.oauth_url(botInfo.id)
print('---------')
print('Username: {}'.format(client.user.name))
print('ID: {}'.format(client.user.id))
print('Server count: {}'.format(str(len(client.servers))))
print('Member count: {}'.format(str(len(set(client.get_all_members())))))
print('OAuth URL: {}'.format(oauthlink))
print('Cogs: {}'.format(client.cogs))
print('---------')
######################### Register Team #################################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def registerTeam( ctx , teamName , player1: discord.Member , player2: discord.Member , player3: discord.Member , player4: discord.Member , player5: discord.Member):
if ctx.message.channel.id == "549911021511245834":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
players = [player1 , player2 , player3 , player4 , player5]
await update_data_Team(ctx , Teams , teamName , players)
with open('Teams.json' , 'w') as f:
json.dump(Teams , f , indent = 2)
async def update_data_Team(ctx , Teams , teamName , players):
if not teamName in Teams:
Teams[teamName] = {}
Teams[teamName]["teamElo"] = 0
Teams[teamName]["Players"] = []
Role = teamName
await client.create_role(ctx.message.server , name = Role, hoist = True , mentionable = True )
TeamRole = discord.utils.get(ctx.message.server.roles , name = Role)
for player in players:
print(player)
Teams[teamName]["Players"].append(player.mention)
await client.add_roles(player , TeamRole)
await client.say("{} is Registered as Team Cheers!!!!".format(teamName))
else:
await client.say("you are already registered")
############################ Register Solo ###################################
@client.command(pass_context = True)
async def registersolo( ctx , name: discord.Member):
if ctx.message.channel.id == "549911021511245834":
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
await update_data_solo(Solo , name , ctx)
with open('Solo.json' , 'w') as f:
json.dump(Solo , f , indent = 2)
async def update_data_solo( Solo , name , player):
if not player.message.author.mention in Solo:
author = player.message.author.mention
member = player.message.author
Solo[author] = {}
Solo[author]["name"] = name
Solo[author]["Elo"] = 0
nickname = str(Solo[author]["Elo"]) + "~" + Solo[author]["name"]
Role = discord.utils.get(player.message.server.roles , name = 'Registered')
member.nick = nickname
await client.add_roles(member , Role)
await client.say("{} is Registered as Solo Cheers Guys!!!!".format(author))
else:
await client.say("you are already registered")
############################### Win Team ################################
@client.command(pass_context = True)
@commands.has_role('Mod')
async def winT(ctx , T_Queueno , Team , Team2):
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
Teams[Team]["teamElo"] = Teams[Team]["teamElo"] + TeamCounter
Teams[Team2]["teamElo"] = Teams[Team2]["teamElo"] - TeamminCounter
await display_win_team(Team , Team2)
with open('Teams.json' , 'w') as f:
json.dump(Teams , f , indent = 2)
###############CReate Team Queue Channel###########################
@client.command(pass_context = True)
@commands.has_role('Mod')
async def CreateTQueueChannel(ctx):
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
Teams_Queue["1"] = []
with open('Teams_Queue.json' , 'w') as f:
json.dump(Teams_Queue , f , indent = 2)
########################## Join Team Queue ###################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def joinQT(ctx , TeamName):
if ctx.message.channel.id == "549910313995206687":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
if "{}".format(TeamName) in Teams:
with open('Teams_Queue.json' , 'r') as f:
Teams_Queue = json.load(f)
await update_data_Team_Queue(Teams_Queue , TeamName)
with open('Teams_Queue.json' , 'w') as f:
json.dump(Teams_Queue , f , indent = 2)
else:
await client.say("{} is not registerd".format(TeamName))
async def update_data_Team_Queue(Teams_Queue , TeamName):
global T_Queueiter
T_Queueno = T_Queueiter
if len(Teams_Queue["{}".format(T_Queueno)]) >= 1:
Teams_Queue[str(T_Queueno)].append(TeamName)
await display_Team_Queue(T_Queueno , Teams_Queue , TeamName)
await display_match(T_Queueno , Teams_Queue)
T_Queueiter += 1
T_Queueno = T_Queueiter
Teams_Queue[str(T_Queueno)] = []
else:
if not TeamName in Teams_Queue[str(T_Queueno)]:
Teams_Queue[str(T_Queueno)].append(TeamName)
await display_Team_Queue(T_Queueno , Teams_Queue , TeamName)
else:
await client.say("{} is already in queue" .format(TeamName))
async def display_Team_Queue(T_Queueno , Teams_Queue , TeamName):
embed = discord.Embed(
title = "Team Queue : {}".format(T_Queueno),
description = "5 v 5 Custom Games"
)
embed.add_field(name = 'Team:' , value = "\n".join(Teams_Queue[str(T_Queueno)]) , inline = False)
await client.say(embed = embed)
async def display_match(T_Queueno , Teams_Queue):
embed = discord.Embed(
title= "Team Matchup Queue : {}".format(T_Queueno),
description = "5 v 5 Custom Games"
)
embed.add_field(name = 'Teams:' , value = "\n".join(Teams_Queue[str(T_Queueno)]) , inline = False)
with open('Maps.json' , 'r') as f:
Maps = json.load(f)
embed.add_field(name = 'Map:' , value = random.choice(Maps["Maps"]))
await client.say(embed = embed)
################Show Queue#################
@client.command(pass_context = True)
@commands.has_role('Registered')
async def showQ(ctx , Queueno):
if ctx.message.channel.id == "549910313995206687":
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
if len(Queue[str(Queueno)]) == 0 :
await client.say("Queue is empty")
else:
await DisplayQueue(Queue , Queueno)
###############Show Team Points##########
@client.command(pass_context = True)
@commands.has_role('Registered')
async def pointsT(ctx , TeamName):
if ctx.message.channel.id == "551095980251021323":
with open('Teams.json' , 'r') as f:
Teams = json.load(f)
if TeamName in Teams:
await client.say("{}".format(Teams[TeamName][teamElo]))
####################Show Points ###############
@client.command(pass_context = True)
@commands.has_role('Registered')
async def points(ctx):
if ctx.message.channel.id == "551095980251021323":
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
if ctx.message.author.mention in Solo:
await client.say("{}".format(Solo[ctx.message.author.mention]["Elo"]) + " points{}".format(ctx.message.author.mention))
######################### Win Solo ##############################
@client.command(pass_context = True)
@commands.has_role('Mod' )
async def winS(ctx , Queueno , Teamno , Teamno2):
with open('Solo_Teams.json' , 'r') as f:
Solo_Teams = json.load(f)
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
await update_winS(Solo_Teams , Solo , Queueno , Teamno , Teamno2)
with open('Solo.json' , 'w') as f:
json.dump(Solo , f , indent = 2)
async def update_winS(Solo_Teams , Solo , Queueno , Teamno , Teamno2):
for player in Solo_Teams[str(Queueno)][str(Teamno)]:
Solo[player]["Elo"] = Solo[player]["Elo"] + SoloCounter
await update_nick(player)
for players in Solo_Teams[str(Queueno)][str(Teamno2)]:
Solo[players]["Elo"] = Solo[players]["Elo"] - SolominCounter
await update_nick(players)
await display_updates(Solo_Teams , Teamno , Teamno2 , Queueno)
async def update_nick(name):
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
nickname = str(Solo[name]["Elo"]) + "~" + str(Solo[name]["name"])
server = client.get_server("549553345044545536")
member = server.get_member(name[2:len(name)-1])
# assigning member.nick locally does not change the nickname on the server;
# discord.py's async client exposes change_nickname for that
await client.change_nickname(member , nickname)
async def display_updates(Solo_Teams , Teamno , Teamno2 , Queueno):
embed = discord.Embed(
title = "Updates:"
)
embed.add_field(name = 'Winning Team + {}'.format(SoloCounter) , value = '\n'.join(Solo_Teams[str(Queueno)][str(Teamno)]))
embed.add_field(name = 'Losing Team - {}'.format(SolominCounter) , value = '\n'.join(Solo_Teams[str(Queueno)][str(Teamno2)]))
await client.say(embed = embed)
####Leave Queue #####
@client.command(pass_context = True)
@commands.has_role('Registered')
async def leaveQ(ctx):
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
await update_data_lQueue(Queue , ctx.message.author)
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
async def update_data_lQueue( Queue , author):
print(Queueiter)
if author.mention in Queue[str(Queueiter)]:
Queue[str(Queueiter)].remove(author.mention)
await client.say("{} has left the queue".format(author.mention))
else:
await client.say("{} is not in the queue".format(author.mention))
###Create Queue Channel ####
@client.command(pass_context = True)
@commands.has_role('Mod')
async def CreateQueueChannel(ctx):
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
Queue[str(Queueiter)] = []
await client.say("Queue Channel is Created")
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
#############Join Queue#########
@client.command(pass_context = True)
@commands.has_role('Registered')
async def joinQ(ctx):
with open('Solo.json' , 'r') as f:
Solo = json.load(f)
if ctx.message.author.mention in Solo:
with open('Queue.json' , 'r') as f:
Queue = json.load(f)
await update_data_Queue( Queue , ctx.message.author)
with open('Queue.json' , 'w') as f:
json.dump(Queue , f , indent = 2)
else:
await client.say("{} is not registered".format(ctx.message.author))
async def update_data_Queue(Queue , author):
global Queueiter
Queueno = Queueiter
if len(Queue["{}".format(Queueno)]) >= 9:
Queue[str(Queueno)].append(author.mention)
await DisplayQueue(Queue , Queueno)
await Create_solo_teams(Queue , Queueno)
Queueiter = Queueiter + 1
Queueno = Queueiter
Queue[str(Queueno)] = []
else:
if not author.mention in Queue[str(Queueno)]:
Queue[str(Queueno)].append(author.mention)
await client.say("{} joined".format(author.mention))
await DisplayQueue( Queue , Queueno)
else:
await client.say("{} already in queue" .format(author.mention))
async def DisplayQueue( Queue , Queueno):
embed = discord.Embed(
title = 'Queue:{}'.format(Queueno),
description = "5 v 5 Custom Games:"
)
embed.add_field(name = "Lobby" , value = '\n'.join(Queue[str(Queueno)]), inline = True)
await client.say(embed = embed)
async def Create_solo_teams(Queue , Queueno):
with open('Solo_Teams.json' , 'r') as f:
Solo_Teams = json.load(f)
await update_Solo_teams(Solo_Teams , Queueno , Queue)
with open('Solo_Teams.json' , 'w') as f:
json.dump(Solo_Teams , f , indent = 2)
async def update_Solo_teams( Solo_Teams , Queueno , Queue):
if not Queueno in Solo_Teams:
Solo_Teams[str(Queueno)] = {}
Solo_Teams[str(Queueno)]["Team1"] = []
Solo_Teams[str(Queueno)]["Team2"] = []
for x in range(0 , 5):
Queuerand = random.choice(Queue[str(Queueno)])
Queue[str(Queueno)].remove(Queuerand)
Solo_Teams[str(Queueno)]["Team1"].append(Queuerand)
for x in range(0 , 5):
Queuerand = random.choice(Queue[str(Queueno)])
Queue[str(Queueno)].remove(Queuerand)
Solo_Teams[str(Queueno)]["Team2"].append(Queuerand)
await Display_solo_teams(Solo_Teams , Queueno)
async def Display_solo_teams( Solo_Teams , Queueno):
embed = discord.Embed(
title = 'Queueno.:{}'.format(Queueno),
description = '5 v 5 Custom Games'
)
embed.add_field(name = "Team1:", value = '\n'.join(Solo_Teams[str(Queueno)]["Team1"]) , inline = True)
embed.add_field(name = "Team2:", value = '\n'.join(Solo_Teams[str(Queueno)]["Team2"]) , inline = False)
with open('Maps.json' , 'r') as f:
Maps = json.load(f)
embed.add_field(name = "Map:", value = random.choice(Maps["Maps"]) , inline = False)
embed.add_field(name = "Host of The Match" , value = random.choice(Solo_Teams[str(Queueno)]["Team1"]) , inline = False)
await client.say(embed = embed)
if __name__ == '__main__':
for extension in extensions:
try:
client.load_extension(extension)
except Exception as e:
print('Failed to load extension {}\n{}: {}'.format(extension, type(e).__name__, e))
client.run(TOKEN)
| 40.408451 | 171 | 0.594772 | 1,762 | 14,345 | 4.738933 | 0.111805 | 0.035569 | 0.017605 | 0.02012 | 0.603832 | 0.522874 | 0.463832 | 0.382036 | 0.326228 | 0.325269 | 0 | 0.018072 | 0.23625 | 14,345 | 354 | 172 | 40.522599 | 0.744067 | 0.01199 | 0 | 0.450331 | 0 | 0 | 0.110995 | 0.00232 | 0 | 0 | 0 | 0 | 0 | 1 | 0 | false | 0.039735 | 0.016556 | 0 | 0.016556 | 0.036424 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e814cda6088e4f32617041b4185b65bf218042f4 | 18,547 | py | Python | conversation.py | markemus/economy | d7b3be9b2095393d7ee5c8967b9fcee8998776bb | [ "MIT" ] | 2 | 2017-05-09T22:46:18.000Z | 2021-09-07T06:04:57.000Z | conversation.py | markemus/economy | d7b3be9b2095393d7ee5c8967b9fcee8998776bb | [ "MIT" ] | null | null | null | conversation.py | markemus/economy | d7b3be9b2095393d7ee5c8967b9fcee8998776bb | [ "MIT" ] | 3 | 2017-07-20T21:22:30.000Z | 2020-10-17T13:04:28.000Z |
import database as d
import numpy as np
import random
from transitions import Machine
#Conversations are markov chains. Works as follows: a column vector for each CURRENT state j, a row vector for each TARGET state i.
#Each entry i,j = the probability of moving to state i from state j.
#target state D = end of conversation. We start in state D when initializing conversation.
#row vectors sum to 1, internal lists are columns.
#Conversation is a singleton. DO NOT CREATE NEW CONVERSATION OBJECTS.
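#Added illustrative sketch (not from the original author): with the one-hot state vectors
#used by talk() below, a conversation that starts in state D (end of conversation) has
#stateVector = [0, 0, 0, 0, 1], and
#    numpy.dot(topicMatrix, [0, 0, 0, 0, 1])  ->  [0.25, 0.25, 0.25, 0.25, 0.0]
#i.e. a 25% chance each of moving on to stores, manufacturers, friends, or myself, and no
#chance of ending the conversation immediately. The probabilities of leaving any given
#state sum to 1, which is what the roulette-wheel draw in talk() relies on.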
class Conversation(object):
#a. stores, b.manufacturers, c.friends, d. myself, e.end conversation
topicMatrix = [
[0.00,0.20,0.15,0.15,0.25],
[0.20,0.00,0.15,0.15,0.25],
[0.15,0.15,0.00,0.20,0.25],
[0.15,0.15,0.20,0.00,0.25],
[0.50,0.50,0.50,0.50,0.00]
]
#a. different store, b. new topic, c. end convo, d. prices
storeMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different manufacturer, b. new topic, c. end convo, d. prices
manuMatrix = [
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.25],
[0.0,0.0,0.25,0.50],
[1.0,1.0,0.25,0.00]
]
#a. different friend, b. new topic, c. end convo, d. family, e. job, /f. skills
friendMatrix = [
[0.0,0.0,0.2,0.1,0.1],
[0.0,0.0,0.2,0.2,0.2],
[0.0,0.0,0.2,0.5,0.5],
[0.5,0.5,0.2,0.0,0.2],
[0.5,0.5,0.2,0.2,0.0]
]
# friendMatrix = [
# [0.00,0.00,0.15,0.1,0.1,0.1],
# [0.00,0.00,0.15,0.2,0.2,0.2],
# [0.00,0.00,0.15,0.5,0.5,0.5],
# [0.34,0.34,0.15,0.0,0.1,0.1],
# [0.33,0.33,0.15,0.1,0.0,0.1],
# [0.33,0.33,0.25,0.1,0.1,0.0]
# ]
#a. introduction, b. new topic, c. end convo, d. myfamily, e. myjob, /f. myskills
myselfMatrix = [
[0.00,1,0.2,0.0,0.0],
[0.25,0,0.2,0.2,0.2],
[0.25,0,0.2,0.5,0.5],
[0.25,0,0.2,0.0,0.3],
[0.25,0,0.2,0.3,0.0]
]
# myselfMatrix = [
# [0.0,1,0.15,0.00,0.00,0.00],
# [0.2,0,0.15,0.20,0.20,0.20],
# [0.2,0,0.15,0.50,0.50,0.50],
# [0.2,0,0.15,0.00,0.15,0.15],
# [0.2,0,0.15,0.15,0.00,0.15],
# [0.2,0,0.15,0.15,0.15,0.00]
# ]
states = ['topic','store','manu','friend', 'myself', 'exit']
transitions = [
{'trigger' : 'toTopic', 'source' : '*', 'dest' : 'topic'},
{'trigger' : 'toStore', 'source' : 'topic', 'dest' : 'store'},
{'trigger' : 'toManu' , 'source' : 'topic', 'dest' : 'manu' },
{'trigger' : 'toFriend', 'source' : 'topic', 'dest' : 'friend' },
{'trigger' : 'toMyself', 'source' : 'topic', 'dest' : 'myself'},
{'trigger' : 'toExit', 'source' : '*', 'dest' : 'exit'}
]
def __init__(self):
self.isPlayer = False
self.firstPerson = None
self.secondPerson = None
self.target = None
self.machine = Machine(model=self, states=Conversation.states, transitions=Conversation.transitions, initial='exit')
self.menuDict = {
'topic' : [self.toStore, self.toManu, self.toFriend, self.toMyself, self.toExit],
'store' : [self.different, self.toTopic, self.toExit, self.prices],
'manu' : [self.different, self.toTopic, self.toExit, self.prices],
'friend' : [self.different, self.toTopic, self.toExit, self.family, self.job],
'myself' : [self.introduction, self.toTopic, self.toExit, self.myfamily, self.myjob]
}
self.machine.on_enter_topic('topicHandler')
self.machine.on_enter_store('storeHandler')
self.machine.on_enter_manu('manuHandler')
self.machine.on_enter_friend('friendHandler')
self.machine.on_enter_myself('myselfHandler')
self.machine.on_enter_exit('exitHandler')
def beginConversation(self, firstPerson, secondPerson, isPlayer=False):
self.isPlayer = isPlayer
self.firstPerson = firstPerson
self.secondPerson = secondPerson
self.introduction()
self.toTopic()
def introduction(self):
p2 = self.firstPerson.peopleManager(self.secondPerson)
p1 = self.secondPerson.peopleManager(self.firstPerson)
p2.name = self.secondPerson.name
p1.name = self.firstPerson.name
p2.updateOpinion(1)
p1.updateOpinion(1)
def different(self):
if self.state == 'friend':
testTarget = self.firstPerson.randomPerson(self.target)
if testTarget is not None:
self.target = testTarget.person
else:
self.target = None
elif self.state == 'manu':
testTarget = self.firstPerson.randomManu(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
elif self.state == 'store':
testTarget = self.firstPerson.randomStore(self.target)
if testTarget is not None:
self.target = testTarget.store
else:
self.target = None
def prices(self):
if self.target is not None:
firstProfile = self.firstPerson.unitManager(self.target, self.secondPerson)
secondProfile = self.secondPerson.unitManager(self.target, self.firstPerson)
firstPrices = firstProfile.getPricesWithDayNum()
secondPrices = secondProfile.getPricesWithDayNum()
firstDayNum = firstPrices[1]
secondDayNum = secondPrices[1]
if firstDayNum > secondDayNum:
prices = firstPrices[0]
secondProfile.updatePrices(prices, firstDayNum)
#thoughts
self.firstPerson.think("I told " + self.secondPerson.name + " about the prices at " + self.target.name + ".")
self.secondPerson.think(self.firstPerson.name + " told me about the prices at " + self.target.name + ".")
elif secondDayNum > firstDayNum:
prices = secondPrices[0]
firstProfile.updatePrices(prices, secondDayNum)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about the prices at " + self.target.name + ".")
self.secondPerson.think("I told " + self.firstPerson.name + " about the prices at " + self.target.name + ".")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s prices.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s prices.")
else:
if self.state == 'store':
self.firstPerson.think(self.secondPerson.name + " listened to me gripe about how I can't find anywhere to shop.")
self.secondPerson.think(self.firstPerson.name + " told me that they can't find anywhere to shop.")
elif self.state == 'manu':
self.firstPerson.think("I mentioned to " + self.secondPerson.name + " that I don't know anything about the local industry.")
self.secondPerson.think(self.firstPerson.name + " told me that they don't know much about the local industry.")
else:
self.firstPerson.think("There is a bug in conversation.prices. (not manu or store)")
self.secondPerson.think("There is a bug in conversation.prices. (not manu or store)")
def family(self):
if self.target is not None:
#info: family, people
#profiles
p1 = self.firstPerson.peopleManager(self.target)
p2 = self.secondPerson.peopleManager(self.target)
#variables
f1 = p1.getFamily()
f2 = p2.getFamily()
ff = []
#update profiles
for a, b in zip(f1, f2):
if a[-1] >= b[-1]:
ff.append(a)
else:
ff.append(b)
p1.updateFamily(*ff)
p2.updateFamily(*ff)
#thoughts
self.firstPerson.think(self.secondPerson.name + " and I gossipped about " + self.target.name + "'s family.")
self.secondPerson.think(self.firstPerson.name + " and I gossipped about " + self.target.name + "'s family.")
else:
self.firstPerson.think("I don't really know anything about my friends' families.")
self.secondPerson.think("I don't really know anything about my friends' families.")
def job(self):
if self.target is not None:
#profiles
firstProfile = self.firstPerson.peopleManager(self.target)
secondProfile = self.secondPerson.peopleManager(self.target)
#variables
firstJob = firstProfile.getJob()
secondJob = secondProfile.getJob()
#update profiles
if firstJob[1] > secondJob[1]:
secondProfile.updateJob(*firstJob)
self.firstPerson.think("I told " + self.secondPerson.name + " what " + self.target.name + " does for a living.")
self.secondPerson.think(self.firstPerson.name + " told me what " + self.target.name + " does for a living.")
elif secondJob[1] > firstJob[1]:
firstProfile.updateJob(*secondJob)
self.firstPerson.think(self.secondPerson.name + " told me what " + self.target.name + " does for a living.")
self.secondPerson.think("I told " + self.firstPerson.name + " about " + self.target.name + " does for a living.")
else:
self.firstPerson.think(self.secondPerson.name + " and I talked about " + self.target.name + "'s job.")
self.secondPerson.think(self.firstPerson.name + " and I talked about " + self.target.name + "'s job.")
else:
self.firstPerson.think("I don't know what any of my friends do for a living!")
self.secondPerson.think("I don't know what any of my friends do for a living!")
# def skills(self):
# #info: skills
# if self.target is not None:
# #profiles
# firstProfile = self.firstPerson.peopleManager(self.target)
# secondProfile = self.secondPerson.peopleManager(self.target)
# #variables
# firstSkills = firstProfile.getSkills()
# secondSkills = secondProfile.getSkills()
# #update profiles
# if firstSkills[1] > secondSkills[1]:
# secondProfile.updateSkills(*firstSkills)
# self.firstPerson.think("I told " + self.secondPerson.name + " about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# elif secondSkills[1] > firstSkills[1]:
# firstProfile.updateSkills(*secondSkills)
# self.firstPerson.think(self.secondPerson.name + " told me about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think("I told " + self.firstPerson.name + " about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think(self.secondPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# self.secondPerson.think(self.firstPerson.name + " and I talked about how good " + self.target.name + " is with their hands.")
# else:
# self.firstPerson.think("I should spend more time doing things with my friends.")
# self.secondPerson.think("I should spend more time doing things with my friends.")
def myfamily(self):
#info: family, people
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
firstOwn = self.firstPerson.peopleManager(self.firstPerson)
secondOwn = self.secondPerson.peopleManager(self.secondPerson)
#update profiles
firstProfile.updateFamily(firstOwn.getFather(), firstOwn.getMother(), firstOwn.getSpouse(), firstOwn.getSiblings(), firstOwn.getChildren())
secondProfile.updateFamily(secondOwn.getFather(), secondOwn.getMother(), secondOwn.getSpouse(), secondOwn.getSiblings(), secondOwn.getChildren())
#thoughts
self.firstPerson.think(self.secondPerson.name + " caught me up on their family life.")
self.secondPerson.think(self.firstPerson.name + " caught me up on their family life.")
def myjob(self):
#info: jobs, jobUnits, *salaries
#profiles
firstProfile = self.secondPerson.peopleManager(self.firstPerson)
secondProfile = self.firstPerson.peopleManager(self.secondPerson)
#variables
firstJob = self.firstPerson.getJob()
secondJob = self.secondPerson.getJob()
dayNum = self.firstPerson.model.getDayNum()
try:
firstJobType = firstJob.getJobType()
firstJobUnit = firstJob.getUnit()
firstJobLoc = firstJobUnit.getName()
firstSalary = firstJob.getSalary()
except:
firstJobType = "Jobhunter"
firstJobUnit = None
firstJobLoc = "home"
firstSalary = 0
try:
secondJobType = secondJob.getJobType()
secondJobUnit = secondJob.getUnit()
secondJobLoc = secondJobUnit.getName()
secondSalary = secondJob.getSalary()
except:
secondJobType = "Jobhunter"
secondJobUnit = None
secondJobLoc = "home"
secondSalary = 0
#update profiles
if dayNum > firstProfile.getJob()[1]:
firstProfile.updateJob(firstJob, dayNum)
if dayNum > firstProfile.getSalary()[1]:
firstProfile.updateSalary(firstSalary, dayNum)
if dayNum > secondProfile.getJob()[1]:
secondProfile.updateJob(secondJob, dayNum)
if dayNum > secondProfile.getSalary()[1]:
secondProfile.updateSalary(secondSalary, dayNum)
if firstJobUnit is not None:
self.secondPerson.unitManager(firstJobUnit, self.firstPerson)
if secondJobUnit is not None:
self.firstPerson.unitManager(secondJobUnit, self.secondPerson)
#thoughts
self.firstPerson.think(self.secondPerson.name + " told me about their job as a " + secondJobType + " at " + secondJobLoc + ".")
self.secondPerson.think(self.firstPerson.name + " told me about their job as a " + firstJobType + " at " + firstJobLoc + ".")
# def myskills(self):
# #info skills
# #profiles
# firstProfile = self.secondPerson.peopleManager(self.firstPerson)
# secondProfile = self.firstPerson.peopleManager(self.secondPerson)
# #variables
# firstSkills = self.firstPerson.getSkills()
# secondSkills = self.secondPerson.getSkills()
# dayNum = self.firstPerson.model.getDayNum()
# #update profiles
# if dayNum > firstProfile.getSkills()[1]:
# firstProfile.updateSkills(firstSkills, dayNum)
# if dayNum > secondProfile.getSkills()[1]:
# secondProfile.updateSkills(secondSkills, dayNum)
# #thoughts
# self.firstPerson.think(self.secondPerson.name + " and I talked shop for a while.")
# self.secondPerson.think(self.firstPerson.name + " and I talked shop for a while.")
#dialogues are chosen here, but the actual method call is in the handler (eg prices)
def talk(self, matrix, stateVector):
if self.isPlayer:
# stateVector = playerChoice
pass
else:
#get dialogue probabilities given last dialogue
probArray = np.dot(matrix, stateVector)
prob = probArray.tolist()
#choose dialogue
choice = random.random()
stateVector = [0 for i in range(len(prob))]
for i in range(len(prob)):
outcome = prob[i]
if outcome >= choice:
stateVector[i] = 1
return stateVector
else:
choice = choice - outcome
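#Added worked example of the draw above (illustrative, not original code): with
#prob = [0.25, 0.25, 0.25, 0.25, 0.0] and random.random() returning 0.6, the loop
#subtracts 0.25 twice (0.6 -> 0.35 -> 0.10), then 0.25 >= 0.10 at index 2, so the
#returned one-hot vector is [0, 0, 1, 0, 0] -- a standard roulette-wheel selection
#over the column of the transition matrix picked out by numpy.dot above.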
def topicHandler(self):
matrix = Conversation.topicMatrix
stateVector = [0,0,0,0,1]
# self.firstPerson.think("topicHandler")
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def storeHandler(self):
matrix = Conversation.storeMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("storeHandler")
self.different()
while self.state == 'store':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def manuHandler(self):
matrix = Conversation.manuMatrix
stateVector = [0,1,0,0]
# self.firstPerson.think("manuHandler")
self.different()
while self.state == 'manu':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def friendHandler(self):
matrix = Conversation.friendMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("friendHandler")
self.different()
while self.state == 'friend':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def myselfHandler(self):
matrix = Conversation.myselfMatrix
stateVector = [0,1,0,0,0]
# self.firstPerson.think("myselfHandler")
while self.state == 'myself':
stateVector = self.talk(matrix, stateVector)
for i in range(len(stateVector)):
if stateVector[i] == 1:
self.menuDict[self.state][i]()
break
def exitHandler(self):
self.isPlayer = False
Convo = Conversation()
| 40.942605 | 153 | 0.586025 | 2,147 | 18,547 | 5.05496 | 0.129017 | 0.013268 | 0.010504 | 0.00774 | 0.500691 | 0.461716 | 0.437114 | 0.394545 | 0.348567 | 0.30176 | 0 | 0.038397 | 0.289481 | 18,547 | 453 | 154 | 40.942605 | 0.785172 | 0.219335 | 0 | 0.269504 | 0 | 0 | 0.106914 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.056738 | false | 0.003546 | 0.014184 | 0 | 0.102837 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e815ba1c0be05931c138a14af77c58193d1bc5db | 3,812 | py | Python | blog/views.py | farman99ahmed/diyblog | 2e4548037c95b5563d2fdba3d05b488330a5e2b4 | [ "MIT" ] | null | null | null | blog/views.py | farman99ahmed/diyblog | 2e4548037c95b5563d2fdba3d05b488330a5e2b4 | [ "MIT" ] | null | null | null | blog/views.py | farman99ahmed/diyblog | 2e4548037c95b5563d2fdba3d05b488330a5e2b4 | [ "MIT" ] | null | null | null |
from django.shortcuts import render, redirect
from .forms import AuthorForm, BlogForm, NewUserForm
from .models import Author, Blog
from django.contrib.auth import login, authenticate, logout
from django.contrib import messages
from django.contrib.auth.forms import AuthenticationForm
from django.contrib.auth.decorators import login_required
# Create your views here.
def get_authors(request):
context = {'authors': Author.objects.all()}
return render(request, "blog/get_authors.html", context)
@login_required
def get_author(request, id):
author = Author.objects.get(pk = id)
blogs = Blog.objects.filter(author = id)
context = {'author': author, 'blogs': blogs}
return render(request, "blog/get_author.html", context)
@login_required
def post_put_author(request, id = 0):
if request.method == "GET":
if id == 0:
form = AuthorForm()
else:
author = Author.objects.get(pk = id)
form = AuthorForm(instance = author)
return render(request, "blog/post_put_authors.html", {"form": form})
else:
if id == 0:
form = AuthorForm(request.POST)
else:
author = Author.objects.get(pk = id)
form = AuthorForm(request.POST, instance = author)
if form.is_valid():
form.save()
return redirect('get_authors')
@login_required
def delete_author(request, id):
author = Author.objects.get(pk = id)
author.delete()
return redirect('get_authors')
def get_blogs(request):
context = {'blogs': Blog.objects.all()}
return render(request, "blog/get_blogs.html", context)
@login_required
def get_blog(request, id):
blog = {'blog': Blog.objects.get(pk = id)}
return render(request, "blog/get_blog.html", blog)
@login_required
def post_put_blog(request, id = 0):
if request.method == "GET":
if id == 0:
form = BlogForm()
else:
blog = Blog.objects.get(pk = id)
form = BlogForm(instance = blog)
return render(request, "blog/post_put_blogs.html", {"form": form})
else:
if id == 0:
form = BlogForm(request.POST)
else:
blog = Blog.objects.get(pk = id)
form = BlogForm(request.POST, instance = blog)
if form.is_valid():
form.save()
return redirect('get_blogs')
@login_required
def delete_blog(request, id):
blog = Blog.objects.get(pk = id)
blog.delete()
return redirect('get_blogs')
def register_request(request):
if request.method == "POST":
form = NewUserForm(request.POST)
if form.is_valid():
user = form.save()
login(request, user)
messages.success(request, "Registration successful." )
return redirect("get_blogs")
messages.error(request, "Unsuccessful registration. Invalid information.")
form = NewUserForm()
return render (request=request, template_name="blog/register.html", context={"register_form":form})
def login_request(request):
if request.method == "POST":
form = AuthenticationForm(request, data=request.POST)
if form.is_valid():
username = form.cleaned_data.get('username')
password = form.cleaned_data.get('password')
user = authenticate(username=username, password=password)
if user is not None:
login(request, user)
messages.info(request, f"You are now logged in as {username}.")
return redirect("get_blogs")
else:
messages.error(request,"Invalid username or password.")
else:
messages.error(request,"Invalid username or password.")
form = AuthenticationForm()
return render(request=request, template_name="blog/login.html", context={"login_form":form})
def logout_request(request):
logout(request)
messages.info(request, "You have successfully logged out.")
return redirect("get_blogs")
| 33.147826 | 100 | 0.670776 | 476 | 3,812 | 5.277311 | 0.168067 | 0.038217 | 0.06051 | 0.044586 | 0.462182 | 0.392516 | 0.308121 | 0.216561 | 0.157643 | 0.029459 | 0 | 0.00198 | 0.204879 | 3,812 | 114 | 101 | 33.438596 | 0.82679 | 0.006034 | 0 | 0.455446 | 0 | 0 | 0.135727 | 0.018748 | 0 | 0 | 0 | 0 | 0 | 1 | 0.108911 | false | 0.039604 | 0.069307 | 0 | 0.326733 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e81917ac02c5b07e90d8b7911baf13d1cd91a319 | 574 | py | Python | downloadMusic/main.py | yaosir0317/my_first | 387fe21aa529bca1d08ed45e13269aca23dce251 | [ "MIT" ] | null | null | null | downloadMusic/main.py | yaosir0317/my_first | 387fe21aa529bca1d08ed45e13269aca23dce251 | [ "MIT" ] | null | null | null | downloadMusic/main.py | yaosir0317/my_first | 387fe21aa529bca1d08ed45e13269aca23dce251 | [ "MIT" ] | null | null | null |
from enum import Enum
import requests
class MusicAPP(Enum):
qq = "qq"
wy = "netease"
PRE_URL = "http://www.musictool.top/"
headers = {"User-Agent": "Mozilla/5.0 (Macintosh; Intel Mac OS X 10_15_6) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/85.0.4183.102 Safari/537.36"}
def get_music_list(name, app, page=1):
data = {"input": name, "filter": "name", "type": app, "page": page}
resp = requests.post(url=PRE_URL, headers=headers, data=data)
print(resp.text)
print(resp.json())
if __name__ == '__main__':
get_music_list("画", MusicAPP.qq)
| 24.956522 | 149 | 0.663763 | 89 | 574 | 4.101124 | 0.674157 | 0.054795 | 0.065753 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0.058577 | 0.167247 | 574 | 22 | 150 | 26.090909 | 0.705021 | 0 | 0 | 0 | 0 | 0.071429 | 0.343206 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.071429 | false | 0 | 0.142857 | 0 | 0.428571 | 0.142857 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |
e81d5f97eebe858da4677a4562a912397f01a885 | 5,487 | py | Python | rdkit/ML/InfoTheory/BitRank.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [ "BSD-3-Clause" ] | 1,609 | 2015-01-05T02:41:13.000Z | 2022-03-30T21:57:24.000Z | rdkit/ML/InfoTheory/BitRank.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [ "BSD-3-Clause" ] | 3,412 | 2015-01-06T12:13:33.000Z | 2022-03-31T17:25:41.000Z | rdkit/ML/InfoTheory/BitRank.py | kazuyaujihara/rdkit | 06027dcd05674787b61f27ba46ec0d42a6037540 | [ "BSD-3-Clause" ] | 811 | 2015-01-11T03:33:48.000Z | 2022-03-28T11:57:49.000Z |
#
# Copyright (C) 2001,2002,2003 greg Landrum and Rational Discovery LLC
#
""" Functionality for ranking bits using info gains
**Definitions used in this module**
- *sequence*: an object capable of containing other objects which supports
__getitem__() and __len__(). Examples of these include lists, tuples, and
Numeric arrays.
- *IntVector*: an object containing integers which supports __getitem__() and
__len__(). Examples include lists, tuples, Numeric Arrays, and BitVects.
**NOTE**: Neither *sequences* nor *IntVectors* need to support item assignment.
It is perfectly acceptable for them to be read-only, so long as they are
random-access.
"""
import numpy
from rdkit.ML.InfoTheory import entropy
def FormCounts(bitVects, actVals, whichBit, nPossibleActs, nPossibleBitVals=2):
""" generates the counts matrix for a particular bit
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- whichBit: an integer, the bit number to use.
- nPossibleActs: the (integer) number of possible activity values.
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
**Returns**
a Numeric array with the counts
**Notes**
This is really intended for internal use.
"""
if len(bitVects) != len(actVals):
raise ValueError('var and activity lists should be the same length')
res = numpy.zeros((nPossibleBitVals, nPossibleActs), numpy.integer)
for i in range(len(bitVects)):
res[bitVects[i][whichBit], actVals[i]] += 1
return res
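# Added illustrative example (not part of the original module): with
#   bitVects = [[1, 0], [0, 1], [1, 1]]   # three points, two bits each
#   actVals  = [0, 1, 1]
# FormCounts(bitVects, actVals, whichBit=0, nPossibleActs=2) returns
#   [[0, 1],
#    [1, 1]]
# rows index the value of bit 0 and columns index the activity; e.g. res[1][1] == 1
# because exactly one point has bit 0 set and activity 1.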
def CalcInfoGains(bitVects, actVals, nPossibleActs, nPossibleBitVals=2):
""" Calculates the information gain for a set of points and activity values
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- nPossibleActs: the (integer) number of possible activity values.
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
**Returns**
a list of floats
"""
if len(bitVects) != len(actVals):
raise ValueError('var and activity lists should be the same length')
nBits = len(bitVects[0])
res = numpy.zeros(nBits, numpy.float)
for bit in range(nBits):
counts = FormCounts(bitVects, actVals, bit, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
res[bit] = entropy.InfoGain(counts)
return res
def RankBits(bitVects, actVals, nPossibleBitVals=2, metricFunc=CalcInfoGains):
""" Rank a set of bits according to a metric function
**Arguments**
- bitVects: a *sequence* containing *IntVectors*
- actVals: a *sequence*
- nPossibleBitVals: (optional) if specified, this integer provides the maximum
value attainable by the (increasingly inaccurately named) bits in _bitVects_.
- metricFunc: (optional) the metric function to be used. See _CalcInfoGains()_
for a description of the signature of this function.
**Returns**
A 2-tuple containing:
- the relative order of the bits (a list of ints)
- the metric calculated for each bit (a list of floats)
"""
nPossibleActs = max(actVals) + 1
metrics = metricFunc(bitVects, actVals, nPossibleActs, nPossibleBitVals=nPossibleBitVals)
bitOrder = list(numpy.argsort(metrics))
bitOrder.reverse()
return bitOrder, metrics
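# Added minimal usage sketch (illustrative; the data values are made up):
#   bitVects = [[1, 0, 1], [0, 1, 1], [1, 0, 0], [0, 1, 0]]
#   actVals = [1, 0, 1, 0]
#   order, gains = RankBits(bitVects, actVals)
# `order` lists bit indices from most to least informative and `gains` holds the
# corresponding info gains; here bits 0 and 1 separate the two activity classes
# perfectly while bit 2 carries no information.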
def AnalyzeSparseVects(bitVects, actVals):
""" #DOC
**Arguments**
- bitVects: a *sequence* containing SBVs
- actVals: a *sequence*
**Returns**
a list of floats
**Notes**
- these need to be bit vects and binary activities
"""
nPts = len(bitVects)
if nPts != len(actVals):
raise ValueError('var and activity lists should be the same length')
nBits = bitVects[0].GetSize()
actives = numpy.zeros(nBits, numpy.integer)
inactives = numpy.zeros(nBits, numpy.integer)
nActives, nInactives = 0, 0
for i in range(nPts):
sig, act = bitVects[i], actVals[i]
onBitList = sig.GetOnBits()
if act:
for bit in onBitList:
actives[bit] += 1
nActives += 1
else:
for bit in onBitList:
inactives[bit] += 1
nInactives += 1
resTbl = numpy.zeros((2, 2), numpy.integer)
res = []
gains = []
for bit in range(nBits):
nAct, nInact = actives[bit], inactives[bit]
if nAct or nInact:
resTbl[0, 0] = nAct
resTbl[1, 0] = nPts - nAct
resTbl[0, 1] = nInact
resTbl[1, 1] = nPts - nInact
gain = entropy.InfoGain(resTbl)
gains.append(gain)
res.append((bit, gain, nAct, nInact))
return res, gains
def SparseRankBits(bitVects, actVals, metricFunc=AnalyzeSparseVects):
""" Rank a set of bits according to a metric function
**Arguments**
- bitVects: a *sequence* containing SBVs
- actVals: a *sequence*
- metricFunc: (optional) the metric function to be used. See _SparseCalcInfoGains()_
for a description of the signature of this function.
**Returns**
A 2-tuple containing:
- the relative order of the bits (a list of ints)
- the metric calculated for each bit (a list of floats)
**Notes**
- these need to be bit vects and binary activities
"""
info, metrics = metricFunc(bitVects, actVals)
bitOrder = list(numpy.argsort(metrics))
bitOrder.reverse()
return bitOrder, info
| 27.163366 | 97 | 0.68635 | 682 | 5,487 | 5.483871 | 0.269795 | 0.024064 | 0.01123 | 0.034759 | 0.506417 | 0.477273 | 0.459091 | 0.459091 | 0.459091 | 0.341979 | 0 | 0.008619 | 0.217605 | 5,487 | 201 | 98 | 27.298507 | 0.862567 | 0.536905 | 0 | 0.241935 | 0 | 0 | 0.060657 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0.080645 | false | 0 | 0.032258 | 0 | 0.193548 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | null | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 0 | 1 | 0 |