| id | text | dataset_id |
|---|---|---|
132896 | <gh_stars>1-10
from retic.runtime import *
from retic.transient import *
from retic.typing import *
def check1(val):
try:
val.N
return val
except:
raise CheckError(val)
def check0(val):
try:
val.setrecursionlimit
return val
except:
raise CheckError(val)
def check4(val):
try:
val.parse_args
return val
except:
raise CheckError(val)
def check3(val):
try:
val.OptionParser
return val
except:
raise CheckError(val)
def check2(val):
try:
val.Rest
return val
except:
raise CheckError(val)
from compat import xrange
import util
from math import sin, cos, sqrt
import optparse
import time
import sys
check_type_function(check0(sys).setrecursionlimit)(100000)
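# A Stream is a lazy linked list: N holds the current value and Rest is a thunk that builds the next node on demand; the functions below use it for a prime sieve.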
class Stream(retic_actual(object)):
def __init__(self, n, rest):
self.N = n
self.Rest = rest
__init__ = check_type_function(__init__)
Stream = check_type_class(Stream, ['__init__'])
def CountFrom(n):
return Stream(n, (lambda : CountFrom((n + 1))))
CountFrom = check_type_function(CountFrom)
def Sift(n, s):
check_type_int(n)
f = check1(s).N
while ((f % n) == 0):
s = check_type_function(check2(s).Rest)()
f = check1(s).N
return Stream(f, (lambda : Sift(n, check_type_function(check2(s).Rest)())))
Sift = check_type_function(Sift)
def Sieve(s):
return Stream(check1(s).N, (lambda : Sieve(Sift(check_type_int(check1(s).N), check_type_function(check2(s).Rest)()))))
Sieve = check_type_function(Sieve)
def GetPrimes():
return Sieve(CountFrom(2))
GetPrimes = check_type_function(GetPrimes)
def StreamGet(s, n):
while (n > 0):
s = check_type_function(check2(s).Rest)()
n = (n - 1)
return check1(s).N
StreamGet = check_type_function(StreamGet)
def main(arg, timer):
times = []
last = (- 1)
for i in check_type_function(xrange)(arg):
t0 = check_type_function(timer)()
last = StreamGet(GetPrimes(), 9999)
tk = check_type_function(timer)()
check_type_void(check_type_function(times.append)((tk - t0)))
check_type_function(print)(last)
return times
main = check_type_function(main)
if (__name__ == '__main__'):
parser = check_type_function(check3(optparse).OptionParser)(usage='%prog [options]', description='Test the performance of the Sieve benchmark')
check_type_function(util.add_standard_options_to)(parser)
(options, args) = check_type_tuple(check_type_function(check4(parser).parse_args)(), 2)
check_type_function(util.run_benchmark)(options, 1, main)
| StarcoderdataPython |
6498513 | from sklearn.metrics import mean_absolute_error, mean_squared_error
from sklearn.ensemble import RandomForestRegressor
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
import pandas as pd
import json
import pickle
import numpy as np
dataset = pd.read_csv('C:/xampp/htdocs/subjectsuggestionsystem/public/run_python/train_format_phase1.csv')
dataset_test = pd.read_csv('C:/xampp/htdocs/subjectsuggestionsystem/public/run_python/test_format_phase2.csv')
dataset = dataset.dropna()
dataset_test = dataset_test.dropna()
print("Tập dữ liệu ban đầu hiển thị 10 dòng")
print(dataset.head(10))
print("\n")
X = dataset.iloc[:, 0:3].values
y = dataset.iloc[:, 3].values
print("Phân ra tập huấn luyện")
print(X)
print("\n")
print("Phân ra tập kiểm tra")
print(y)
print("\n")
X_train, X_test, y_train, y_test = train_test_split(
X, y, test_size=0.2, random_state=0)
# Feature Scaling
# sc = StandardScaler()
# X_train = sc.fit_transform(X_train)
# X_test = sc.transform(X_test)
# The most important parameter of the RandomForestRegressor class is the n_estimators parameter.
# This parameter determines the number of trees in the random forest.
# Start with n_estimators=20 to see how the algorithm performs.
regressor = RandomForestRegressor(n_estimators=5, random_state=100)
regressor.fit(X_train, y_train)
y_pred = regressor.predict(X_test)
print('Prediction results after training the model')
print(y_pred)
print("\n")
# Convert and export the results to a JSON file
# class NumpyEncoder(json.JSONEncoder):
# def default(self, obj):
# if isinstance(obj, np.integer):
# return int(obj)
# elif isinstance(obj, np.floating):
# return float(obj)
# elif isinstance(obj, np.ndarray):
# return obj.tolist()
# return json.JSONEncoder.default(self, obj)
# dumped = json.dumps(y_pred, cls=NumpyEncoder)
# with open('result.json', 'w') as f:
# json.dump(dumped, f)
print('Another dataset brought in for testing, showing the first 10 rows')
print(dataset_test.head(10))
data_train = dataset_test.iloc[:, 0:3].values
data_test = dataset_test.iloc[:, 3].values
print("\n")
print("Đầu vào các thuộc tính trích từ tập dữ liệu khác")
print(data_train)
# ran_data_arr = np.array(data_train)
# ran_data_num = ran_data_arr.reshape(1, -1)
pred_single_row = regressor.predict(data_train)
#pred_single_row = round(float(pred_single_row), 2)
# print(pred_single_row)
# print("\n")
print("Kết quả dự đoán")
print(pred_single_row)
print("\n")
# input_msv = int(input("Ma SV: "))
# input_mmh = int(input("Ma MH: "))
# input_nhhk = int(input("Ma NHHK: "))
# ran_data = [input_msv, input_mmh, input_nhhk]
# # ran_data = [1607138, 234, 20192]
# ran_data_arr = np.array(ran_data)
# ran_data_num = ran_data_arr.reshape(1, -1)
# pred_single_row = regressor.predict(ran_data_num)
# pred_single_row = round(float(pred_single_row), 2)
# print("Kết quả hồi quy")
# print(pred_single_row)
# print("\n")
# Mean absolute error
print('Mean Absolute Error:', mean_absolute_error(y_test, y_pred))
# Mean squared error
print('Mean Squared Error:', mean_squared_error(y_test, y_pred))
# Root mean squared error
print('Root Mean Squared Error:', np.sqrt(mean_squared_error(y_test, y_pred)))
#print("Lưu mô hình vào file result.pickle")
#with open("train_model.pickle", "wb") as file:
# pickle.dump(regressor, file)
# with open('result.json', 'r') as j:
# json_data = json.load(j)
# print(json_data)
| StarcoderdataPython |
5024783 | <filename>src/scMDC.py
from sklearn.metrics.pairwise import paired_distances
from sklearn.decomposition import PCA
from sklearn import metrics
from sklearn.cluster import KMeans
import torch
import torch.nn as nn
from torch.autograd import Variable
from torch.nn import Parameter
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader, TensorDataset
from layers import NBLoss, ZINBLoss, MeanAct, DispAct
import numpy as np
import math, os
from utils import cluster_acc, torch_PCA
from preprocess import read_dataset, normalize
import scanpy as sc
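# buildNetwork stacks Linear layers with the chosen activation between them; for "encode"-type networks the final layer is left linear (no activation).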
def buildNetwork(layers, type, activation="relu"):
net = []
for i in range(1, len(layers)):
net.append(nn.Linear(layers[i-1], layers[i]))
if type=="encode" and i==len(layers)-1:
break
if activation=="relu":
net.append(nn.ReLU())
elif activation=="sigmoid":
net.append(nn.Sigmoid())
return nn.Sequential(*net)
class scMultiCluster(nn.Module):
def __init__(self, input_dim1, input_dim2, zencode_dim=[64,16], zdecode_dim=[16,64],
encodeLayer1=[64, 32, 12], decodeLayer1=[12, 32, 64], encodeLayer2=[8], decodeLayer2=[8],
activation="relu", sigma1=2.5, sigma2=0., alpha=1., gamma1=.01, gamma2=.1, gamma3=0.001, cutoff = 0.5):
super(scMultiCluster, self).__init__()
self.cutoff = cutoff
self.activation = activation
self.sigma1 = sigma1
self.sigma2 = sigma2
self.alpha = alpha
self.gamma1 = gamma1
self.gamma2 = gamma2
self.gamma3 = gamma3
self.z_dim = zencode_dim[-1]
self.encoder1 = buildNetwork([input_dim1]+encodeLayer1, type="encode", activation=activation)
self.decoder1 = buildNetwork(decodeLayer1, type="decode", activation=activation)
self.encoder2 = buildNetwork([input_dim2]+encodeLayer2, type="encode", activation=activation)
self.decoder2 = buildNetwork(decodeLayer2, type="decode", activation=activation)
self.latent_enc = buildNetwork([encodeLayer1[-1]+encodeLayer2[-1]]+zencode_dim, type="encode", activation=activation)
self.latent_dec = buildNetwork(zdecode_dim+[encodeLayer1[-1]+encodeLayer2[-1]], type="encode", activation=activation)
self.dec_mean1 = nn.Sequential(nn.Linear(decodeLayer1[-1], input_dim1), MeanAct())
self.dec_disp1 = nn.Sequential(nn.Linear(decodeLayer1[-1], input_dim1), DispAct())
self.dec_mean2 = nn.Sequential(nn.Linear(decodeLayer2[-1], input_dim2), MeanAct())
self.dec_disp2 = nn.Sequential(nn.Linear(decodeLayer2[-1], input_dim2), DispAct())
self.dec_pi1 = nn.Sequential(nn.Linear(decodeLayer1[-1], input_dim1), nn.Sigmoid())
self.zinb_loss = ZINBLoss()
self.NBLoss = NBLoss()
self.mse = nn.MSELoss()
def save_model(self, path):
torch.save(self.state_dict(), path)
def load_model(self, path):
pretrained_dict = torch.load(path, map_location=lambda storage, loc: storage)
model_dict = self.state_dict()
pretrained_dict = {k: v for k, v in pretrained_dict.items() if k in model_dict}
model_dict.update(pretrained_dict)
self.load_state_dict(model_dict)
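    # Soft cluster assignment: q_ij is the probability of sample i belonging to cluster j,
    # computed from a Student's t kernel over distances to the cluster centers mu (as in DEC).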
def soft_assign(self, z):
q = 1.0 / (1.0 + torch.sum((z.unsqueeze(1) - self.mu)**2, dim=2) / self.alpha)
q = q**((self.alpha+1.0)/2.0)
q = (q.t() / torch.sum(q, dim=1)).t()
return q
def cal_latent(self, z):
sum_y = torch.sum(torch.square(z), dim=1)
num = -2.0 * torch.matmul(z, z.t()) + torch.reshape(sum_y, [-1, 1]) + sum_y
num = num / self.alpha
num = torch.pow(1.0 + num, -(self.alpha + 1.0) / 2.0)
zerodiag_num = num - torch.diag(torch.diag(num))
latent_p = (zerodiag_num.t() / torch.sum(zerodiag_num, dim=1)).t()
return num, latent_p
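    # The target distribution sharpens q by squaring and renormalizing, so training pushes
    # samples toward their high-confidence clusters.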
def target_distribution(self, q):
p = q**2 / q.sum(0)
return (p.t() / p.sum(1)).t()
def forward(self, x1, x2):
h1 = self.encoder1(x1+torch.randn_like(x1) * self.sigma1)
h2 = self.encoder2(x2+torch.randn_like(x2) * self.sigma2)
h1_ = self.decoder1(h1)
mean1 = self.dec_mean1(h1_)
disp1 = self.dec_disp1(h1_)
h2_ = self.decoder2(h2)
mean2 = self.dec_mean2(h2_)
disp2 = self.dec_disp2(h2_)
pi1 = self.dec_pi1(h1_)
h10 = self.encoder1(x1)
h20 = self.encoder2(x2)
combine_latent0 = torch.cat([h10, h20], dim=-1)
z0 = self.latent_enc(combine_latent0)
combine_latent0_ = self.latent_dec(z0)
q = self.soft_assign(z0)
num, lq = self.cal_latent(z0)
return z0, q, num, lq, mean1, mean2, disp1, disp2, pi1, combine_latent0, combine_latent0_
def forward_AE(self, x1, x2):
h1 = self.encoder1(x1+torch.randn_like(x1) * self.sigma1)
h2 = self.encoder2(x2+torch.randn_like(x2) * self.sigma2)
h1_ = self.decoder1(h1)
mean1 = self.dec_mean1(h1_)
disp1 = self.dec_disp1(h1_)
h2_ = self.decoder2(h2)
mean2 = self.dec_mean2(h2_)
disp2 = self.dec_disp2(h2_)
pi1 = self.dec_pi1(h1_)
h10 = self.encoder1(x1)
h20 = self.encoder2(x2)
combine_latent0 = torch.cat([h10, h20], dim=-1)
z0 = self.latent_enc(combine_latent0)
combine_latent0_ = self.latent_dec(z0)
num, lq = self.cal_latent(z0)
return z0, num, lq, mean1, mean2, disp1, disp2, pi1, combine_latent0, combine_latent0_
def encodeBatch(self, X1, X2, batch_size=256):
use_cuda = torch.cuda.is_available()
if use_cuda:
self.cuda()
encoded = []
num = X1.shape[0]
num_batch = int(math.ceil(1.0*X1.shape[0]/batch_size))
for batch_idx in range(num_batch):
x1batch = X1[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
x2batch = X2[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
inputs1 = Variable(x1batch)
inputs2 = Variable(x2batch)
z,_,_,_,_,_,_,_,_,_ = self.forward_AE(inputs1, inputs2)
encoded.append(z.data)
encoded = torch.cat(encoded, dim=0)
return encoded
def cluster_loss(self, p, q):
def kld(target, pred):
return torch.mean(torch.sum(target*torch.log(target/(pred+1e-6)), dim=-1))
kldloss = kld(p, q)
return kldloss
def kldloss(self, p, q):
c1 = -torch.sum(p * torch.log(q))
c2 = -torch.sum(p * torch.log(p))
l = c1 - c2
return l
def pretrain_autoencoder(self, X1, X_raw1, sf1, X2, X_raw2, sf2,
batch_size=256, lr=0.005, epochs=400, ae_save=True, ae_weights='AE_weights.pth.tar'):
num_batch = int(math.ceil(1.0*X1.shape[0]/batch_size))
use_cuda = torch.cuda.is_available()
if use_cuda:
self.cuda()
dataset = TensorDataset(torch.Tensor(X1), torch.Tensor(X_raw1), torch.Tensor(sf1), torch.Tensor(X2), torch.Tensor(X_raw2), torch.Tensor(sf2))
dataloader = DataLoader(dataset, batch_size=batch_size, shuffle=True)
print("Pretraining stage")
optimizer = optim.Adam(filter(lambda p: p.requires_grad, self.parameters()), lr=lr, amsgrad=True)
counts = 0
for epoch in range(epochs):
for batch_idx, (x1_batch, x_raw1_batch, sf1_batch, x2_batch, x_raw2_batch, sf2_batch) in enumerate(dataloader):
x1_tensor = Variable(x1_batch).cuda()
x_raw1_tensor = Variable(x_raw1_batch).cuda()
sf1_tensor = Variable(sf1_batch).cuda()
x2_tensor = Variable(x2_batch).cuda()
x_raw2_tensor = Variable(x_raw2_batch).cuda()
sf2_tensor = Variable(sf2_batch).cuda()
zbatch, z_num, lqbatch, mean1_tensor, mean2_tensor, disp1_tensor, disp2_tensor, pi1_tensor, combine_latent0, combine_latent0_ = self.forward_AE(x1_tensor, x2_tensor)
#recon_loss1 = self.mse(mean1_tensor, x1_tensor)
recon_loss1 = self.zinb_loss(x=x_raw1_tensor, mean=mean1_tensor, disp=disp1_tensor, pi=pi1_tensor, scale_factor=sf1_tensor)
#recon_loss2 = self.mse(mean2_tensor, x2_tensor)
recon_loss2 = self.NBLoss(x=x_raw2_tensor, mean=mean2_tensor, disp=disp2_tensor, scale_factor=sf2_tensor)
recon_loss_latent = self.mse(combine_latent0_, combine_latent0)
lpbatch = self.target_distribution(lqbatch)
lqbatch = lqbatch + torch.diag(torch.diag(z_num))
lpbatch = lpbatch + torch.diag(torch.diag(z_num))
kl_loss = self.kldloss(lpbatch, lqbatch)
if counts > epochs * self.cutoff:
loss = recon_loss1 + recon_loss2 + recon_loss_latent * self.gamma2 + kl_loss * self.gamma3
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Pretrain epoch [{}/{}], ZINB loss:{:.4f}, NB loss:{:.4f}, latent MSE loss:{:.8f}, KL loss:{:.8f}'.format(
batch_idx+1, epoch+1, recon_loss1.item(), recon_loss2.item(), recon_loss_latent.item(), kl_loss.item()))
else:
loss = recon_loss1 + recon_loss2
optimizer.zero_grad()
loss.backward()
optimizer.step()
print('Pretrain epoch [{}/{}], ZINB loss:{:.4f}, NB loss:{:.4f}'.format(
batch_idx+1, epoch+1, recon_loss1.item(), recon_loss2.item()))
counts +=1
if ae_save:
torch.save({'ae_state_dict': self.state_dict(),
'optimizer_state_dict': optimizer.state_dict()}, ae_weights)
def save_checkpoint(self, state, index, filename):
newfilename = os.path.join(filename, 'FTcheckpoint_%d.pth.tar' % index)
torch.save(state, newfilename)
def fit(self, X1, X_raw1, sf1, X2, X_raw2, sf2, y=None, lr=.1, n_clusters = 4,
batch_size=256, num_epochs=10, update_interval=1, tol=1e-3, save_dir=""):
'''X: tensor data'''
use_cuda = torch.cuda.is_available()
if use_cuda:
self.cuda()
print("Clustering stage")
X1 = torch.tensor(X1).cuda()
X_raw1 = torch.tensor(X_raw1).cuda()
sf1 = torch.tensor(sf1).cuda()
X2 = torch.tensor(X2).cuda()
X_raw2 = torch.tensor(X_raw2).cuda()
sf2 = torch.tensor(sf2).cuda()
self.mu = Parameter(torch.Tensor(n_clusters, self.z_dim))
optimizer = optim.Adadelta(filter(lambda p: p.requires_grad, self.parameters()), lr=lr, rho=.95)
print("Initializing cluster centers with kmeans.")
kmeans = KMeans(n_clusters, n_init=20)
Zdata = self.encodeBatch(X1, X2, batch_size=batch_size)
self.y_pred = kmeans.fit_predict(Zdata.data.cpu().numpy())
self.y_pred_last = self.y_pred
self.mu.data.copy_(torch.Tensor(kmeans.cluster_centers_))
if y is not None:
acc = np.round(cluster_acc(y, self.y_pred), 5)
nmi = np.round(metrics.normalized_mutual_info_score(y, self.y_pred), 5)
ari = np.round(metrics.adjusted_rand_score(y, self.y_pred), 5)
print('Initializing k-means: ACC= %.4f, NMI= %.4f, ARI= %.4f' % (acc, nmi, ari))
self.train()
num = X1.shape[0]
num_batch = int(math.ceil(1.0*X1.shape[0]/batch_size))
final_acc, final_nmi, final_ari, final_epoch = 0, 0, 0, 0
for epoch in range(num_epochs):
if epoch%update_interval == 0:
Zdata = self.encodeBatch(X1, X2, batch_size=batch_size)
q = self.soft_assign(Zdata)
p = self.target_distribution(q).data
                # evaluate the clustering performance
self.y_pred = torch.argmax(q, dim=1).data.cpu().numpy()
if y is not None:
final_acc = acc = np.round(cluster_acc(y, self.y_pred), 5)
                    final_nmi = nmi = np.round(metrics.normalized_mutual_info_score(y, self.y_pred), 5)
                    final_ari = ari = np.round(metrics.adjusted_rand_score(y, self.y_pred), 5)
                    final_epoch = epoch + 1
print('Clustering %d: ACC= %.4f, NMI= %.4f, ARI= %.4f' % (epoch+1, acc, nmi, ari))
# save current model
if (epoch>0 and delta_label < tol) or epoch%10 == 0:
self.save_checkpoint({'epoch': epoch+1,
'state_dict': self.state_dict(),
'mu': self.mu,
'p': p,
'q': q,
'y_pred': self.y_pred,
'y_pred_last': self.y_pred_last,
'y': y
}, epoch+1, filename=save_dir)
# check stop criterion
delta_label = np.sum(self.y_pred != self.y_pred_last).astype(np.float32) / num
self.y_pred_last = self.y_pred
if epoch>0 and delta_label < tol:
print('delta_label ', delta_label, '< tol ', tol)
print("Reach tolerance threshold. Stopping training.")
break
# train 1 epoch for clustering loss
train_loss = 0.0
recon_loss1_val = 0.0
recon_loss2_val = 0.0
recon_loss_latent_val = 0.0
cluster_loss_val = 0.0
kl_loss_val = 0.0
for batch_idx in range(num_batch):
x1_batch = X1[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
x_raw1_batch = X_raw1[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
sf1_batch = sf1[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
x2_batch = X2[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
x_raw2_batch = X_raw2[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
sf2_batch = sf2[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
pbatch = p[batch_idx*batch_size : min((batch_idx+1)*batch_size, num)]
optimizer.zero_grad()
inputs1 = Variable(x1_batch)
rawinputs1 = Variable(x_raw1_batch)
sfinputs1 = Variable(sf1_batch)
inputs2 = Variable(x2_batch)
rawinputs2 = Variable(x_raw2_batch)
sfinputs2 = Variable(sf2_batch)
target1 = Variable(pbatch)
zbatch, qbatch, z_num, lqbatch, mean1_tensor, mean2_tensor, disp1_tensor, disp2_tensor, pi1_tensor, combine_latent0, combine_latent0_ = self.forward(inputs1, inputs2)
cluster_loss = self.cluster_loss(target1, qbatch)
#recon_loss1 = self.mse(mean1_tensor, inputs1)
recon_loss1 = self.zinb_loss(x=rawinputs1, mean=mean1_tensor, disp=disp1_tensor, pi=pi1_tensor, scale_factor=sfinputs1)
#recon_loss2 = self.mse(mean2_tensor, inputs2)
recon_loss2 = self.NBLoss(x=rawinputs2, mean=mean2_tensor, disp=disp2_tensor, scale_factor=sfinputs2)
recon_loss_latent = self.mse(combine_latent0_, combine_latent0)
target2 = self.target_distribution(lqbatch)
lqbatch = lqbatch + torch.diag(torch.diag(z_num))
target2 = target2 + torch.diag(torch.diag(z_num))
kl_loss = self.kldloss(target2, lqbatch)
loss = recon_loss_latent * self.gamma2 + cluster_loss * self.gamma1 + kl_loss * self.gamma3 + recon_loss1 + recon_loss2
loss.backward()
torch.nn.utils.clip_grad_norm_(self.mu, 1)
optimizer.step()
cluster_loss_val += cluster_loss.data * len(inputs1)
recon_loss1_val += recon_loss1.data * len(inputs1)
recon_loss2_val += recon_loss2.data * len(inputs2)
recon_loss_latent_val += recon_loss_latent.data * len(inputs1)
kl_loss_val += kl_loss.data * len(inputs1)
train_loss = recon_loss1_val + recon_loss2_val + recon_loss_latent_val + cluster_loss_val + kl_loss_val
print("#Epoch %3d: Total: %.4f Clustering Loss: %.8f ZINB Loss: %.4f NB Loss: %.4f Latent MSE Loss: %.4f KL Loss: %.4f" % (
epoch + 1, train_loss / num, cluster_loss_val / num, recon_loss1_val / num, recon_loss2_val / num, recon_loss_latent_val / num, kl_loss_val / num))
return self.y_pred, final_acc, final_nmi, final_ari, final_epoch
| StarcoderdataPython |
5052534 | <reponame>jl45621/coach
import mxnet as mx
import numpy as np
import os
import pytest
import sys
sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
from rl_coach.architectures.mxnet_components.heads.head import NormalizedRSSInitializer
@pytest.mark.unit_test
def test_normalized_rss_initializer():
target_rss = 0.5
units = 10
dense = mx.gluon.nn.Dense(units=units, weight_initializer=NormalizedRSSInitializer(target_rss))
dense.initialize()
input_data = mx.random.uniform(shape=(25, 5))
output_data = dense(input_data)
weights = dense.weight.data()
assert weights.shape == (10, 5)
rss = weights.square().sum(axis=1).sqrt()
np.testing.assert_almost_equal(rss.asnumpy(), np.tile(target_rss, units))
| StarcoderdataPython |
8133275 | <filename>assignment-5-discreteevent-simulation-marctheshark3/partB/monte-carlo-simulation(approximate).py
import numpy as np
from scipy.interpolate import UnivariateSpline
import seaborn as sns
import matplotlib.pyplot as plt
import matplotlib.mlab as mlab
import scipy.stats as st
import TransitionMatrix as TM
from TransitionMatrix import SetTransitionMatrix_NULL
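# configuration applies the chutes/ladders of board layout m (1 or 2) to the square just landed on; squares with no chute or ladder come back unchanged.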
def configuration(m,spaces):
if m == 1:
if spaces == 4:
spaces = 20
elif spaces == 17 or spaces == 24:
spaces = 38
elif spaces == 37:
spaces = 48
elif spaces == 42:
spaces = 15
elif spaces == 62 or spaces == 69:
spaces = 71
elif spaces == 84:
spaces = 92
elif spaces == 97:
spaces = 94
return spaces
elif m == 2:
if spaces == 4:
spaces = 13
elif spaces == 17 or spaces == 24:
spaces =30
elif spaces == 37:
spaces = 48
elif spaces == 42:
spaces = 15
elif spaces == 62 or spaces == 69:
spaces = 55
elif spaces == 84:
spaces = 75
elif spaces == 97:
spaces = 70
return spaces
    else:
        return spaces
def num_gen(m):
turn_stats = [] # setting turn_stats for every game
spaces = 0
turns = 0
move_bank = []
i = 0
#while turns < 104:
for turns in range(1,500):
        dice = np.random.randint(1, 7)  # roll a six-sided die; randint's upper bound is exclusive
        # to keep track of how many turns it takes
move_bank.insert(turns, dice)
#print(spaces,"spaces")
#print(dice,"dice",turns,"turns")
i = i + 1
if dice == 1:
#print("beforeinside",spaces)
spaces = spaces + dice
#print("afterinside", spaces)
            spaces = configuration(m, spaces)
if spaces > 104:
#print('broken', turns)
turn_stats.insert(i, turns)
break
elif dice == 2:
#print("beforeinside", spaces)
spaces = spaces + dice
#print("afterinside", spaces)
            spaces = configuration(m, spaces)
if spaces > 104:
#print('broken', turns)
turn_stats.insert(i, turns)
break
# elif spaces > 104:
# print("breaking the law", turns)
# turn_stats.insert(i, turns) # adding only to count turns
# break
elif dice == 3:
#print("beforeinside", spaces)
spaces = spaces + dice
#print("afterinside", spaces)
            spaces = configuration(m, spaces)
if spaces > 104:
#print('broken', turns)
turn_stats.insert(i, turns)
break
# elif spaces > 104:
# print("breaking the law", turns)
# turn_stats.insert(i, turns) # adding only to count turns
# break
elif dice == 4:
#print("beforeinside", spaces)
spaces = spaces + dice
#print("afterinside", spaces)
            spaces = configuration(m, spaces)
if spaces > 104:
#print('broken', turns)
turn_stats.insert(i, turns)
break
# elif spaces > 104:
# print("breaking the law", turns)
# turn_stats.insert(i, turns) # adding only to count turns
# break
elif dice == 5:
#print("beforeinside", spaces)
spaces = spaces + dice
#print("afterinside", spaces)
            spaces = configuration(m, spaces)
if spaces > 104:
#print('broken', turns)
turn_stats.insert(i, turns)
break
# elif spaces > 104:
# print("breaking the law", turns)
# turn_stats.insert(i, turns) # adding only to count turns
# break
elif dice == 6:
#print("beforeinside", spaces)
spaces = spaces + 0
#print("afterinside", spaces)
            spaces = configuration(m, spaces)
if spaces > 104:
#print('broken', turns)
turn_stats.insert(i, turns)
break
return (turn_stats)
def game_analysis(config):
turns_to_win = []
for game in range(1,101):
turns_to_win.insert(game,num_gen(config))
#print (turns)
return (turns_to_win)
def run_this(zero,dist):
a = game_analysis(zero)
a.sort() #sorting list
avg = np.mean(a)
std = np.std(a)
print(avg,'mean')
mode = st.mode(a)
print(mode[0],'mode')
#print(avg,std)
#if dist == 'pdf':
num_bins = 10
n, bins, patches = plt.hist(a, num_bins, normed=1, facecolor='green', alpha=0.5)
y = mlab.normpdf(bins, avg, std)
plt.plot(bins, y, 'r--')
if zero == 1:
plt.xlabel('Turns to Win: Configuration 1')
elif zero == 2:
plt.xlabel('Turns to Win: Configuration 2')
else:
plt.xlabel('Turns to Win')
plt.ylabel('Probability')
plt.title("Cumalative Density Function: Monte Carlo")
plt.show()
#elif dist == 'cdf':
num_bins = 10
fig, ax = plt.subplots(figsize=(8, 4))
n, bins, patches = ax.hist(a, num_bins, normed=1, histtype='step', cumulative=True)
y = mlab.normpdf(bins, avg, std).cumsum()
y /= y[-1]
ax.plot(bins, y, 'k--', linewidth=1.5)
if zero == 1:
plt.xlabel('Turns to Win: Configuration 1')
elif zero == 2:
plt.xlabel('Turns to Win: Configuration 2')
else:
plt.xlabel('Turns to Win')
plt.ylabel('Probability')
plt.title("Cumulative Density Function: Monte Carlo")
plt.show()
run_this(3,'cdf')
| StarcoderdataPython |
3391937 | <reponame>kevinyuan/pymtl3
#=========================================================================
# StructuralRTLIRGenL2Pass.py
#=========================================================================
# Author : <NAME>
# Date : Apr 3, 2019
"""Provide L2 structural RTLIR generation pass."""
from .StructuralRTLIRGenL1Pass import StructuralRTLIRGenL1Pass
class StructuralRTLIRGenL2Pass( StructuralRTLIRGenL1Pass ):
pass
| StarcoderdataPython |
9698615 | <reponame>Katee/artman<filename>artman/pipelines/sample_pipeline.py
# Copyright 2016 Google Inc. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""A canonical example of a flow class supported by gapic pipeline."""
from artman.pipelines import pipeline_base
from artman.tasks import sample_tasks
from taskflow.patterns import linear_flow
class SamplePipeline(pipeline_base.PipelineBase):
def __init__(self, **kwargs):
super(SamplePipeline, self).__init__(**kwargs)
def do_build_flow(self, **kwargs):
sleep_secs = kwargs.get('sleep_secs')
flow = linear_flow.Flow('sample-flow')
flow.add(sample_tasks.SampleTask('SampleTask',
inject={'sleep_secs': sleep_secs}))
return flow
def validate_kwargs(self, **kwargs):
if 'sleep_secs' not in kwargs:
raise ValueError('sleep_secs must be provided')
| StarcoderdataPython |
6478443 | <reponame>heshu-by/likelib-ws<gh_stars>0
from tester import test_case, Env, NodeConfig, Id, TEST_CHECK, TEST_CHECK_EQUAL, ClientType
@test_case("connection_legacy_grpc")
def main(env: Env) -> int:
node_id = Id(20101, grpc_port=50101)
env.start_node(NodeConfig(node_id))
client = env.get_client(ClientType.LEGACY_GRPC, node_id)
TEST_CHECK(client.connection_test())
return 0
@test_case("connection_legacy_http")
def main(env: Env) -> int:
node_id = Id(20102, http_port=50102)
env.start_node(NodeConfig(node_id))
client = env.get_client(ClientType.LEGACY_HTTP, node_id)
TEST_CHECK(client.connection_test())
return 0
@test_case("connection_python_http")
def main(env: Env) -> int:
node_id = Id(20103, http_port=50103)
env.start_node(NodeConfig(node_id))
client = env.get_client(ClientType.PYTHON_HTTP, node_id)
TEST_CHECK(client.connection_test())
return 0
@test_case("get_node_info_legacy_grpc")
def main(env: Env) -> int:
node_id = Id(20104, grpc_port=50104)
env.start_node(NodeConfig(node_id))
client = env.get_client(ClientType.LEGACY_GRPC, node_id)
info = client.node_info()
TEST_CHECK_EQUAL(info.top_block_number, 0)
return 0
@test_case("get_node_info_legacy_http")
def main(env: Env) -> int:
node_id = Id(20105, http_port=50105)
env.start_node(NodeConfig(node_id))
client = env.get_client(ClientType.LEGACY_HTTP, node_id)
info = client.node_info()
TEST_CHECK_EQUAL(info.top_block_number, 0)
return 0
@test_case("get_node_info_python_http")
def main(env: Env) -> int:
node_id = Id(20106, http_port=50106)
env.start_node(NodeConfig(node_id))
client = env.get_client(ClientType.PYTHON_HTTP, node_id)
info = client.node_info()
TEST_CHECK_EQUAL(info.top_block_number, 0)
return 0
| StarcoderdataPython |
1663919 | from django.conf.urls import url, include
from .views import (TaskListAPIView,
TaskDetailAPIView,
TaskCreateAPIView,
TaskUpdateAPIView,
TaskDeleteAPIView,
TaskWeekAPIView)
urlpatterns = [
url(r'^$', TaskListAPIView.as_view(), name='list'),
url(r'^week-list/$', TaskWeekAPIView.as_view(), name='week-list'),
url(r'^create/$', TaskCreateAPIView.as_view(), name='create'),
url(r'^(?P<pk>\d+)/$', TaskDetailAPIView.as_view(), name='detail'),
url(r'^(?P<pk>\d+)/edit$', TaskUpdateAPIView.as_view(), name='update'),
url(r'^(?P<pk>\d+)/delete$', TaskDeleteAPIView.as_view(), name='delete'),
] | StarcoderdataPython |
1952040 | ITEMS = {
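    # internal item id -> localized character/weapon names (zh-cn, zh-tw, en-us, ja-jp, ko-kr)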
"1150": {"zh-cn": "安柏", "zh-tw": "安柏", "en-us": "Amber", "ja-jp": "アンバー", "ko-kr": "엠버"},
"1310": {"zh-cn": "凯亚", "zh-tw": "凱亞", "en-us": "Kaeya", "ja-jp": "ガイア", "ko-kr": "케이아"},
"1440": {"zh-cn": "丽莎", "zh-tw": "麗莎", "en-us": "Lisa", "ja-jp": "リサ", "ko-kr": "리사"},
"1240": {"zh-cn": "芭芭拉", "zh-tw": "芭芭拉", "en-us": "Barbara", "ja-jp": "バーバラ", "ko-kr": "바바라"},
"1420": {"zh-cn": "雷泽", "zh-tw": "雷澤", "en-us": "Razor", "ja-jp": "レザー", "ko-kr": "레이저"},
"2130": {"zh-cn": "香菱", "zh-tw": "香菱", "en-us": "Xiangling", "ja-jp": "香菱", "ko-kr": "향릉"},
"2420": {"zh-cn": "北斗", "zh-tw": "北斗", "en-us": "Beidou", "ja-jp": "北斗", "ko-kr": "북두"},
"2210": {"zh-cn": "行秋", "zh-tw": "行秋", "en-us": "Xingqiu", "ja-jp": "行秋", "ko-kr": "행추"},
"2740": {"zh-cn": "凝光", "zh-tw": "凝光", "en-us": "Ningguang", "ja-jp": "凝光", "ko-kr": "응광"},
"1450": {"zh-cn": "菲谢尔", "zh-tw": "菲謝爾", "en-us": "Fischl", "ja-jp": "フィッシュル", "ko-kr": "피슬"},
"1110": {"zh-cn": "班尼特", "zh-tw": "班尼特", "en-us": "Bennett", "ja-jp": "ベネット", "ko-kr": "베넷"},
"1720": {"zh-cn": "诺艾尔", "zh-tw": "諾艾爾", "en-us": "Noelle", "ja-jp": "ノエル", "ko-kr": "노엘"},
"2320": {"zh-cn": "重云", "zh-tw": "重雲", "en-us": "Chongyun", "ja-jp": "重雲", "ko-kr": "중운"},
"1640": {"zh-cn": "砂糖", "zh-tw": "砂糖", "en-us": "Sucrose", "ja-jp": "スクロース", "ko-kr": "설탕"},
"1618": {"zh-cn": "琴", "zh-tw": "琴", "en-us": "Jean", "ja-jp": "ジン", "ko-kr": "진"},
"1128": {"zh-cn": "迪卢克", "zh-tw": "迪盧克", "en-us": "Diluc", "ja-jp": "ディルック", "ko-kr": "다이루크"},
"2318": {"zh-cn": "七七", "zh-tw": "七七", "en-us": "Qiqi", "ja-jp": "七七", "ko-kr": "치치"},
"1248": {"zh-cn": "莫娜", "zh-tw": "莫娜", "en-us": "Mona", "ja-jp": "モナ", "ko-kr": "모나"},
"2418": {"zh-cn": "刻晴", "zh-tw": "刻晴", "en-us": "Keqing", "ja-jp": "刻晴", "ko-kr": "각청"},
"1658": {"zh-cn": "温迪", "zh-tw": "溫迪", "en-us": "Venti", "ja-jp": "ウェンティ", "ko-kr": "벤티"},
"1148": {"zh-cn": "可莉", "zh-tw": "可莉", "en-us": "Klee", "ja-jp": "クレー", "ko-kr": "클레"},
"1350": {"zh-cn": "迪奥娜", "zh-tw": "迪奧娜", "en-us": "Diona", "ja-jp": "ディオナ", "ko-kr": "디오나"},
"7258": {"zh-cn": "达达利亚", "zh-tw": "達達利亞", "en-us": "Tartaglia", "ja-jp": "タルタリヤ", "ko-kr": "타르탈리아"},
"2120": {"zh-cn": "辛焱", "zh-tw": "辛焱", "en-us": "Xinyan", "ja-jp": "辛炎", "ko-kr": "신염"},
"2738": {"zh-cn": "钟离", "zh-tw": "鍾離", "en-us": "Zhongli", "ja-jp": "鍾離", "ko-kr": "종려"},
"1718": {"zh-cn": "阿贝多", "zh-tw": "阿貝多", "en-us": "Albedo", "ja-jp": "アルベド", "ko-kr": "알베도"},
"2358": {"zh-cn": "甘雨", "zh-tw": "甘雨", "en-us": "Ganyu", "ja-jp": "甘雨", "ko-kr": "감우"},
"2638": {"zh-cn": "魈", "zh-tw": "魈", "en-us": "Xiao", "ja-jp": "魈", "ko-kr": "소"},
"2138": {"zh-cn": "胡桃", "zh-tw": "胡桃", "en-us": "Hu Tao", "ja-jp": "胡桃", "ko-kr": "호두"},
"1330": {"zh-cn": "罗莎莉亚", "zh-tw": "蘿莎莉亞", "en-us": "Rosaria", "ja-jp": "ロサリア", "ko-kr": "로자리아"},
"2140": {"zh-cn": "烟绯", "zh-tw": "煙緋", "en-us": "Yanfei", "ja-jp": "煙緋", "ko-kr": "연비"},
"1328": {"zh-cn": "优菈", "zh-tw": "優菈", "en-us": "Eula", "ja-jp": "エウルア", "ko-kr": "유라"},
"3618": {"zh-cn": "枫原万叶", "zh-tw": "楓原萬葉", "en-us": "Kaedehara Kazuha", "ja-jp": "楓原万葉", "ko-kr": "카에데하라 카즈하"},
"3318": {"zh-cn": "神里绫华", "zh-tw": "", "en-us": "", "ja-jp": "", "ko-kr": ""},
"1C10": {"zh-cn": "冷刃", "zh-tw": "冷刃", "ja-jp": "冷刃", "ko-kr": "차가운 칼날", "en-us": "Cool Steel"},
"1C80": {"zh-cn": "黎明神剑", "zh-tw": "黎明神劍", "ja-jp": "黎明の神剣", "ko-kr": "여명신검", "en-us": "Harbinger of Dawn"},
"1C40": {"zh-cn": "飞天御剑", "zh-tw": "飛天御劍", "ja-jp": "飛天御剣", "ko-kr": "비천어검", "en-us": "Skyrider Sword"},
"1B40": {"zh-cn": "西风剑", "zh-tw": "西風劍", "ja-jp": "西風剣", "ko-kr": "페보니우스 검", "en-us": "Favonius Sword"},
"1B10": {"zh-cn": "笛剑", "zh-tw": "笛劍", "ja-jp": "笛の剣", "ko-kr": "피리검", "en-us": "The Flute"},
"1B41": {"zh-cn": "祭礼剑", "zh-tw": "祭禮劍", "ja-jp": "祭礼の剣", "ko-kr": "제례검", "en-us": "Sacrificial Sword"},
"1B11": {"zh-cn": "匣里龙吟", "zh-tw": "匣裡龍吟", "ja-jp": "匣中龍吟", "ko-kr": "용의 포효", "en-us": "Lion's Roar"},
"1B50": {"zh-cn": "暗巷闪光", "zh-tw": "暗巷閃光", "ja-jp": "ダークアレイの閃光", "ko-kr": "뒷골목의 섬광", "en-us": "The Alley Flash"},
"1A60": {"zh-cn": "风鹰剑", "zh-tw": "風鷹劍", "ja-jp": "風鷹剣", "ko-kr": "매의 검", "en-us": "Aquila Favonia"},
"1A40": {"zh-cn": "天空之刃", "zh-tw": "天空之刃", "ja-jp": "天空の刃", "ko-kr": "천공의 검", "en-us": "Skyward Blade"},
"1A10": {"zh-cn": "斫峰之刃", "zh-tw": "斫峰之刃", "ja-jp": "斬山の刃", "ko-kr": "참봉의 칼날", "en-us": "Summit Shaper"},
"1A70": {"zh-cn": "磐岩结绿", "zh-tw": "磐岩結綠", "ja-jp": "磐岩結緑", "ko-kr": "반암결록", "en-us": "Primordial Jade Cutter"},
"1A50": {"zh-cn": "苍古自由之誓", "zh-tw": "蒼古自由之誓", "ja-jp": "蒼古なる自由への誓い", "ko-kr": "보레아스의 조숙",
"en-us": "Freedom-Sworn"},
"1A80": {"zh-cn": "雾切之回光", "zh-tw": "", "ja-jp": "", "ko-kr": "", "en-us": ""},
"2C30": {"zh-cn": "铁影阔剑", "zh-tw": "鐵影闊劍", "ja-jp": "鉄影段平", "ko-kr": "강철의 그림자", "en-us": "Ferrous Shadow"},
"2C50": {"zh-cn": "沐浴龙血的剑", "zh-tw": "沐浴龍血的劍", "ja-jp": "龍血を浴びた剣", "ko-kr": "드래곤 블러드 소드",
"en-us": "Bloodtainted Greatsword"},
"2C10": {"zh-cn": "以理服人", "zh-tw": "以理服人", "ja-jp": "理屈責め", "ko-kr": "훌륭한 대화수단", "en-us": "Debate Club"},
"2C60": {"zh-cn": "飞天大御剑", "zh-tw": "飛天大御劍", "ja-jp": "飛天大御剣", "ko-kr": "비천대어검", "en-us": "Skyrider Greatsword"},
"2B40": {"zh-cn": "西风大剑", "zh-tw": "西風大劍", "ja-jp": "西風大剣", "ko-kr": "페보니우스 대검", "en-us": "Favonius Greatsword"},
"2B30": {"zh-cn": "钟剑", "zh-tw": "鐘劍", "ja-jp": "鐘の剣", "ko-kr": "시간의 검", "en-us": "The Bell"},
"2B41": {"zh-cn": "祭礼大剑", "zh-tw": "祭禮大劍", "ja-jp": "祭礼の大剣", "ko-kr": "제례 대검", "en-us": "Sacrificial Greatsword"},
"2B50": {"zh-cn": "雨裁", "zh-tw": "雨裁", "ja-jp": "雨裁", "ko-kr": "빗물 베기", "en-us": "Rainslasher"},
"2B10": {"zh-cn": "千岩古剑", "zh-tw": "千岩古劍", "ja-jp": "千岩古剣", "ko-kr": "천암고검", "en-us": "Lithic Blade"},
"2A40": {"zh-cn": "天空之傲", "zh-tw": "天空之傲", "ja-jp": "天空の傲", "ko-kr": "천공의 긍지", "en-us": "Skyward Pride"},
"2A10": {"zh-cn": "狼的末路", "zh-tw": "狼的末路", "ja-jp": "狼の末路", "ko-kr": "늑대의 말로", "en-us": "Wolf's Gravestone"},
"2A11": {"zh-cn": "无工之剑", "zh-tw": "無工之劍", "ja-jp": "無工の剣", "ko-kr": "무공의 검", "en-us": "The Unforged"},
"2A60": {"zh-cn": "松籁响起之时", "zh-tw": "松籟響起之時", "ja-jp": "松韻の響く頃", "ko-kr": "송뢰가 울릴 무렵",
"en-us": "Song of Broken Pines"},
"3C70": {"zh-cn": "白缨枪", "zh-tw": "白纓槍", "ja-jp": "白纓槍", "ko-kr": "백술창", "en-us": "White Tassel"},
"3C10": {"zh-cn": "钺矛", "zh-tw": "鉞矛", "ja-jp": "鉾槍", "ko-kr": "미늘창", "en-us": "Halberd"},
"3C30": {"zh-cn": "黑缨枪", "zh-tw": "黑纓槍", "ja-jp": "黒纓槍", "ko-kr": "흑술창", "en-us": "Black Tassel"},
"3B50": {"zh-cn": "匣里灭辰", "zh-tw": "匣裡滅辰", "ja-jp": "匣中滅龍", "ko-kr": "용학살창", "en-us": "Dragon's Bane"},
"3B40": {"zh-cn": "西风长枪", "zh-tw": "西風長槍", "ja-jp": "西風長槍", "ko-kr": "페보니우스 장창", "en-us": "Favonius Lance"},
"3B10": {"zh-cn": "千岩长枪", "zh-tw": "千岩長槍", "ja-jp": "千岩長槍", "ko-kr": "천암장창", "en-us": "Lithic Spear"},
"3A70": {"zh-cn": "和璞鸢", "zh-tw": "和璞鳶", "ja-jp": "和璞鳶", "ko-kr": "화박연", "en-us": "Primordial Jade Winged-Spear"},
"3A40": {"zh-cn": "天空之脊", "zh-tw": "天空之脊", "ja-jp": "天空の脊", "ko-kr": "천공의 마루", "en-us": "Skyward Spine"},
"3A10": {"zh-cn": "贯虹之槊", "zh-tw": "貫虹之槊", "ja-jp": "破天の槍", "ko-kr": "관홍의 창", "en-us": "Vortex Vanquisher"},
"3A80": {"zh-cn": "护摩之杖", "zh-tw": "護摩之杖", "ja-jp": "護摩の杖", "ko-kr": "호마의 지팡이", "en-us": "Staff of Homa"},
"4C50": {"zh-cn": "魔导绪论", "zh-tw": "魔導緒論", "ja-jp": "魔導緒論", "ko-kr": "마도 서론", "en-us": "Magic Guide"},
"4C30": {"zh-cn": "讨龙英杰谭", "zh-tw": "討龍英傑譚", "ja-jp": "龍殺しの英傑譚", "ko-kr": "드래곤 슬레이어 영웅담",
"en-us": "Thrilling Tales of Dragon Slayers"},
"4C40": {"zh-cn": "异世界行记", "zh-tw": "異世界行記", "ja-jp": "異世界旅行記", "ko-kr": "이세계 여행기", "en-us": "Otherworldly Story"},
"4C51": {"zh-cn": "翡玉法球", "zh-tw": "翡玉法球", "ja-jp": "翡玉法珠", "ko-kr": "비취 오브", "en-us": "Emerald Orb"},
"4C70": {"zh-cn": "甲级宝珏", "zh-tw": "甲級寶玨", "ja-jp": "特級の宝玉", "ko-kr": "1급 보옥", "en-us": "Twin Nephrite"},
"4B40": {"zh-cn": "西风秘典", "zh-tw": "西風秘典", "ja-jp": "西風秘典", "ko-kr": "페보니우스 비전", "en-us": "Favonius Codex"},
"4B80": {"zh-cn": "流浪乐章", "zh-tw": "流浪樂章", "ja-jp": "流浪楽章", "ko-kr": "음유시인의 악장", "en-us": "The Widsith"},
"4B50": {"zh-cn": "祭礼残章", "zh-tw": "祭禮殘章", "ja-jp": "祭礼の断片", "ko-kr": "제례의 악장", "en-us": "Sacrificial Fragments"},
"4B10": {"zh-cn": "昭心", "zh-tw": "昭心", "ja-jp": "昭心", "ko-kr": "소심", "en-us": "Eye of Perception"},
"4B41": {"zh-cn": "暗巷的酒与诗", "zh-tw": "暗巷的酒與詩", "ja-jp": "ダークアレイの酒と詩", "ko-kr": "뒷골목의 술과 시",
"en-us": "Wine and Song"},
"4A10": {"zh-cn": "天空之卷", "zh-tw": "天空之卷", "ja-jp": "天空の巻", "ko-kr": "천공의 두루마리", "en-us": "Skyward Atlas"},
"4A70": {"zh-cn": "四风原典", "zh-tw": "四風原典", "ja-jp": "四風原典", "ko-kr": "사풍 원서",
"en-us": "Lost Prayer to the Sacred Winds"},
"4A11": {"zh-cn": "尘世之锁", "zh-tw": "塵世之鎖", "ja-jp": "浮世の錠", "ko-kr": "속세의 자물쇠", "en-us": "Memory of Dust"},
"5C50": {"zh-cn": "鸦羽弓", "zh-tw": "鴉羽弓", "ja-jp": "鴉羽の弓", "ko-kr": "까마귀깃 활", "en-us": "Raven Bow"},
"5C80": {"zh-cn": "神射手之誓", "zh-tw": "神射手之誓", "ja-jp": "シャープシューターの誓い", "ko-kr": "신궁의 서약",
"en-us": "Sharpshooter's Oath"},
"5C30": {"zh-cn": "反曲弓", "zh-tw": "反曲弓", "ja-jp": "リカーブボウ", "ko-kr": "곡궁", "en-us": "Recurve Bow"},
"5C70": {"zh-cn": "弹弓", "zh-tw": "彈弓", "ja-jp": "弾弓", "ko-kr": "탄궁", "en-us": "Slingshot"},
"5C81": {"zh-cn": "信使", "zh-tw": "信使", "ja-jp": "文使い", "ko-kr": "전령", "en-us": "Messenger"},
"5B40": {"zh-cn": "西风猎弓", "zh-tw": "西風獵弓", "ja-jp": "西風猟弓", "ko-kr": "페보니우스 활", "en-us": "Favonius Warbow"},
"5B50": {"zh-cn": "绝弦", "zh-tw": "絕弦", "ja-jp": "絶弦", "ko-kr": "절현", "en-us": "The Stringless"},
"5B41": {"zh-cn": "祭礼弓", "zh-tw": "祭禮弓", "ja-jp": "祭礼の弓", "ko-kr": "제례활", "en-us": "Sacrificial Bow"},
"5B10": {"zh-cn": "弓藏", "zh-tw": "弓藏", "ja-jp": "弓蔵", "ko-kr": "녹슨 활", "en-us": "Rust"},
"5B11": {"zh-cn": "暗巷猎手", "zh-tw": "暗巷獵手", "ja-jp": "ダークアレイの狩人", "ko-kr": "뒷골목 사냥꾼", "en-us": "Alley Hunter"},
"5B60": {"zh-cn": "幽夜华尔兹", "zh-tw": "幽夜華爾滋", "ja-jp": "", "ko-kr": "", "en-us": "Mitternachts Waltz"},
"5A70": {"zh-cn": "天空之翼", "zh-tw": "天空之翼", "ja-jp": "天空の翼", "ko-kr": "천공의 날개", "en-us": "Skyward Harp"},
"5A10": {"zh-cn": "阿莫斯之弓", "zh-tw": "阿莫斯之弓", "ja-jp": "アモスの弓", "ko-kr": "아모스의 활", "en-us": "Amos' Bow"},
"5A40": {"zh-cn": "终末嗟叹之诗", "zh-tw": "終末嗟嘆之詩", "ja-jp": "終焉を嘆く詩", "ko-kr": "종말 탄식의 노래",
"en-us": "Elegy for the End"}
}
| StarcoderdataPython |
11264798 | # Copyright 2014 Google Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import confix
class TestStruct(confix.Struct):
a = confix.FieldDef(int, 'a field', 100)
b = confix.FieldDef(str, 'b field', 'default value')
u = confix.FieldDef(int, 'optional field', confix.Undefined)
class OuterStruct(confix.Struct):
inner = confix.FieldDef(confix.Struct, 'nested struct')
class ConfixTests(unittest.TestCase):
def testLoose(self):
s = confix.LooseStruct(a = 1, b = 2)
assert s.a == 1
assert s.b == 2
s.x = 100
assert s.x == 100
def testStrict(self):
self.assertRaises(AttributeError, TestStruct, x=2)
self.assertRaises(TypeError, TestStruct, a='string')
t = TestStruct(a=1, b='string')
self.assertEqual(t.a, 1)
self.assertEqual(t.b, 'string')
def testUndefinedFields(self):
t = TestStruct(a=200)
def GetU():
return t.u
self.assertRaises(AttributeError, GetU)
t.u = 100
self.assertEqual(t.u, 100)
def SetUToStr():
t.u = 'hey now!'
self.assertRaises(TypeError, SetUToStr)
def testDefaultTypecheck(self):
def CreateBadStruct():
class BadStruct(confix.Struct):
field = confix.FieldDef(str, default=100)
self.assertRaises(TypeError, CreateBadStruct)
def testDefaultValues(self):
val = TestStruct()
# Don't rely on comparison, as it could be implemented differently and
# we test it below.
self.assertEqual(val.a, 100)
self.assertEqual(val.b, 'default value')
def testCompare(self):
self.assertEqual(confix.LooseStruct(a=1, b='foo'),
confix.LooseStruct(a=1, b='foo'))
self.assertNotEquals(confix.LooseStruct(a=1, b='foo'),
confix.LooseStruct(a=1, b='foo', c=1.5))
self.assertEqual(TestStruct(a=100, b='value'),
TestStruct(a=100, b='value'))
self.assertNotEquals(TestStruct(a=100, b='value'),
TestStruct(a=100, b='different value'))
self.assertNotEquals(TestStruct(a=100, b='value'),
TestStruct(a=100, b='value', u=0))
# Verify that default fields are compared correctly.
self.assertEqual(TestStruct(a=100),
TestStruct(a=100, b='default value'))
def testDir(self):
self.assertEqual(dir(TestStruct(a=1, b='two')), ['a', 'b'])
def testNesting(self):
outer = OuterStruct(inner=TestStruct(a=1, b='two'))
self.assertEqual(outer, outer)
def testLists(self):
class TypeWithLists(confix.Struct):
l = confix.FieldDef(confix.List(str), 'list of strings', [])
v = TypeWithLists(l=['eeny', 'meeny', 'miney'])
self.assertRaises(TypeError, v.l.append, 100)
self.assertRaises(TypeError, v.l.__setitem__, 0, 100)
v.l.append('moe')
self.assertEqual(v.l,
confix.List(str)(['eeny', 'meeny', 'miney', 'moe']))
v.l[0] = 'serious'
self.assertEqual(v.l, ['serious', 'meeny', 'miney', 'moe'])
def testMaps(self):
class TypeWithMaps(confix.Struct):
m = confix.FieldDef(confix.Map(str, int), 'map of string to int',
{})
v = TypeWithMaps(m={'first': 100, 'second': 200})
self.assertEqual(v.m, {'first': 100, 'second': 200})
self.assertEqual(v.m['first'], 100)
self.assertEqual(sorted(v.m.items()),
[('first', 100), ('second', 200)])
self.assertEqual(sorted(v.m.iteritems()),
[('first', 100), ('second', 200)])
self.assertEqual(sorted(v.m.keys()), ['first', 'second'])
self.assertEqual(sorted(v.m.iterkeys()), ['first', 'second'])
self.assertEqual(sorted(v.m.values()), [100, 200])
self.assertEqual(sorted(v.m.itervalues()), [100, 200])
v.m['third'] = 300
self.assertEqual(v.m, {'first': 100, 'second': 200, 'third': 300})
self.assertRaises(TypeError, v.m.__setitem__, 'boing!')
v.m = {'a': 1, 'b': 2}
self.assertEqual(v.m, {'a': 1, 'b': 2})
def set_bad_val():
v.m = 'blech!'
self.assertRaises(TypeError, set_bad_val)
def testMapConversions(self):
# Test value conversions
m = confix.Map(str, confix.List(int))({'a': [1, 2]})
self.assertEqual(m, {'a': [1, 2]})
self.assertEqual(m.setdefault('b', [0]), confix.List(int)([0]))
self.assertEqual(m, {'a': [1, 2], 'b': [0]})
self.assertEqual(m.setdefault('a', [0]), [1, 2])
self.assertEqual(m, {'a': [1, 2], 'b': [0]})
self.assertEqual(m.get('c', [3]), confix.List(int)([3]))
self.assertEqual(m.get('a', [0]), [1, 2])
self.assertEqual(m, {'a': [1, 2], 'b': [0]})
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
5198321 | <reponame>hbristow/django-arxiv
from __future__ import absolute_import
from django.template.loader import get_template
from django.template import Context
from celery.schedules import crontab
import celery
import collections
from arxiv import feed, models, time
CELERYBEAT_SCHEDULE = {
# Executes on weekdays every 15 minutes (timezones are spaced 15 minutes apart)
'email-subscribers': {
'task': 'arxiv.tasks.email_subscribers',
'schedule': crontab(minute='*/15')
}
}
@celery.task
def email_subscribers():
"""Email subscribers with a daily digest of their subject areas"""
# get the timezones which need updating
timezones = models.Subscriber.objects.values_list('timezone', flat=True).distinct()
timezones = [zone for zone in timezones if time.satisfies(time.now(zone),
weekday = ['Monday', 'Tuesday', 'Wednesday', 'Thursday', 'Friday'],
        hour = [7],
minute = [0]
)]
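    # i.e. keep only time zones where the local time is currently 07:00 on a weekday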
# get all subscribers in the timezones that need notifying
subscribers = models.Subscriber.objects.filter(timezone__in=timezones).prefetch_related('subjects')
# notify those subscribers
for subscriber in subscribers:
email_feed(subscriber)
@celery.task
def email_feed(subscriber):
"""Email a recipient with the given subjects"""
mail_server = models.MailServer.get_solo()
subjects = subscriber.subjects.all().values_list('cat', flat=True)
articles = feed.today(*subjects)
template = get_template('arxiv/email.inlined.html')
rendered = template.render(Context({
'feed': articles,
'subscriber': subscriber,
'domain': mail_server.domain,
}))
mail_server.send_mail('arXiv Feed', rendered, [subscriber.email])
| StarcoderdataPython |
4872611 | <filename>vulcan.py
import io
print("some printinfo, then exit.")
print("some more words!")
| StarcoderdataPython |
8111043 | # -*- coding: utf-8 -*-
# Copyright (c) 2012-2015, <NAME> and contributors
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer. Redistributions in binary
# form must reproduce the above copyright notice, this list of conditions and the
# following disclaimer in the documentation and/or other materials provided with
# the distribution
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import warnings
from django.conf import settings
from django.core.urlresolvers import reverse
from django.db import models
from django.utils.translation import ugettext_lazy as _
try:
import six
except ImportError:
from django.utils import six
from .base import OrgMeta, AbstractBaseOrganization, AbstractBaseOrganizationUser, AbstractBaseOrganizationOwner
from .fields import SlugField, AutoCreatedField, AutoLastModifiedField
from .signals import user_added, user_removed, owner_changed
USER_MODEL = getattr(settings, 'AUTH_USER_MODEL', 'auth.User')
ORGS_TIMESTAMPED_MODEL = getattr(settings, 'ORGS_TIMESTAMPED_MODEL', None)
if ORGS_TIMESTAMPED_MODEL:
warnings.warn("Configured TimestampModel has been replaced and is now ignored.",
DeprecationWarning)
class SharedBaseModel(models.Model):
"""
Adds fields ``created`` and ``modified`` and
two private methods that are used by the rest
of the abstract models.
"""
created = AutoCreatedField()
modified = AutoLastModifiedField()
@property
def _org_user_model(self):
model = self.__class__.module_registry[self.__class__.__module__]['OrgUserModel']
if model is None:
model = self.__class__.module_registry['organizations.models']['OrgUserModel']
return model
@property
def _org_owner_model(self):
model = self.__class__.module_registry[self.__class__.__module__]['OrgOwnerModel']
if model is None:
model = self.__class__.module_registry['organizations.models']['OrgOwnerModel']
return model
class Meta:
abstract = True
class AbstractOrganization(six.with_metaclass(OrgMeta, SharedBaseModel, AbstractBaseOrganization)):
"""
Abstract Organization model.
"""
slug = SlugField(max_length=200, blank=False, editable=True,
populate_from='name', unique=True,
help_text=_("The name in all lowercase, suitable for URL identification"))
class Meta(AbstractBaseOrganization.Meta):
abstract = True
verbose_name = _("organization")
verbose_name_plural = _("organizations")
def __unicode__(self):
return self.name
def get_absolute_url(self):
return reverse('organization_detail', kwargs={'organization_pk': self.pk})
def add_user(self, user, is_admin=False):
"""
        Adds a new user and, if it is the first user, makes the user an
        admin and the owner.
"""
users_count = self.users.all().count()
if users_count == 0:
is_admin = True
# TODO get specific org user?
org_user = self._org_user_model.objects.create(user=user,
organization=self,
is_admin=is_admin)
if users_count == 0:
# TODO get specific org user?
self._org_owner_model.objects.create(organization=self,
organization_user=org_user)
# User added signal
user_added.send(sender=self, user=user)
return org_user
def remove_user(self, user):
"""
Deletes a user from an organization.
"""
org_user = self._org_user_model.objects.get(user=user,
organization=self)
org_user.delete()
# User removed signal
user_removed.send(sender=self, user=user)
def get_or_add_user(self, user, **kwargs):
"""
Adds a new user to the organization, and if it's the first user makes
the user an admin and the owner. Uses the `get_or_create` method to
create or return the existing user.
`user` should be a user instance, e.g. `auth.User`.
Returns the same tuple as the `get_or_create` method, the
`OrganizationUser` and a boolean value indicating whether the
OrganizationUser was created or not.
"""
is_admin = kwargs.pop('is_admin', False)
users_count = self.users.all().count()
if users_count == 0:
is_admin = True
org_user, created = self._org_user_model.objects\
.get_or_create(organization=self,
user=user,
defaults={'is_admin': is_admin})
if users_count == 0:
self._org_owner_model.objects\
.create(organization=self, organization_user=org_user)
if created:
# User added signal
user_added.send(sender=self, user=user)
return org_user, created
def change_owner(self, new_owner):
"""
Changes ownership of an organization.
"""
old_owner = self.owner.organization_user
self.owner.organization_user = new_owner
self.owner.save()
# Owner changed signal
owner_changed.send(sender=self, old=old_owner, new=new_owner)
def is_admin(self, user):
"""
        Returns True if the user is an admin in the organization, otherwise False
"""
return True if self.organization_users.filter(user=user, is_admin=True) else False
def is_owner(self, user):
"""
        Returns True if the user is the organization's owner, otherwise False
"""
return self.owner.organization_user.user == user
class AbstractOrganizationUser(six.with_metaclass(OrgMeta, SharedBaseModel, AbstractBaseOrganizationUser)):
"""
Abstract OrganizationUser model
"""
is_admin = models.BooleanField(default=False)
class Meta(AbstractBaseOrganizationUser.Meta):
abstract = True
verbose_name = _("organization user")
verbose_name_plural = _("organization users")
def __unicode__(self):
return u"{0} ({1})".format(self.name if self.user.is_active else
self.user.email, self.organization.name)
def delete(self, using=None):
"""
If the organization user is also the owner, this should not be deleted
unless it's part of a cascade from the Organization.
If there is no owner then the deletion should proceed.
"""
from organizations.exceptions import OwnershipRequired
try:
if self.organization.owner.organization_user.id == self.id:
raise OwnershipRequired(_("Cannot delete organization owner "
"before organization or transferring ownership."))
# TODO This line presumes that OrgOwner model can't be modified
except self._org_owner_model.DoesNotExist:
pass
super(AbstractBaseOrganizationUser, self).delete(using=using)
def get_absolute_url(self):
return reverse('organization_user_detail', kwargs={
'organization_pk': self.organization.pk, 'user_pk': self.user.pk})
class AbstractOrganizationOwner(six.with_metaclass(OrgMeta, SharedBaseModel, AbstractBaseOrganizationOwner)):
"""
Abstract OrganizationOwner model
"""
class Meta:
abstract = True
verbose_name = _("organization owner")
verbose_name_plural = _("organization owners")
def save(self, *args, **kwargs):
"""
Extends the default save method by verifying that the chosen
organization user is associated with the organization.
Method validates against the primary key of the organization because
when validating an inherited model it may be checking an instance of
        `Organization` against an instance of `CustomOrganization`. Multi-table
        inheritance means the database keys will be identical though.
"""
from organizations.exceptions import OrganizationMismatch
if self.organization_user.organization.pk != self.organization.pk:
raise OrganizationMismatch
else:
super(AbstractBaseOrganizationOwner, self).save(*args, **kwargs)
| StarcoderdataPython |
335559 | """
All question classes
"""
from typing import Optional, List, Union, Any
from fuzzywuzzy.fuzz import ratio
from string import ascii_lowercase
import random
from professor.utils.numeric import numeric_string
from professor.core.base import QuestionBase
from professor.core.wraps import Link
class FreeResponse(QuestionBase):
def __init__(self, *args, **kwargs):
"""
...
:param exact:
"""
self.exact = False
if "type_help" not in self.__dict__:
self.type_help = """To answer a free response question, enter, in precise words, your response. Be careful! Not all quiz builders are lenient on punctuation, capitalization, and spelling."""
if "name" not in self.__dict__:
self.name: str = "Free Response"
super(FreeResponse, self).__init__(**kwargs)
def build(self):
"""
Enforce type requirements
"""
if isinstance(self.answer, (int, float)):
self.answer = str(self.answer)
elif isinstance(self.answer, (list, tuple, set)):
if self.answer:
self.answer = random.choice(self.answer)
else:
self.answer = None
elif not isinstance(self.answer, str):
self.answer = None
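    # precision() is the fuzzy-match score (0-100) a response must reach: it rises with answer length from a floor of 70, and exact mode requires a perfect 100.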
def precision(self, answer: Optional[str] = None) -> int:
"""
Levenshtein coefficient that response must meet to be correct
"""
return max(70, int(round(100 - 100/len(self.answer)**0.5))) if not self.exact else 100
def check(self, x: str) -> bool:
"""
Validates a string against the question's answer
:param x:
:return:
"""
return ratio(x, self.answer) >= self.precision()
def edit_exact(self, x: bool) -> bool:
"""
Edits the exact attribute
"""
return self._edit_boolean(attr="exact", x=x)
class Numeric(QuestionBase):
def __init__(self, *args, **kwargs):
self.round: Optional[int] = None
if "type_help" not in self.__dict__:
self.type_help: str = """To answer a numeric question, enter the number that answers the question (digits, not words). Be careful! Some quiz builders may round your answer to a particular decimal."""
if "name" not in self.__dict__:
self.name: str = "Numeric"
super(Numeric, self).__init__(**kwargs)
def build(self):
"""
Enforce type requirements
"""
if isinstance(self.answer, str):
self.answer = numeric_string(string=self.answer)
elif isinstance(self.answer, (list, tuple, set)):
is_numeric: List[Union[int, float]] = []
# Get numeric answers
for ans in self.answer:
if isinstance(ans, str):
ans = numeric_string(string=ans)
if ans is not None:
is_numeric.append(ans)
# Choose one
if is_numeric:
self.answer = random.choice(is_numeric)
else:
self.answer = None
elif not isinstance(self.answer, (int, float)):
self.answer = None
def check(self, x: str) -> bool:
"""
Validates a string against the question's answer
"""
x = numeric_string(x)
if x is not None:
if self.round is not None:
return round(self.answer, self.round) == round(x, self.round)
return self.answer == x
return False
def edit_answer(self, x: str, i: Optional[int] = None) -> bool:
"""
Edits the answer attribute
"""
return self._edit_number(numeric_string(x), "answer")
def edit_round(self, x: str) -> bool:
"""
Edits the round attribute
"""
return self._edit_number(numeric_string(x), "round")
def clear_round(self):
"""
Sets the round attribute to None
"""
return self._clear_attr("round")
class MultipleChoice(QuestionBase):
def __init__(self, *args, **kwargs):
self.choices: List[str] = []
self.shuffle: bool = True
if "type_help" not in self.__dict__:
self.type_help = """To answer a multiple choice question, enter the character that is paired with the option you choose"""
if "name" not in self.__dict__:
self.name: str = "Multiple Choice"
super(MultipleChoice, self).__init__(**kwargs)
if self.shuffle:
random.shuffle(self.choices)
@property
def Choices(self) -> dict:
return {a: v for a, v in zip(ascii_lowercase, self.choices)}
def build(self):
"""
Enforce type requirements and coherence between choices and answer
"""
if isinstance(self.answer, (list, tuple, set)):
# Only retain one answer
# Ensure not an empty iterable
if self.answer:
keep = random.choice(self.answer)
for ans in self.answer:
if (ans != keep) & (ans in self.choices):
self.choices.remove(ans)
self.answer = keep
else:
self.answer = None
elif isinstance(self.answer, (int, float)):
# Convert to string
self.answer = str(self.answer)
# Ensure
if (self.answer not in self.choices) & (self.answer is not None):
self.choices.append(self.answer)
def check(self, x: Optional[str] = None, i: Optional[int] = None) -> bool:
"""
Checks if x is answer or option at index i is answer
"""
        if x is not None:
            return x == self.answer
        elif i is not None:  # explicit None check so index 0 remains usable
            return self.choices[i] == self.answer
return False
@Link(domain="answer", codomain="choices")
def edit_choice(self, x: Any, i: int) -> bool:
"""
Edits the value of choices at index i and the answer if the choice was the answer.
"""
return self._edit_element(attr="choices", x=x, i=i)
def add_choice(self, x: Any) -> bool:
"""
Appends a value to the choices
"""
return self._add_element(attr="choices", x=x)
def insert_choice(self, x: Any, i: int) -> bool:
"""
Inserts a value to choices at index i
"""
return self._insert_element(attr="choices", x=x, i=i)
@Link(domain="answer", codomain="choices")
def delete_choice(self, x: Optional[Any] = None, i: Optional[int] = None) -> bool:
"""
Deletes the choice at index i
"""
        if i is not None:  # explicit None check so the choice at index 0 can be deleted
            x = self.choices[i]
if x == self.answer:
self.answer = None
return self._delete_element(attr="choices", x=x, i=i)
def clear_choices(self) -> bool:
"""
Resets the choices array
"""
self.answer = None
return self._clear_attr(attr="choices", default=[])
def edit_shuffle(self, x: bool) -> bool:
"""
Sets the shuffle attribute
"""
return self._edit_boolean(attr="shuffle", x=x)
@Link(domain="answer", codomain="choices")
def edit_answer(self, x: str, i: Optional[int] = None) -> bool:
"""
Edits the answer and its paired choice
"""
return self._edit_string(attr="answer", x=x)
class MultipleResponse(MultipleChoice):
def __init__(self, *args, **kwargs):
self.answer: list = []
if "type_help" not in self.__dict__:
self.type_help = """To answer a multiple response question, enter the characters that are paired with the options you choose. Separate them with spaces or commas."""
if "name" not in self.__dict__:
self.name: str = "Multiple Response"
super(MultipleResponse, self).__init__(**kwargs)
def build(self):
"""
Enforce type requirements and coherence between choices and answer
"""
if not isinstance(self.answer, (list, tuple, set)):
self.answer = [str(self.answer)] if self.answer is not None else []
for missing in set(self.answer) - set(self.choices):
self.choices.append(missing)
def check(self, x: Optional[List[str]] = None, i: Optional[List[int]] = None):
"""
Checks if given responses are answers or given choice indices are answers
"""
        if x:
            return set(x) == set(self.answer)  # answer is stored as a list, so compare as sets
        elif i:
            return set(self.choices[k] for k in i) == set(self.answer)
return False
@Link(domain="answer", codomain="choices")
def add_answer(self, x: Any) -> bool:
"""
Appends a value to the answers and choices
"""
return self._add_element(attr="answer", x=x)
@Link(domain="answer", codomain="choices")
def edit_choice(self, x: Any, i: int) -> bool:
"""
Edits the value of choices at index i and its paired answer if it was an answer
"""
return self._edit_element(attr="choices", x=x, i=i)
@Link(domain="answer", codomain="choices")
def edit_answer(self, x: str, i: int) -> bool:
"""
Edits the answer and its paired choice
"""
return self._edit_element(attr="answer", x=x, i=i)
@Link(domain="answer", codomain="choices")
def delete_answer(self, x: Optional[Any] = None, i: Optional[int] = None) -> bool:
"""
Deletes an answer and its paired choice
"""
return self._delete_element(attr="answer", x=x, i=i)
@Link(domain="answer", codomain="choices")
def clear_choices(self) -> bool:
"""
Resets the choices array to only the answer
"""
return self._clear_attr(attr="choices", default=list(self.answer))
@Link(domain="answer", codomain="choices")
def clear_answers(self):
"""
Resets the answers array
"""
return self._clear_attr(attr="answer", default=[])
class MultipleFreeResponse(FreeResponse):
def __init__(self, *args, **kwargs):
if "type_help" not in self.__dict__:
self.type_help = """To answer a multiple free response question, enter, in precise words, your response. There are multiple correct answers to this question, you should only give one. Be careful! Not all quiz builders are lenient on punctuation, capitalization, and spelling."""
if "name" not in self.__dict__:
self.name: str = "Multiple Free Response"
super(MultipleFreeResponse, self).__init__(**kwargs)
def build(self):
"""
Enforce type requirements and coherence between choices and answer
"""
if not isinstance(self.answer, (list, tuple, set)):
self.answer = [str(self.answer)] if self.answer is not None else []
def precision(self, answer: Optional[str] = " ") -> int:
"""
Levenshtein coefficient that response must meet to be correct.
Minimum requirement: 70
"""
return max(70, int(round(100 - 100 / len(answer) ** 2))) if not self.exact else 100
def check(self, x: str):
"""
        Checks whether the given response matches any of the accepted answers
"""
return any(ratio(x, ans) > self.precision(answer=ans) for ans in self.answer)
def add_answer(self, x: Any) -> bool:
"""
Appends a value to the answers
"""
return self._add_element(attr="answer", x=x)
def edit_answer(self, x: str, i: int) -> bool:
"""
Edits the answer at the index
"""
return self._edit_element(attr="answer", x=x, i=i)
def delete_answer(self, x: Optional[Any] = None, i: Optional[int] = None) -> bool:
"""
Deletes an answer
"""
return self._delete_element(attr="answer", x=x, i=i)
def clear_answers(self):
"""
Resets the answers array
"""
return self._clear_attr(attr="answer", default=[])
| StarcoderdataPython |
4968504 | <gh_stars>0
import time
import os.path as osp
# import huepy as hue
import torch
from torch.nn.utils import clip_grad_norm_
from ignite.engine.engine import Engine, Events
# apex is optional; fall back gracefully if it is not installed
try:
import apex
from apex import amp
except ImportError as e:
# raise
pass
from .distributed import reduce_dict, is_main_process
from .logger import MetricLogger
from .serialization import save_checkpoint
from .misc import ship_data_to_cuda, lucky_bunny, warmup_lr_scheduler, resume_from_checkpoint
def get_trainer(args, model, train_loader, optimizer, lr_scheduler, device, tfboard):
if args.apex:
model.roi_heads.box_roi_pool.forward = \
amp.half_function(model.roi_heads.box_roi_pool.forward)
if hasattr(model.roi_heads, 'pose_attention_net'):
model.roi_heads.pose_attention_net.pool.forward = \
amp.half_function(model.roi_heads.pose_attention_net.pool.forward)
model, optimizer = amp.initialize(model, optimizer, opt_level='O1')
model_without_ddp = model
if args.distributed:
if args.apex:
model = apex.parallel.convert_syncbn_model(model)
model = apex.parallel.DistributedDataParallel(model)
else:
model = torch.nn.SyncBatchNorm.convert_sync_batchnorm(model)
model = torch.nn.parallel.DistributedDataParallel(
model, device_ids=[args.local_rank], find_unused_parameters=True)
model_without_ddp = model.module
if args.resume is not None:
args, model_without_ddp, optimizer, lr_scheduler = resume_from_checkpoint(
args, model_without_ddp, optimizer, lr_scheduler)
def _update_model(engine, data):
"""
Args:
        :param engine: handle to the ignite engine
        :param data: batch of training data
        :return: data to be stored in the engine's state.
"""
images, targets = ship_data_to_cuda(data, device)
loss_dict = model(images, targets)
losses = args.train.w_RPN_loss_cls * loss_dict['loss_objectness'] \
+ args.train.w_RPN_loss_box * loss_dict['loss_rpn_box_reg'] \
+ args.train.w_RCNN_loss_bbox * loss_dict['loss_box_reg'] \
+ args.train.w_RCNN_loss_cls * loss_dict['loss_detection'] \
+ args.train.w_OIM_loss_oim * loss_dict['loss_reid']
# reduce losses over all GPUs for logging purposes
if engine.state.iteration % args.train.disp_interval == 0:
loss_dict_reduced = reduce_dict(loss_dict)
losses_reduced = args.train.w_RPN_loss_cls * loss_dict_reduced['loss_objectness'] \
+ args.train.w_RPN_loss_box * loss_dict_reduced['loss_rpn_box_reg'] \
+ args.train.w_RCNN_loss_bbox * loss_dict_reduced['loss_box_reg'] \
+ args.train.w_RCNN_loss_cls * loss_dict_reduced['loss_detection'] \
+ args.train.w_OIM_loss_oim * loss_dict_reduced['loss_reid']
loss_value = losses_reduced.item()
state = dict(loss_value=loss_value,
lr=optimizer.param_groups[0]['lr'])
state.update(loss_dict_reduced)
else:
state = None
optimizer.zero_grad()
if args.apex:
with amp.scale_loss(losses, optimizer) as scaled_loss:
scaled_loss.backward()
else:
losses.backward()
if args.train.clip_gradient > 0:
clip_grad_norm_(model.parameters(), args.train.clip_gradient)
optimizer.step()
return state
trainer = Engine(_update_model)
@trainer.on(Events.STARTED)
def _init_run(engine):
engine.state.epoch = args.train.start_epoch
engine.state.iteration = args.train.start_epoch * len(train_loader)
@trainer.on(Events.EPOCH_STARTED)
def _init_epoch(engine):
if engine.state.epoch == 1 and args.train.lr_warm_up:
warmup_factor = 1. / 1000
warmup_iters = len(train_loader) - 1
engine.state.sub_scheduler = warmup_lr_scheduler(
optimizer, warmup_iters, warmup_factor)
lucky_bunny(engine.state.epoch)
engine.state.metric_logger = MetricLogger()
@trainer.on(Events.ITERATION_STARTED)
def _init_iter(engine):
if engine.state.iteration % args.train.disp_interval == 0:
            engine.state.start = time.time()  # start timing from the current moment
@trainer.on(Events.ITERATION_COMPLETED)
def _post_iter(engine):
if engine.state.epoch == 1 and args.train.lr_warm_up: # epoch start from 1
engine.state.sub_scheduler.step()
if engine.state.iteration % args.train.disp_interval == 0:
# Update logger
batch_time = time.time() - engine.state.start
engine.state.metric_logger.update(batch_time=batch_time)
engine.state.metric_logger.update(**engine.state.output)
if hasattr(engine.state, 'debug_info'):
engine.state.metric_logger.update(**engine.state.debug_info)
# Print log on console
step = (engine.state.iteration - 1) % len(train_loader) + 1
engine.state.metric_logger.print_log(engine.state.epoch, step,
len(train_loader))
# Record log on tensorboard
if args.train.use_tfboard and is_main_process():
for k, v in engine.state.metric_logger.meters.items():
if 'loss' in k:
k = k.replace('loss_', 'Loss/')
if 'num' in k:
tfboard.add_scalars('Debug/fg_bg_ratio', {k: v.avg},
engine.state.iteration)
else:
tfboard.add_scalar(k, v.avg, engine.state.iteration)
@trainer.on(Events.EPOCH_COMPLETED)
def _post_epoch(engine):
lr_scheduler.step()
if is_main_process():
save_name = osp.join(args.path, 'checkpoint.pth')
save_checkpoint({
'epoch': engine.state.epoch,
'model': model_without_ddp.state_dict(),
'optimizer': optimizer.state_dict(),
'lr_scheduler': lr_scheduler.state_dict()
}, save_name)
# print(hue.good('save model: {}'.format(save_name)))
print('===============save model: {}======================='.format(save_name))
return trainer
| StarcoderdataPython |
6461302 | #
# This file is part of PyOLab. https://github.com/matsselen/pyolab
# (C) 2017 <NAME> <<EMAIL>>
#
# SPDX-License-Identifier: BSD-3-Clause
# (https://opensource.org/licenses/BSD-3-Clause)
#
"""
Files starting with the name "user", like this one, are provided
so that users can create their own analysis jobs.
This file is a handy place for the user to put any global variables
that she might need. Not that global variables are a great idea, mind you,
but it's something Mats understands.
"""
class U(object):
analUserCalls = 0 # how many times analUserLoop() has been called
# (just for example - not needed in your own code)
# keep track of some stuff used for calculating averages
lastA7 = 0
lastA8 = 0
lastA9 = 0
lastHG = 0 | StarcoderdataPython |
12005 | <filename>xarray/backends/npy_io.py
import numpy as np
import xarray as xr
import pandas as pd
import sys
import json
import os
import datetime
from xarray.core.utils import (
decode_numpy_dict_values,
either_dict_or_kwargs,
ensure_us_time_resolution,
)
from numpy.compat import (
asbytes, asstr, asunicode, bytes, basestring, os_fspath, os_PathLike,
pickle, contextlib_nullcontext
)
from numpy.lib import format
class NumpyEncoder(json.JSONEncoder):
def default(self, obj):
if isinstance(obj, np.ndarray):
return obj.tolist()
if isinstance(obj, datetime.datetime):
return obj.__str__()
if isinstance(obj, np.datetime64):
return obj.__str__()
return json.JSONEncoder.default(self, obj)
def _is_string_like(obj):
"""
Check whether obj behaves like a string.
"""
try:
obj + ''
except (TypeError, ValueError):
return False
return True
def myJsonConverter(o):
if isinstance(o, datetime.datetime):
return o.__str__()
def save_npys(file, data, compress=False,min_dims_coord_npy = 2):
if isinstance(data,xr.DataArray):
_save_dataarray(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
elif isinstance(data,xr.Dataset):
_save_dataset(file, data, compress=compress,min_dims_coord_npy=min_dims_coord_npy)
else:
        raise TypeError('Unexpected type %s' % type(data))
class zip_file():
def __init__(self,file, *args, **kwargs):
"""
Create a ZipFile.
Allows for Zip64, and the `file` argument can accept file, str, or
pathlib.Path objects. `args` and `kwargs` are passed to the zipfile.ZipFile
constructor.
"""
if not hasattr(file, 'read'):
file = os_fspath(file)
import zipfile
kwargs['allowZip64'] = True
file_dir, file_prefix = os.path.split(file) if _is_string_like(file) else (None, 'tmp')
self.file_dir = file_dir
self.file_prefix = file_prefix
self.zipf = zipfile.ZipFile(file, *args, **kwargs)
def close(self):
self.zipf.close()
def open(self,x):
return self.zipf.open(x)
def read(self,x):
return self.zipf.read(x)
def namelist(self):
return self.zipf.namelist()
def add_bin_data(self,fname,data_bytes):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
fid.write(data_bytes)
else:
import tempfile
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
fid.write(data_bytes)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def add_npy(self,fname,val):
if sys.version_info >= (3, 6):
with self.zipf.open(fname, 'w', force_zip64=True) as fid:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
else:
import tempfile
# Since target file might be big enough to exceed capacity of a global
# temporary directory, create temp file side-by-side with the target file.
fd, tmpfile = tempfile.mkstemp(prefix=self.file_prefix, dir=self.file_dir, suffix=fname)
os.close(fd)
try:
fid = open(tmpfile, 'wb')
try:
format.write_array(fid, np.asanyarray(val), allow_pickle=False, pickle_kwargs=None)
fid.close()
fid = None
self.zipf.write(tmpfile, arcname=fname)
except IOError as exc:
raise IOError("Failed to write to %s: %s" % (tmpfile, exc))
finally:
if fid:
fid.close()
finally:
os.remove(tmpfile)
def _save_dataarray(file, dataarray, compress=False, min_dims_coord_npy =2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
    if not hasattr(file, 'write'):
        file = os_fspath(file)
        if not file.endswith('.xar'):
            file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
if dataarray.name is None:
data_name = 'data'
else:
data_name = dataarray.name
zipf.add_npy(data_name+'.npy',dataarray.values)
d = dataarray.variable.to_dict(data=False)
d['version'] = xr.__version__
d.update({"coords": {}, "name": dataarray.name})
for k in dataarray.coords:
assert(k!=data_name)
coord_var = dataarray.coords[k].variable
item = {"attrs": decode_numpy_dict_values(coord_var.attrs), "dtype":str(coord_var.values.dtype)}# we save the type here
if (coord_var.dims!=()) and( len(coord_var.dims)>1 or coord_var.dims[0]!=k): # we don't keep the dims if we have a dimension_coordinate or if dims is empty to keep the json more concise (see http://xarray.pydata.org/en/stable/data-structures.html#coordinates)
item['dims'] = coord_var.dims
if (coord_var.dims!=()) and len(coord_var.dims)>=min_dims_coord_npy:
zipf.add_npy(k+'.npy',coord_var.values)
else:
item["data"] = ensure_us_time_resolution(coord_var.values) # keeping coordinates data in the json
d["coords"][k] = item
json_str = json.dumps(d,cls=NumpyEncoder) + "\n" # 2. string (i.e. JSON)
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('DataArray.json',json_bytes)
zipf.close()
def _save_dataset(file, dataset, compress=False, min_dims_coord_npy = 2):#mostly copied from _savez in numpy\lib\npyio.py
import zipfile
    if not hasattr(file, 'write'):
        file = os_fspath(file)
        if not file.endswith('.xar'):
            file = file + '.xar'
if compress:
compression = zipfile.ZIP_DEFLATED
else:
compression = zipfile.ZIP_STORED
zipf = zip_file(file, mode="w", compression=compression)
dataset_dict = dataset.to_dict(data = False)
dataset_dict['version'] = xr.__version__
for key, array in dict(dataset.data_vars).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['data_vars'][key]['data']=ensure_us_time_resolution(val)
for key, array in dict(dataset.coords).items():
val = np.asanyarray(array.values)
if val.ndim >= min_dims_coord_npy:
zipf.add_npy('%s.npy'%key, val)
else:
dataset_dict['coords'][key]['data']=ensure_us_time_resolution(val)
json_str = json.dumps(dataset_dict,cls=NumpyEncoder) + "\n"
json_bytes = json_str.encode('utf-8')
zipf.add_bin_data('Dataset.json', json_bytes)
zipf.close()
def load_npys(file):
# TODO: Use contextlib.ExitStack once we drop Python 2
if hasattr(file, 'read'):
fid = file
own_fid = False
else:
fid = open(os_fspath(file), "rb")
own_fid = True
if True:
# Code to distinguish from NumPy binary files and pickles.
_ZIP_PREFIX = b'PK\x03\x04'
_ZIP_SUFFIX = b'PK\x05\x06' # empty zip files start with this
N = len(format.MAGIC_PREFIX)
magic = fid.read(N)
# If the file size is less than N, we need to make sure not
# to seek past the beginning of the file
fid.seek(-min(N, len(magic)), 1) # back-up
if magic.startswith(_ZIP_PREFIX) or magic.startswith(_ZIP_SUFFIX):
_zip = zip_file(fid)
files = _zip.namelist()
_data_dict={}
_type = None
for x in files:
if x.endswith('.npy'):
bytes = _zip.open(x)
magic = bytes.read(len(format.MAGIC_PREFIX))
bytes.close()
assert( magic == format.MAGIC_PREFIX)
bytes = _zip.open(x)
_data_dict[x[:-4]] = format.read_array(bytes, allow_pickle=False, pickle_kwargs=None)
elif x=='Dataset.json':
assert(_type is None)
_type = xr.Dataset
header = json.loads(_zip.read(x))
elif x=='DataArray.json':
assert(_type is None)
_type = xr.DataArray
header = json.loads(_zip.read(x))
if _type is None:
raise IOError("Failed to read file")
if _type == xr.DataArray:
if 'name' in header and (header['name'] is not None):
data_name = header['name']
else:
data_name = 'data'
data = _data_dict[data_name]
assert (data.dtype==header['dtype'])
assert (data.shape==tuple(header['shape']))
coords={}
for k,coord in header['coords'].items():
if 'data' in coord:
coord_data = np.array(coord['data'],dtype=coord['dtype'])
else:
coord_data = _data_dict[k]
if 'dims' in coord:
dims=coord['dims']
elif coord_data.ndim==0:
dims=()
else:
dims= [k]
coords[k]=xr.DataArray(coord_data,dims=dims)
return xr.DataArray(data, coords = coords, dims=header['dims'],attrs=header['attrs'],name=header['name'])
else: # type is Dataset
coords={}
data_vars={}
for k,d in header['coords'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
coords[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
for k,d in header['data_vars'].items():
if 'data' in d:
data = np.array(d['data'],dtype=d['dtype'])
else:
data = _data_dict[k]
data_vars[k]=xr.DataArray(data, dims=d['dims'], attrs=d['attrs'])
return xr.Dataset(data_vars, coords=coords,attrs=header['attrs'])
else:
raise IOError(
"Failed to interpret file %s as a zip" % repr(file))
return None
def test():
from xarray.testing import assert_identical
data = np.random.rand(4, 3)
locs = ['IA', 'IL', 'IN']
times = pd.date_range('2000-01-01', periods=4)
foo = xr.DataArray(data, coords=[times, locs], dims=['time', 'space'])
v=foo.coords['time'].variable
save_npys('foo',foo)
foo_loaded = load_npys('foo.xar')
assert_identical(foo,foo_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
da = xr.DataArray(temp,name='precipitations',dims=['x','y','time'],
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('da',da)
da_loaded=load_npys('da.xar')
assert_identical(da,da_loaded)
temp = 15 + 8 * np.random.randn(2, 2, 3)
precip = 10 * np.random.rand(2, 2, 3)
lon = [[-99.83, -99.32], [-99.79, -99.23]]
lat = [[42.25, 42.21], [42.63, 42.59]]
ds = xr.Dataset({'temperature' : (['x', 'y', 'time'], temp),
'precipitation': (['x', 'y', 'time'], precip)},
coords={'long': (['x', 'y'], lon), 'lat': (['x', 'y'], lat), 'time': pd.date_range('2014-09-06', periods=3), 'reference_time': pd.Timestamp('2014-09-05')})
save_npys('ds',ds,min_dims_coord_npy=1)
ds_loaded= load_npys('ds.xar')
assert_identical(ds, ds_loaded)
if __name__ == "__main__":
test()
| StarcoderdataPython |
4985705 | from typing import List  # required for the List[int] type annotations below
class Solution:
def merge(self, nums1: List[int], m: int, nums2: List[int], n: int) -> None:
"""
Do not return anything, modify nums1 in-place instead.
"""
i = 0
nums1_copy = nums1[:m]
while i < len(nums1):
if nums1_copy and (not nums2 or nums1_copy[0] < nums2[0]):
nums1[i] = nums1_copy.pop(0)
elif nums2 and (not nums1_copy or nums1_copy[0] >= nums2[0]):
nums1[i] = nums2.pop(0)
i += 1
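# Minimal worked sketch (not part of the original solution): merging
# nums1 = [1, 2, 3, 0, 0, 0] with m = 3 and nums2 = [2, 5, 6] with n = 3
# leaves nums1 as [1, 2, 2, 3, 5, 6].
#
#     Solution().merge([1, 2, 3, 0, 0, 0], 3, [2, 5, 6], 3)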
| StarcoderdataPython |
3454400 | # Copyright 2021 The ProLoaF Authors. All Rights Reserved.
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License.
# ==============================================================================
"""
Provides functions for handling and manipulating dataframes
Includes functions for scaling, rescaling, filling missing values etc.
Some functions are no longer used directly in the project, but may nonetheless be
useful for testing or future applications.
"""
import numpy as np
import pandas as pd
import utils.tensorloader as tl
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import MinMaxScaler
def load_raw_data_xlsx(files, path):
"""
Load data from an xlsx file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
path : string
The path specification which holds the input files in .XLSX format
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the xlsx file
+ ('date_column') the name of the date_column in the raw_data
+ ('time_zone') specifier for the timezone the raw data is recorded in
+ ('sheet_name') name or list of names of the sheets that are to be read
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('start_column') Columns between this and ('end_column') are loaded
+ ('end_column')
"""
print("Importing XLSX Data...")
combined_files = []
individual_files = []
for xlsx_file in files:
print("importing " + xlsx_file["file_name"])
# if isinstance(file_name, str):
# file_name = [file_name,'UTC']
date_column = xlsx_file["date_column"]
raw_data = pd.read_excel(
path + xlsx_file["file_name"],
xlsx_file["sheet_name"],
parse_dates=[date_column],
)
# convert load data to UTC
if xlsx_file["time_zone"] != "UTC":
raw_data[date_column] = (
pd.to_datetime(raw_data[date_column])
.dt.tz_localize(xlsx_file["time_zone"], ambiguous="infer")
.dt.tz_convert("UTC")
.dt.strftime("%Y-%m-%d %H:%M:%S")
)
else:
if xlsx_file["dayfirst"]:
raw_data[date_column] = pd.to_datetime(
raw_data[date_column], format="%d-%m-%Y %H:%M:%S"
).dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(
raw_data[date_column], format="%Y-%m-%d %H:%M:%S"
).dt.tz_localize(None)
if xlsx_file["data_abs"]:
raw_data.loc[
:, xlsx_file["start_column"] : xlsx_file["end_column"]
] = raw_data.loc[
:, xlsx_file["start_column"] : xlsx_file["end_column"]
].abs()
# rename column IDs, specifically Time, this will be used later as the df index
raw_data.rename(columns={date_column: "Time"}, inplace=True)
raw_data.head() # now the data is positive and set to UTC
raw_data.info()
# interpolating for missing entries created by asfreq and original missing values if any
raw_data.interpolate(method="time", inplace=True)
if xlsx_file["combine"]:
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if len(combined_files) > 0:
individual_files.append(pd.concat(combined_files))
return individual_files
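# Illustrative sketch (not from the original source): one `files` entry in the
# form documented in the Notes section above. File, sheet and column names are
# invented placeholders.
_EXAMPLE_XLSX_FILES = [
    {
        "file_name": "load_2019.xlsx",
        "date_column": "Timestamp",
        "time_zone": "Europe/Berlin",
        "sheet_name": "Sheet1",
        "combine": False,
        "dayfirst": True,
        "data_abs": False,
        "start_column": "Load",
        "end_column": "Temperature",
    }
]
# e.g. frames = load_raw_data_xlsx(_EXAMPLE_XLSX_FILES, "./data/")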
def load_raw_data_csv(files, path):
"""
Load data from a csv file
After loading, the date column in the raw data is converted to a UTC datetime
Parameters
----------
files : list
A list of files to read. See the Notes section for more information
path : string
The path specification which holds the input files in .CSV format
Returns
-------
list
A list containing a DataFrame for each file that was read
Notes
-----
- Files is an array of maps containing the following data with the keyword (keyword)
+ ('file_name') the name of the load_file
+ ('date_column') the name of the date_column in the raw_data
+ ('dayfirst') specifier for the formatting of the read time
+ ('sep') separator used in this file
+ ('combine') boolean, all datasheets with true are combined into one, all others are read individually
+ ('use_columns') list of columns that are loaded
"""
print("Importing CSV Data...")
combined_files = []
individual_files = []
for csv_file in files:
print("Importing " + csv_file["file_name"] + " ...")
date_column = csv_file["date_column"]
raw_data = pd.read_csv(
path + csv_file["file_name"],
sep=csv_file["sep"],
usecols=csv_file["use_columns"],
parse_dates=[date_column],
dayfirst=csv_file["dayfirst"],
)
# pd.read_csv(INPATH + name, sep=sep, usecols=cols, parse_dates=[date_column] , dayfirst=dayfirst)
if csv_file["time_zone"] != "UTC":
raw_data[date_column] = (
pd.to_datetime(raw_data[date_column])
.dt.tz_localize(csv_file["time_zone"], ambiguous="infer")
.dt.tz_convert("UTC")
.dt.strftime("%Y-%m-%d %H:%M:%S")
)
else:
if csv_file["dayfirst"]:
raw_data[date_column] = pd.to_datetime(
raw_data[date_column], format="%d-%m-%Y %H:%M:%S"
).dt.tz_localize(None)
else:
raw_data[date_column] = pd.to_datetime(
raw_data[date_column], format="%Y-%m-%d %H:%M:%S"
).dt.tz_localize(None)
print("...Importing finished. ")
raw_data.rename(columns={date_column: "Time"}, inplace=True)
if csv_file["combine"]:
combined_files.append(raw_data)
else:
individual_files.append(raw_data)
if len(combined_files) > 0:
individual_files.append(pd.concat(combined_files, sort=False))
# for frame in individual_files:
# frame.rename(columns={date_column: 'Time'}, inplace=True)
return individual_files
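# Illustrative sketch (not from the original source): one `files` entry for the
# CSV loader, mirroring the keywords documented above; names are placeholders.
_EXAMPLE_CSV_FILES = [
    {
        "file_name": "weather_2019.csv",
        "date_column": "Time",
        "sep": ";",
        "use_columns": ["Time", "temperature", "wind_speed"],
        "dayfirst": False,
        "time_zone": "UTC",
        "combine": True,
    }
]
# e.g. frames = load_raw_data_csv(_EXAMPLE_CSV_FILES, "./data/")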
def add_cyclical_features(df):
"""
    Generates and adds trigonometric (sine/cosine) time features to the DataFrame with respect to the index 'Time'.
Parameters
----------
df : pandas.DataFrame
The DataFrame that is complemented with cyclical time features
Returns
-------
df
The modified DataFrame
"""
## source http://blog.davidkaleko.com/feature-engineering-cyclical-features.html
df["hour_sin"] = np.sin(df.index.hour * (2.0 * np.pi / 24))
df["hour_cos"] = np.cos(df.index.hour * (2.0 * np.pi / 24))
df["mnth_sin"] = np.sin((df.index.month - 1) * (2.0 * np.pi / 12))
df["mnth_cos"] = np.cos((df.index.month - 1) * (2.0 * np.pi / 12))
return df
def add_onehot_features(df):
"""
    Generates and adds one-hot encoded time features to the DataFrame with respect to the index 'Time'.
Parameters
----------
df : pandas.DataFrame
The DataFrame that is complemented with one-hot coded time features
Returns
-------
df
The modified DataFrame
"""
# add one-hot encoding for Hour, Month & Weekdays
hours = pd.get_dummies(df.index.hour, prefix="hour").set_index(
df.index
) # one-hot encoding of hours
month = pd.get_dummies(df.index.month, prefix="month").set_index(
df.index
) # one-hot encoding of month
weekday = pd.get_dummies(df.index.dayofweek, prefix="weekday").set_index(
df.index
) # one-hot encoding of weekdays
df = pd.concat([df, hours, month, weekday], axis=1)
return df
def check_continuity(df):
"""
Raises value error upon violation of continuity constraint of the timeseries data.
Parameters
----------
df : pandas.DataFrame
The DataFrame whose index shall be checked against time continuity
    Raises
    ------
    ValueError
        If the DateTime index is not continuous
"""
if not df.index.equals(
pd.date_range(min(df.index), max(df.index), freq=df.index.freq)
):
raise ValueError("DateTime index is not continuous")
return
def check_nans(df):
"""
Print information upon missing values in the timeseries data.
Parameters
----------
df : pandas.DataFrame
The DataFrame whose values shall be checked against missing values
Returns
-------
    None
        Prints a message stating whether or not data is missing in the given DataFrame
"""
if not df.isnull().values.any():
print("No missing data \n")
else:
print("Missing data detected \n")
return
def set_to_hours(df):
"""
Sets the index of the DataFrame to 'Time' and the frequency to hours.
Parameters
----------
df : pandas.DataFrame
The DataFrame whose index and frequency are to be changed
Returns
-------
df
The modified DataFrame
"""
df["Time"] = pd.to_datetime(df["Time"])
df = df.set_index("Time")
df = df.asfreq(freq="H")
return df
def load_dataframe(data_path):
"""
Load the excel file at the given path into a pandas.DataFrame
.. deprecated::
This function is no longer used. Instead, use fc_prep.load_raw_data_xlsx, which
does the same thing but allows multiple files to be read at once, and ensures the
date column is correctly formatted.
Parameters
----------
data_path : string
The path to the excel file that is to be loaded
Returns
-------
pandas.DataFrame
A DataFrame containing the loaded data
"""
def parser(x):
        return pd.to_datetime(x, format="%Y-%m-%d %H:%M:%S")
print("> Load data from '{}'... ".format(data_path), end="")
df = pd.read_excel(data_path, parse_dates=[0], date_parser=parser)
print("done!")
return df
def ranges(nums):
"""
Take a list of numbers (sorted or unsorted) and return all contiguous ranges within the list
Ranges should be returned as tuples, where the first value is the start of the range and
the last value is the end (inclusive). Single numbers are returned as tuples where both
values equal the number. e.g. [0, 2, 3, 4, 6] -> [(0,0), (2,4), (6,6)]
Parameters
----------
nums : ndarray
A ndarray containing a list of numbers
Returns
-------
List
A list containing tuples, where each tuple represents a contiguous range from the
input ndarray
"""
nums = sorted(set(nums))
gaps = [[s, e] for s, e in zip(nums, nums[1:]) if s + 1 < e]
edges = iter(nums[:1] + sum(gaps, []) + nums[-1:])
return list(zip(edges, edges))
def custom_interpolate(df, periodicity = 1):
"""
Interpolate the features with missing values in a time series data frame
    For each feature/column:
    - finds the range of intervals of missing values
    - for each of the missing values in these intervals
        + collect the previous day's value and the next day's value (t-24 & t+24) at that time instant
        + if any of them are missing, go for the next day (t-48, t+48 and so on)
+ take their average
+ to account for the trend, shift the values by the slope of the interval extremes
Parameters
----------
df : pandas.DataFrame
DataFrame with missing values
periodicity : int, default = 1
        An int value that allows the customized interpolation method, which makes use of the timeseries periodicity
Returns
-------
pandas.DataFrame
DataFrame with interpolated values
"""
rows, columns = np.where(pd.isnull(df))
miss_rows_ranges = ranges(rows)
for i in range(len(miss_rows_ranges)):
start, end = miss_rows_ranges[i]
# dur = end - start
p = periodicity # periodicity
for col in range(len(df.columns)):
seas = np.zeros(len(df))
if (
start == end and end + 1 <= df.shape[0] and start - 1 >= 0
): # if single point, take average of the nearby ones
t = start
df.iloc[t, col] = (df.iloc[t - 1, col] + df.iloc[t + 1, col]) / 2
elif (
start == end and (end + 1 > df.shape[0] or start - 1 <= 0)
): # if single point, but the single point is at the beginning or end of the series, take the nearby one
t = start
if (start - 1 <= 0):
df.iloc[t, col] = df.iloc[t + 1, col]
if (end + 1 > df.shape[0]):
df.iloc[t, col] = df.iloc[t - 1, col]
else:
# now we are dealing with a range
if (start - p) <= 0 or (end + p) > (df.shape[0]):
df = df.interpolate(method="pchip") # check this if ok
else:
for t in range(start, end + 1):
p1 = p
p2 = p
while np.isnan(df.iloc[t - p1, col]):
p1 += p
while np.isnan(df.iloc[t + p2, col]):
p2 += p
seas[t] = (df.iloc[t - p1, col] + df.iloc[t + p2, col]) / 2
trend1 = np.poly1d(
np.polyfit([start, end], [seas[start], seas[end]], 1)
)
trend2 = np.poly1d(
np.polyfit(
[start - 1, end + 1],
[df.iloc[start - 1, col], df.iloc[end + 1, col]],
1,
)
)
for t in range(start, end + 1):
df.iloc[t, col] = seas[t] - trend1(t) + trend2(t)
return df
def fill_if_missing(df, periodicity = 1):
"""
If the given pandas.DataFrame has any NaN values, they are replaced with interpolated values
Parameters
----------
df : pandas.DataFrame
A pandas.DataFrame for which NaN values need to be replaced by interpolated values
periodicity : int, default = 1
        An int value that allows the customized interpolation method, which makes use of the timeseries periodicity
Returns
-------
pandas.DataFrame
A pandas.DataFrame with no NaN values
"""
if df.isnull().values.any():
print("Some values are NaN. They are being filled...")
df=custom_interpolate(df, periodicity)
print("...interpolation finished! No missing data left.")
else:
print("No missing data \n")
return df
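# Minimal sketch (not from the original source): filling an artificial gap in
# an hourly series, using a daily periodicity of 24 steps for the seasonal fill.
def _example_fill_if_missing():
    idx = pd.date_range("2020-01-01", periods=72, freq="H")
    df = pd.DataFrame({"load": np.random.rand(72)}, index=idx)
    df.iloc[30:35, 0] = np.nan  # introduce a contiguous gap
    return fill_if_missing(df, periodicity=24)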
def extract(df, horizon, anchor_key=0, filter_df=False):
"""
Extract data from the input DataFrame and reshape it into a suitable input form for a LSTM cell
The input DataFrame is reshaped into an ndarray with a number of entries (samples), such that each entry
contains n = 'horizon' rows (including all features) from the input DataFrame. There are i = 1,...,m entries in the
output ndarray, with m such that row (m + n - 1) is the final row from the input DataFrame.
e.g. The first entries in the ndarray would be:
[row 1, row 2, ..., row n], [row 2, row 3, ..., row n+1], etc.
Parameters
----------
df : pandas.DataFrame
The input DataFrame
horizon : int
The horizon/forecast length
Returns
-------
ndarray
The reshaped data in a numpy array (Shape: (number of samples, horizon, number of features))
"""
number_of_samples = df.shape[0] - horizon + 1
idx_remove = []
if number_of_samples <= 0:
number_of_samples = 1
if df.ndim > 1:
number_of_features = df.shape[1]
reshaped_data = np.empty([number_of_samples, horizon, number_of_features])
for i in range(number_of_samples):
reshaped_data[i, :, :] = df.iloc[i : i + horizon, :]
if isinstance(anchor_key, str) and anchor_key not in df.iloc[i].name:
idx_remove.append(i)
else:
reshaped_data = np.empty([number_of_samples, horizon])
for i in range(number_of_samples):
reshaped_data[i, :] = df.iloc[i : i + horizon]
if isinstance(anchor_key, str) and anchor_key not in df.iloc[i].name:
idx_remove.append(i)
if isinstance(anchor_key, str):
reshaped_data = np.delete(reshaped_data, idx_remove, axis=0)
if filter_df:
filtered_df = df
filtered_df = filtered_df.drop(filtered_df.index[idx_remove], inplace=True)
return reshaped_data, filtered_df
return reshaped_data
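# Minimal sketch (not from the original source): reshaping an hourly frame into
# overlapping windows of length `horizon`, as described in the docstring above.
def _example_extract():
    idx = pd.date_range("2020-01-01", periods=10, freq="H")
    df = pd.DataFrame({"a": range(10), "b": range(10, 20)}, index=idx)
    windows = extract(df, horizon=4)
    # windows.shape == (7, 4, 2): 7 samples, 4 time steps, 2 features
    return windows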
def scale(df, scaler):
"""
Scales the given pandas.DataFrame using the specified scikit-learn scaler
Parameters
----------
df : pandas.DataFrame
The input DataFrame
scaler : sklearn.preprocessing scaler
The scikit-learn scaler used by the model while training
Returns
-------
pandas.DataFrame
The scaled DataFrame
"""
df_new = scaler.transform(df)
df_new = pd.DataFrame(df_new, columns=df.columns)
return df_new
def rescale(values, scaler):
"""
Scale the given data back to its original representation
Parameters
----------
values : array-like, sparse matric of shape (n samples, n features)
The data to be rescaled
scaler : sklearn.preprocessing scaler
The scaler that was used to scale the data originally
Returns
-------
pandas.DataFrame
A DataFrame containing the rescaled data
"""
df_rescaled = pd.DataFrame(scaler.inverse_transform(values))
return df_rescaled
def rescale_manually(net, output, targets, target_position=0, **PAR):
"""
Manually rescales data that was previously scaled
Parameters
----------
net : utils.models.EncoderDecoder
The model that was used to generate the predictions.
output : list
A list containing predicted values. Each entry in the list is a set of predictions
targets : torch.Tensor
The actual or true values
target_position : int, default = 0
Which column of the data to rescale
**PAR : dict
A dictionary containing config parameters, see train.py for more.
Returns
-------
torch.Tensor
The targets (rescaled)
list
The expected values (for prob.: prediction intervals of the forecast, after rescaling, in form of output list)
"""
#TODO: isn't this also in a function of datatuner
#TODO: finish documentation
#get parameters 'scale' and 'center'
for group in PAR["feature_groups"]:
if group["features"] is not None and PAR["target_id"] in group["features"]:
scaler_name = group["name"]
scaler = net.scalers[scaler_name]
scale = scaler.scale_.take(target_position)
break # assuming target column can only be scaled once
if type(scaler) == RobustScaler:
# customized inversion robust_scaler
        if not scaler.center_.any():  # no center shift applied
center = 0
else:
center = scaler.center_.take(target_position)
scale = scaler.scale_.take(
target_position
) # The (scaled) interquartile range for each feature in the training set
elif type(scaler) == StandardScaler:
        if scaler.mean_.take(target_position) is None:
center = 0
else:
center = scaler.mean_.take(target_position)
scale = scaler.scale_.take(target_position)
elif type(scaler) == MinMaxScaler:
range_min = scaler.feature_range[0]
range_max = scaler.feature_range[1]
data_max = scaler.data_max_.take(target_position)
data_min = scaler.data_min_.take(target_position)
scale = (data_max - data_min) / (range_max - range_min)
center = data_min - range_min * scale
#TODO: else options
# rescale
loss_type = net.criterion # check the name here
targets_rescaled = (targets * scale) + center
output_rescaled = output
if loss_type == "pinball":
output_rescaled[1] = (output[1] * scale) + center # expected value
output_rescaled[0] = (output[0] * scale) + center #quantile
elif loss_type == "nll_gauss" or loss_type == "crps":
output_rescaled[1] = (output[0] * scale) + center # expected value
output_rescaled[0] = output[1] * (scale ** 2) #variance
else: # case: rmse, case, mse, case rae etc.
output_rescaled = (output * scale) + center
# TODO: else options
return targets_rescaled, output_rescaled
def scale_all(df: pd.DataFrame, feature_groups, start_date=None, scalers=None, **_):
"""
Scale and return the specified feature groups of the given DataFrame, each with their own
scaler, beginning at the index 'start_date'
Parameters
----------
df : pandas.DataFrame
The DataFrame with the data to be scaled
feature_groups : array
An array of dicts. Each dict has entries with the following keywords:
- "name", stores the name of the feature group
- "scaler", stores a list in which the first entry is the name of the feature
group's scaler. Valid names are 'standard', 'robust' or 'minmax'. Additional
entries in the list are for scaler parameters.
- "features", stores a list with the names of the features belonging to the feature group
start_date : int, default = None
The index of the date from which to begin scaling
Returns
-------
pandas.DataFrame
A DataFrame with the scaled features/targets
dict
A dict of sklearn.preprocessing scalers with their corresponding feature group
names (e.g."main", "add") as keywords
Raises
------
RuntimeError
Raised when no scaler could be generated - invalid scaler name in config file.
"""
# grouping should be an array of dicts.
# each dict defines a scaler and the features to be scaled
# returns a list of dataframes with the scaled features/targets, and the according scalers in a dict defined by the "name" keyword.
# TODO should these be named for potential double scaling (name can be used as suffix in join)
# TODO check if it is critical that we do not use fitted scalers in evaluate script
scaled_features = pd.DataFrame(index=df.index)[start_date:]
if scalers is None:
scalers = {}
for group in feature_groups:
df_to_scale = df.filter(group["features"])[start_date:]
if group["name"] in scalers:
scaler = scalers[group["name"]]
else:
scaler = None
if group["scaler"] is None or group["scaler"][0] is None:
if group["name"] != "aux":
print(
group["name"]
+ " features were not scaled, if this was unintentional check the config file."
)
elif group["scaler"][0] == "standard":
scaler = StandardScaler()
elif group["scaler"][0] == "robust":
scaler = RobustScaler(
quantile_range=(group["scaler"][1], group["scaler"][2])
)
elif group["scaler"][0] == "minmax":
scaler = MinMaxScaler(
feature_range=(group["scaler"][1], group["scaler"][2])
)
else:
raise RuntimeError("scaler could not be generated")
if scaler is not None:
scaler.fit(df_to_scale)
if group["features"] is not None:
df_to_scale = df.filter(group["features"])[start_date:]
if scaler is not None:
add_scaled_features = pd.DataFrame(
scaler.transform(df_to_scale),
columns=df_to_scale.columns,
index=df_to_scale.index,
)
scaled_features = scaled_features.join(
add_scaled_features
) # merge differently scaled dataframes
else:
scaled_features = scaled_features.join(df_to_scale)
scalers[group["name"]] = scaler
return scaled_features, scalers
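# Illustrative sketch (not from the original source): a `feature_groups`
# definition in the form the docstring above describes; the feature names are
# invented placeholders.
_EXAMPLE_FEATURE_GROUPS = [
    {"name": "main", "scaler": ["robust", 25.0, 75.0], "features": ["load"]},
    {"name": "add", "scaler": ["minmax", 0.0, 1.0], "features": ["temperature"]},
    {"name": "aux", "scaler": None, "features": ["hour_sin", "hour_cos"]},
]
# e.g. scaled_df, scalers = scale_all(df, _EXAMPLE_FEATURE_GROUPS)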
def constructDf(
data,
columns,
train_split,
forecast_horizon,
history_horizon,
interval=1,
number_forecasts=0,
limit_memory=False,
):
"""
Construct and reorder data for training
Parameters
----------
data : pandas.DataFrame with all data
The DataFrame containing all data to be reordered
columns : list
List of columns used in model target+exog
train_split : float
Fraction of data to use for training
forecast_horizon : int
The number of forecast steps into the future
history_horizon : int
The size of the history horizon
interval : int, default = 1
The number of time_steps between every forecast. By default, forecast with moving
window of 1 timestep
number_forecasts : int, default = 0
The number of forecast. By default, forecast over whole test-period
limit_memory : bool, default = False
Set to True when the history horizon should limit the recent memory
Returns
-------
List
A list of DataFrames for input
List
A list of DataFrames for output
"""
train = []
test = []
train_test_split = int(train_split * len(data))
df = data[columns]
recent_memory = 0
if number_forecasts == 0:
number_forecasts = len(data)
if history_horizon != 0 and limit_memory:
recent_memory = max(0, train_test_split - history_horizon)
for i in range(number_forecasts):
forecast_start = train_test_split + 1 + interval * i
if (forecast_start + forecast_horizon) > len(df):
# break when end of test period is reached in next iteration.
break
train.append(
pd.DataFrame(df.iloc[recent_memory:forecast_start], columns=df.columns)
)
test.append(
pd.DataFrame(
df.iloc[forecast_start : forecast_start + forecast_horizon],
columns=df.columns,
)
)
return train, test
def transform(
df: pd.DataFrame,
encoder_features,
decoder_features,
batch_size,
history_horizon,
forecast_horizon,
target_id,
train_split=0.7,
validation_split=0.85,
device='cpu',
**_,
):
"""
Construct tensor-data-loader transformed for encoderdecoder model input
Parameters
----------
TODO: check if consistent with new structure
df : pandas.DataFrame
The data frame containing the model features, to be split into sets for training
encoder_features : string list
A list containing desired encoder feature names as strings
decoder_features : string list
A list containing desired decoder feature names as strings
batch_size : int scalar
The size of a batch for the tensor data loader
history_horizon : int scalar
The length of the history horizon in hours
forecast_horizon : int scalar
The length of the forecast horizon in hours
train_split : float scalar
Where to split the data frame for the training set, given as a fraction of data frame length
validation_split : float scalar
Where to split the data frame for the validation set, given as a fraction of data frame length
device : string
defines whether to handle data with cpu or cuda
Returns
-------
utils.tensorloader.CustomTensorDataLoader
The training data loader
utils.tensorloader.CustomTensorDataLoader
The validation data loader
utils.tensorloader.CustomTensorDataLoader
The test data loader
"""
split_index = int(len(df.index) * train_split)
subsplit_index = int(len(df.index) * validation_split)
df_train = df.iloc[0:split_index]
df_val = df.iloc[split_index:subsplit_index]
df_test = df.iloc[subsplit_index:]
print("Size training set: \t{}".format(df_train.shape[0]))
print("Size validation set: \t{}".format(df_val.shape[0]))
# shape input data that is measured in the Past and can be fetched from UDW/LDW
train_data_loader = tl.make_dataloader(
df_train,
target_id,
encoder_features,
decoder_features,
history_horizon=history_horizon,
forecast_horizon=forecast_horizon,
batch_size=batch_size,
).to(device)
validation_data_loader = tl.make_dataloader(
df_val,
target_id,
encoder_features,
decoder_features,
history_horizon=history_horizon,
forecast_horizon=forecast_horizon,
batch_size=batch_size,
).to(device)
test_data_loader = tl.make_dataloader(
df_test,
target_id,
encoder_features,
decoder_features,
history_horizon=history_horizon,
forecast_horizon=forecast_horizon,
batch_size=1,
).to(device)
return train_data_loader, validation_data_loader, test_data_loader
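# Minimal wiring sketch (not from the original source): the column names below
# are placeholders for whatever features exist in the prepared frame.
def _example_transform(scaled_df):
    return transform(
        scaled_df,
        encoder_features=["load_lag", "hour_sin"],
        decoder_features=["hour_sin"],
        batch_size=32,
        history_horizon=24,
        forecast_horizon=24,
        target_id="load",
    )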
| StarcoderdataPython |
4916438 | <filename>matador/fingerprints/similarity.py
# coding: utf-8
# Distributed under the terms of the MIT License.
""" This submodule implements filtering based on Fingerprint objects,
although only PDF has been implemented so far.
"""
import copy
from collections import defaultdict
from typing import List, Dict, Tuple
import numpy as np
from matador.fingerprints.pdf import PDF, PDFFactory
from matador.fingerprints.fingerprint import Fingerprint
from matador.utils.cursor_utils import get_guess_doc_provenance
def get_uniq_cursor(
cursor,
sim_tol=0.1,
energy_tol=1e-2,
enforce_same_stoich=True,
fingerprint=PDF,
hierarchy_order=None,
hierarchy_values=None,
debug=False,
**fingerprint_calc_args
) -> Tuple[List[int], Dict[int, int], List[Fingerprint], np.ndarray]:
""" Uses fingerprint to filter cursor into unique structures to some
tolerance sim_tol, additionally returning a dict of duplicates and the
correlation matrix.
    The choice of which of the duplicates is kept in the unique cursor is
    defined by the "hierarchy". By default, this will guess the provenance
    of a document and prefer structures from "primary sources", i.e.
    ICSD -> OQMD -> Materials Project -> SWAPS -> AIRSS -> GA. A custom hierarchy
can be provided through `hierarchy_order`, which must be accompanied by a list
of values per structure to check against that hierarchy.
Parameters:
cursor (list) : matador cursor to be filtered
Keyword Arguments:
fingerprint (Fingerprint): fingerprint object type to compare
(DEFAULT: PDF)
sim_tol (float/bool): tolerance in similarity distance for
duplicates (if True, default value of 0.1 used)
energy_tol (float): compare only structures within a certain
energy tolerance (1e20 if enforce_same_stoich is False)
enforce_same_stoich (bool): compare only structures of the same
stoichiometry
debug (bool): print timings and list similarities
fingerprint_calc_args (dict): kwargs to pass to fingerprint
Returns:
ordered list of indices of unique documents,
a dict with keys from distinct_set,
a list of Fingerprint objects,
and the sparse correlation matrix of pairwise similarity distances
"""
if isinstance(sim_tol, bool):
sim_tol = 0.1
if not cursor:
raise RuntimeError("No structures provided to compare.")
fingerprint_list = []
if not enforce_same_stoich:
energy_tol = 1e20
print('Calculating fingerprints...')
fingerprint_list = [None for doc in cursor]
required_inds = set()
    # scipy sparse matrices don't seem to allow non-zero default values, so we'll use a defaultdict
sim_mat = defaultdict(lambda: 1e10)
print('Assessing similarities...')
for i in range(len(fingerprint_list)):
for j in range(i+1, len(fingerprint_list)):
# are we checking stoichiometries, if so, ensure they're the same
if (enforce_same_stoich is False or
(sorted(cursor[j]['stoichiometry']) == sorted(cursor[i]['stoichiometry']) and
np.abs(cursor[j].get('enthalpy_per_atom', 0) - cursor[i].get('enthalpy_per_atom', 0)) < energy_tol)):
# need to set both to zero so we can iterate over the dict later
sim_mat[i, j] = None
sim_mat[j, i] = None
required_inds.add(i)
required_inds.add(j)
factory = PDFFactory(cursor, required_inds=list(required_inds), **fingerprint_calc_args)
for i, j in sim_mat:
if sim_mat[i, j] is None:
sim = cursor[i][factory.default_key].get_sim_distance(cursor[j][factory.default_key])
sim_mat[i, j] = sim
sim_mat[j, i] = sim
distinct_set = set()
dupe_set = set()
dupe_dict = dict()
for i in range(len(cursor)):
distinct_set.add(i)
dupe_dict[i] = set()
# loop over the similarity matrix and construct the set of "unique" structures
# and a dictionary containing their duplicates
for i, j in sim_mat:
if sim_mat[i, j] <= sim_tol:
if i not in dupe_set:
if j in distinct_set:
distinct_set.remove(j)
del dupe_dict[j]
dupe_set.add(j)
dupe_dict[i].add(j)
total_dupes = len(set(list(dupe_dict.keys()) + [item for key in dupe_dict for item in dupe_dict[key]]))
if len(cursor) != total_dupes:
raise RuntimeError("Something went wrong: dupe dict had wrong size {} compared to cursor {}!\nFull output: {}"
.format(total_dupes, len(cursor), dupe_dict))
if hierarchy_order is None:
hierarchy_order = ['ICSD', 'DOI', 'OQMD', 'MP', 'PF', 'SWAPS', 'AIRSS', 'GA']
if hierarchy_values is None:
hierarchy_values = [get_guess_doc_provenance(doc['source']) for doc in cursor]
print('Applying hierarchy of structures with order: {}'.format(hierarchy_order))
dupe_dict = _enforce_hierarchy(dupe_dict, hierarchy_values, hierarchy_order)
all_structures = set(list(dupe_dict.keys()) + [item for key in dupe_dict for item in dupe_dict[key]])
if len(cursor) != len(all_structures):
raise RuntimeError("Something went wrong: dupe dict had wrong size {} compared to cursor {}!\nDifference: {}"
.format(len(all_structures),
len(cursor),
all_structures.symmetric_difference({i for i in range(len(cursor))})))
print('Done!')
return sorted(list(dupe_dict.keys())), dupe_dict, fingerprint_list, sim_mat
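# Minimal usage sketch (not from the original source): `cursor` is assumed to be
# a list of matador documents carrying at least 'stoichiometry',
# 'enthalpy_per_atom' and 'source' keys, as accessed above.
def _example_filter_cursor(cursor):
    uniq_inds, dupe_dict, _, _ = get_uniq_cursor(cursor, sim_tol=0.1, energy_tol=1e-2)
    return [cursor[ind] for ind in uniq_inds]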
def _enforce_hierarchy(dupe_dict, values, hierarchy):
""" Enforce a general hierarchy of which structures to keep, based
on the list of values and their importance.
Parameters:
dupe_dict (dict): the dictionary keyed by the index of unique structures
that holds lists of duplicates for that structure.
values (list): the list of values for each structure on which to enforce
the hierarchy.
hierarchy (list): the order in which to consider the values, e.g.
`['ICSD', 'OQMD']` will promote ICSD structures over OQMD.
Returns:
dict: the reshuffled dictionary of duplicates.
"""
max_val = max(list(dupe_dict.keys()) + [val for t in dupe_dict.values() for val in t])
if len(values) - 1 != max_val:
raise RuntimeError("Number of hierarchy values does not much number of items: {} vs {}"
.format(len(values)-1, max_val))
new_dupe_dict = copy.deepcopy(dupe_dict)
swapped = []
for i in new_dupe_dict:
if not list(new_dupe_dict[i]):
continue
for value in hierarchy:
found = False
for k in [i] + list(new_dupe_dict[i]):
if values[k] == value:
swapped.append((i, k))
found = True
break
if found:
break
for i, k in swapped:
if i != k:
if k in new_dupe_dict:
new_dupe_dict[k].update([ind for ind in new_dupe_dict[i] if ind != k] + [i])
else:
new_dupe_dict[k] = set([ind for ind in new_dupe_dict[i] if ind != k] + [i])
del new_dupe_dict[i]
return new_dupe_dict
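# Worked sketch of the reshuffling above (not from the original source): with
# duplicates {0: {1, 2}} and per-structure provenances ['AIRSS', 'ICSD', 'OQMD'],
# the ICSD structure (index 1) is promoted, so the returned dict is {1: {0, 2}}:
#
#     _enforce_hierarchy({0: {1, 2}}, ['AIRSS', 'ICSD', 'OQMD'], ['ICSD', 'OQMD'])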
| StarcoderdataPython |
9680794 | from . import views
from django.urls import path
urlpatterns = [
path('', views.PostList.as_view(), name='home'),
path('About/', views.AboutPageView.as_view(), name='about'),
path('Projects/', views.ProjectsPageView.as_view(), name='projects'),
path('Contact/', views.ContactPageView.as_view(), name='contact'),
path('Terms/', views.TermsPageView.as_view(), name='terms'),
path('Privacy/', views.PrivacyPageView.as_view(), name='privacy'),
path('Disclaimer/', views.DisclaimerPageView.as_view(), name='disclaimer'),
path('<slug:slug>/', views.PostDetail.as_view(), name='post_detail'),
]
| StarcoderdataPython |
11304500 | from kapteyn import maputils
import numpy
from service import *
fignum = 21
fig = plt.figure(figsize=figsize)
frame = fig.add_axes(plotbox)
theta_a = -45
t1 = -20.0; t2 = -70.0
eta = abs(t1-t2)/2.0
title = r"""Conic equal area projection (COE) with:
$\theta_a=-45^\circ$, $\theta_1=-20^\circ$ and $\theta_2=-70^\circ$. (Cal. fig.25)"""
header = {'NAXIS' : 2, 'NAXIS1': 100, 'NAXIS2': 80,
'CTYPE1' : 'RA---COE',
'CRVAL1' : 0.0, 'CRPIX1' : 50, 'CUNIT1' : 'deg', 'CDELT1' : -4.0,
'CTYPE2' : 'DEC--COE',
'CRVAL2' : theta_a, 'CRPIX2' : 40, 'CUNIT2' : 'deg', 'CDELT2' : 4.0,
'PV2_1' : theta_a, 'PV2_2' : eta
}
X = cylrange()
Y = numpy.arange(-90,91,30.0); Y[-1] = dec0
f = maputils.FITSimage(externalheader=header)
annim = f.Annotatedimage(frame)
grat = annim.Graticule(axnum= (1,2), wylim=(-90,90.0), wxlim=(0,360),
startx=X, starty=Y)
grat.setp_lineswcs0(0, lw=2)
grat.setp_lineswcs1(0, lw=2)
lon_world = list(range(0,360,30))
lon_world.append(180+epsilon)
lat_constval = 10
lat_world = [-60,-30,0,30,60]
addangle0 = -90.0
labkwargs0 = {'color':'r', 'va':'center', 'ha':'center'}
labkwargs1 = {'color':'b', 'va':'bottom', 'ha':'right'}
doplot(frame, fignum, annim, grat, title,
lon_world=lon_world, lat_world=lat_world,
lat_constval=lat_constval,
labkwargs0=labkwargs0, labkwargs1=labkwargs1,
addangle0=addangle0, markerpos=markerpos)
| StarcoderdataPython |
8000845 | <filename>users.py
from flask import Blueprint, current_app
users_bp = Blueprint("users", __name__)
@users_bp.route('/login')
def login():
print(current_app.database)
return "login page"
| StarcoderdataPython |
1909733 | from django.urls import path
from .views import LatestPostFeed, PostDetailView, PostListView
app_name = 'blog'
urlpatterns = [
path('feed/', LatestPostFeed()),
path('', PostListView.as_view(), name='index'),
path('<slug:slug>/', PostDetailView.as_view(), name='detail'),
]
| StarcoderdataPython |
153987 | # Copyright 2020 StrongDM Inc
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# Generated by the gRPC Python protocol compiler plugin. DO NOT EDIT!
"""Client and server classes corresponding to protobuf-defined services."""
import grpc
from . import roles_pb2 as roles__pb2
class RolesStub(object):
"""Roles are tools for controlling user access to resources. Each Role holds a
list of resources which they grant access to. Composite roles are a special
type of Role which have no resource associations of their own, but instead
grant access to the combined resources associated with a set of child roles.
Each user can be a member of one Role or composite role.
"""
def __init__(self, channel):
"""Constructor.
Args:
channel: A grpc.Channel.
"""
self.Create = channel.unary_unary(
'/v1.Roles/Create',
request_serializer=roles__pb2.RoleCreateRequest.SerializeToString,
response_deserializer=roles__pb2.RoleCreateResponse.FromString,
)
self.Get = channel.unary_unary(
'/v1.Roles/Get',
request_serializer=roles__pb2.RoleGetRequest.SerializeToString,
response_deserializer=roles__pb2.RoleGetResponse.FromString,
)
self.Update = channel.unary_unary(
'/v1.Roles/Update',
request_serializer=roles__pb2.RoleUpdateRequest.SerializeToString,
response_deserializer=roles__pb2.RoleUpdateResponse.FromString,
)
self.Delete = channel.unary_unary(
'/v1.Roles/Delete',
request_serializer=roles__pb2.RoleDeleteRequest.SerializeToString,
response_deserializer=roles__pb2.RoleDeleteResponse.FromString,
)
self.List = channel.unary_unary(
'/v1.Roles/List',
request_serializer=roles__pb2.RoleListRequest.SerializeToString,
response_deserializer=roles__pb2.RoleListResponse.FromString,
)
class RolesServicer(object):
"""Roles are tools for controlling user access to resources. Each Role holds a
list of resources which they grant access to. Composite roles are a special
type of Role which have no resource associations of their own, but instead
grant access to the combined resources associated with a set of child roles.
Each user can be a member of one Role or composite role.
"""
def Create(self, request, context):
"""Create registers a new Role.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Get(self, request, context):
"""Get reads one Role by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Update(self, request, context):
"""Update patches a Role by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def Delete(self, request, context):
"""Delete removes a Role by ID.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def List(self, request, context):
"""List gets a list of Roles matching a given set of criteria.
"""
context.set_code(grpc.StatusCode.UNIMPLEMENTED)
context.set_details('Method not implemented!')
raise NotImplementedError('Method not implemented!')
def add_RolesServicer_to_server(servicer, server):
rpc_method_handlers = {
'Create': grpc.unary_unary_rpc_method_handler(
servicer.Create,
request_deserializer=roles__pb2.RoleCreateRequest.FromString,
response_serializer=roles__pb2.RoleCreateResponse.SerializeToString,
),
'Get': grpc.unary_unary_rpc_method_handler(
servicer.Get,
request_deserializer=roles__pb2.RoleGetRequest.FromString,
response_serializer=roles__pb2.RoleGetResponse.SerializeToString,
),
'Update': grpc.unary_unary_rpc_method_handler(
servicer.Update,
request_deserializer=roles__pb2.RoleUpdateRequest.FromString,
response_serializer=roles__pb2.RoleUpdateResponse.SerializeToString,
),
'Delete': grpc.unary_unary_rpc_method_handler(
servicer.Delete,
request_deserializer=roles__pb2.RoleDeleteRequest.FromString,
response_serializer=roles__pb2.RoleDeleteResponse.SerializeToString,
),
'List': grpc.unary_unary_rpc_method_handler(
servicer.List,
request_deserializer=roles__pb2.RoleListRequest.FromString,
response_serializer=roles__pb2.RoleListResponse.SerializeToString,
),
}
generic_handler = grpc.method_handlers_generic_handler(
'v1.Roles', rpc_method_handlers)
server.add_generic_rpc_handlers((generic_handler,))
# This class is part of an EXPERIMENTAL API.
class Roles(object):
"""Roles are tools for controlling user access to resources. Each Role holds a
list of resources which they grant access to. Composite roles are a special
type of Role which have no resource associations of their own, but instead
grant access to the combined resources associated with a set of child roles.
Each user can be a member of one Role or composite role.
"""
@staticmethod
def Create(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Roles/Create',
roles__pb2.RoleCreateRequest.SerializeToString,
roles__pb2.RoleCreateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Get(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Roles/Get',
roles__pb2.RoleGetRequest.SerializeToString,
roles__pb2.RoleGetResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Update(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Roles/Update',
roles__pb2.RoleUpdateRequest.SerializeToString,
roles__pb2.RoleUpdateResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def Delete(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Roles/Delete',
roles__pb2.RoleDeleteRequest.SerializeToString,
roles__pb2.RoleDeleteResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
@staticmethod
def List(request,
target,
options=(),
channel_credentials=None,
call_credentials=None,
insecure=False,
compression=None,
wait_for_ready=None,
timeout=None,
metadata=None):
return grpc.experimental.unary_unary(request, target, '/v1.Roles/List',
roles__pb2.RoleListRequest.SerializeToString,
roles__pb2.RoleListResponse.FromString,
options, channel_credentials,
insecure, call_credentials, compression, wait_for_ready, timeout, metadata)
| StarcoderdataPython |
9750488 | <filename>meddlr/transforms/base/spatial.py
import logging
from typing import Sequence, Tuple
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from PIL import Image
import meddlr.ops.complex as cplx
from meddlr.transforms.build import TRANSFORM_REGISTRY
from meddlr.transforms.mixins import GeometricMixin
from meddlr.transforms.transform import Transform
@TRANSFORM_REGISTRY.register()
class AffineTransform(GeometricMixin, Transform):
def __init__(
self,
angle: float = None,
translate: Sequence[int] = None,
scale=None,
shear: Sequence[int] = None,
pad_like: str = None,
upsample_factor: float = 1,
upsample_order: int = 1,
) -> None:
super().__init__()
logger = logging.getLogger(f"{__name__}.{type(self).__name__}")
if angle is None:
angle = 0.0
if scale is None:
scale = 1.0
if translate is None:
translate = [0, 0]
if shear is None:
shear = [0, 0]
if pad_like not in (None, "MRAugment"):
raise ValueError("`pad_like` must be one of (None, 'MRAugment')")
if pad_like == "MRAugment" and translate not in ([0, 0], None):
logger.warning("MRAugment padding may not appropriately account for translation")
self._set_attributes(locals())
def _apply_affine(self, x):
img = x
angle = self.angle
translate = self.translate[::-1]
scale = self.scale
shear = self.shear[::-1]
upsample_factor = self.upsample_factor
upsample_order = self.upsample_order
is_complex = cplx.is_complex(img)
permute = is_complex or cplx.is_complex_as_real(img)
if is_complex:
img = torch.view_as_real(img)
if permute:
img = img.permute((img.ndim - 1,) + tuple(range(0, img.ndim - 1)))
shape = img.shape
use_view = img.ndim > 4
if use_view:
is_contiguous = img.is_contiguous()
if is_contiguous:
img = img.view((np.product(shape[:-3]),) + shape[-3:])
else:
img = img.reshape((np.product(shape[:-3]),) + shape[-3:])
base_shape = img.shape[-2:]
upsample = upsample_factor != 1
interpolation = Image.BICUBIC if upsample_order == 3 else Image.BILINEAR
if upsample:
upsampled_shape = (
img.shape[-2] * self.upsample_factor,
img.shape[-1] * self.upsample_factor,
)
img = TF.resize(img, size=upsampled_shape, interpolation=interpolation)
h, w = img.shape[-2:]
if self.pad_like == "MRAugment":
pad = _get_mraugment_affine_pad(shape[-2:], angle, translate, scale, shear)
img = TF.pad(img, padding=pad, padding_mode="reflect")
img = TF.affine(
img, angle=angle, translate=translate, scale=scale, shear=shear, resample=2 # bilinear
)
if self.pad_like == "MRAugment":
img = TF.center_crop(img, (h, w))
if upsample:
img = TF.resize(img, size=base_shape, interpolation=interpolation)
if use_view:
img = img.view(shape)
if permute:
img = img.permute(tuple(range(1, img.ndim)) + (0,))
if is_complex:
img = torch.view_as_complex(img.contiguous())
return img
def apply_image(self, img: torch.Tensor):
return self._apply_affine(img)
def apply_maps(self, maps: torch.Tensor):
maps = self._apply_affine(maps) # BxCxMxHxW
norm = cplx.rss(maps, dim=1).unsqueeze(1)
norm += 1e-8 * (norm == 0)
maps = maps / norm
return maps
def _eq_attrs(self) -> Tuple[str]:
return ("angle", "translate", "scale", "shear")
@TRANSFORM_REGISTRY.register()
class TranslationTransform(GeometricMixin, Transform):
def __init__(self, translate: Sequence[int], pad_mode="constant", pad_value=0) -> None:
super().__init__()
self.translate = translate
self.pad_mode = pad_mode
self.pad_value = pad_value
def apply_image(self, img: torch.Tensor):
translation = self.translate
max_dims = len(translation) + 2
is_complex = cplx.is_complex(img)
permute = is_complex or cplx.is_complex_as_real(img)
if is_complex:
img = torch.view_as_real(img)
if permute:
img = img.permute((img.ndim - 1,) + tuple(range(0, img.ndim - 1)))
shape = img.shape
use_view = img.ndim > max_dims
if use_view:
is_contiguous = img.is_contiguous()
if is_contiguous:
img = img.view((np.product(shape[: -(max_dims - 1)]),) + shape[-(max_dims - 1) :])
else:
img = img.reshape(
(np.product(shape[: -(max_dims - 1)]),) + shape[-(max_dims - 1) :]
)
pad, sl = _get_mraugment_translate_pad(img.shape, translation)
img = F.pad(img, pad, mode=self.pad_mode, value=self.pad_value)
img = img[sl]
if use_view:
img = img.view(shape)
if permute:
img = img.permute(tuple(range(1, img.ndim)) + (0,))
if is_complex:
img = torch.view_as_complex(img.contiguous())
return img
def apply_maps(self, maps: torch.Tensor):
maps = self.apply_image(maps) # BxCxMxHxW
norm = cplx.rss(maps, dim=1).unsqueeze(1)
norm += 1e-8 * (norm == 0)
maps = maps / norm
return maps
@TRANSFORM_REGISTRY.register()
class FlipTransform(GeometricMixin, Transform):
def __init__(self, dims):
super().__init__()
if isinstance(dims, int):
dims = (dims,)
self.dims = dims
def apply_image(self, img: torch.Tensor):
if cplx.is_complex_as_real(img):
img = torch.view_as_complex(img)
return torch.flip(img, self.dims)
def apply_kspace(self, kspace):
return self.apply_image(kspace)
def inverse(self):
return FlipTransform(self.dims)
def _eq_attrs(self) -> Tuple[str]:
return ("dims",)
@TRANSFORM_REGISTRY.register()
class Rot90Transform(GeometricMixin, Transform):
def __init__(self, k, dims) -> None:
super().__init__()
self.k = k
self.dims = dims
def apply_image(self, img: torch.Tensor):
return torch.rot90(img, self.k, self.dims)
def apply_kspace(self, kspace):
return self.apply_image(kspace)
def inverse(self):
return Rot90Transform(self.k, self.dims[::-1])
def _eq_attrs(self) -> Tuple[str]:
return ("k", "dims")
def _get_mraugment_affine_pad(im_shape, angle, translate, scale, shear):
"""Calculate the padding size based on MRAugment padding method.
This padding should be applied before the affine transformation.
Args:
im_shape (tuple): Shape as ``(height, width)``.
angle (float): The rotating angle.
scale (float): The scale factor.
shear (tuple): Shear factors (H x W) (i.e. YxX).
Note:
This method is adapted from MRAugment.
https://github.com/MathFLDS/MRAugment/blob/master/mraugment/data_augment.py
"""
h, w = im_shape
shear = shear[::-1]
translate = translate[::-1]
corners = [
[-h / 2, -w / 2, 1.0],
[-h / 2, w / 2, 1.0],
[h / 2, w / 2, 1.0],
[h / 2, -w / 2, 1.0],
]
mx = torch.tensor(
TF._get_inverse_affine_matrix([0.0, 0.0], -angle, translate, scale, [-s for s in shear])
).reshape(2, 3)
corners = torch.cat([torch.tensor(c).reshape(3, 1) for c in corners], dim=1)
tr_corners = torch.matmul(mx, corners)
all_corners = torch.cat([tr_corners, corners[:2, :]], dim=1)
bounding_box = all_corners.amax(dim=1) - all_corners.amin(dim=1)
py = torch.clip(torch.floor((bounding_box[0] - h) / 2), min=0.0, max=h - 1)
px = torch.clip(torch.floor((bounding_box[1] - w) / 2), min=0.0, max=w - 1)
return int(px.item()), int(py.item())
def _get_mraugment_translate_pad(im_shape, translation):
shape = im_shape[-len(translation) :]
pad = []
sl = []
for s, t in zip(shape, translation):
if t > 0:
pad.append((t, 0))
sl.append(slice(0, s))
else:
pad.append((0, abs(t)))
sl.append(slice(abs(t), None))
pad = [x for y in pad[::-1] for x in y]
sl.insert(0, Ellipsis)
return pad, sl
| StarcoderdataPython |
11246590 | <filename>Advance_Python/Exception_Handling/Finally_Keyword.py
# Finally Keyword
try:
a = int(input("Enter a : "))
b = int(input("Enter b : "))
c = a / b
print(c)
except ZeroDivisionError:
print("Division by zero")
else:
print("Else Block")
finally:
print("Final block always execute") | StarcoderdataPython |
1998645 | <reponame>kauanmatos224/DATA_SCIENCE<filename>Python_Softwares_MachineLearning_/PIPELINE_MultipleLinearRegression_DistributionPlot_PolynomialRegression_MSE_RSquared.py
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
from sklearn.linear_model import LinearRegression
#IMPORTS THE SEABORN VISUALIZATION LIBRARY
import seaborn as sns
#IMPORTS THE SEABORN VISUALIZATION LIBRARY
# POLYNOMIAL REGRESSION CREATION
import sklearn.preprocessing
#PIPELINE CONSTRUCTION
from sklearn.pipeline import Pipeline
from sklearn.preprocessing import StandardScaler
#PIPELINE CONSTRUCTION
from sklearn.preprocessing import PolynomialFeatures
#mean squared error - MULTIPLE LINEAR REGRESSION
from sklearn.metrics import mean_squared_error
#R SQUARED AND MSE FOR POLYNOMIAL REGRESSION
from sklearn.metrics import r2_score
path = 'https://cf-courses-data.s3.us.cloud-object-storage.appdomain.cloud/IBMDeveloperSkillsNetwork-DA0101EN-SkillsNetwork/labs/Data%20files/automobileEDA.csv'
df = pd.read_csv(path)
df.head()
#IMPORTS------------------------------------------------------------------------
#PLOTS DISTRIBUTION, RESIDUAL AND LINEAR REGRESSION GRAPHS
def create_plot(dataf, x1, y1 , plot_type, savename):
#clears and closes the current figure (the last plot drawn)
plt.clf()
plt.close()
#sets the dimensions of the figure / plot
width = 12
height = 10
'''
below, checks the parameter passed when the method is invoked to decide which
type of plot should be drawn
'''
if(plot_type == 'regression'): #plots the linear regression graph
plt.figure(figsize=(width, height))
sns.regplot(x=dataf[x1], y=dataf[y1], data=dataf)
plt.ylim(0,)
elif (plot_type == 'resid'): #plots the linear residual graph
plt.figure(figsize=(width, height))
sns.residplot(dataf[x1], df[y1])
elif (plot_type == 'distribution'): #plots the distribution graph as curves
lm = LinearRegression()
lm.fit(df[x1], df[y1])
y_prediction = lm.predict(df[x1])
plt.figure(figsize=(width, height))
ax1 = sns.kdeplot(df[y1], color="r", label="Actual Value")
sns.kdeplot(y_prediction, color="b", label="Fitted Values" , ax=ax1)
plt.title('Actual vs Fitted Values for '+ y1)
plt.xlabel(y1)
'''below, creates a counter to extract, from the independent-terms variable,
the X-axis labels of the plot from their indices
'''
c=0
ylabel = ''
while(c < len(x1)):
ylabel = ylabel+" | "+x1[c] +"|"
c+=1
print(c)
plt.ylabel(ylabel) #inserts the label string
'''
below, checks whether saving the image was requested when the method was invoked
'''
if len(savename) > 0:
plt.savefig(savename)
plt.show() #mostra o gráfico
'''call the function passing the following parameters:
dataframe,
independent term,
dependent term,
plot type,
file name to save the plot + extension
'''
''' **The independent term may be an array of values in the case of the distribution
plot, following the format ['value1', 'value2', 'value3'....]
'''
#PLOTS DISTRIBUTION, RESIDUAL AND LINEAR REGRESSION GRAPHS
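#Illustrative example calls (not in the original script); the column names and the
#save-file name below are assumptions chosen to match columns used later in this file:
#create_plot(df, 'highway-mpg', 'price', 'regression', 'example_regplot.png')
#create_plot(df, ['horsepower', 'curb-weight'], 'price', 'distribution', '')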
#---------------------IMPLEMENTATION LINEAR REGRESSION--------------------------
#CRIAÇÃO DE OBJETO LINER REGRESSION
lm = LinearRegression()
#CRIAÇÃO DE OBJETO LINER REGRESSION
#PAGAR VALORES 'x' VARIAVEL PREDIDITAVA E 'Y' VARIAVEL PREDITA
X = df[['highway-mpg']]
Y = df['price']
#PAGAR VALORES 'x' VARIAVEL PREDIDITAVA E 'Y' VARIAVEL PREDITA
#MÉTODO "fit" - OBTEM PARAMETROS DOS METODOS B-1 e B-0 PARA CONTRUIR LINHA
lm.fit(X,Y)
#MÉTODO "fit" - OBTEM PARAMETROS DOS METODOS B-1 e B-0 PARA CONTRUIR LINHA
#REALIZA PREVISÕES POR MEIO DE REGRESSÃO LINEAR
Yhat=lm.predict(X)
a = Yhat[0:5]
#REALIZA PREVISÕES POR MEIO DE REGRESSÃO LINEAR
#Yhat = a + b X
#CAPTURA O VALOR DO COEFICIENTE B_0
B_ZERO = lm.intercept_
#CAPTURA O VALOR DO COEFICIENTE B_0
#CAPTURA O COEFICIENTE B_1
b_UM = lm.coef_
#CAPTURA O COEFICIENTE B_1
#PAGAR VALORES 'x' VARIAVEL PREDIDITAVA E 'Y' VARIAVEL PREDITA
lm1 = LinearRegression()
X = df[['engine-size']]
Y = df['price']
lm1.fit(X,Y)
#PAGAR VALORES 'x' VARIAVEL PREDIDITAVA E 'Y' VARIAVEL PREDITA
#Yhat=-7963.34 - 166.86*X
lm1_coef = lm1.coef_
lm1_intercept = lm1.intercept_
EQUATION = -7963.34 + (166.86* X)
#---------------------IMPLEMENTATION LINEAR REGRESSION--------------------------
#---------------------IMPLEMENTATION MULTIPLE LINEAR REGRESSION--------------------------
#PEGA OS VALORES DAS VARIAVEIS PREDITORAS
Z = df[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']]
#PEGA OS VALORES DAS VARIAVEIS PREDITORAS
#CONSTRÓI OS PONTOS DA REGRAÇÃO LINEAR
lm.fit(Z, df['price'])
#CONTRÓI OS PONTOS DA REGRAÇÃO LINEAR
# QUESTION 2 - A
lm2 = LinearRegression()
x = df[['normalized-losses', 'highway-mpg']]
Y = df['price']
# QUESTION 2 - A
# QUESTION 2 - B
lm2.fit(x, Y)
# QUESTION 2 - A
#GRAFICO DISPERSÃO - RESIDUO - ENTRE HIGWAY MPG E PRICE
width = 12
height = 10
plt.figure(figsize=(width, height))
sns.regplot(x="highway-mpg", y="price", data=df)
plt.ylim(0,)
plt.clf()
#GRAFICO DISPERSÃO - RESIDUO - ENTRE HIGWAY MPG E PRICE
#CRIAR GRAFICO DE RESIDUO DE "highway-mpg", E "price"
create_plot(df, "highway-mpg", "price", "resid", "regplot1.png")
#CRIAR GRAFICO DE RESIDUO DE "highway-mpg", E "price"
#CONSTROI REGRESSAÕ LINAR MULTIPLA COM VARIAVEL Z QUE PREVE O PRICE
Y_hat = lm.predict(Z)
#CONSTROI REGRESSAÕ LINAR MULTIPLA COM VARIAVEL Z QUE PREVE O PRICE
#CONSTRUÇAÕ DE GRAFICO DE DISTRIBUIÇÃO
'''
plt.figure(figsize=(width, height))
ax1 = sns.distplot(df['price'], hist=False, color="r", label="Actual Value")
sns.distplot(Y_hat, hist=False, color="b", label="Fitted Values" , ax=ax1)
plt.title('Actual vs Fitted Values for Price')
plt.xlabel('Price (in dollars)')
plt.ylabel('Proportion of Cars')
plt.show()
plt.close()
'''
#CONSTRUÇAÕ DE GRAFICO DE DISTRIBUIÇÃO
#FUNÇAÕ - CONSTRUÇÃO DE GRAFICO DE DISTRIBUIÇÃO DA REGRESSAÕ LINEAR MULTIPLA
create_plot(df, ['horsepower', 'curb-weight', 'engine-size', 'highway-mpg'], "price", "distribution", "Dispplot2.png")
#FUNÇAÕ - CONSTRUÇÃO DE GRAFICO DE DISTRIBUIÇÃO DA REGRESSAÕ LINEAR MULTIPLA
#---------------------IMPLEMENTATION MULTIPLE LINEAR REGRESSION--------------------------
#---------------------IMPLEMENTATION POLINOMIAL REGRESSION--------------------------------
#FUNÇÃO - CRIA FUNÇÃO PARA EXIBIR GRAFICO DE REGRESSÃO POLINOMIAL
def PlotPolly(model, independent_variable, dependent_variabble, Name):
x_new = np.linspace(15, 55, 100)
y_new = model(x_new)
plt.plot(independent_variable, dependent_variabble, '.', x_new, y_new, '-')
plt.title('Polynomial Fit with Matplotlib for Price ~ Length')
ax = plt.gca()
ax.set_facecolor((0.898, 0.898, 0.898))
fig = plt.gcf()
plt.xlabel(Name)
plt.ylabel('Price of Cars')
plt.show()
plt.close()
#FUNÇÃO - CRIA FUNÇÃO PARA EXIBIR GRAFICO DE REGRESSÃO POLINOMIAL
# RECEBE OS VALORES DO ARRAY
x = df['highway-mpg']
y = df['price']
# RECEBE OS VALORES DO ARRAY
#CRIAÇÃO DA REGRESSÃO POLINOMIAL
f = np.polyfit(x, y, 3)
p = np.poly1d(f)
print(p)
#CRIAÇÃO DA REGRESSÃO POLINOMIAL
#CRIAR GRAFICO DA REGRESSÃO POLINOMIAL - CHAMA FUNÇÃO
PlotPolly(p, x, y, 'highway-mpg')
#CRIAR GRAFICO DA REGRESSÃO POLINOMIAL - CHAMA FUNÇÃO
#Z = df[['horsepower', 'curb-weight', 'engine-size', 'highway-mpg']]
#SELECIONA O GRAU DAS OPERAÇÕES PARA CRIAÇÃO DE FEATURES
pr=sklearn.preprocessing.PolynomialFeatures(degree=2)
#SELECIONA O GRAU DAS OPERAÇÕES PARA CRIAÇÃO DE FEATURES
#CRIA AS FEATURES
Z_pr=pr.fit_transform(Z)
#CRIA AS FEATURES
#LISTA QUNATIDADE - RIGISTROS - FEATURES/COLUNAS/VARIAVEIS - TRANSFORMA EM TUPLA
Z.shape
#LISTA QUNATIDADE - RIGISTROS - FEATURES/COLUNAS/VARIAVEIS - TRANSFORMA EM TUPLA
#LISTA QUNATIDADE - RIGISTROS - FEATURES/COLUNAS/VARIAVEIS - TRANSFORMA EM TUPLA
Z_pr.shape
#LISTA QUNATIDADE - RIGISTROS - FEATURES/COLUNAS/VARIAVEIS - TRANSFORMA EM TUPLA.
#LISTA DE TUPLAS - OBJETO LINER REGRESSION - POLINOMIALFEATURES - STANDARDSCALER
Input=[('scale',StandardScaler()), ('polynomial', PolynomialFeatures(include_bias=False)), ('model',LinearRegression())]
#LISTA DE TUPLAS - OBJETO LINER REGRESSION - POLINOMIALFEATURES - STANDARDSCALER
#CRIA PIPELINE - RECEBE COMO PARAMETRO LISTA DE TUPLAS COM OBJETOS
pipe=Pipeline(Input)
#CRIA PIPELINE - RECEBE COMO PARAMETRO LISTA DE TUPLAS COM OBJETOS
#CONVERTE O VALOR DO Z PARA FLOAT PARA NAÕ DAR ERRO AO APLICAR O PIPELINE
Z = Z.astype(float)
#CONVERTE O VALOR DO Z PARA FLOAT PARA NAÕ DAR ERRO AO APLICAR O PIPELINE
#EXECUTA O FIT DENTRO DO PIPELINE PARA EXECUTAR TRABSFORMAÇÕES PRÉDETERMINDADAS
#DENTRO DO PIPELINE NA ESTRUTURA DE DADOS 'Z' E 'y'
#----ALINHA MODELO DE REGRESSÃO LINEAR
pipe.fit(Z,y)
#----ALINHA MODELO DE REGRESSÃO LINEAR
#EXECUTA O FIT DENTRO DO PIPELINE PARA EXECUTAR TRABSFORMAÇÕES PRÉDETERMINDADAS
#DENTRO DO PIPELINE NA ESTRUTURA DE DADOS 'Z' E 'y'
#REALIZA PREDIÇÃO COM OS VALORES DE Z NO PIPELINE
ypipe=pipe.predict(Z)
ypipe[0:4]
#REALIZA PREDIÇÃO COM OS VALORES DE Z NO PIPELINE
#CONTROI INPUT - CIBTEM AS OPERAÇÕES DO PIPELINE
#OPERAÇÕES - PADRONIZAÇÃO DE ESCALA - REGREÇÃO LINEAR
Input=[('scale',StandardScaler()), ('model',LinearRegression())]
#CONTROI INPUT - CIBTEM AS OPERAÇÕES DO PIPELINE
#OPERAÇÕES - PADRONIZAÇÃO DE ESCALA - REGREÇÃO LINEAR
#CRIA O PIEPELINE
pipe = Pipeline(Input)
#CRIA O PIEPELINE
#CONTRÓI GRAFICO DE DISTRIBUIÇÃO COM KDEPLOT - MULTIPLE LINEAR REGRESION PIPELINE
width = 12
height = 10
plt.figure(figsize=(width, height))
ax1 = sns.kdeplot(df['price'], color="r", label="Actual Value")
sns.kdeplot(ypipe, color="b", label="Fitted Values" , ax=ax1)
plt.title('Actual vs Fitted Values for price')
plt.xlabel('price')
plt.show()
#CONTRÓI GRAFICO DE DISTRIBUIÇÃO COM KDEPLOT - MULTIPLE LINEAR REGRESION PIPELINE
#MSE AND R^2 - IN-SAMPLE EVALUATION MEASURES-----------------------------------------
#MSE - MEAN SQUARED ERROR - MEASURES HOW CLOSE, ON AVERAGE, THE MODEL'S
#ACTUAL VALUES ARE TO THE PREDICTED VALUES
#R-SQUARED - MEASURES WHETHER THE VARIANCE OF THE ACTUAL VALUES IS
#EQUIVALENT TO THAT OF THE PREDICTED VALUES. THE RESULT RANGES FROM 0 TO 1;
#THE CLOSER TO 1, THE GREATER THE EQUIVALENCE OF THE VARIANCE.
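#Illustrative sketch (not part of the original script): the same two metrics written
#out from their definitions, assuming y_true and y_pred are equal-length arrays.
#MSE = mean((y_true - y_pred)^2)
#R^2 = 1 - SS_res / SS_tot, where SS_res = sum((y_true - y_pred)^2)
#      and SS_tot = sum((y_true - mean(y_true))^2)
def manual_mse_r2(y_true, y_pred):
    y_true = np.asarray(y_true, dtype=float)
    y_pred = np.asarray(y_pred, dtype=float)
    mse = np.mean((y_true - y_pred) ** 2)
    ss_res = np.sum((y_true - y_pred) ** 2)
    ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
    return mse, 1 - ss_res / ss_tot
#Example with hypothetical arrays: manual_mse_r2([1, 2, 3], [1.1, 1.9, 3.2])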
#FIT - ALINHAR MODELO
lm.fit(X, Y)
#FIT - ALINHAR MODELO
#CALCUOLO DO R^ SQUARED
print('The R-square is: ', lm.score(X, Y))
#CALCUOLO DO R^ SQUARED
#CALCULO MSE - MEAN SQUARED ERROR
Yhat=lm.predict(X)
print('The output of the first four predicted value is: ', Yhat[0:4])
#CALCULO MSE - MEAN SQUARED ERROR
#BIBLOTECA ABAICXO NECESSARIA PARA A EXECUÇÃO
#from sklearn.metrics import mean_squared_error
# R^2 - R SQUARED - MULTIPLE LINEAR REGRESSION
# fit the model
lm.fit(Z, df['price'])
# Find the R^2
print('The R-square is: ', lm.score(Z, df['price']))
# R^2 - R SQUARED - MULTIPLE LINEAR REGRESSION
#REALIZA PREDIÇÕES PARA CALCULO DO MSE
Y_predict_multifit = lm.predict(Z)
#REALIZA PREDIÇÕES PARA CALCULO DO MSE
#CALCULA MSE DE MUTIPLE LINEAER REGRESSION
print('The mean square error of price and predicted value using multifit is: ', \
mean_squared_error(df['price'], Y_predict_multifit))
#CALCULA MSE DE MUTIPLE LINEAER REGRESSION
#IMPORTAR - CALCULO DO R^ QUADRADO PARA POLINOMIAL FIT
#from sklearn.metrics import r2_score
# R^2 - R SQUARED - POLINOMIAL FIT
r_squared = r2_score(y, p(x))
print('The R-square value is: ', r_squared)
# R^2 - R SQUARED - POLINOMIAL FIT
#MSE - POLINOMIAL REGRESSION
print(mean_squared_error(df['price'], p(x)))
#MSE - POLINOMIAL REGRESSION
#MSE AND R^2 - IN-SAMPLE EVALUATION MEASURES-----------------------------------------
#PRODUZINDO PREDIÇAÕ USANDO MÉTODO PREDICT-------------------------------------
#CRIA OBJETO NEW INPUT COM VALORES DE ZERO A 100 OBTIDOS PELO NP.ARANGE
new_input=np.arange(1, 100, 1).reshape(-1, 1)
#CRIA OBJETO NEW INPUT COM VALORES DE ZERO A 100 OBTIDOS PELO NP.ARANGE
#ALINHAR MODELO COM VALORES DE X E Y
lm.fit(X, Y)
#ALINHAR MODELO COM VALORES DE X E Y
#REALIZAR PREDIÇÃO USANDO O MÉTODO PREDICT
yhat=lm.predict(new_input)
print(yhat[0:5])
#REALIZAR PREDIÇÃO USANDO O MÉTODO PREDICT
#CRIA GRAFICO DA PREDIÇÃO
plt.plot(new_input, yhat)
plt.show()
#CRIA GRAFICO DA PREDIÇÃO
#PRODUZINDO PREDIÇAÕ USANDO MÉTODO PREDICT-------------------------------------
| StarcoderdataPython |
9787249 | <reponame>canokeys/yubikey-manager<filename>ykman/settings.py<gh_stars>1-10
# Copyright (c) 2017 Yubico AB
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or
# without modification, are permitted provided that the following
# conditions are met:
#
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above
# copyright notice, this list of conditions and the following
# disclaimer in the documentation and/or other materials provided
# with the distribution.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
# "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
# LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS
# FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE
# COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT,
# INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING,
# BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
# LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN
# ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
import os
import json
from pathlib import Path
HOME_CONFIG = "~/.ckman"
XDG_DATA_HOME = os.environ.get("XDG_DATA_HOME", "~/.local/share") + "/ckman"
XDG_CONFIG_HOME = os.environ.get("XDG_CONFIG_HOME", "~/.config") + "/ckman"
USE_XDG = "YKMAN_XDG_EXPERIMENTAL" in os.environ
class Settings(dict):
_config_dir = HOME_CONFIG
def __init__(self, name):
self.fname = Path(self._config_dir).expanduser().resolve() / (name + ".json")
if self.fname.is_file():
with self.fname.open("r") as fd:
self.update(json.load(fd))
def __eq__(self, other):
return other is not None and self.fname == other.fname
def __ne__(self, other):
return other is None or self.fname != other.fname
def write(self):
conf_dir = self.fname.parent
if not conf_dir.is_dir():
conf_dir.mkdir(0o700, parents=True)
with self.fname.open("w") as fd:
json.dump(self, fd, indent=2)
__hash__ = None
class Configuration(Settings):
_config_dir = XDG_CONFIG_HOME if USE_XDG else HOME_CONFIG
class AppData(Settings):
_config_dir = XDG_DATA_HOME if USE_XDG else HOME_CONFIG
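# Minimal usage sketch (not part of the original module), assuming only the classes above;
# the settings name "oath" is a hypothetical example:
#   s = AppData("oath")          # reads <config dir>/oath.json if it already exists
#   s["last_device"] = "123456"  # Settings subclasses dict, so items are plain keys
#   s.write()                    # creates the config dir with mode 0o700 if needed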
| StarcoderdataPython |
3424935 | <reponame>hsph-qbrc/mev-backend
from rest_framework import serializers, exceptions
from api.models import ResourceMetadata, \
Resource, \
ExecutedOperation
from api.serializers.observation_set import NullableObservationSetSerializer
from api.serializers.feature_set import NullableFeatureSetSerializer
class ResourceMetadataSerializer(serializers.ModelSerializer):
resource = serializers.PrimaryKeyRelatedField(
queryset=Resource.objects.all()
)
observation_set = NullableObservationSetSerializer(required=False, allow_null=True)
feature_set = NullableFeatureSetSerializer(required=False, allow_null=True)
def prep_validated_data(self, validated_data):
'''
This method is used by the create and update methods
to create the proper serialized elements
'''
# the database object is saving json. Hence, we need to turn the
# observationSet into a dict to create/update the ResourceMetadata below.
try:
obs_set_data = validated_data['observation_set']
except KeyError as ex:
obs_set_data = None
if obs_set_data:
obs_set_serializer = NullableObservationSetSerializer(data=obs_set_data)
obs_set = obs_set_serializer.get_instance()
obs_set_dict = obs_set.to_dict()
else:
obs_set_dict = None
# same thing for the FeatureSet- need a dict
try:
feature_set_data = validated_data['feature_set']
except KeyError as ex:
feature_set_data = None
if feature_set_data:
feature_set_serializer = NullableFeatureSetSerializer(data=validated_data['feature_set'])
feature_set = feature_set_serializer.get_instance()
feature_set_dict = feature_set.to_dict()
else:
feature_set_dict = None
try:
parent_op = validated_data['parent_operation']
except KeyError as ex:
parent_op = None
if parent_op is not None:
parent_op = ExecutedOperation.objects.get(pk=parent_op)
return obs_set_dict, feature_set_dict, parent_op
def create(self, validated_data):
obs_set_dict, feature_set_dict, parent_op = self.prep_validated_data(validated_data)
rm = ResourceMetadata.objects.create(
observation_set = obs_set_dict,
feature_set = feature_set_dict,
parent_operation = parent_op,
resource = validated_data['resource']
)
return rm
def update(self, instance, validated_data):
obs_set_dict, feature_set_dict, parent_op = self.prep_validated_data(validated_data)
instance.observation_set = obs_set_dict
instance.feature_set = feature_set_dict
instance.parent_operation = parent_op
return instance
def validate_parent_operation(self, value):
if value is not None:
try:
ExecutedOperation.objects.get(pk=value)
except ExecutedOperation.DoesNotExist as ex:
raise exceptions.ValidationError({'parent_operation': 'Parent operation not found.'})
return value
class Meta:
model = ResourceMetadata
fields = [
'resource',
'parent_operation',
'observation_set',
'feature_set'
]
class ResourceMetadataObservationsSerializer(serializers.ModelSerializer):
class Meta:
model = ResourceMetadata
fields = ['observation_set',]
class ResourceMetadataFeaturesSerializer(serializers.ModelSerializer):
class Meta:
model = ResourceMetadata
fields = ['feature_set',]
class ResourceMetadataParentOperationSerializer(serializers.ModelSerializer):
class Meta:
model = ResourceMetadata
fields = ['parent_operation',] | StarcoderdataPython |
3499498 | import functools
import logging
import time
from datetime import datetime, timedelta
from typing import Any, Callable, Dict, List, Optional, TypedDict
import pandas as pd
active_logger = logging.getLogger(__name__)
class TimingResult(TypedDict):
function: str
count: int
mean: timedelta
min: timedelta
max: timedelta
class TimingRegistrar:
def __init__(self, timings: Optional[Dict[str, List[timedelta]]] = None):
self._function_timings: Dict[str, List[timedelta]] = timings or {}
def log_result(self, elapsed_seconds: float, name: str) -> None:
if name not in self._function_timings:
self._function_timings[name] = []
self._function_timings[name].append(timedelta(seconds=elapsed_seconds))
def _call(self, f: Callable, key: str, *args, **kwargs) -> Any:
start_time = (
time.perf_counter()
) # gets timestamp in seconds (with decimal places)
val = f(*args, **kwargs) # execute function and store output
end_time = time.perf_counter()
elapsed_time = end_time - start_time # compute time for function execution
# use object name with method name for key
if key not in self._function_timings:
self._function_timings[key] = list()
self._function_timings[key].append(timedelta(seconds=elapsed_time))
return val
def register_named_method(self, name_attr: str) -> Callable:
"""
register a class method, whose name at runtime is determined by
- first component is attribute specified by `name_attr`
- second component is function name
e.g. the following below would yield to key in timing registrar of 'hello.timed_function'
reg = TimingRegistrar()
class A:
c='hello'
@reg.register_named_method(name_attr='c')
def timed_function():
# some stuff
"""
def outer(method: Callable):
@functools.wraps(method)
def inner(_self, *args, **kwargs):
# use object name with method name for key
key = getattr(_self, name_attr) + "." + method.__name__
return self._call(method, key, _self, *args, **kwargs)
return inner
return outer
def register_method(self, func: Callable) -> Callable:
"""
Register a class method for execution times to be logged
Example below would register function calls to key 'A.hello'
reg = TimingRegistrar()
class A:
@reg.register_method
def hello(self):
# do some stuff
"""
@functools.wraps(func)
def inner(_self, *args, **kwargs):
key = _self.__class__.__name__ + "." + func.__name__
return self._call(func, key, _self, *args, **kwargs)
return inner
def register_function(self, func: Callable) -> Callable:
"""
Register a function for execution times to be logged, using function name as key to register
The example below would register function timings to key 'hello'
reg = TimingRegistrar()
@reg.register_function
def hello():
# do some stuff
"""
@functools.wraps(func)
def inner(*args, **kwargs):
return self._call(func, func.__name__, *args, **kwargs)
return inner
def _series(self, func_name: str) -> pd.Series:
"""
get series of timedeltas for execution time each time function was run
"""
return pd.Series(self._function_timings[func_name])
def timed_functions(self) -> List[str]:
"""
get list of function names who are being tracked for timing
"""
return list(self._function_timings.keys())
def get_timings_summary(self) -> List[Dict]:
"""
get a list of dictionaries with function timings information:
'Function' is function name
'Count' is number of times function was recorded
'Mean' is mean of timings as timedelta object
'Min' is minimum time as timedelta object
'Max' is maximum time as timedelta object
"""
return [
TimingResult(
function=k,
count=len(v),
mean=sum(v, timedelta()) / len(v),
min=min(v),
max=max(v),
)
for k, v in self._function_timings.items()
if v
]
def clear(self) -> None:
"""
empty lists of timed functions results
"""
self._function_timings = {}
def items(self):
return self._function_timings.items()
def __contains__(self, item):
return self._function_timings.__contains__(item)
def __setitem__(self, key, value):
return self._function_timings.__setitem__(key, value)
def __getitem__(self, item):
return self._function_timings.__getitem__(item)
def __add__(self, other):
result = TimingRegistrar(self._function_timings)
for k, v in other.items():
if k in result:
result[k] += v
else:
result[k] = v
return result
def ms_to_datetime(timestamp_ms):
return datetime.fromtimestamp(float(timestamp_ms) / 1000)
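# Minimal usage sketch (not part of the original module), using only names defined above:
#
#   reg = TimingRegistrar()
#
#   @reg.register_function
#   def slow_add(a, b):
#       time.sleep(0.01)
#       return a + b
#
#   slow_add(1, 2)
#   reg.get_timings_summary()  # -> [{'function': 'slow_add', 'count': 1, ...}]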
| StarcoderdataPython |
5180383 | """files.filesize bigint
Revision ID: 75b0775a8695
Revises: 26c2f6af45ab
Create Date: 2021-03-03 00:08:00.895764
"""
from alembic import op
import sqlalchemy as sa
# revision identifiers, used by Alembic.
revision = '75b0775a8695'
down_revision = '26c2f6af45ab'
branch_labels = None
depends_on = None
def upgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.alter_column('files', 'filesize',
existing_type=sa.INTEGER(),
type_=sa.BigInteger(),
existing_nullable=True)
op.drop_column('files', 'checksum_crypt')
op.drop_column('files', 'filesize_crypt')
# ### end Alembic commands ###
def downgrade():
# ### commands auto generated by Alembic - please adjust! ###
op.add_column('files', sa.Column('filesize_crypt', sa.TEXT(), autoincrement=False, nullable=True))
op.add_column('files', sa.Column('checksum_crypt', sa.TEXT(), autoincrement=False, nullable=True))
op.alter_column('files', 'filesize',
existing_type=sa.BigInteger(),
type_=sa.INTEGER(),
existing_nullable=True)
# ### end Alembic commands ###
| StarcoderdataPython |
5160238 | <reponame>1byte2bytes/cpython
# A class to help applications that do fancy text formatting.
# You create an instance each time you must redraw the window.
# Set the initial left, top and right coordinates;
# then feed it words, font changes and vertical movements.
#
# This class should eventually be extended to support much fancier
# formatting, along the lines of TeX; for now, a very simple model
# is sufficient.
#
class formatter:
#
# Initialize a formatter instance.
# Pass the window's drawing object, and left, top, right
# coordinates of the drawing space as arguments.
#
def __init__(self, d, left, top, right):
self.d = d # Drawing object
self.left = left # Left margin
self.right = right # Right margin
self.v = top # Top of current line
self.center = 0
self.justify = 1
self.setfont('') # Default font
self._reset() # Prepare for new line
#
# Reset for start of fresh line.
#
def _reset(self):
self.boxes = [] # Boxes and glue still to be output
self.sum_width = 0 # Total width of boxes
self.sum_space = 0 # Total space between boxes
self.sum_stretch = 0 # Total stretch for space between boxes
self.max_ascent = 0 # Max ascent of current line
self.max_descent = 0 # Max descent of current line
self.avail_width = self.right - self.left
self.hang_indent = 0
#
# Set the current font, and compute some values from it.
#
def setfont(self, font):
self.font = font
self.d.setfont(font)
self.font_space = self.d.textwidth(' ')
self.font_ascent = self.d.baseline()
self.font_descent = self.d.lineheight() - self.font_ascent
#
# Add a word to the list of boxes; first flush if line is full.
# Space and stretch factors are expressed in fractions
# of the current font's space width.
# (Two variations: one without, one with explicit stretch factor.)
#
def addword(self, word, spacefactor):
self.addwordstretch(word, spacefactor, spacefactor)
#
def addwordstretch(self, word, spacefactor, stretchfactor):
width = self.d.textwidth(word)
if width > self.avail_width:
self._flush(1)
space = int(float(self.font_space) * float(spacefactor))
stretch = int(float(self.font_space) * float(stretchfactor))
box = (self.font, word, width, space, stretch)
self.boxes.append(box)
self.sum_width = self.sum_width + width
self.sum_space = self.sum_space + space
self.sum_stretch = self.sum_stretch + stretch
self.max_ascent = max(self.font_ascent, self.max_ascent)
self.max_descent = max(self.font_descent, self.max_descent)
self.avail_width = self.avail_width - width - space
#
# Flush current line and start a new one.
# Flushing twice is harmless (i.e. does not introduce a blank line).
# (Two versions: the internal one has a parameter for justification.)
#
def flush(self):
self._flush(0)
#
def _flush(self, justify):
if not self.boxes:
return
#
# Compute amount of stretch needed.
#
if justify and self.justify or self.center:
#
# Compute extra space to fill;
# this is avail_width plus glue from last box.
# Also compute available stretch.
#
last_box = self.boxes[len(self.boxes)-1]
font, word, width, space, stretch = last_box
tot_extra = self.avail_width + space
tot_stretch = self.sum_stretch - stretch
else:
tot_extra = tot_stretch = 0
#
# Output the boxes.
#
baseline = self.v + self.max_ascent
h = self.left + self.hang_indent
if self.center:
h = h + tot_extra / 2
tot_extra = tot_stretch = 0
for font, word, width, space, stretch in self.boxes:
self.d.setfont(font)
v = baseline - self.d.baseline()
self.d.text((h, v), word)
h = h + width + space
if tot_extra > 0 and tot_stretch > 0:
extra = stretch * tot_extra / tot_stretch
h = h + extra
tot_extra = tot_extra - extra
tot_stretch = tot_stretch - stretch
#
# Prepare for next line.
#
self.v = baseline + self.max_descent
self.d.setfont(self.font)
self._reset()
#
# Add vertical space; first flush.
# Vertical space is expressed in fractions of the current
# font's line height.
#
def vspace(self, lines):
self.vspacepixels(int(lines * self.d.lineheight()))
#
# Add vertical space given in pixels.
#
def vspacepixels(self, dv):
self.flush()
self.v = self.v + dv
#
# Set temporary (hanging) indent, for paragraph start.
# First flush.
#
def tempindent(self, space):
self.flush()
hang = int(float(self.font_space) * float(space))
self.hang_indent = hang
self.avail_width = self.avail_width - hang
#
# Add (permanent) left indentation. First flush.
#
def addleftindent(self, space):
self.flush()
self.left = self.left \
+ int(float(self.font_space) * float(space))
self._reset()
#
# Test procedure
#
def test():
import stdwin, stdwinq
from stdwinevents import *
try:
import mac
# Mac font assignments:
font1 = 'times', '', 12
font2 = 'times', 'b', 14
except ImportError:
# X11R4 font assignments
font1 = '*times-medium-r-*-120-*'
font2 = '*times-bold-r-*-140-*'
words = \
['The','quick','brown','fox','jumps','over','the','lazy','dog.']
words = words * 2
stage = 0
stages = [(0,0,'ragged'), (1,0,'justified'), (0,1,'centered')]
justify, center, title = stages[stage]
stdwin.setdefwinsize(300,200)
w = stdwin.open(title)
winsize = w.getwinsize()
while 1:
type, window, detail = stdwinq.getevent()
if type == WE_CLOSE:
break
elif type == WE_SIZE:
newsize = w.getwinsize()
if newsize <> winsize:
w.change((0,0), winsize)
winsize = newsize
w.change((0,0), winsize)
elif type == WE_MOUSE_DOWN:
stage = (stage + 1) % len(stages)
justify, center, title = stages[stage]
w.settitle(title)
w.change((0, 0), (1000, 1000))
elif type == WE_DRAW:
width, height = winsize
f = formatter(w.begindrawing(), 0, 0, width)
f.center = center
f.justify = justify
if not center:
f.tempindent(5)
for font in font1, font2, font1:
f.setfont(font)
for word in words:
space = 1 + (word[-1:] == '.')
f.addword(word, space)
if center and space > 1:
f.flush()
f.flush()
height = f.v
del f
w.setdocsize(0, height)
| StarcoderdataPython |
6482397 | from functools import reduce
def hexa(s):
s = s.lower()
if set(s) - set('0123456789abcdef'):
raise ValueError('Invalid hexadecimal string')
l = [ord(c) - ord('a') + 10 if c in 'abcdef' else ord(c) - ord('0')
for c in s]
return reduce(lambda x, y: x * 16 + y, l, 0)
| StarcoderdataPython |
278431 | # -*- coding: utf-8 -*-
"""
Created on Thu Nov 23 13:21:34 2017
@author: HCHO
"""
import sqlite3
def convert(value):
if value.startswith('~'):
return value.strip('~')
if not value:
value='0'
return float(value)
conn=sqlite3.connect('food.db')
curs=conn.cursor()
curs.execute('''
CREATE TABLE food(
id TEXT PRIMARY KEY,
desc TEXT,
water FLOAT,
kcal FLOAT,
protein FLOAT,
fat FLOAT,
ash FLOAT,
carbs FLOAT,
fiber FLOAT,
sugar FLOAT)
''')
query='INSERT INTO food VALUES(?,?,?,?,?,?,?,?,?,?)'
for line in open('ABBREV.txt'):
fields=line.split('^')
vals=[convert(f) for f in fields]
curs.execute(query,vals)
conn.commit()
conn.close()
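# Note (added for clarity, not in the original): the lines below form a separate
# query script that expects a WHERE condition as its first command-line argument,
# e.g. (script name assumed for illustration):
#   python food_query.py "kcal <= 100 AND fiber >= 10"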
import sqlite3, sys
conn=sqlite3.connect('food.db')
curs=conn.cursor()
query='SELECT *FROM food WHERE %s' % sys.argv[1]
print (query)
curs.execute(query)
names=[f[0] for f in curs.description]
for row in curs.fetchall():
for pair in zip(names, row):
print ('%s:%s' %pair)
print() | StarcoderdataPython |
3387079 | <gh_stars>0
from django.conf.urls import url
from . import views
urlpatterns = [
url(r'^$', views.logsHome, name='logsHome'),
url(r'^accessLogs', views.accessLogs, name='accessLogs'),
url(r'^errorLogs', views.errorLogs, name='errorLogs'),
url(r'^emaillogs', views.emailLogs, name='emaillogs'),
url(r'^ftplogs', views.ftplogs, name='ftplogs'),
url(r'^modSecAuditLogs', views.modSecAuditLogs, name='modSecAuditLogs'),
url(r'^getLogsFromFile',views.getLogsFromFile, name="getLogsFromFile"),
url(r'^clearLogFile',views.clearLogFile, name="clearLogFile"),
url(r'^serverMail$', views.serverMail, name="serverMail"),
url(r'^saveSMTPSettings$', views.saveSMTPSettings, name="saveSMTPSettings"),
] | StarcoderdataPython |
5054897 | <filename>bblfsh_sonar_checks/checks/java/RSPEC-2447.py<gh_stars>1-10
import bblfsh_sonar_checks.utils as utils
import bblfsh
def check(uast):
findings = []
methods = utils.get_methods(uast)
for m in methods:
# Should look at the roles to filter by Boolean but there is a bug in the
# Java driver https://github.com/bblf../../java-driver/issues/83 so we check the token
if m.return_ and m.return_.type_name == 'boolean':
if any(list(bblfsh.filter(m.body, "//*[@roleReturn]//*[@roleNull]"))):
findings.append({"msg": "Don't return Null on Boolean-return methods"})
return findings
if __name__ == '__main__': utils.run_default_fixture(__file__, check)
| StarcoderdataPython |
5158573 | <gh_stars>0
from __future__ import absolute_import
from django.core.urlresolvers import reverse
from exam import fixture
from sentry.models import Team
from sentry.testutils import TestCase, PermissionTestCase
class TeamSettingsPermissionTest(PermissionTestCase):
def setUp(self):
super(TeamSettingsPermissionTest, self).setUp()
self.path = reverse('sentry-manage-team', args=[self.organization.slug, self.team.slug])
def test_team_admin_can_load(self):
self.assert_team_admin_can_access(self.path)
def test_team_member_cannot_load(self):
self.assert_team_member_cannot_access(self.path)
def test_org_admin_can_load(self):
self.assert_org_admin_can_access(self.path)
def test_org_member_cannot_load(self):
self.assert_org_member_cannot_access(self.path)
class TeamSettingsTest(TestCase):
@fixture
def path(self):
return reverse('sentry-manage-team', args=[self.organization.slug, self.team.slug])
def test_renders_with_context(self):
self.login_as(self.team.owner)
resp = self.client.get(self.path)
self.assertEquals(resp.status_code, 200)
self.assertTemplateUsed(resp, 'sentry/teams/manage.html')
assert resp.context['team'] == self.team
def test_valid_params(self):
self.login_as(self.team.owner)
resp = self.client.post(self.path, {
'name': 'bar',
'slug': self.team.slug,
})
assert resp.status_code == 302
self.assertEquals(resp['Location'], 'http://testserver' + self.path)
team = Team.objects.get(pk=self.team.pk)
self.assertEquals(team.name, 'bar')
| StarcoderdataPython |
63838 | <reponame>herrywen-nanj/51reboot<gh_stars>0
# -*- coding:utf-8 -*-
# author: lyl
import check
from prettytable import PrettyTable
def list_user():
# Check whether the user file is empty; if not, convert the result to a dict and return it
tag, user_dict = check.user_dict()
if tag:
# Format the output
xtb = PrettyTable()
xtb.field_names = ["id", "username", "age", "tel", "email"]
for x in user_dict.keys():
xtb.add_row([user_dict[x]['id'], user_dict[x]['name'], user_dict[x]['age'], user_dict[x]['tel'],
user_dict[x]['email']])
print(xtb)
else:
print(user_dict)
def find_user(info_list):
# Check that the user input is well-formed
if len(info_list) != 2:
print("\033[1;31m输入有误,请重新输入. eg: find username|id \033[0m")
return
# Check whether the user exists and return the username
tag, user_name, _ = check.user(info_list[1])
if tag:
# Fetch the user info and convert it to dict form
_, user_dict = check.user_dict()
# Format the output
xtb = PrettyTable()
xtb.field_names = ["id", "username", "age", "tel", "email"]
xtb.add_row([user_dict[user_name]['id'], user_dict[user_name]['name'], user_dict[user_name]['age'],
user_dict[user_name]['tel'], user_dict[user_name]['email']])
print(xtb)
def pagesize(info_list):
# display page 2 pagesize 5
if len(info_list) != 5:
print("\033[1;31m输入格式有误,请重新输入. eg: display page 2 pagesize 5 \033[0m")
return
if info_list[1] != 'page' or not info_list[2].isdigit() or info_list[3] != 'pagesize' or not info_list[4].isdigit():
print("\033[1;31m输入参数有误,请重新输入. eg: display page 2 pagesize 5 \033[0m")
return
tag, user_dict = check.user_dict()
if tag:
# Set the table header
xtb = PrettyTable()
xtb.field_names = ["id", "username", "age", "tel", "email"]
# Check how many records a single page should show
if int(info_list[4]) >= len(user_dict.keys()):
print("\033[1;31m 单页显示内容过多,默认全部输出\033[0m")
list_user()
return
# Check whether the page number exceeds the maximum
if int(info_list[2]) * int(info_list[4]) >= len(user_dict.keys()):
print("\033[1;31m 页数超出范围,已为您显示最后一页\033[0m")
user_list = list(user_dict.keys())[-int(info_list[4]):]
else:
# Page capacity is below the maximum
user_list = list(user_dict.keys())[(int(info_list[2])-1) * int(info_list[4]):
(int(info_list[2])) * int(info_list[4])]
for x in user_list:
xtb.add_row([user_dict[x]['id'], user_dict[x]['name'], user_dict[x]['age'], user_dict[x]['tel'],
user_dict[x]['email']])
print(xtb)
else:
print(user_dict)
| StarcoderdataPython |
1976247 | <gh_stars>0
from __future__ import unicode_literals
from frappe import _
import frappe
def get_data():
return [
{
"label": _("Main Menu"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Monitoring Panel",
"doctype": "Stock Ledger Entry",
"onboard": 1,
"dependencies": ["Item"],
"label": _("Monitoring Panel")
},
{
"type": "report",
"is_query_report": True,
"name": "Pending Requests",
"doctype": "Material Request",
"dependencies": ["Item"],
"onboard": 1,
"route":"#List/Material Request/Report/Pending Requests",
"label": _("Pending Requests")
}
]
},
{
"label": _("Action Items"),
"items": [
{
"type": "doctype",
"name": "Item",
"onboard": 1,
"route": "#Form/Item/New Item 1",
"label":_("Add Items")
},
{
"type": "doctype",
"name": "Item",
"onboard": 1,
"label":_("Edit Items")
},
{
"type": "doctype",
"name": "Stock Entry",
"onboard": 1,
"reference_doctype": "Stock Entry",
"dependencies": ["Item"],
"route":"#List/Stock Entry/List?stock_entry_type=Material Receipt",
"label":_("Recieving Items")
},
{
"type": "doctype",
"name": "Stock Entry",
"onboard": 1,
"route":"#List/Stock Entry/List?stock_entry_type=Material Transfer",
"dependencies": ["Item"],
"label":_("Moving Items")
},
{
"type": "doctype",
"name": "Freezing Request",
"dependencies": ["Item"],
"onboard": 1,
"route":"#List/Freezing Request/List",
"label":_("Disabling Items")
},
{
"type": "doctype",
"name": "Item",
"onboard": 1,
"dependencies": ["Item"],
"label":_("Disposing Items")
},
{
"type": "doctype",
"name": "Item",
"route": "#List/Stock Reconciliation/List",
"onboard": 1,
"label":_("Physical Count Reconciliation")
},
]
},
{
"label": _("Purchase"),
"items": [
{
"type": "doctype",
"name": "Material Request",
"onboard": 1,
"label":_("Material Request"),
"dependencies": ["Item"],
},
{
"type": "doctype",
"name": "Purchase Order",
"onboard": 1,
"label":_("Purchase Order"),
"dependencies": ["Supplier"],
},
{
"type": "doctype",
"name": "Purchase Receipt",
"onboard": 1,
"label":_("Purchase Receipt"),
"dependencies": ["Supplier"],
},
{
"type": "doctype",
"name": "Landed Cost Voucher",
"onboard": 1,
"label":_("Landed Cost Voucher"),
"dependencies": ["Purchase Receipt"],
},
{
"type": "doctype",
"name": "Supplier",
"onboard": 1,
"label":_("Supplier"),
},
{
"type": "doctype",
"name": "Request for Quotation",
"onboard": 1,
"label":_("Request for Quotation"),
},
{
"type": "doctype",
"name": "Supplier Quotation",
"onboard": 1,
"label":_("Supplier Quotation"),
}
]
},
{
"label": _("Stock Reports"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Item Additions Report",
"doctype": "Item",
"route":"List/Item/Report/Item Additions Report",
"onboard": 1,
"label":_("Item Additions Report")
},
{
"type": "report",
"is_query_report": True,
"name": "Edited Items Report",
"doctype": "Item",
"onboard": 1,
"route":"List/Item/Report/Edited Items Report",
"label":_("Edited Items Report")
},
{
"type": "report",
"is_query_report": True,
"name": "Recieved Items",
"doctype": "Item",
"onboard": 1,
"dependencies": ["Item"],
"label":_("Recieved Items Report")
},
{
"type": "report",
"is_query_report": True,
"name": "Movement Report",
"doctype": "Item",
"onboard": 1,
"dependencies": ["Item"],
"label":_("Movement Report")
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ageing",
"onboard": 1,
"doctype": "Item",
"route":"#List/Item/Report/Disabled Items",
"dependencies": ["Item"],
"label":_("Disabled Item Report")
},
# {
# "type": "report",
# "is_query_report": True,
# "name": "Item Price Stock",
# "doctype": "Item",
# "dependencies": ["Item"],
# "route":"#List/Item/Report/Disposed Items",
# "label":_("Disposed Items Report"),
# },
{
"type": "report",
"is_query_report": True,
"name": "Physical Count Reconciliation Report",
"onboard": 1,
"doctype": "Item",
"dependencies": ["Item"],
"breadcrumbs" : "Senergy",
# "route":"#List/Stock Reconciliation/Report/Physical Count Reconciliation ",
"label":_("Physical Count Reconciliation Report")
},
{
"type": "report",
"is_query_report": True,
"name": "Item Card",
"onboard": 1,
"doctype": "Item",
"route":"#List/Item/Report/Item Card",
"dependencies": ["Item"],
"label":_("Item Card")
}
]
},
{
"label": _("Analysis Report"),
"items": [
# {
# "type": "doctype",
# "name": "Item",
# "route": "#stock-balance",
# "onboard": 1,
# "label":_("Stock Summary"),
# },
{
"type": "report",
"is_query_report": True,
"name": "Stock Ageing Report",
"doctype": "Item",
"onboard": 1,
"label":_("Stock Ageing"),
"dependencies": ["Item"],
},
{
"type": "report",
"is_query_report": True,
"name": "Slow Moving Report",
"doctype": "Stock Ledger Entry",
"onboard": 1,
"label":_("Slow Moving"),
"dependencies": ["Item"],
},
{
"type": "report",
"is_query_report": True,
"onboard": 1,
"name": "Item Price Stock",
"label":_("Pricing Analysis"),
"dependencies": ["Item"],
}
]
},
{
"label": _("Management Report"),
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Inventory Listing",
"doctype": "Stock Ledger Entry",
"onboard": 1,
"label":_("Inventory Listing"),
"dependencies": ["Item"],
},
{
"type": "report",
"is_query_report": True,
"name": "Stock Ledger",
"doctype": "Stock Ledger Entry",
"onboard": 1,
"label":_("My Dashboard"),
"route": "#dashboard/Inventory",
"dependencies": ["Item"],
}
]
},
{
"label": _("Purchase Reports"),
"icon": "fa fa-table",
"items": [
{
"type": "report",
"is_query_report": True,
"name": "Purchase Analytics",
"reference_doctype": "Purchase Order",
"onboard": 1
},
{
"type": "report",
"is_query_report": True,
"name": "Supplier-Wise Sales Analytics",
"reference_doctype": "Stock Ledger Entry",
"onboard": 1
},
{
"type": "report",
"is_query_report": True,
"name": "Purchase Order Trends",
"reference_doctype": "Purchase Order",
"onboard": 1,
},
{
"type": "report",
"is_query_report": True,
"name": "Procurement Tracker",
"reference_doctype": "Purchase Order",
"onboard": 1,
},
{
"type": "report",
"is_query_report": True,
"name": "Requested Items To Be Ordered",
"reference_doctype": "Material Request",
"onboard": 1,
},
]
},
]
| StarcoderdataPython |
12845211 | <filename>globals/mime.py
# -*- coding: utf-8 -*-
## \package globals.mime
# MIT licensing
# See: docs/LICENSE.txt
from globals.execute import GetCommandOutput
from globals.execute import GetExecutable
## TODO: Doxygen
def GetFileMimeType(filename):
CMD_file = GetExecutable(u'file')
if not CMD_file:
return None
return GetCommandOutput(CMD_file, (u'--mime-type', u'--brief', filename,))
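## Illustrative usage (assumes a system where the 'file' utility is installed):
## GetFileMimeType(u'/etc/passwd') would be expected to return u'text/plain',
## or None if the 'file' executable cannot be found.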
| StarcoderdataPython |
6420148 | """Serveradmin - Servershell
Copyright (c) 2019 InnoGames GmbH
"""
| StarcoderdataPython |
6682117 | from .pathlib_ext import *
| StarcoderdataPython |
3299777 | # Generated by Django 3.2.5 on 2021-07-08 07:56
from django.db import migrations, models
import django.db.models.deletion
class Migration(migrations.Migration):
initial = True
dependencies = [
]
operations = [
migrations.CreateModel(
name='Company',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_name', models.CharField(db_column='name', max_length=45, null=True)),
('company_password', models.CharField(db_column='password', max_length=45, null=True)),
('company_description', models.TextField(db_column='description', null=True)),
('company_sector', models.CharField(db_column='sector', max_length=45, null=True)),
('company_country', models.CharField(db_column='country', max_length=45, null=True)),
('company_street', models.CharField(db_column='street', max_length=45, null=True)),
('company_city', models.CharField(db_column='city', max_length=45, null=True)),
('company_st_number', models.IntegerField(db_column='number', null=True)),
('company_postal_code', models.IntegerField(db_column='postal_code', null=True)),
('n_employees', models.IntegerField(db_column='number_of_employees', null=True)),
('company_website', models.URLField(db_column='website', null=True)),
],
options={
'db_table': 'company',
},
),
migrations.CreateModel(
name='User',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(db_column='username', max_length=45, null=True, unique=True)),
('user_password', models.CharField(db_column='password', max_length=45, null=True)),
('user_name', models.CharField(db_column='name', max_length=45, null=True)),
('last_name1', models.CharField(db_column='1st_last_Name', max_length=45, null=True)),
('last_name2', models.CharField(db_column='2nd_last_name', max_length=45, null=True)),
('user_age', models.IntegerField(db_column='age', null=True)),
('user_email', models.CharField(db_column='email', max_length=45, null=True)),
('user_number', models.BigIntegerField(db_column='mobile_number', null=True)),
('user_education', models.CharField(db_column='education', max_length=45, null=True)),
('user_volunteer', models.CharField(db_column='volunteer_work', max_length=45, null=True)),
('user_country', models.CharField(db_column='country', max_length=45, null=True)),
('user_street', models.CharField(db_column='street', max_length=45, null=True)),
('user_st_number', models.IntegerField(db_column='number', null=True)),
('user_city', models.CharField(db_column='city', max_length=45, null=True)),
('user_postal_code', models.IntegerField(db_column='postal_code', null=True)),
('user_CV', models.TextField(db_column='cv', null=True)),
],
options={
'db_table': 'user',
},
),
migrations.CreateModel(
name='Work_Experience',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('username', models.CharField(db_column='username', max_length=45, null=True)),
('job_title', models.CharField(db_column='job_title', max_length=45, null=True)),
('company', models.CharField(db_column='company', max_length=45, null=True)),
('duration', models.FloatField(db_column='duration_years', null=True)),
('user', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.user')),
],
options={
'db_table': 'work_experience',
},
),
migrations.CreateModel(
name='Job',
fields=[
('id', models.BigAutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('company_name', models.CharField(db_column='company_names', max_length=45, null=True)),
('job_title', models.CharField(db_column='job_title', max_length=45, null=True)),
('job_description', models.TextField(db_column='job_description', null=True)),
('company', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, to='app1.company')),
],
options={
'db_table': 'job',
},
),
]
| StarcoderdataPython |
82362 | <filename>tests/test_matrix.py
from rigidbody import (Matrix, svd, approx)
def test_matrix_equality():
A = Matrix(3, 3)
B = Matrix(3, 3)
assert(A == B)
A = Matrix.identity(3, 3)
B = Matrix.identity(3, 3)
assert(A == B)
A = Matrix.zero(3, 3)
B = Matrix.zero(3, 3)
assert(A == B)
def test_matrix_identities():
A = Matrix.identity(3, 3)
A[0, 1] = 1
assert(A + Matrix.zero(3, 3) == A)
assert(Matrix.zero(3, 3) + A == A)
assert(A * Matrix.identity(3, 3) == A)
assert(Matrix.identity(3, 3) * A == A)
def test_matrix_addition():
A = Matrix(3, 3)
B = Matrix(3, 3)
C = Matrix(3, 3)
A[0, 1] = 1
B[1, 0] = 2
C[0, 1] = 1
C[1, 0] = 2
assert(A + B == C)
assert(B + A == C)
def test_matrix_subtraction():
A = Matrix(3, 3)
B = Matrix(3, 3)
C = Matrix(3, 3)
A[0, 1] = 1
B[1, 0] = 2
C[0, 1] = 1
C[1, 0] = 2
assert(C - A == B)
assert(C - B == A)
def test_matrix_multiplication():
A = Matrix(3, 3)
B = Matrix(3, 3)
AB = Matrix(3, 3)
BA = Matrix(3, 3)
A[0, :] = [1, 0, 0]
A[1, :] = [0, 0, 1]
A[2, :] = [0, 1, 0]
B[0, :] = [1, 2, 3]
B[1, :] = [4, 5, 6]
B[2, :] = [7, 8 ,9]
AB[0, :] = [1, 2, 3]
AB[1, :] = [7, 8 ,9]
AB[2, :] = [4, 5, 6]
assert(A * B == AB)
BA[0, :] = [1, 3, 2]
BA[1, :] = [4, 6, 5]
BA[2, :] = [7 ,9, 8]
assert(B * A == BA)
def test_matrix_approx():
A = Matrix(3, 3)
B = Matrix(3, 3)
A[0, :] = B[0, :] = [1, 2, 3]
A[1, :] = B[1, :] = [4, 5, 6]
A[2, :] = B[2, :] = [7, 8 ,9]
assert(approx(A, B, tol=0.001))
A[0, 0] += 0.002
assert(not approx(A, B, tol=0.001))
def test_matrix_svd():
A = Matrix(3, 3)
A[0, :] = [1, 2, 3]
A[1, :] = [4, 5, 6]
A[2, :] = [7, 8 ,9]
U, S, V = svd(A)
assert(approx(U * S * V.transposed(), A))
| StarcoderdataPython |
6413333 | from diskspace import bytes_to_readable, subprocess_check_output, show_space_list, args, print_tree
import os
import unittest
import StringIO
import sys
class TestMethods(unittest.TestCase):
def setUp(self):
self.largest_size = 8
self.total_size = 4
self.cmd = 'du '
self.path = os.path.abspath('.')
self.cmd += self.path
self.file_tree = {self.path: {'print_size': '50.00Kb', 'children': [], 'size': 3}}
def test_bytes_to_readable(self):
blocks = 100
self.assertEqual(bytes_to_readable(blocks), "50.00Kb")
def test_bytes_to_readable_wrong(self):
blocks = 100
self.assertNotEqual(bytes_to_readable(blocks), "50.00Mb")
def test_subprocess_check_output(self):
path = subprocess_check_output(self.cmd)
self.assertIn(self.path, path)
def test_print_tree(self):
capturedOutput = StringIO.StringIO()
sys.stdout = capturedOutput
print_tree(self.file_tree, self.file_tree[self.path], self.path,
self.largest_size, self.total_size)
sys.stdout = sys.__stdout__
self.assertEqual('50.00Kb 75% '+self.path, capturedOutput.getvalue().strip())
def test_show_space_list(self):
capturedOutput = StringIO.StringIO()
sys.stdout = capturedOutput
show_space_list(args.directory, args.depth, order=(args.order == 'desc'))
sys.stdout = sys.__stdout__
self.assertIn('Size (%) File' and self.path, capturedOutput.getvalue().strip())
suite = unittest.TestLoader().loadTestsFromTestCase(TestMethods)
unittest.TextTestRunner(verbosity=2).run(suite)
| StarcoderdataPython |
103199 | <filename>tests/config.py
import os
from aiohttp import BasicAuth
FHIR_SERVER_URL = os.environ.get("FHIR_SERVER_URL", "http://localhost:8080/fhir")
FHIR_SERVER_AUTHORIZATION = os.environ.get(
"FHIR_SERVER_AUTHORIZATION", BasicAuth("root", "secret").encode()
)
| StarcoderdataPython |
1793404 | <reponame>patdaburu/hoonds
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import unittest
from hoonds.errors import CustomException
class TestCustomException(CustomException):
pass
class TestCustomExceptionSuite(unittest.TestCase):
def test_initWithoutInner_verify(self):
ge = TestCustomException(message='Test Message')
self.assertEqual('Test Message', ge.message)
self.assertIsNone(ge.inner)
def test_initWithInner_verify(self):
inner = Exception()
ge = TestCustomException(message='Test Message',
inner=inner)
self.assertEqual('Test Message', ge.message)
self.assertTrue(ge.inner == inner) | StarcoderdataPython |
6417253 | #!/usr/bin/python
"""
A simple script to install Leo on Linux.
Contributed by <NAME> <<EMAIL>>
"""
import commands,os,sys # commands module is for Unix only.
# We must be root to use this script.
if os.getuid() != 0:
print("You need to run this install script as root")
sys.exit(1)
# Create /usr/lib/leo and copy all files there.
print("***** Installing Leo to /usr/lib/leo...")
commands.getoutput("mkdir -p /usr/lib/leo")
commands.getoutput("cp -rp * /usr/lib/leo")
# Create user's 'leo' command script into /usr/bin/leo
print("***** Creating Leo startup script -> /usr/bin/leo")
fd = open("/usr/bin/leo", "w")
fd.write("""#!/usr/bin/python
import commands,sys
files = " ".join(sys.argv[1:])
print(commands.getoutput("python /usr/lib/leo/leo.py %s" % files))
""")
fd.close()
commands.getoutput("chmod 755 /usr/bin/leo")
print("***** Leo installed successfully - type 'leo filename.leo' to use it.")
| StarcoderdataPython |
3310837 | import os
import re
def _create_path(base: str, tail: str) -> str:
"""
Creates a safe path according to which OS the program is running on
:param base: The current path
:param tail: The last part of the path that is to be added
:return: A new OS safe path
"""
return base + ('\\' if os.name == 'nt' else '/') + __safe_file_name(tail)
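# Illustrative example: on a POSIX system, _create_path('/tmp', 'a<b>?.txt')
# returns '/tmp/ab.txt' -- the forbidden characters are stripped by
# __safe_file_name() and the separator matches the running OS ('\\' on Windows).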
def __safe_file_name(name: str) -> str:
"""
This helper is responsible for removing forbidden OS characters from a certain string.
:param name: String to be converted
:return: Safe string
"""
return re.sub(r'<|>|/|:|\"|\\|\||\?|\*', '', name) | StarcoderdataPython |
11300298 | # MetaPerson.py
from collections import defaultdict
from utils.utils import dump_json
from utils.timeprofile import TimeProfile as TP
from utils.timeprofile import TimeProfileEntry as TPE
from utils.timeprofile import PatDur as PD
class MetaPerson:
mid = 1
def __init__(self, tps, capacity, probability=1.0, people=None):
self.id = MetaPerson.mid
MetaPerson.mid += 1
self.tps = tps
self.capacity = capacity
self.probability = probability
self.people = people
for p in self.people:
p.mid = self.id
@staticmethod
def parse_from_list(plist):
tp = TP(TPE(PD.merge(x for person in plist for x in person.tp)))
# TODO: is this the right thing to do?
capacity = defaultdict(int)
for person in plist:
for meid, ct in person.capacity.items():
capacity[meid] += ct
capacity_list = [{'metaevent-id' : meid, 'probability' : ct}
for meid, ct in capacity.items()
if meid is not None]
return MetaPerson(tps=tp, capacity=capacity_list,
probability=len(plist), people=plist)
def __str__(self):
return f'''\
MetaPerson {self.id};
on {self.tps}'''
def as_dict(self):
return {
'id' : self.id,
'probability' : self.probability,
'time-profiles' : self.tps.as_dict(),
'event-affinity' : self.capacity
}
@staticmethod
def dump(metapeople, fname):
dump_json(fname, [mp.as_dict() for mp in metapeople])
| StarcoderdataPython |
192697 | <gh_stars>10-100
import time
import numpy as np
import os
import pickle as pkl
import re
from collections import OrderedDict
from openbox.utils.constants import SUCCESS
from openbox.optimizer.smbo import SMBO
from solnml.components.optimizers.base_optimizer import BaseOptimizer, MAX_INT
cur_dir = os.path.dirname(__file__)
source_dir = os.path.join('%s', '..', 'transfer_learning', 'tlbo', 'runhistory') % cur_dir
class TlboOptimizer(BaseOptimizer):
def __init__(self, evaluator, config_space, name, surrogate_type='tlbo_rgpe_prf',
metric='bal_acc', time_limit=None, evaluation_limit=None,
per_run_time_limit=300, per_run_mem_limit=1024, output_dir='./',
inner_iter_num_per_iter=1, seed=1, n_jobs=1):
super().__init__(evaluator, config_space, name, seed)
self.time_limit = time_limit
self.evaluation_num_limit = evaluation_limit
self.inner_iter_num_per_iter = inner_iter_num_per_iter
self.per_run_time_limit = per_run_time_limit
self.per_run_mem_limit = per_run_mem_limit
self.output_dir = output_dir
# TODO: leave target out
if hasattr(evaluator, 'estimator_id'):
estimator_id = evaluator.estimator_id
else:
raise ValueError
runhistory_dir = os.path.join(source_dir, 'hpo2', '%s_%s_%s') % ('hpo', metric, estimator_id)
dataset_names = get_datasets(runhistory_dir, estimator_id, metric)
source_data = load_runhistory(runhistory_dir, dataset_names, estimator_id, metric)
self.optimizer = SMBO(self.evaluator, config_space,
history_bo_data=source_data,
surrogate_type=surrogate_type,
max_runs=int(1e10),
time_limit_per_trial=self.per_run_time_limit,
logging_dir=output_dir)
self.trial_cnt = 0
self.configs = list()
self.perfs = list()
self.exp_output = dict()
self.incumbent_perf = float("-INF")
self.incumbent_config = self.config_space.get_default_configuration()
# Estimate the size of the hyperparameter space.
hp_num = len(self.config_space.get_hyperparameters())
if hp_num == 0:
self.config_num_threshold = 0
else:
_threshold = int(len(set(self.config_space.sample_configuration(10000))) * 0.75)
self.config_num_threshold = _threshold
self.logger.debug('The maximum trial number in HPO is: %d' % self.config_num_threshold)
self.maximum_config_num = min(600, self.config_num_threshold)
self.early_stopped_flag = False
self.eval_dict = {}
def run(self):
while True:
evaluation_num = len(self.perfs)
if self.evaluation_num_limit is not None and evaluation_num > self.evaluation_num_limit:
break
if self.time_limit is not None and time.time() - self.start_time > self.time_limit:
break
self.iterate()
return np.max(self.perfs)
def iterate(self, budget=MAX_INT):
_start_time = time.time()
for _ in range(self.inner_iter_num_per_iter):
if len(self.configs) >= self.maximum_config_num:
self.early_stopped_flag = True
self.logger.warning('Already explored 70 percentage of the '
'hyperspace or maximum configuration number met: %d!' % self.maximum_config_num)
break
if time.time() - _start_time > budget:
self.logger.warning('Time limit exceeded!')
break
_config, _status, _perf, _ = self.optimizer.iterate()
if _status == SUCCESS:
self.exp_output[time.time()] = (_config, _perf)
self.configs.append(_config)
self.perfs.append(-_perf)
runhistory = self.optimizer.get_history()
if self.name == 'hpo':
if hasattr(self.evaluator, 'fe_config'):
fe_config = self.evaluator.fe_config
else:
fe_config = None
self.eval_dict = {(fe_config, hpo_config): [-score, time.time()] for hpo_config, score in
runhistory.data.items()}
else:
if hasattr(self.evaluator, 'hpo_config'):
hpo_config = self.evaluator.hpo_config
else:
hpo_config = None
self.eval_dict = {(fe_config, hpo_config): [-score, time.time()] for fe_config, score in
runhistory.data.items()}
self.incumbent_config, self.incumbent_perf = runhistory.get_incumbents()[0]
self.incumbent_perf = -self.incumbent_perf
iteration_cost = time.time() - _start_time
# incumbent_perf: the large the better
return self.incumbent_perf, iteration_cost, self.incumbent_config
def get_metafeature_vector(metafeature_dict):
sorted_keys = sorted(metafeature_dict.keys())
return np.array([metafeature_dict[key] for key in sorted_keys])
def get_datasets(runhistory_dir, estimator_id, metric, task_id='hpo'):
_datasets = list()
pattern = r'(.*)-%s-%s-%s.pkl' % (estimator_id, metric, task_id)
for filename in os.listdir(runhistory_dir):
result = re.search(pattern, filename, re.M | re.I)
if result is not None:
_datasets.append(result.group(1))
return _datasets
def load_runhistory(runhistory_dir, dataset_names, estimator_id, metric, task_id='hpo'):
metafeature_file = os.path.join(source_dir, 'metafeature.pkl')
with open(metafeature_file, 'rb') as f:
metafeature_dict = pkl.load(f)
for dataset in metafeature_dict.keys():
vec = get_metafeature_vector(metafeature_dict[dataset])
metafeature_dict[dataset] = vec
runhistory = list()
for dataset in dataset_names:
_filename = '%s-%s-%s-%s.pkl' % (dataset, estimator_id, metric, task_id)
with open(os.path.join(runhistory_dir, _filename), 'rb') as f:
data = pkl.load(f)
runhistory.append(OrderedDict(data))
return runhistory
| StarcoderdataPython |
# Create a program with a leiaInt() function that works much like Python's
# built-in input(), but validates the input so that only a numeric value is
# accepted. Ex: n = leiaInt('Enter a number: ')
# Rewrite the leiaInt() function from challenge 104, now also handling the
# case where a value of an invalid type is typed. Also create a leiaFloat()
# function with the same behaviour.
def leiaInt(msg):
while True:
try:
num = int(input(msg))
except Exception as erro:
print(f'Não é um número válido. Erro: {erro}')
continue
else:
return num
def leiaFloat(msg):
while True:
try:
num = float(input(msg))
except Exception as erro:
print(f'Não é um número válido. Erro: {erro}')
continue
else:
return num
n = leiaInt('Número: ')
print(n)
n = leiaFloat('Número: ')
print(n)
| StarcoderdataPython |
96699 | <filename>SRC/engine/IO/GUI/skeletonselectiontoolboxGUI.py
# -*- python -*-
# This software was produced by NIST, an agency of the U.S. government,
# and by statute is not subject to copyright in the United States.
# Recipients of this software assume all responsibilities associated
# with its operation, modification and maintenance. However, to
# facilitate maintenance we ask that before distributing modified
# versions of this software, you first contact the authors at
# <EMAIL>.
from ooflib.SWIG.common import config
from ooflib.common import debug
from ooflib.common.IO.GUI import genericselectGUI
from ooflib.common.IO.GUI import gtklogger
from ooflib.common.IO.GUI import toolboxGUI
from ooflib.engine import skeletonselectionmodes
from ooflib.engine import skeletonselmodebase
import gtk
# The SkeletonSelectionToolbox GUI is a ToolboxGUI that contains other
# ToolboxGUI's. The inner GUI's are instances of
# SkeletonSelectionToolboxModeGUI. Inner toolboxes are selected by a
# set of radio buttons at the top of the outer toolbox. The inner
# toolboxes and the buttons are created automatically from the
# SkeletonSelectionMode classes. Each of the inner gui toolboxes
# corresponds to a non-gui toolbox class.
tbclasses = {}
class SkeletonSelectionToolboxGUI(toolboxGUI.GfxToolbox):
def __init__(self, toolbox):
# The 'toolbox' argument here is the non-gui toolbox
# corresponding to one of the inner toolboxes. It doesn't
# matter which one.
toolboxGUI.GfxToolbox.__init__(self, toolbox)
vbox = gtk.VBox(spacing=2)
self.gtk.add(vbox)
bbox = gtk.HBox(spacing=2)
gtklogger.setWidgetName(bbox, "Select")
vbox.pack_start(bbox, expand=0, fill=0)
bbox.pack_start(gtk.Label("Select: "), expand=0, fill=0)
self.tbbox = gtk.Frame() # holds SkelSelToolboxModeGUIs
vbox.pack_start(self.tbbox, expand=1, fill=1)
group = None
self.tbdict = {}
modebuttons = []
skeletonselectionmodes.initialize()
for mode in skeletonselmodebase.SkeletonSelectionMode.modes:
if group:
button = gtk.RadioButton(label=mode.name, group=group)
else:
button = gtk.RadioButton(label=mode.name)
group = button
modebuttons.append(button)
gtklogger.setWidgetName(button, mode.name)
gtklogger.connect(button, 'clicked', self.switchModeCB, mode.name)
# Get the actual toolbox for each mode
tb = self.gfxwindow().getToolboxByName(mode.toolboxName())
tbgui = tbclasses[mode.name](tb, tb.method)
self.tbdict[mode.name] = tbgui
table = gtk.Table(columns=2, rows=2)
bbox.pack_start(table, expand=0, fill=0)
table.attach(modebuttons[0], 0,1, 0,1)
table.attach(modebuttons[1], 1,2, 0,1)
table.attach(modebuttons[2], 0,1, 1,2)
table.attach(modebuttons[3], 1,2, 1,2)
self.currentMode = None
self.setMode(skeletonselmodebase.firstMode().name)
def displayName(self):
return "Skeleton Selection"
def switchModeCB(self, button, modename):
if button.get_active():
self.setMode(modename)
def setMode(self, modename):
debug.mainthreadTest()
if self.currentMode:
mode = self.tbdict[self.currentMode]
mode.deactivate()
self.tbbox.remove(self.tbbox.get_children()[0])
self.currentMode = modename
subtb = self.tbdict[modename]
self.tbbox.add(subtb.gtk)
self.installMouseHandler()
subtb.show()
subtb.activate()
def close(self):
for tb in self.tbdict.values():
tb.close()
def activate(self):
if not self.active:
if self.currentMode is None:
self.setMode(skeletonselmodebase.firstMode().name)
else:
self.tbdict[self.currentMode].activate()
toolboxGUI.GfxToolbox.activate(self)
def deactivate(self):
if self.active:
if self.currentMode is not None:
self.tbdict[self.currentMode].deactivate()
toolboxGUI.GfxToolbox.deactivate(self)
def installMouseHandler(self):
if self.currentMode is not None:
self.tbdict[self.currentMode].installMouseHandler()
#=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=##=--=#
# Generate selection toolbox GUI subclasses for each of the selection
# modes (Element, Node, etc) defined in skeletonselectionmodes.py.
# Although there are many non-gui SkeletonSelectionToolboxes, they
# all share a GUI panel, so only one of them has a makeGUI function.
first = True
for mode in skeletonselmodebase.SkeletonSelectionMode.modes:
class SkelSelToolboxModeGUI(genericselectGUI.GenericSelectToolboxGUI):
selectionMode = mode
changeSignal = mode.changedselectionsignal
def displayName(self, name=mode.name):
return "Select " + name + "s"
tbclasses[mode.name] = SkelSelToolboxModeGUI
if first:
def _makeGUI(self):
return SkeletonSelectionToolboxGUI(self)
mode.toolboxclass.makeGUI = _makeGUI
first = False
| StarcoderdataPython |
281746 | <filename>immunopy/MMCorePyFake.py
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on 2014-05-28
@author: <NAME>
"""
import os
import Tkinter as tk
import ttk
import tkFileDialog
import threading
import numpy as np
from scipy import misc
try:
import MMCorePy
base = MMCorePy.CMMCore
MM_INSTALLED = True
except ImportError:
base = object
MM_INSTALLED = False
class CMMCore(base):
def __init__(self):
super(CMMCore, self).__init__()
# path = 'image/hdab256.tif'
# path = 'image/2px_um.tif'
path = 'image/Ki6720x_blue_filter.tif'
curdir = os.path.dirname(os.path.abspath(__file__))
self.set_image(os.path.join(curdir, path))
MainWindow(mmcore=self)
def getImageHeight(self):
return self.frame.shape[0]
def getImageWidth(self):
return self.frame.shape[1]
def setROI(self, x, y, w, h):
print("setROI: %d %d %d %d") % (x, y, w, h)
if self.RGB32.shape[0] < (y + h) or self.RGB32.shape[1] < (x + w):
raise ValueError(
"ROI %d, %d, %dx%d is bigger than image" % (x, y, w, h))
self.frame = self.RGB32[y:y+h, x:x+w].copy()
def clearROI(self):
self.frame = self.RGB32.copy()
def getLastImage(self):
return self.frame.copy()
def getImage(self):
return self.frame.copy()
def popNextImage(self):
return self.frame.copy()
def set_image(self, path):
self.RGB = misc.imread(path)
self.BGR = self.RGB[:,:,::-1]
self.BGRA = np.dstack(
(self.BGR, np.zeros((self.BGR.shape[0], self.BGR.shape[1]),
dtype=np.uint8)))
self.RGB32 = self.BGRA.view(dtype=np.uint32)
self.frame = self.RGB32
# If Micromanager isn't installed
if not MM_INSTALLED:
print("BAD NEWS")
def loadSystemConfiguration(self, config_name):
pass
def getCameraDevice(self):
return "Fake camera"
def startContinuousSequenceAcquisition(self, bool_):
pass
def snapImage(self):
pass
def loadDevice(self, *device):
self.input_video = ', '.join(device)
print("Device '%s' loaded" % self.input_video)
def initializeDevice(self, devname):
print("Device '%s' initialized" % devname)
def setCameraDevice(self, devname):
print("Device camera '%s' initialized" % devname)
def hasProperty(self, *props):
pass
def setProperty(self, *props):
print("Props '%s' setted" % ', '.join([str(k) for k in props]))
def setCircularBufferMemoryFootprint(self, value):
pass
def enableStderrLog(self, bool_):
pass
def enableDebugLog(self, bool_):
pass
def getBufferTotalCapacity(self):
return 0.
def getDevicePropertyNames(self, label):
assert(label == "Fake camera")
return ("Exposure", "Gain")
def getImageBufferSize(self):
return 0.
def getRemainingImageCount(self):
return 2
def stopSequenceAcquisition(self):
pass
def reset(self):
print("MMAdapterFake: Fake input_video `%s` reseted." % self.input_video)
class MainWindow(threading.Thread):
def __init__(self, mmcore):
super(MainWindow, self).__init__()
self.mmc = mmcore
self.image_list = None
self.start()
def choose_directory(self):
"""Returns a selected directoryname."""
self.dir_opt = {
'mustexist': True,
'parent': self.root,
'title': 'Images folder'}
dname = tkFileDialog.askdirectory(**self.dir_opt)
if dname:
self.scan_dir(dname)
def on_select(self, evt):
w = evt.widget
print('Image: %s' % [w.get(int(i)) for i in w.curselection()])
if w.size() > 0:
# return self.image_list[w.curselection()[0]]
# int() here for Windows
self.mmc.set_image(self.image_list[int(w.curselection()[0])])
else:
return None
def scan_dir(self, directiry):
self.image_list = list()
self.list_bx.delete(0, tk.END)
for f in sorted(os.listdir(directiry)):
fpath = os.path.join(directiry, f)
if os.path.isfile(fpath) and '.tif' in f:
self.list_bx.insert(tk.END, f)
self.image_list.append(fpath)
def run(self):
self.root = tk.Tk()
self.root.wm_attributes('-topmost', 1)
self.root.protocol("WM_DELETE_WINDOW", self.root.quit)
self.root.title("Micromanager fake")
self.label = ttk.Label(self.root, text="Which image should I return?")
self.label.pack()
self.list_bx = tk.Listbox(self.root, name='lb')
self.list_bx.bind('<<ListboxSelect>>', self.on_select)
self.list_bx.pack()
self.open_btn = ttk.Button(self.root, text="Locate TIFF's", command=self.choose_directory)
self.open_btn.pack(side='left')
self.close_btn = ttk.Button(self.root, text="Close", command=self.root.destroy)
self.close_btn.pack(side='right')
self.root.mainloop()
| StarcoderdataPython |
9652160 | """
Copyright (c) 2018 ARM Limited
SPDX-License-Identifier: Apache-2.0
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import time
from mbed_host_tests import BaseHostTest
from mbed_host_tests.host_tests_runner.host_test_default import DefaultTestSelector
DEFAULT_CYCLE_PERIOD = 1.0
MSG_VALUE_DUMMY = '0'
MSG_KEY_DEVICE_READY = 'ready'
MSG_KEY_DEVICE_RESET = 'reset'
MSG_KEY_SYNC = '__sync'
class SystemResetTest(BaseHostTest):
"""Test for the system_reset API.
Given a device running code
When the device is restarted using @a system_reset()
Then the device is restarted
"""
def __init__(self):
super(SystemResetTest, self).__init__()
self.reset = False
self.test_steps_sequence = self.test_steps()
# Advance the coroutine to it's first yield statement.
self.test_steps_sequence.send(None)
def setup(self):
self.register_callback(MSG_KEY_DEVICE_READY, self.cb_device_ready)
def cb_device_ready(self, key, value, timestamp):
"""Acknowledge device rebooted correctly and feed the test execution
"""
self.reset = True
try:
if self.test_steps_sequence.send(value):
self.notify_complete(True)
except (StopIteration, RuntimeError) as exc:
self.notify_complete(False)
def test_steps(self):
"""Reset the device and check the status
"""
system_reset = yield
self.reset = False
wait_after_reset = self.get_config_item('forced_reset_timeout')
wait_after_reset = wait_after_reset if wait_after_reset is not None else DEFAULT_CYCLE_PERIOD
self.send_kv(MSG_KEY_DEVICE_RESET, MSG_VALUE_DUMMY)
time.sleep(wait_after_reset)
self.send_kv(MSG_KEY_SYNC, MSG_VALUE_DUMMY)
system_reset = yield
if self.reset == False:
raise RuntimeError('Platform did not reset as expected.')
# The sequence is correct -- test passed.
yield True
| StarcoderdataPython |
5125360 | <reponame>franciscomcdias/SpoKlient
from spoklient.outlookservices.item import Item
class Event(Item):
"""An event in a calendar."""
| StarcoderdataPython |
9623191 | <gh_stars>1-10
from app.common.command_handler import CommandHandler
from app.event.publisher import IntegrationEventPublisher
from app.tweet.application.dislike_tweet.command import DislikeTweet
from app.tweet.domain.repository import TweetAggregateRepository
from app.tweet.event import TweetDisliked
class DislikeTweetCommandHandler(CommandHandler[DislikeTweet]):
def __init__(self, tweet_aggregate_repo: TweetAggregateRepository, event_publisher: IntegrationEventPublisher):
self.tweet_aggregate_repo = tweet_aggregate_repo
self.event_publisher = event_publisher
async def handle(self, command: DislikeTweet) -> None:
tweet = await self.tweet_aggregate_repo.find_by_id(command.tweet_id)
tweet.dislike(command.disliked_user_id)
await self.tweet_aggregate_repo.save(tweet)
await self.event_publisher.publish(TweetDisliked(command.tweet_id, command.disliked_user_id))
| StarcoderdataPython |
29976 | <reponame>ExpressApp/pybotx
"""Implementation for bot classes."""
import asyncio
from dataclasses import InitVar, field
from typing import Any, Callable, Dict, List
from weakref import WeakSet
from loguru import logger
from pydantic.dataclasses import dataclass
from botx import concurrency, exception_handlers, exceptions, shared, typing
from botx.bots.mixins import (
clients,
collectors,
exceptions as exception_mixin,
lifespan,
middlewares,
)
from botx.clients.clients import async_client, sync_client as synchronous_client
from botx.collecting.collectors.collector import Collector
from botx.dependencies.models import Depends
from botx.middlewares.authorization import AuthorizationMiddleware
from botx.middlewares.exceptions import ExceptionMiddleware
from botx.models import credentials, datastructures, menu
from botx.models.messages.message import Message
@dataclass(config=shared.BotXDataclassConfig)
class Bot( # noqa: WPS215
collectors.BotCollectingMixin,
clients.ClientsMixin,
lifespan.LifespanMixin,
middlewares.MiddlewareMixin,
exception_mixin.ExceptionHandlersMixin,
):
"""Class that implements bot behaviour."""
dependencies: InitVar[List[Depends]] = field(default=None)
bot_accounts: List[credentials.BotXCredentials] = field(default_factory=list)
startup_events: List[typing.BotLifespanEvent] = field(default_factory=list)
shutdown_events: List[typing.BotLifespanEvent] = field(default_factory=list)
client: async_client.AsyncClient = field(init=False)
sync_client: synchronous_client.Client = field(init=False)
collector: Collector = field(init=False)
exception_middleware: ExceptionMiddleware = field(init=False)
state: datastructures.State = field(init=False)
dependency_overrides: Dict[Callable, Callable] = field(
init=False,
default_factory=dict,
)
tasks: WeakSet = field(init=False, default_factory=WeakSet)
async def __call__(self, message: Message) -> None:
"""Iterate through collector, find handler and execute it, running middlewares.
Arguments:
message: message that will be proceed by handler.
"""
self.tasks.add(asyncio.ensure_future(self.exception_middleware(message)))
def __post_init__(self, dependencies: List[Depends]) -> None:
"""Initialize special fields.
Arguments:
dependencies: initial background dependencies for inner collector.
"""
self.state = datastructures.State()
self.client = async_client.AsyncClient()
self.sync_client = synchronous_client.Client()
self.collector = Collector(
dependencies=dependencies,
dependency_overrides_provider=self,
)
self.exception_middleware = ExceptionMiddleware(self.collector)
self.add_exception_handler(
exceptions.DependencyFailure,
exception_handlers.dependency_failure_exception_handler,
)
self.add_exception_handler(
exceptions.NoMatchFound,
exception_handlers.no_match_found_exception_handler,
)
self.add_middleware(AuthorizationMiddleware)
async def status(self, *args: Any, **kwargs: Any) -> menu.Status:
"""Generate status object that could be return to BotX API on `/status`.
Arguments:
args: additional positional arguments that will be passed to callable
status function.
kwargs: additional key arguments that will be passed to callable
status function.
Returns:
Built status for returning to BotX API.
"""
status = menu.Status()
for handler in self.handlers:
if callable(handler.include_in_status):
include_in_status = await concurrency.callable_to_coroutine(
handler.include_in_status,
*args,
**kwargs,
)
else:
include_in_status = handler.include_in_status
if include_in_status:
status.result.commands.append(
menu.MenuCommand(
description=handler.description or "",
body=handler.body,
name=handler.name,
),
)
return status
async def execute_command(self, message: dict) -> None:
"""Process data with incoming message and handle command inside.
Arguments:
message: incoming message to bot.
"""
logger.bind(botx_bot=True, payload=message).debug("process incoming message")
msg = Message.from_dict(message, self)
# raise UnknownBotError if not registered.
self.get_account_by_bot_id(msg.bot_id)
await self(msg)
async def authorize(self, *args: Any) -> None:
"""Process auth for each bot account."""
for account in self.bot_accounts:
try:
token = await self.get_token(
account.host,
account.bot_id,
account.signature,
)
except (exceptions.BotXAPIError, exceptions.BotXConnectError) as exc:
logger.bind(botx_bot=True).warning(
f"Credentials `host - {account.host}, " # noqa: WPS305
f"bot_id - {account.bot_id}` are invalid. "
f"Reason - {exc.message_template}",
)
continue
account.token = token
| StarcoderdataPython |
8074103 | <filename>gym_environments/plot.py
import os
import numpy as np
import matplotlib.pyplot as plt
import argparse
import fnmatch
import gym
import gym_shadow_hand
ALGOS = ["DDPG", "HER+DDPG", "DDPGfED", "HER+DDPGfED"]
ALGOS.extend([algo.lower() for algo in ALGOS])
def moving_average(values, window):
"""
Smooth values by doing a moving average
:param values: (numpy array)
:param window: (int)
:return: (numpy array)
"""
weights = np.repeat(1.0, window) / window
return np.convolve(values, weights, 'valid')
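# Illustrative example: moving_average(np.array([1, 2, 3, 4, 5]), window=3)
# returns array([2., 3., 4.]) -- each output value is the mean of a
# three-element sliding window.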
if __name__ == "__main__":
parser = argparse.ArgumentParser()
parser.add_argument('--env', type=str, default="shadow_hand_reach-v1", help='environment ID')
parser.add_argument('--algo', help='RL Algorithm', default='DDPGfED', type=str, nargs='+', choices=ALGOS)
parser.add_argument('--agent', help='Location of the agent', type=str, nargs='+', required=True)
parser.add_argument('--window-size', help='Window size', type=int, default=5)
parser.add_argument('--single-worker', help='Use single workers', action='store_true', required=False)
args = parser.parse_args()
env = gym.make(args.env)
legacy_env = False
if args.env.endswith("v0"):
legacy_env = True
n_agents = len(args.agent)
window_size = args.window_size
if n_agents is not len(args.algo):
args.algo = [args.algo] * n_agents
expert_batch_size_all, training_success_median = [], []
result_median, result_min, result_max = [], [], []
success_median, success_min, success_max = [], [], []
x, x_batch = [], []
eval_episodes = False
for n in range(n_agents):
try:
agent = args.agent[n].split(":")[0]
except IndexError:
agent = args.agent[n]
if "her" in args.algo[n] or "HER" in args.algo[n]:
args.algo[n] = "her"
log_dir = os.path.join(os.path.dirname(__file__), "logs", args.algo[n].lower(), agent)
logs = fnmatch.filter(os.listdir(log_dir), "*.npz")
n_worker = len(logs)
# Get length
path = os.path.join(log_dir, logs[0])
data = np.load(path)
n_eval, n_eval_ep, ep_length = data["results"].shape[0], data["results"].shape[1], data["results"].shape[2]
if "episode" in logs[0]:
x.append(data["episodes"])
eval_episodes = True
else:
if legacy_env or args.single_worker:
x.append(data["timesteps"])
else:
x.append(data["timesteps"] * n_worker)
x_batch.append(x[n])
try:
t = data["expert_batch_sizes"]
del t
has_expert_batch_size = True
except KeyError:
has_expert_batch_size = False
del data
if has_expert_batch_size:
expert_batch_size = np.empty(shape=(n_eval, n_worker))
result = np.empty(shape=(n_eval, n_worker))
success = np.empty(shape=(n_eval, n_worker))
training_success = np.empty(shape=(n_eval, n_worker))
for k in range(n_worker):
path = os.path.join(log_dir, logs[k])
data = np.load(path)
if has_expert_batch_size and not legacy_env:
expert_batch_size[:, k] = data["expert_batch_sizes"]
if legacy_env:
if ep_length is not env.max_steps_per_episode + 1:
result_ = np.max(data["results"][:, :, :env.max_steps_per_episode + 1], axis=2)
success_ = np.sum(data["successes"][:, :, :env.max_steps_per_episode + 1], axis=2)
else:
result_ = np.max(data["results"], axis=2)
success_ = np.sum(data["successes"], axis=2)
result[:, k] = np.mean(result_, axis=1)
success_ = np.where(success_ > 0, 1., 0.)
success[:, k] = np.mean(success_, axis=1)
else:
if ep_length is not env.max_steps_per_episode + 1:
result_ = np.mean(data["results"][:, :, :env.max_steps_per_episode + 1], axis=2)
success_ = np.mean(data["successes"][:, :, :env.max_steps_per_episode + 1], axis=2)
else:
result_ = np.mean(data["results"], axis=2)
success_ = np.mean(data["successes"], axis=2)
result[:, k] = np.mean(result_, axis=1)
success[:, k] = np.mean(success_, axis=1)
if n_eval < len(data["training_successes"]):
step = int(len(data["training_successes"]) / n_eval)
training_success[:, k] = data["training_successes"][::step]
elif n_eval > len(data["training_successes"]):
training_successes = data["training_successes"]
training_successes = np.concatenate((training_successes, np.array([data["training_successes"][-1]] * (n_eval - len(data["training_successes"])))), axis=0)
training_success[:, k] = training_successes
else:
training_success[:, k] = data["training_successes"]
del data
if has_expert_batch_size:
expert_batch_size = np.median(expert_batch_size, axis=1)
expert_batch_size_all.append(expert_batch_size)
result_max_, result_min_ = np.percentile(result, [75, 25], axis=1)
result = np.median(result, axis=1)
success_max_, success_min_ = np.percentile(success, [75, 25], axis=1)
success = np.median(success, axis=1)
training_success = np.median(training_success, axis=1)
result_median.append(moving_average(result, window=window_size))
result_min.append(moving_average(result_min_, window=window_size))
result_max.append(moving_average(result_max_, window=window_size))
success_median.append(moving_average(success, window=window_size))
success_min.append(moving_average(success_min_, window=window_size))
success_max.append(moving_average(success_max_, window=window_size))
training_success_median.append(moving_average(training_success, window=window_size))
x[n] = x[n][len(x[n]) - len(success_median[n]):]
if not legacy_env:
expert_batch_size_all = np.array(expert_batch_size_all)
result_median = np.array(result_median)
result_min = np.array(result_min)
result_max = np.array(result_max)
success_median = np.array(success_median)
success_min = np.array(success_min)
success_max = np.array(success_max)
training_success_median = np.array(training_success_median)
with plt.style.context("ggplot"):
color = [(0., 0.4470, 0.7410, 1.), (0.8500, 0.3250, 0.0980, 1.), (0.4660, 0.6740, 0.1880, 1.)]
if args.env in ["shadow_hand_reach-v1", "shadow_hand_reach_goalenv-v1"]:
title = "ShadowHandReach-v1"
elif args.env in ["shadow_hand_block-v1", "shadow_hand_block_goalenv-v1"]:
title = "ShadowHandBlock-v1"
elif args.env in ["shadow_hand_reach-v0", "shadow_hand_reach_goalenv-v0"]:
title = "ShadowHandReach-v0"
if eval_episodes:
x_label = "Episodes"
else:
x_label = "Timesteps"
y_label = "Median Success Rate"
fig, axes = plt.subplots(nrows=1, ncols=1)
legend = []
for n in range(n_agents):
axes.plot(x[n], success_median[n], color=color[n])
axes.fill_between(x[n], success_min[n], success_max[n], color=color[n], alpha=0.2)
try:
legend.append(args.agent[n].split(":")[1])
except IndexError:
legend.append(args.algo[n])
axes.set_ylim([-0.05, 1.05])
axes.set_xlabel(x_label, color="k")
axes.set_ylabel(y_label, color="k")
if eval_episodes:
axes.ticklabel_format(style='sci', axis='x', scilimits=(3, 3))
#axes.set_xlim(0, 15000)
else:
axes.ticklabel_format(style='sci', axis='x', scilimits=(6, 6))
axes.tick_params(axis="both", colors="k")
axes.legend(legend, loc="lower right")
axes.set_title(title, color="k")
y_label = "Median Training Success Rate"
fig, axes = plt.subplots(nrows=1, ncols=1)
for n in range(n_agents):
axes.plot(x[n], training_success_median[n], color=color[n])
axes.set_xlabel(x_label, color="k")
axes.set_ylabel(y_label, color="k")
if eval_episodes:
axes.ticklabel_format(style='sci', axis='x', scilimits=(3, 3))
else:
axes.ticklabel_format(style='sci', axis='x', scilimits=(6, 6))
axes.tick_params(axis="both", colors="k")
axes.legend(legend, loc="lower right")
axes.set_title(title, color="k")
y_label = "Median Reward"
fig, axes = plt.subplots(nrows=1, ncols=1)
for n in range(n_agents):
axes.plot(x[n], result_median[n,], color=color[n])
axes.fill_between(x[n], result_min[n], result_max[n], color=color[n], alpha=0.2)
axes.set_xlabel(x_label, color="k")
axes.set_ylabel(y_label, color="k")
if eval_episodes:
axes.ticklabel_format(style='sci', axis='x', scilimits=(3, 3))
else:
axes.ticklabel_format(style='sci', axis='x', scilimits=(6, 6))
axes.tick_params(axis="both", colors="k")
axes.legend(legend, loc="lower right")
axes.set_title(title, color="k")
if has_expert_batch_size and not legacy_env:
y_label = "Expert Batch Size"
fig, axes = plt.subplots(nrows=1, ncols=1)
for n in range(n_agents):
axes.plot(x_batch[n], expert_batch_size_all[n], color=color[n])
axes.set_xlabel(x_label, color="k")
axes.set_ylabel(y_label, color="k")
if eval_episodes:
axes.ticklabel_format(style='sci', axis='x', scilimits=(3, 3))
else:
axes.ticklabel_format(style='sci', axis='x', scilimits=(6, 6))
axes.tick_params(axis="both", colors="k")
axes.legend(legend, loc="upper right")
axes.set_title(title, color="k")
plt.show()
| StarcoderdataPython |
1747248 | <reponame>asears/opt_einsum
# -*- coding: utf-8 -*-
import setuptools
import versioneer
short_description = "Optimizing numpys einsum function"
try:
with open("README.md", "r") as handle:
long_description = handle.read()
except OSError:
long_description = short_description
if __name__ == "__main__":
setuptools.setup(
name='opt_einsum',
version=versioneer.get_version(),
cmdclass=versioneer.get_cmdclass(),
description=short_description,
author='<NAME>',
author_email='<EMAIL>',
url="https://github.com/dgasmith/opt_einsum",
license='MIT',
packages=setuptools.find_packages(),
python_requires='>=3.5',
install_requires=[
'numpy>=1.7',
],
extras_require={
'docs': [
'sphinx==1.2.3', # autodoc was broken in 1.3.1
'sphinxcontrib-napoleon',
'sphinx_rtd_theme',
'numpydoc',
],
'tests': [
'pytest',
'pytest-cov',
'pytest-pep8',
],
},
tests_require=[
'pytest',
'pytest-cov',
'pytest-pep8',
],
classifiers=[
'Development Status :: 5 - Production/Stable',
'Intended Audience :: Science/Research',
'Intended Audience :: Developers',
'License :: OSI Approved',
'Programming Language :: Python',
'Programming Language :: Python :: 3',
'Programming Language :: Python :: 3.5',
'Programming Language :: Python :: 3.6',
'Programming Language :: Python :: 3.7',
'Programming Language :: Python :: 3.8',
],
zip_safe=True,
long_description=long_description,
long_description_content_type="text/markdown"
)
| StarcoderdataPython |
6463994 | <reponame>parnurzeal/openml-python
import collections
import io
import json
import os
import sys
import time
import warnings
import numpy as np
import sklearn.pipeline
import six
import xmltodict
import sklearn.metrics
import openml
import openml.utils
import openml._api_calls
from ..exceptions import PyOpenMLError
from .. import config
from openml.flows.sklearn_converter import _check_n_jobs
from openml.flows.flow import _copy_server_fields
from ..flows import sklearn_to_flow, get_flow, flow_exists, OpenMLFlow
from ..setups import setup_exists, initialize_model
from ..exceptions import OpenMLCacheException, OpenMLServerException
from ..tasks import OpenMLTask
from .run import OpenMLRun, _get_version_information
from .trace import OpenMLRunTrace
# _get_version_info, _get_dict and _create_setup_string are in run.py to avoid
# circular imports
RUNS_CACHE_DIR_NAME = 'runs'
def run_model_on_task(model, task, avoid_duplicate_runs=True, flow_tags=None,
seed=None, add_local_measures=True):
"""See ``run_flow_on_task for a documentation``."""
# TODO: At some point in the future do not allow for arguments in old order (order changed 6-2018).
if isinstance(model, OpenMLTask) and hasattr(task, 'fit') and hasattr(task, 'predict'):
warnings.warn("The old argument order (task, model) is deprecated and will not be supported in the future. "
"Please use the order (model, task).", DeprecationWarning)
task, model = model, task
flow = sklearn_to_flow(model)
return run_flow_on_task(task=task, flow=flow,
avoid_duplicate_runs=avoid_duplicate_runs,
flow_tags=flow_tags, seed=seed,
add_local_measures=add_local_measures)
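# Illustrative usage sketch (not part of the original module); the task id and
# the estimator below are placeholders:
#
#     import openml
#     from sklearn.tree import DecisionTreeClassifier
#
#     task = openml.tasks.get_task(31)                       # hypothetical task id
#     run = run_model_on_task(DecisionTreeClassifier(), task)
#     run.publish()                                          # requires server credentials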
def run_flow_on_task(flow, task, avoid_duplicate_runs=True, flow_tags=None,
seed=None, add_local_measures=True):
"""Run the model provided by the flow on the dataset defined by task.
Takes the flow and repeat information into account. In case a flow is not
yet published, it is published after executing the run (requires
internet connection).
Parameters
----------
    flow : OpenMLFlow
        Flow (wrapped model) to run. The underlying model must provide
        fit(X, y) and predict(X); all supervised scikit-learn estimators
        follow this definition [1]. For backwards compatibility this may be
        an OpenMLTask instead, if the second argument is an OpenMLFlow.
        [1](http://scikit-learn.org/stable/tutorial/statistical_inference/supervised_learning.html)
task : OpenMLTask
Task to perform. This may be an OpenMLFlow instead if the second argument is an OpenMLTask.
avoid_duplicate_runs : bool
        If set to True, the run raises an error if the setup/task combination
        is already present on the server. Works only if the flow is already
        published on the server; this check requires an internet connection.
flow_tags : list(str)
A list of tags that the flow should have at creation.
seed: int
Models that are not seeded will be automatically seeded by a RNG. The
RBG will be seeded with this seed.
add_local_measures : bool
Determines whether to calculate a set of evaluation measures locally,
to later verify server behaviour. Defaults to True
Returns
-------
run : OpenMLRun
Result of the run.
"""
if flow_tags is not None and not isinstance(flow_tags, list):
raise ValueError("flow_tags should be list")
# TODO: At some point in the future do not allow for arguments in old order (order changed 6-2018).
if isinstance(flow, OpenMLTask) and isinstance(task, OpenMLFlow):
# We want to allow either order of argument (to avoid confusion).
warnings.warn("The old argument order (Flow, model) is deprecated and will not be supported in the future. "
"Please use the order (model, Flow).", DeprecationWarning)
task, flow = flow, task
flow.model = _get_seeded_model(flow.model, seed=seed)
# skips the run if it already exists and the user opts for this in the config file.
# also, if the flow is not present on the server, the check is not needed.
flow_id = flow_exists(flow.name, flow.external_version)
if avoid_duplicate_runs and flow_id:
flow_from_server = get_flow(flow_id)
flow_from_server.model = flow.model
setup_id = setup_exists(flow_from_server)
ids = _run_exists(task.task_id, setup_id)
if ids:
raise PyOpenMLError("Run already exists in server. Run id(s): %s" % str(ids))
_copy_server_fields(flow_from_server, flow)
dataset = task.get_dataset()
if task.class_labels is None:
raise ValueError('The task has no class labels. This method currently '
'only works for tasks with class labels.')
run_environment = _get_version_information()
tags = ['openml-python', run_environment[1]]
# execute the run
res = _run_task_get_arffcontent(flow.model, task, add_local_measures=add_local_measures)
# in case the flow not exists, flow_id will be False (as returned by
# flow_exists). Also check whether there are no illegal flow.flow_id values
# (compared to result of openml.flows.flow_exists)
if flow_id is False:
if flow.flow_id is not None:
raise ValueError('flow.flow_id is not None, but the flow does not'
'exist on the server according to flow_exists')
_publish_flow_if_necessary(flow)
# if the flow was published successfully
# and has an id
if flow.flow_id is not None:
flow_id = flow.flow_id
data_content, trace, fold_evaluations, sample_evaluations = res
if not isinstance(flow.flow_id, int):
# This is the usual behaviour, where the flow object was initiated off
# line and requires some additional information (flow_id, input_id for
# each hyperparameter) to be usable by this library
server_flow = get_flow(flow_id)
openml.flows.flow._copy_server_fields(server_flow, flow)
openml.flows.assert_flows_equal(flow, server_flow,
ignore_parameter_values=True)
else:
# This can only happen when the function is called directly, and not
# through "run_model_on_task"
if flow.flow_id != flow_id:
# This should never happen, unless user made a flow-creation fault
raise ValueError(
"Result from API call flow_exists and flow.flow_id are not "
"same: '%s' vs '%s'" % (str(flow.flow_id), str(flow_id))
)
run = OpenMLRun(
task_id=task.task_id,
flow_id=flow.flow_id,
dataset_id=dataset.dataset_id,
model=flow.model,
flow_name=flow.name,
tags=tags,
trace=trace,
data_content=data_content,
)
# TODO: currently hard-coded sklearn assumption.
run.parameter_settings = openml.flows.obtain_parameter_values(flow)
# now we need to attach the detailed evaluations
if task.task_type_id == 3:
run.sample_evaluations = sample_evaluations
else:
run.fold_evaluations = fold_evaluations
config.logger.info('Executed Task %d with Flow id: %d' % (task.task_id, run.flow_id))
return run
def _publish_flow_if_necessary(flow):
# try publishing the flow if one has to assume it doesn't exist yet. It
# might fail because it already exists, then the flow is currently not
# reused
try:
flow.publish()
except OpenMLServerException as e:
if e.message == "flow already exists":
# TODO: JvR: the following lines of code can be replaced by
# a pass (after changing the unit tests) as run_flow_on_task does
# not longer rely on it
flow_id = openml.flows.flow_exists(flow.name,
flow.external_version)
server_flow = get_flow(flow_id)
openml.flows.flow._copy_server_fields(server_flow, flow)
openml.flows.assert_flows_equal(flow, server_flow,
ignore_parameter_values=True)
else:
raise e
def get_run_trace(run_id):
"""
Get the optimization trace object for a given run id.
Parameters
----------
run_id : int
Returns
-------
openml.runs.OpenMLTrace
"""
trace_xml = openml._api_calls._perform_api_call('run/trace/%d' % run_id)
run_trace = OpenMLRunTrace.trace_from_xml(trace_xml)
return run_trace
def initialize_model_from_run(run_id):
"""
Initialized a model based on a run_id (i.e., using the exact
same parameter settings)
Parameters
----------
run_id : int
The Openml run_id
Returns
-------
model : sklearn model
the scikitlearn model with all parameters initailized
"""
run = get_run(run_id)
return initialize_model(run.setup_id)
def initialize_model_from_trace(run_id, repeat, fold, iteration=None):
"""
Initialize a model based on the parameters that were set
by an optimization procedure (i.e., using the exact same
parameter settings)
Parameters
----------
run_id : int
The Openml run_id. Should contain a trace file,
otherwise a OpenMLServerException is raised
repeat: int
The repeat nr (column in trace file)
fold: int
The fold nr (column in trace file)
iteration: int
The iteration nr (column in trace file). If None, the
best (selected) iteration will be searched (slow),
according to the selection criteria implemented in
OpenMLRunTrace.get_selected_iteration
Returns
-------
model : sklearn model
the scikit-learn model with all parameters initailized
"""
run_trace = get_run_trace(run_id)
if iteration is None:
iteration = run_trace.get_selected_iteration(repeat, fold)
request = (repeat, fold, iteration)
if request not in run_trace.trace_iterations:
raise ValueError('Combination repeat, fold, iteration not availavle')
current = run_trace.trace_iterations[(repeat, fold, iteration)]
search_model = initialize_model_from_run(run_id)
if not isinstance(search_model, sklearn.model_selection._search.BaseSearchCV):
raise ValueError('Deserialized flow not instance of ' \
'sklearn.model_selection._search.BaseSearchCV')
base_estimator = search_model.estimator
base_estimator.set_params(**current.get_parameters())
return base_estimator
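# Illustrative usage sketch (assumes a run whose flow was a BaseSearchCV and
# which stored an optimization trace on the server; the run id is hypothetical):
#
#     model = initialize_model_from_trace(run_id=12345, repeat=0, fold=0)
#
# With iteration=None the best iteration according to the trace's selection
# criterion is looked up, and the returned object is the search's base
# estimator with that iteration's hyperparameter settings applied.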
def _run_exists(task_id, setup_id):
"""Checks whether a task/setup combination is already present on the server.
Parameters
----------
task_id: int
setup_id: int
Returns
-------
Set run ids for runs where flow setup_id was run on task_id. Empty
set if it wasn't run yet.
"""
if setup_id <= 0:
# openml setups are in range 1-inf
return set()
try:
result = list_runs(task=[task_id], setup=[setup_id])
if len(result) > 0:
return set(result.keys())
else:
return set()
except OpenMLServerException as exception:
# error code 512 implies no results. This means the run does not exist yet
assert(exception.code == 512)
return set()
def _get_seeded_model(model, seed=None):
"""Sets all the non-seeded components of a model with a seed.
Models that are already seeded will maintain the seed. In
this case, only integer seeds are allowed (An exception
is thrown when a RandomState was used as seed)
Parameters
----------
model : sklearn model
The model to be seeded
seed : int
The seed to initialize the RandomState with. Unseeded subcomponents
will be seeded with a random number from the RandomState.
Returns
-------
model : sklearn model
a version of the model where all (sub)components have
a seed
"""
def _seed_current_object(current_value):
if isinstance(current_value, int): # acceptable behaviour
return False
elif isinstance(current_value, np.random.RandomState):
raise ValueError(
'Models initialized with a RandomState object are not supported. Please seed with an integer. ')
elif current_value is not None:
raise ValueError(
'Models should be seeded with int or None (this should never happen). ')
else:
return True
rs = np.random.RandomState(seed)
model_params = model.get_params()
random_states = {}
for param_name in sorted(model_params):
if 'random_state' in param_name:
currentValue = model_params[param_name]
# important to draw the value at this point (and not in the if statement)
# this way we guarantee that if a different set of subflows is seeded,
# the same number of the random generator is used
newValue = rs.randint(0, 2**16)
if _seed_current_object(currentValue):
random_states[param_name] = newValue
# Also seed CV objects!
elif isinstance(model_params[param_name],
sklearn.model_selection.BaseCrossValidator):
if not hasattr(model_params[param_name], 'random_state'):
continue
currentValue = model_params[param_name].random_state
newValue = rs.randint(0, 2 ** 16)
if _seed_current_object(currentValue):
model_params[param_name].random_state = newValue
model.set_params(**random_states)
return model
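# Illustrative sketch of the seeding behaviour (the estimator choice is arbitrary):
#
#     from sklearn.ensemble import RandomForestClassifier
#     clf = _get_seeded_model(RandomForestClassifier(), seed=42)
#     assert isinstance(clf.get_params()['random_state'], int)
#
# Components that already carry an integer random_state keep it, while a model
# seeded with a numpy RandomState object raises ValueError, as implemented above.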
def _prediction_to_row(rep_no, fold_no, sample_no, row_id, correct_label,
predicted_label, predicted_probabilities, class_labels,
model_classes_mapping):
"""Util function that turns probability estimates of a classifier for a given
instance into the right arff format to upload to openml.
Parameters
----------
rep_no : int
The repeat of the experiment (0-based; in case of 1 time CV, always 0)
fold_no : int
The fold nr of the experiment (0-based; in case of holdout, always 0)
sample_no : int
In case of learning curves, the index of the subsample (0-based; in case of no learning curve, always 0)
row_id : int
row id in the initial dataset
correct_label : str
original label of the instance
predicted_label : str
the label that was predicted
predicted_probabilities : array (size=num_classes)
probabilities per class
class_labels : array (size=num_classes)
model_classes_mapping : list
A list of classes the model produced.
Obtained by BaseEstimator.classes_
Returns
-------
arff_line : list
representation of the current prediction in OpenML format
"""
if not isinstance(rep_no, (int, np.integer)): raise ValueError('rep_no should be int')
if not isinstance(fold_no, (int, np.integer)): raise ValueError('fold_no should be int')
if not isinstance(sample_no, (int, np.integer)): raise ValueError('sample_no should be int')
if not isinstance(row_id, (int, np.integer)): raise ValueError('row_id should be int')
if not len(predicted_probabilities) == len(model_classes_mapping):
raise ValueError('len(predicted_probabilities) != len(class_labels)')
arff_line = [rep_no, fold_no, sample_no, row_id]
for class_label_idx in range(len(class_labels)):
if class_label_idx in model_classes_mapping:
index = np.where(model_classes_mapping == class_label_idx)[0][0] # TODO: WHY IS THIS 2D???
arff_line.append(predicted_probabilities[index])
else:
arff_line.append(0.0)
arff_line.append(class_labels[predicted_label])
arff_line.append(correct_label)
return arff_line
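# Illustrative example (hypothetical values): for class_labels = ['A', 'B', 'C'],
# model_classes_mapping = np.array([0, 1, 2]), predicted probabilities
# [0.1, 0.7, 0.2], predicted_label = 1 and correct_label = 'C', the call
#
#     _prediction_to_row(0, 2, 0, 42, 'C', 1, [0.1, 0.7, 0.2],
#                        ['A', 'B', 'C'], np.array([0, 1, 2]))
#
# returns [0, 2, 0, 42, 0.1, 0.7, 0.2, 'B', 'C'].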
def _run_task_get_arffcontent(model, task, add_local_measures):
def _prediction_to_probabilities(y, model_classes):
# y: list or numpy array of predictions
# model_classes: sklearn classifier mapping from original array id to prediction index id
if not isinstance(model_classes, list):
raise ValueError('please convert model classes to list prior to calling this fn')
result = np.zeros((len(y), len(model_classes)), dtype=np.float32)
for obs, prediction_idx in enumerate(y):
array_idx = model_classes.index(prediction_idx)
result[obs][array_idx] = 1.0
return result
arff_datacontent = []
arff_tracecontent = []
# stores fold-based evaluation measures. In case of a sample based task,
# this information is multiple times overwritten, but due to the ordering
# of tne loops, eventually it contains the information based on the full
# dataset size
user_defined_measures_per_fold = collections.OrderedDict()
# stores sample-based evaluation measures (sublevel of fold-based)
# will also be filled on a non sample-based task, but the information
# is the same as the fold-based measures, and disregarded in that case
user_defined_measures_per_sample = collections.OrderedDict()
# sys.version_info returns a tuple, the following line compares the entry of tuples
# https://docs.python.org/3.6/reference/expressions.html#value-comparisons
can_measure_runtime = sys.version_info[:2] >= (3, 3) and _check_n_jobs(model)
# TODO use different iterator to only provide a single iterator (less
# methods, less maintenance, less confusion)
num_reps, num_folds, num_samples = task.get_split_dimensions()
for rep_no in range(num_reps):
for fold_no in range(num_folds):
for sample_no in range(num_samples):
model_fold = sklearn.base.clone(model, safe=True)
res = _run_model_on_fold(model_fold, task, rep_no, fold_no, sample_no,
can_measure_runtime=can_measure_runtime,
add_local_measures=add_local_measures)
arff_datacontent_fold, arff_tracecontent_fold, user_defined_measures_fold, model_fold = res
arff_datacontent.extend(arff_datacontent_fold)
arff_tracecontent.extend(arff_tracecontent_fold)
for measure in user_defined_measures_fold:
if measure not in user_defined_measures_per_fold:
user_defined_measures_per_fold[measure] = collections.OrderedDict()
if rep_no not in user_defined_measures_per_fold[measure]:
user_defined_measures_per_fold[measure][rep_no] = collections.OrderedDict()
if measure not in user_defined_measures_per_sample:
user_defined_measures_per_sample[measure] = collections.OrderedDict()
if rep_no not in user_defined_measures_per_sample[measure]:
user_defined_measures_per_sample[measure][rep_no] = collections.OrderedDict()
if fold_no not in user_defined_measures_per_sample[measure][rep_no]:
user_defined_measures_per_sample[measure][rep_no][fold_no] = collections.OrderedDict()
user_defined_measures_per_fold[measure][rep_no][fold_no] = user_defined_measures_fold[measure]
user_defined_measures_per_sample[measure][rep_no][fold_no][sample_no] = user_defined_measures_fold[measure]
# Note that we need to use a fitted model (i.e., model_fold, and not model) here,
# to ensure it contains the hyperparameter data (in cv_results_)
if isinstance(model_fold, sklearn.model_selection._search.BaseSearchCV):
# arff_tracecontent is already set
arff_trace_attributes = _extract_arfftrace_attributes(model_fold)
trace = OpenMLRunTrace.generate(
arff_trace_attributes,
arff_tracecontent,
)
else:
trace = None
return (
arff_datacontent,
trace,
user_defined_measures_per_fold,
user_defined_measures_per_sample,
)
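# Illustrative sketch (editor's addition): the two measure dictionaries returned above are
# nested OrderedDicts keyed measure -> repeat -> fold (-> sample). For a task with one
# repeat, two folds and no subsampling they might be accessed as (values are made up):
#     user_defined_measures_per_fold['predictive_accuracy'][0][1] == 0.93
#     user_defined_measures_per_sample['predictive_accuracy'][0][1][0] == 0.93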
def _run_model_on_fold(model, task, rep_no, fold_no, sample_no, can_measure_runtime, add_local_measures):
"""Internal function that executes a model on a fold (and possibly
subsample) of the dataset. It returns the data that is necessary
to construct the OpenML Run object (potentially over more than
one fold). It is used by _run_task_get_arffcontent. Do not use this
function unless you know what you are doing.
Parameters
----------
model : sklearn model
The UNTRAINED model to run
task : OpenMLTask
The task to run the model on
rep_no : int
The repeat of the experiment (0-based; in case of 1 time CV,
always 0)
fold_no : int
The fold nr of the experiment (0-based; in case of holdout,
always 0)
sample_no : int
In case of learning curves, the index of the subsample (0-based;
in case of no learning curve, always 0)
can_measure_runtime : bool
Whether we are allowed to measure runtime (requires: single node
computation and Python >= 3.3)
add_local_measures : bool
Determines whether to calculate a set of measures (i.e., predictive
accuracy) locally, to later verify server behaviour
Returns
-------
arff_datacontent : List[List]
Arff representation (list of lists) of the predictions that were
generated by this fold (for putting in predictions.arff)
arff_tracecontent : List[List]
Arff representation (list of lists) of the trace data that was
generated by this fold (for putting in trace.arff)
user_defined_measures : Dict[str, float]
User defined measures that were generated on this fold
model : sklearn model
The model trained on this fold
"""
def _prediction_to_probabilities(y, model_classes):
# y: list or numpy array of predictions
# model_classes: sklearn classifier mapping from original array id to prediction index id
if not isinstance(model_classes, list):
raise ValueError('please convert model classes to list prior to calling this fn')
result = np.zeros((len(y), len(model_classes)), dtype=np.float32)
for obs, prediction_idx in enumerate(y):
array_idx = model_classes.index(prediction_idx)
result[obs][array_idx] = 1.0
return result
# TODO: if possible, give a warning if model is already fitted (acceptable in case of custom experimentation,
# but not desirable if we want to upload to OpenML).
train_indices, test_indices = task.get_train_test_split_indices(repeat=rep_no,
fold=fold_no,
sample=sample_no)
X, Y = task.get_X_and_y()
trainX = X[train_indices]
trainY = Y[train_indices]
testX = X[test_indices]
testY = Y[test_indices]
user_defined_measures = collections.OrderedDict()
try:
# for measuring runtime. Only available since Python 3.3
if can_measure_runtime:
modelfit_starttime = time.process_time()
model.fit(trainX, trainY)
if can_measure_runtime:
modelfit_duration = (time.process_time() - modelfit_starttime) * 1000
user_defined_measures['usercpu_time_millis_training'] = modelfit_duration
except AttributeError as e:
# typically happens when training a regressor on classification task
raise PyOpenMLError(str(e))
# extract trace, if applicable
arff_tracecontent = []
if isinstance(model, sklearn.model_selection._search.BaseSearchCV):
arff_tracecontent.extend(_extract_arfftrace(model, rep_no, fold_no))
# search for model classes_ (might differ depending on modeltype)
# first, pipelines are a special case (these don't have a classes_
# object, but rather borrow it from the last step. We do this manually,
# because of the BaseSearch check)
if isinstance(model, sklearn.pipeline.Pipeline):
used_estimator = model.steps[-1][-1]
else:
used_estimator = model
if isinstance(used_estimator, sklearn.model_selection._search.BaseSearchCV):
model_classes = used_estimator.best_estimator_.classes_
else:
model_classes = used_estimator.classes_
if can_measure_runtime:
modelpredict_starttime = time.process_time()
PredY = model.predict(testX)
try:
ProbaY = model.predict_proba(testX)
except AttributeError:
ProbaY = _prediction_to_probabilities(PredY, list(model_classes))
if can_measure_runtime:
modelpredict_duration = (time.process_time() - modelpredict_starttime) * 1000
user_defined_measures['usercpu_time_millis_testing'] = modelpredict_duration
user_defined_measures['usercpu_time_millis'] = modelfit_duration + modelpredict_duration
if ProbaY.shape[1] != len(task.class_labels):
warnings.warn("Repeat %d Fold %d: estimator only predicted for %d/%d classes!" % (rep_no, fold_no, ProbaY.shape[1], len(task.class_labels)))
# add client-side calculated metrics. These might be used on the server as consistency check
def _calculate_local_measure(sklearn_fn, openml_name):
user_defined_measures[openml_name] = sklearn_fn(testY, PredY)
if add_local_measures:
_calculate_local_measure(sklearn.metrics.accuracy_score, 'predictive_accuracy')
arff_datacontent = []
for i in range(0, len(test_indices)):
arff_line = _prediction_to_row(rep_no, fold_no, sample_no,
test_indices[i], task.class_labels[testY[i]],
PredY[i], ProbaY[i], task.class_labels, model_classes)
arff_datacontent.append(arff_line)
return arff_datacontent, arff_tracecontent, user_defined_measures, model
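# Illustrative usage sketch (editor's addition, not part of the original module). The task id
# and classifier are hypothetical; in normal use this function is only invoked through
# _run_task_get_arffcontent:
#
#     import openml
#     import sklearn.tree
#     task = openml.tasks.get_task(31)  # any supervised classification task
#     clf = sklearn.tree.DecisionTreeClassifier()
#     data, trace_rows, measures, fitted = _run_model_on_fold(
#         clf, task, rep_no=0, fold_no=0, sample_no=0,
#         can_measure_runtime=False, add_local_measures=True)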
def _extract_arfftrace(model, rep_no, fold_no):
if not isinstance(model, sklearn.model_selection._search.BaseSearchCV):
raise ValueError('model should be instance of'\
' sklearn.model_selection._search.BaseSearchCV')
if not hasattr(model, 'cv_results_'):
raise ValueError('model should contain `cv_results_`')
arff_tracecontent = []
for itt_no in range(0, len(model.cv_results_['mean_test_score'])):
# we use the string values 'true' and 'false' here, as this is the format defined by the OpenML server
selected = 'false'
if itt_no == model.best_index_:
selected = 'true'
test_score = model.cv_results_['mean_test_score'][itt_no]
arff_line = [rep_no, fold_no, itt_no, test_score, selected]
for key in model.cv_results_:
if key.startswith('param_'):
value = model.cv_results_[key][itt_no]
if value is not np.ma.masked:
serialized_value = json.dumps(value)
else:
serialized_value = np.nan
arff_line.append(serialized_value)
arff_tracecontent.append(arff_line)
return arff_tracecontent
def _extract_arfftrace_attributes(model):
if not isinstance(model, sklearn.model_selection._search.BaseSearchCV):
raise ValueError('model should be instance of'\
' sklearn.model_selection._search.BaseSearchCV')
if not hasattr(model, 'cv_results_'):
raise ValueError('model should contain `cv_results_`')
# attributes that will be in trace arff, regardless of the model
trace_attributes = [('repeat', 'NUMERIC'),
('fold', 'NUMERIC'),
('iteration', 'NUMERIC'),
('evaluation', 'NUMERIC'),
('selected', ['true', 'false'])]
# model dependent attributes for trace arff
for key in model.cv_results_:
if key.startswith('param_'):
# supported types should include all basic types, including bool, int, float
supported_basic_types = (bool, int, float, six.string_types)
for param_value in model.cv_results_[key]:
if isinstance(param_value, supported_basic_types) or param_value is None or param_value is np.ma.masked:
# basic string values
type = 'STRING'
elif isinstance(param_value, list) and all(isinstance(i, int) for i in param_value):
# list of integers
type = 'STRING'
else:
raise TypeError('Unsupported param type in param grid: %s' %key)
# we renamed the attribute param to parameter, as this is a required
# OpenML convention - this also guards against name collisions
# with the required trace attributes
attribute = (openml.runs.trace.PREFIX + key[6:], type)
trace_attributes.append(attribute)
return trace_attributes
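# Illustrative sketch (editor's addition): for a hypothetical search over
# {'max_depth': [3, None]}, the list returned above would resemble
#     [('repeat', 'NUMERIC'), ('fold', 'NUMERIC'), ('iteration', 'NUMERIC'),
#      ('evaluation', 'NUMERIC'), ('selected', ['true', 'false']),
#      ('parameter_max_depth', 'STRING')]
# assuming openml.runs.trace.PREFIX is 'parameter_'; the actual prefix is defined elsewhere
# in the package.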
def get_runs(run_ids):
"""Gets all runs in run_ids list.
Parameters
----------
run_ids : list of ints
Returns
-------
runs : list of OpenMLRun
List of runs corresponding to IDs, fetched from the server.
"""
runs = []
for run_id in run_ids:
runs.append(get_run(run_id))
return runs
def get_run(run_id):
"""Gets run corresponding to run_id.
Parameters
----------
run_id : int
Returns
-------
run : OpenMLRun
Run corresponding to ID, fetched from the server.
"""
run_dir = openml.utils._create_cache_directory_for_id(RUNS_CACHE_DIR_NAME, run_id)
run_file = os.path.join(run_dir, "description.xml")
if not os.path.exists(run_dir):
os.makedirs(run_dir)
try:
return _get_cached_run(run_id)
except (OpenMLCacheException):
run_xml = openml._api_calls._perform_api_call("run/%d" % run_id)
with io.open(run_file, "w", encoding='utf8') as fh:
fh.write(run_xml)
run = _create_run_from_xml(run_xml)
return run
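# Illustrative usage sketch (editor's addition); the run ids are hypothetical:
#
#     import openml
#     run = openml.runs.get_run(2275)  # single OpenMLRun, fetched from the server or cache
#     runs = openml.runs.get_runs([2275, 2276])  # list of OpenMLRun objects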
def _create_run_from_xml(xml, from_server=True):
"""Create a run object from xml returned from server.
Parameters
----------
run_xml : string
XML describing a run.
Returns
-------
run : OpenMLRun
New run object representing run_xml.
"""
def obtain_field(xml_obj, fieldname, from_server, cast=None):
# this function can be used to check whether a field is present in an object.
# if it is not present, either returns None or throws an error (this is
# usually done if the xml comes from the server)
if fieldname in xml_obj:
if cast is not None:
return cast(xml_obj[fieldname])
return xml_obj[fieldname]
elif not from_server:
return None
else:
raise AttributeError('Run XML does not contain required (server) field: ', fieldname)
run = xmltodict.parse(xml, force_list=['oml:file', 'oml:evaluation', 'oml:parameter_setting'])["oml:run"]
run_id = obtain_field(run, 'oml:run_id', from_server, cast=int)
uploader = obtain_field(run, 'oml:uploader', from_server, cast=int)
uploader_name = obtain_field(run, 'oml:uploader_name', from_server)
task_id = int(run['oml:task_id'])
task_type = obtain_field(run, 'oml:task_type', from_server)
# even with the server requirement this field may be empty.
if 'oml:task_evaluation_measure' in run:
task_evaluation_measure = run['oml:task_evaluation_measure']
else:
task_evaluation_measure = None
flow_id = int(run['oml:flow_id'])
flow_name = obtain_field(run, 'oml:flow_name', from_server)
setup_id = obtain_field(run, 'oml:setup_id', from_server, cast=int)
setup_string = obtain_field(run, 'oml:setup_string', from_server)
parameters = []
if 'oml:parameter_setting' in run:
obtained_parameter_settings = run['oml:parameter_setting']
for parameter_dict in obtained_parameter_settings:
current_parameter = collections.OrderedDict()
current_parameter['oml:name'] = parameter_dict['oml:name']
current_parameter['oml:value'] = parameter_dict['oml:value']
if 'oml:component' in parameter_dict:
current_parameter['oml:component'] = parameter_dict['oml:component']
parameters.append(current_parameter)
if 'oml:input_data' in run:
dataset_id = int(run['oml:input_data']['oml:dataset']['oml:did'])
elif not from_server:
dataset_id = None
files = collections.OrderedDict()
evaluations = collections.OrderedDict()
fold_evaluations = collections.OrderedDict()
sample_evaluations = collections.OrderedDict()
if 'oml:output_data' not in run:
if from_server:
raise ValueError('Run does not contain output_data (OpenML server error?)')
else:
output_data = run['oml:output_data']
if 'oml:file' in output_data:
# multiple files, the normal case
for file_dict in output_data['oml:file']:
files[file_dict['oml:name']] = int(file_dict['oml:file_id'])
if 'oml:evaluation' in output_data:
# in normal cases there should be evaluations, but in case there
# was an error these could be absent
for evaluation_dict in output_data['oml:evaluation']:
key = evaluation_dict['oml:name']
if 'oml:value' in evaluation_dict:
value = float(evaluation_dict['oml:value'])
elif 'oml:array_data' in evaluation_dict:
value = evaluation_dict['oml:array_data']
else:
raise ValueError('Could not find keys "value" or "array_data" '
'in %s' % str(evaluation_dict.keys()))
if '@repeat' in evaluation_dict and '@fold' in evaluation_dict and '@sample' in evaluation_dict:
repeat = int(evaluation_dict['@repeat'])
fold = int(evaluation_dict['@fold'])
sample = int(evaluation_dict['@sample'])
if key not in sample_evaluations:
sample_evaluations[key] = collections.OrderedDict()
if repeat not in sample_evaluations[key]:
sample_evaluations[key][repeat] = collections.OrderedDict()
if fold not in sample_evaluations[key][repeat]:
sample_evaluations[key][repeat][fold] = collections.OrderedDict()
sample_evaluations[key][repeat][fold][sample] = value
elif '@repeat' in evaluation_dict and '@fold' in evaluation_dict:
repeat = int(evaluation_dict['@repeat'])
fold = int(evaluation_dict['@fold'])
if key not in fold_evaluations:
fold_evaluations[key] = collections.OrderedDict()
if repeat not in fold_evaluations[key]:
fold_evaluations[key][repeat] = collections.OrderedDict()
fold_evaluations[key][repeat][fold] = value
else:
evaluations[key] = value
if 'description' not in files and from_server is True:
raise ValueError('No description file for run %d in run '
'description XML' % run_id)
if 'predictions' not in files and from_server is True:
task = openml.tasks.get_task(task_id)
if task.task_type_id == 8:
raise NotImplementedError(
'Subgroup discovery tasks are not yet supported.'
)
else:
# JvR: actually, I am not sure whether this error should be raised.
# a run can exist without predictions. But for now let's keep it
# Matthias: yes, it should stay as long as we do not really handle
# this stuff
raise ValueError('No prediction files for run %d in run '
'description XML' % run_id)
tags = openml.utils.extract_xml_tags('oml:tag', run)
return OpenMLRun(run_id=run_id, uploader=uploader,
uploader_name=uploader_name, task_id=task_id,
task_type=task_type,
task_evaluation_measure=task_evaluation_measure,
flow_id=flow_id, flow_name=flow_name,
setup_id=setup_id, setup_string=setup_string,
parameter_settings=parameters,
dataset_id=dataset_id, output_files=files,
evaluations=evaluations,
fold_evaluations=fold_evaluations,
sample_evaluations=sample_evaluations,
tags=tags)
def _get_cached_run(run_id):
"""Load a run from the cache."""
run_cache_dir = openml.utils._create_cache_directory_for_id(
RUNS_CACHE_DIR_NAME, run_id,
)
try:
run_file = os.path.join(run_cache_dir, "description.xml")
with io.open(run_file, encoding='utf8') as fh:
run = _create_run_from_xml(xml=fh.read())
return run
except (OSError, IOError):
raise OpenMLCacheException("Run file for run id %d not "
"cached" % run_id)
def list_runs(offset=None, size=None, id=None, task=None, setup=None,
flow=None, uploader=None, tag=None, display_errors=False, **kwargs):
"""
List all runs matching all of the given filters.
(Supports large amount of results)
Parameters
----------
offset : int, optional
the number of runs to skip, starting from the first
size : int, optional
the maximum number of runs to show
id : list, optional
task : list, optional
setup: list, optional
flow : list, optional
uploader : list, optional
tag : str, optional
display_errors : bool, optional (default=None)
Whether to list runs which have an error (for example a missing
prediction file).
kwargs: dict, optional
Legal filter operators: task_type.
Returns
-------
dict
List of found runs.
"""
return openml.utils._list_all(_list_runs, offset=offset, size=size, id=id, task=task, setup=setup,
flow=flow, uploader=uploader, tag=tag, display_errors=display_errors, **kwargs)
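# Illustrative usage sketch (editor's addition); all filter values are hypothetical:
#
#     import openml
#     found = openml.runs.list_runs(task=[31], uploader=[1], size=100)
#     for run_id, summary in found.items():
#         print(run_id, summary['flow_id'], summary['setup_id'])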
def _list_runs(id=None, task=None, setup=None,
flow=None, uploader=None, display_errors=False, **kwargs):
"""
Perform API call `/run/list/{filters}'
<https://www.openml.org/api_docs/#!/run/get_run_list_filters>`
Parameters
----------
The arguments that are lists are separated from the single-value
ones, which are put into the kwargs.
display_errors is also separated from the kwargs since it has a
default value.
id : list, optional
task : list, optional
setup: list, optional
flow : list, optional
uploader : list, optional
display_errors : bool, optional (default=None)
Whether to list runs which have an error (for example a missing
prediction file).
kwargs: dict, optional
Legal filter operators: task_type.
Returns
-------
dict
List of found runs.
"""
api_call = "run/list"
if kwargs is not None:
for operator, value in kwargs.items():
api_call += "/%s/%s" % (operator, value)
if id is not None:
api_call += "/run/%s" % ','.join([str(int(i)) for i in id])
if task is not None:
api_call += "/task/%s" % ','.join([str(int(i)) for i in task])
if setup is not None:
api_call += "/setup/%s" % ','.join([str(int(i)) for i in setup])
if flow is not None:
api_call += "/flow/%s" % ','.join([str(int(i)) for i in flow])
if uploader is not None:
api_call += "/uploader/%s" % ','.join([str(int(i)) for i in uploader])
if display_errors:
api_call += "/show_errors/true"
return __list_runs(api_call)
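# Illustrative sketch (editor's addition): with task=[1, 2], uploader=[16] and
# display_errors=True, the api_call assembled above would be
#     'run/list/task/1,2/uploader/16/show_errors/true'
# plus any extra '/<operator>/<value>' pairs contributed by kwargs.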
def __list_runs(api_call):
"""Helper function to parse API calls which are lists of runs"""
xml_string = openml._api_calls._perform_api_call(api_call)
runs_dict = xmltodict.parse(xml_string, force_list=('oml:run',))
# Minimalistic check if the XML is useful
if 'oml:runs' not in runs_dict:
raise ValueError('Error in return XML, does not contain "oml:runs": %s'
% str(runs_dict))
elif '@xmlns:oml' not in runs_dict['oml:runs']:
raise ValueError('Error in return XML, does not contain '
'"oml:runs"/@xmlns:oml: %s'
% str(runs_dict))
elif runs_dict['oml:runs']['@xmlns:oml'] != 'http://openml.org/openml':
raise ValueError('Error in return XML, value of '
'"oml:runs"/@xmlns:oml is not '
'"http://openml.org/openml": %s'
% str(runs_dict))
assert type(runs_dict['oml:runs']['oml:run']) == list, \
type(runs_dict['oml:runs'])
runs = collections.OrderedDict()
for run_ in runs_dict['oml:runs']['oml:run']:
run_id = int(run_['oml:run_id'])
run = {'run_id': run_id,
'task_id': int(run_['oml:task_id']),
'setup_id': int(run_['oml:setup_id']),
'flow_id': int(run_['oml:flow_id']),
'uploader': int(run_['oml:uploader'])}
runs[run_id] = run
return runs
| StarcoderdataPython |
1944657 | <reponame>elyashiv3839/compare_objects<gh_stars>0
from ..compare_objects.CompareObjectsWithInfo import compare_objects_with_info
# ######################### WITH INFORMATION ######################### #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NATIVE ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
def test_two_strings_val_compare_with_info():
assert compare_objects_with_info("hello", "hello")[0] is True
def test_two_int_val_compare_with_info():
assert compare_objects_with_info(1, 1)[0] is True
def test_two_bool_val_compare_with_info():
assert compare_objects_with_info(True, True)[0] is True
def test_two_float_val_compare_with_info():
assert compare_objects_with_info(3.33, 3.33)[0] is True
def test_two_strings_val_not_compare_with_info():
assert compare_objects_with_info("hello", "hell")[0] is False
def test_two_int_val_not_compare_with_info():
assert compare_objects_with_info(1, 2)[0] is False
def test_two_bool_val_not_compare_with_info():
assert compare_objects_with_info(True, False)[0] is False
def test_two_float_val_not_compare_with_info():
assert compare_objects_with_info(3.33, 3.32)[0] is False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ CONTAINERS ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ HIGH LEVEL ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# *********** list object ************* #
def test_two_empty_lists_with_info():
assert compare_objects_with_info([], [])[0] is True
def test_two_list_val_compare_with_info():
assert compare_objects_with_info([1, 2, 3], [1, 2, 3])[0] is True
def test_two_list_val_not_compare_with_info():
assert compare_objects_with_info([1, 2, 3], [1, 2, 4])[0] is False
def test_two_list_without_same_length_with_info():
assert compare_objects_with_info([1], [1, 2])[0] is False
# *********** tuple object ************* #
def test_two_empty_tuples_with_info():
assert compare_objects_with_info((), ())[0] is True
def test_two_tuple_val_compare_with_info():
assert compare_objects_with_info((1, 2, 3), (1, 2, 3))[0] is True
def test_two_tuple_val_not_compare_with_info():
assert compare_objects_with_info((1, 2, 3), (1, 2, 4))[0] is False
def test_two_tuple_without_same_length_with_info():
assert compare_objects_with_info((1,), (1, 2))[0] is False
# *********** set object ************* #
def test_two_empty_sets_with_info():
# use set() here: the literal {} denotes an empty dict, not an empty set
assert compare_objects_with_info(set(), set())[0] is True
def test_two_set_val_compare_with_info():
assert compare_objects_with_info({1, 2, 3}, {1, 2, 3})[0] is True
def test_two_set_val_not_compare_with_info():
assert compare_objects_with_info({1, 2, 3}, {1, 2, 4})[0] is False
def test_two_set_without_same_length_with_info():
assert compare_objects_with_info({1}, {1, 2})[0] is False
# *********** dict object ************* #
def test_two_empty_dicts_with_info():
assert compare_objects_with_info({}, {})[0] is True
def test_two_dict_val_compare_with_info():
assert compare_objects_with_info({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 2, 'c': 3})[0] is True
def test_two_dict_val_not_compare_val_with_info():
assert compare_objects_with_info({'a': 1, 'b': 5, 'c': 3}, {'a': 1, 'b': 2, 'c': 3})[0] is False
def test_two_dict_val_not_compare_key_with_info():
assert compare_objects_with_info({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'r': 2, 'c': 3})[0] is False
def test_two_dict_without_same_length_with_info():
assert compare_objects_with_info({'a': 1, 'b': 2, 'c': 3}, {'a': 1, 'b': 2})[0] is False
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ NESTED ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ #
# ~~~~~~~~~~~~~ EQUAL ALL COMBINATIONS~~~~~~~~~~~~~~~ #
def test_equal_list_in_tuple_with_info():
assert compare_objects_with_info(([1, 2], [3, 4]), ([1, 2], [3, 4]))[0] is True
def test_equal_tuple_in_list_with_info():
assert compare_objects_with_info([(1, 2), (3, 4)], [(1, 2), (3, 4)])[0] is True
def test_equal_set_in_list_with_info():
assert compare_objects_with_info([{1, 2}, {3, 4}], [{1, 2}, {3, 4}])[0] is True
def test_equal_list_in_dict_with_info():
assert compare_objects_with_info({"a": [1, 2], "b": [3, 4]}, {'a': [1, 2], "b": [3, 4]})[0] is True
def test_equal_dict_in_list_with_info():
assert compare_objects_with_info([{"a": [1, 2], "b": [3, 4]}, 5], [{'a': [1, 2], "b": [3, 4]}, 5])[0] is True
def test_equal_set_in_tuple_with_info():
assert compare_objects_with_info(({1, 2}, {3, 4}), ({1, 2}, {3, 4}))[0] is True
def test_equal_dict_in_tuple_with_info():
assert compare_objects_with_info(({'a': 1, 'b': 2}, {'a': 3, 'b': 4}), ({'a': 1, 'b': 2}, {'a': 3, 'b': 4}))[0] is True
def test_equal_tuple_in_dict_with_info():
assert compare_objects_with_info({'a': (5, 6), 'b': 2}, {'a': (5, 6), 'b': 2})[0] is True
def test_equal_list_in_list_with_info():
assert compare_objects_with_info([[1, 2], [3, 4]], [[1, 2], [3, 4]])[0] is True
def test_equal_tuple_in_tuple_with_info():
assert compare_objects_with_info(((1, 2), (3, 4)), ((1, 2), (3, 4)))[0] is True
def test_equal_dict_in_dict_with_info():
assert compare_objects_with_info({'a': {"in": 4}, 'b': 2}, {'a': {"in": 4}, 'b': 2})[0] is True
# ~~~~~~~~~~~~~ NOT EQUAL ALL COMBINATIONS~~~~~~~~~~~~~~~ #
def test_not_equal_list_in_tuple_with_info():
assert compare_objects_with_info(([1, 2], [3, 4]), ([1, 2], [3, 5]))[0] is False
def test_not_equal_tuple_in_list_with_info():
assert compare_objects_with_info([(1, 2), (3, 4)], [(1, 2), (3, 5)])[0] is False
def test_not_equal_set_in_list_with_info():
assert compare_objects_with_info([{1, 2}, {3, 4}], [{1, 2}, {3, 5}])[0] is False
def test_not_equal_list_in_dict_with_info():
assert compare_objects_with_info({"a": [1, 2], "b": [3, 4]}, {'a': [1, 2], "b": [3, 5]})[0] is False
def test_not_equal_dict_in_list_with_info():
assert compare_objects_with_info([{"a": [1, 2], "b": [3, 4]}, 5], [{'a': [1, 2], "b": [3, 4]}, 6])[0] is False
def test_not_equal_set_in_tuple_with_info():
assert compare_objects_with_info(({1, 2}, {3, 4}), ({1, 2}, {3, 5}))[0] is False
def test_not_equal_dict_in_tuple_with_info():
assert compare_objects_with_info(({'a': 1, 'b': 2}, {'a': 3, 'b': 4}), ({'a': 1, 'b': 2}, {'a': 3, 'b': 5}))[0] is False
def test_not_equal_tuple_in_dict_with_info():
assert compare_objects_with_info({'a': (5, 6), 'b': 2}, {'a': (5, 6), 'b': 5})[0] is False
def test_not_equal_list_in_list_with_info():
assert compare_objects_with_info([[1, 2], [3, 4]], [[1, 2], [3, 5]])[0] is False
def test_not_equal_tuple_in_tuple_with_info():
assert compare_objects_with_info(((1, 2), (3, 4)), ((1, 2), (3, 5)))[0] is False
def test_not_equal_dict_in_dict_with_info():
assert compare_objects_with_info({'a': {"in": 4}, 'b': 2}, {'a': {"in": 5}, 'b': 2})[0] is False
def test_not_equal_dict_in_dict_out_with_info():
assert compare_objects_with_info({'a': {"in": 4}, 'b': 2}, {'a': {"in": 4}, 'b': 5})[0] is False
| StarcoderdataPython |
6422806 | <filename>scenegraph/exp-official/taskographyv5medium5bagslots5_FD-lama-first/taskographyv5medium5bagslots5_FD-lama-first_test.py
STATS = [
{
"num_node_expansions": 320,
"plan_cost": 84,
"plan_length": 84,
"search_time": 0.201463,
"total_time": 1.57179
},
{
"num_node_expansions": 1091,
"plan_cost": 79,
"plan_length": 79,
"search_time": 0.556501,
"total_time": 2.02308
},
{
"num_node_expansions": 155,
"plan_cost": 58,
"plan_length": 58,
"search_time": 0.827871,
"total_time": 6.0794
},
{
"num_node_expansions": 259,
"plan_cost": 79,
"plan_length": 79,
"search_time": 0.906334,
"total_time": 5.54106
},
{
"num_node_expansions": 2788,
"plan_cost": 51,
"plan_length": 51,
"search_time": 0.995181,
"total_time": 1.76771
},
{
"num_node_expansions": 272,
"plan_cost": 66,
"plan_length": 66,
"search_time": 0.872045,
"total_time": 3.90416
},
{
"num_node_expansions": 153,
"plan_cost": 49,
"plan_length": 49,
"search_time": 0.473592,
"total_time": 3.9017
},
{
"num_node_expansions": 19696,
"plan_cost": 132,
"plan_length": 132,
"search_time": 9.48245,
"total_time": 10.7384
},
{
"num_node_expansions": 173,
"plan_cost": 54,
"plan_length": 54,
"search_time": 0.14957,
"total_time": 2.10611
},
{
"num_node_expansions": 1508,
"plan_cost": 90,
"plan_length": 90,
"search_time": 0.920547,
"total_time": 2.69396
},
{
"num_node_expansions": 269,
"plan_cost": 88,
"plan_length": 88,
"search_time": 0.17765,
"total_time": 1.50744
},
{
"num_node_expansions": 20322,
"plan_cost": 84,
"plan_length": 84,
"search_time": 8.25461,
"total_time": 9.2625
},
{
"num_node_expansions": 1808,
"plan_cost": 71,
"plan_length": 71,
"search_time": 0.926076,
"total_time": 2.11502
},
{
"num_node_expansions": 178,
"plan_cost": 57,
"plan_length": 57,
"search_time": 0.888838,
"total_time": 5.84717
},
{
"num_node_expansions": 196,
"plan_cost": 55,
"plan_length": 55,
"search_time": 0.571992,
"total_time": 4.5666
},
{
"num_node_expansions": 1396,
"plan_cost": 61,
"plan_length": 61,
"search_time": 0.248944,
"total_time": 0.673664
},
{
"num_node_expansions": 179,
"plan_cost": 60,
"plan_length": 60,
"search_time": 0.0615781,
"total_time": 0.457004
},
{
"num_node_expansions": 221,
"plan_cost": 61,
"plan_length": 61,
"search_time": 0.506479,
"total_time": 4.41283
},
{
"num_node_expansions": 189,
"plan_cost": 52,
"plan_length": 52,
"search_time": 0.324603,
"total_time": 3.05952
},
{
"num_node_expansions": 174,
"plan_cost": 64,
"plan_length": 64,
"search_time": 0.692062,
"total_time": 5.28865
},
{
"num_node_expansions": 220,
"plan_cost": 57,
"plan_length": 57,
"search_time": 0.728741,
"total_time": 3.40463
},
{
"num_node_expansions": 573,
"plan_cost": 89,
"plan_length": 89,
"search_time": 0.793219,
"total_time": 3.36994
},
{
"num_node_expansions": 245,
"plan_cost": 64,
"plan_length": 64,
"search_time": 0.346946,
"total_time": 2.98469
},
{
"num_node_expansions": 235,
"plan_cost": 63,
"plan_length": 63,
"search_time": 0.36964,
"total_time": 2.7056
},
{
"num_node_expansions": 152,
"plan_cost": 48,
"plan_length": 48,
"search_time": 0.222874,
"total_time": 2.81739
},
{
"num_node_expansions": 130,
"plan_cost": 49,
"plan_length": 49,
"search_time": 0.225158,
"total_time": 3.01191
},
{
"num_node_expansions": 186,
"plan_cost": 51,
"plan_length": 51,
"search_time": 0.276474,
"total_time": 3.06305
},
{
"num_node_expansions": 194,
"plan_cost": 68,
"plan_length": 68,
"search_time": 0.654046,
"total_time": 5.44738
},
{
"num_node_expansions": 284,
"plan_cost": 83,
"plan_length": 83,
"search_time": 1.25293,
"total_time": 6.32246
},
{
"num_node_expansions": 218,
"plan_cost": 65,
"plan_length": 65,
"search_time": 0.183867,
"total_time": 0.945063
},
{
"num_node_expansions": 240,
"plan_cost": 58,
"plan_length": 58,
"search_time": 0.123615,
"total_time": 1.04049
},
{
"num_node_expansions": 556,
"plan_cost": 79,
"plan_length": 79,
"search_time": 0.176472,
"total_time": 0.776186
},
{
"num_node_expansions": 94,
"plan_cost": 45,
"plan_length": 45,
"search_time": 0.0303342,
"total_time": 0.615901
},
{
"num_node_expansions": 185,
"plan_cost": 51,
"plan_length": 51,
"search_time": 0.27736,
"total_time": 3.07705
},
{
"num_node_expansions": 3701,
"plan_cost": 62,
"plan_length": 62,
"search_time": 4.60336,
"total_time": 6.91297
},
{
"num_node_expansions": 327,
"plan_cost": 62,
"plan_length": 62,
"search_time": 0.447218,
"total_time": 2.89504
},
{
"num_node_expansions": 159,
"plan_cost": 56,
"plan_length": 56,
"search_time": 0.573514,
"total_time": 6.6428
},
{
"num_node_expansions": 128,
"plan_cost": 55,
"plan_length": 55,
"search_time": 0.39673,
"total_time": 5.65059
},
{
"num_node_expansions": 234,
"plan_cost": 43,
"plan_length": 43,
"search_time": 0.138873,
"total_time": 0.999488
},
{
"num_node_expansions": 1118,
"plan_cost": 79,
"plan_length": 79,
"search_time": 5.04864,
"total_time": 8.28072
},
{
"num_node_expansions": 206,
"plan_cost": 75,
"plan_length": 75,
"search_time": 0.815741,
"total_time": 4.56192
},
{
"num_node_expansions": 2397,
"plan_cost": 77,
"plan_length": 77,
"search_time": 0.236173,
"total_time": 0.342548
},
{
"num_node_expansions": 524,
"plan_cost": 86,
"plan_length": 86,
"search_time": 0.0216399,
"total_time": 0.0619392
},
{
"num_node_expansions": 199,
"plan_cost": 60,
"plan_length": 60,
"search_time": 0.924843,
"total_time": 4.9888
},
{
"num_node_expansions": 186,
"plan_cost": 68,
"plan_length": 68,
"search_time": 1.10799,
"total_time": 5.08656
},
{
"num_node_expansions": 4585,
"plan_cost": 57,
"plan_length": 57,
"search_time": 0.723763,
"total_time": 0.953443
},
{
"num_node_expansions": 185,
"plan_cost": 58,
"plan_length": 58,
"search_time": 0.152459,
"total_time": 1.36847
},
{
"num_node_expansions": 146,
"plan_cost": 46,
"plan_length": 46,
"search_time": 0.110208,
"total_time": 1.33381
},
{
"num_node_expansions": 332,
"plan_cost": 69,
"plan_length": 69,
"search_time": 0.700787,
"total_time": 4.9951
},
{
"num_node_expansions": 1526,
"plan_cost": 64,
"plan_length": 64,
"search_time": 2.05441,
"total_time": 4.76161
},
{
"num_node_expansions": 278,
"plan_cost": 80,
"plan_length": 80,
"search_time": 0.432504,
"total_time": 3.67333
},
{
"num_node_expansions": 191,
"plan_cost": 65,
"plan_length": 65,
"search_time": 0.479822,
"total_time": 3.80735
},
{
"num_node_expansions": 120,
"plan_cost": 55,
"plan_length": 55,
"search_time": 0.0628142,
"total_time": 1.06254
},
{
"num_node_expansions": 116,
"plan_cost": 54,
"plan_length": 54,
"search_time": 0.0712541,
"total_time": 1.26172
},
{
"num_node_expansions": 17243,
"plan_cost": 56,
"plan_length": 56,
"search_time": 3.89017,
"total_time": 4.4874
},
{
"num_node_expansions": 296,
"plan_cost": 62,
"plan_length": 62,
"search_time": 1.45039,
"total_time": 8.03498
},
{
"num_node_expansions": 230,
"plan_cost": 61,
"plan_length": 61,
"search_time": 0.489163,
"total_time": 4.1545
},
{
"num_node_expansions": 244,
"plan_cost": 58,
"plan_length": 58,
"search_time": 0.725626,
"total_time": 3.82902
},
{
"num_node_expansions": 226,
"plan_cost": 61,
"plan_length": 61,
"search_time": 0.038116,
"total_time": 0.242923
},
{
"num_node_expansions": 162,
"plan_cost": 61,
"plan_length": 61,
"search_time": 0.0402944,
"total_time": 0.291883
},
{
"num_node_expansions": 12090,
"plan_cost": 94,
"plan_length": 94,
"search_time": 2.34858,
"total_time": 2.60798
},
{
"num_node_expansions": 9674,
"plan_cost": 77,
"plan_length": 77,
"search_time": 2.06353,
"total_time": 2.32312
},
{
"num_node_expansions": 155,
"plan_cost": 56,
"plan_length": 56,
"search_time": 0.0957513,
"total_time": 1.14684
},
{
"num_node_expansions": 156,
"plan_cost": 45,
"plan_length": 45,
"search_time": 0.288619,
"total_time": 2.40508
},
{
"num_node_expansions": 198,
"plan_cost": 48,
"plan_length": 48,
"search_time": 0.448026,
"total_time": 3.48083
},
{
"num_node_expansions": 190,
"plan_cost": 64,
"plan_length": 64,
"search_time": 0.533325,
"total_time": 4.73539
},
{
"num_node_expansions": 114,
"plan_cost": 47,
"plan_length": 47,
"search_time": 0.306428,
"total_time": 3.80694
},
{
"num_node_expansions": 255,
"plan_cost": 84,
"plan_length": 84,
"search_time": 0.335317,
"total_time": 2.8903
},
{
"num_node_expansions": 324,
"plan_cost": 75,
"plan_length": 75,
"search_time": 1.13237,
"total_time": 4.3301
},
{
"num_node_expansions": 359,
"plan_cost": 62,
"plan_length": 62,
"search_time": 0.87758,
"total_time": 3.53737
},
{
"num_node_expansions": 183,
"plan_cost": 63,
"plan_length": 63,
"search_time": 0.272608,
"total_time": 3.37263
},
{
"num_node_expansions": 336,
"plan_cost": 75,
"plan_length": 75,
"search_time": 0.359853,
"total_time": 2.38474
},
{
"num_node_expansions": 104,
"plan_cost": 47,
"plan_length": 47,
"search_time": 0.0974437,
"total_time": 2.27486
},
{
"num_node_expansions": 1035,
"plan_cost": 78,
"plan_length": 78,
"search_time": 0.142749,
"total_time": 0.353585
},
{
"num_node_expansions": 169,
"plan_cost": 58,
"plan_length": 58,
"search_time": 0.0353249,
"total_time": 0.289929
},
{
"num_node_expansions": 155,
"plan_cost": 56,
"plan_length": 56,
"search_time": 0.922017,
"total_time": 7.47038
},
{
"num_node_expansions": 256,
"plan_cost": 69,
"plan_length": 69,
"search_time": 2.14966,
"total_time": 8.52598
},
{
"num_node_expansions": 189,
"plan_cost": 64,
"plan_length": 64,
"search_time": 0.601462,
"total_time": 3.64858
},
{
"num_node_expansions": 247,
"plan_cost": 54,
"plan_length": 54,
"search_time": 0.350848,
"total_time": 2.28865
},
{
"num_node_expansions": 138,
"plan_cost": 47,
"plan_length": 47,
"search_time": 0.619597,
"total_time": 5.23973
},
{
"num_node_expansions": 98,
"plan_cost": 50,
"plan_length": 50,
"search_time": 0.0543675,
"total_time": 0.843014
},
{
"num_node_expansions": 99721,
"plan_cost": 64,
"plan_length": 64,
"search_time": 12.4852,
"total_time": 12.6461
},
{
"num_node_expansions": 202,
"plan_cost": 65,
"plan_length": 65,
"search_time": 0.462207,
"total_time": 3.95442
},
{
"num_node_expansions": 1104,
"plan_cost": 50,
"plan_length": 50,
"search_time": 1.52436,
"total_time": 4.29277
},
{
"num_node_expansions": 526,
"plan_cost": 66,
"plan_length": 66,
"search_time": 0.222679,
"total_time": 0.754265
},
{
"num_node_expansions": 167,
"plan_cost": 53,
"plan_length": 53,
"search_time": 0.275853,
"total_time": 2.61875
},
{
"num_node_expansions": 173,
"plan_cost": 64,
"plan_length": 64,
"search_time": 0.351874,
"total_time": 2.89515
},
{
"num_node_expansions": 117,
"plan_cost": 43,
"plan_length": 43,
"search_time": 0.265541,
"total_time": 2.76789
},
{
"num_node_expansions": 169,
"plan_cost": 54,
"plan_length": 54,
"search_time": 0.28105,
"total_time": 2.51697
},
{
"num_node_expansions": 1388,
"plan_cost": 60,
"plan_length": 60,
"search_time": 1.15136,
"total_time": 3.44077
},
{
"num_node_expansions": 103,
"plan_cost": 44,
"plan_length": 44,
"search_time": 0.0652363,
"total_time": 1.09154
},
{
"num_node_expansions": 252,
"plan_cost": 48,
"plan_length": 48,
"search_time": 0.104683,
"total_time": 0.798517
},
{
"num_node_expansions": 797,
"plan_cost": 98,
"plan_length": 98,
"search_time": 0.131898,
"total_time": 0.402812
},
{
"num_node_expansions": 162,
"plan_cost": 64,
"plan_length": 64,
"search_time": 0.136614,
"total_time": 1.70154
},
{
"num_node_expansions": 139,
"plan_cost": 51,
"plan_length": 51,
"search_time": 0.690556,
"total_time": 5.14333
},
{
"num_node_expansions": 190,
"plan_cost": 61,
"plan_length": 61,
"search_time": 1.10033,
"total_time": 5.83773
},
{
"num_node_expansions": 199,
"plan_cost": 58,
"plan_length": 58,
"search_time": 0.130145,
"total_time": 1.13467
},
{
"num_node_expansions": 159,
"plan_cost": 57,
"plan_length": 57,
"search_time": 0.0907863,
"total_time": 1.30847
},
{
"num_node_expansions": 156,
"plan_cost": 59,
"plan_length": 59,
"search_time": 0.0931239,
"total_time": 1.50389
},
{
"num_node_expansions": 122,
"plan_cost": 52,
"plan_length": 52,
"search_time": 0.0267971,
"total_time": 0.312033
}
]
num_timeouts = 80
num_timeouts = 2
num_problems = 182
| StarcoderdataPython |
9768356 | <filename>chtc_runners/simulation_runner.py
"""
Runs an iterative screening simulation.
Usage:
python simulation_runner.py \
--pipeline_params_json_file=../param_configs/general_pipeline_config.json \
--nbs_params_json_file=../param_configs/ClusterBasedWCSelector_params_reduced.json \
--exploration_strategy=weighted \
--iter_max=5 \
--process_num=$process_num \
--batch_size_index=0 \
--rnd_seed=0 \
--no-random_param_sampling \
--no-precompute_dissimilarity_matrix
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import argparse
import json
import pathlib
import numpy as np
import pandas as pd
import csv
import time
import os
from active_learning_dd.active_learning_dd import get_next_batch
from active_learning_dd.database_loaders.prepare_loader import prepare_loader
from active_learning_dd.utils.data_utils import get_duplicate_smiles_in1d
from active_learning_dd.utils.generate_dissimilarity_matrix import compute_dissimilarity_matrix
from simulation_utils import *
if __name__ == '__main__':
# read args
parser = argparse.ArgumentParser()
parser.add_argument('--pipeline_params_json_file', action="store", dest="pipeline_params_json_file", required=True)
parser.add_argument('--nbs_params_json_file', action="store", dest="nbs_params_json_file", required=True)
parser.add_argument('--exploration_strategy', action="store", dest="exploration_strategy", required=True)
parser.add_argument('--iter_max', type=int, default=10, action="store", dest="iter_max", required=True)
parser.add_argument('--process_num', type=int, default=0, action="store", dest="process_num", required=True)
parser.add_argument('--batch_size_index', type=int, default=0, action="store", dest="batch_size_index", required=True)
parser.add_argument('--rnd_seed', type=int, default=0, action="store", dest="rnd_seed", required=True)
parser.add_argument('--random_param_sampling', dest='random_param_sampling', action='store_true')
parser.add_argument('--no-random_param_sampling', dest='random_param_sampling', action='store_false')
parser.add_argument('--precompute_dissimilarity_matrix', dest='precompute_dissimilarity_matrix', action='store_true')
parser.add_argument('--no-precompute_dissimilarity_matrix', dest='precompute_dissimilarity_matrix', action='store_false')
parser.set_defaults(random_param_sampling=True)
given_args = parser.parse_args()
pipeline_params_json_file = given_args.pipeline_params_json_file
nbs_params_json_file = given_args.nbs_params_json_file
exploration_strategy = given_args.exploration_strategy
iter_max = given_args.iter_max
process_num = given_args.process_num
batch_size_index = given_args.batch_size_index
rnd_seed = given_args.rnd_seed
random_param_sampling = given_args.random_param_sampling
precompute_dissimilarity_matrix = given_args.precompute_dissimilarity_matrix
start_iter = 0
# load param json configs
with open(pipeline_params_json_file) as f:
pipeline_config = json.load(f)
with open(nbs_params_json_file) as f:
nbs_config = json.load(f)
param_sampling_str = {True: 'random', False: 'distributive'}
param_sampling_str = param_sampling_str[random_param_sampling]
print(param_sampling_str)
next_batch_selector_params = get_param_from_dist(nbs_config, rnd_seed=rnd_seed,
use_uniform=random_param_sampling,
exploration_strategy=exploration_strategy)
params_set_results_dir = pipeline_config['common']['params_set_results_dir'].format(param_sampling_str, exploration_strategy,
next_batch_selector_params['class'], process_num)
params_set_config_csv = params_set_results_dir+'/'+pipeline_config['common']['params_set_config_csv']
pathlib.Path(params_set_config_csv).parent.mkdir(parents=True, exist_ok=True)
with open(params_set_config_csv,'w') as f:
csv_w = csv.writer(f)
csv_w.writerow(list(next_batch_selector_params.keys()) + ['rnd_seed'])
csv_w.writerow(list(next_batch_selector_params.values()) + [rnd_seed])
# run this param set for each batch size
batch_size_list = next_batch_selector_params["batch_size"]
batch_size = batch_size_list[batch_size_index]
print('---------------------------------------------------------------')
print('Starting AL pipeline with batch_size: {}'.format(batch_size))
next_batch_selector_params["batch_size"] = batch_size
batch_size_results_dir = params_set_results_dir + pipeline_config['common']['batch_size_results_dir'].format(batch_size)
if os.path.exists(batch_size_results_dir):
start_iter = len(glob.glob(batch_size_results_dir + '/iter_*')) - 1
else:
pathlib.Path(batch_size_results_dir+'/'+pipeline_config['common']['params_set_config_csv']).parent.mkdir(parents=True, exist_ok=True)
# modify location of training data to be able to continue jobs
if not os.path.exists(batch_size_results_dir + '/training_data/'):
import shutil
shutil.copytree(pathlib.Path(pipeline_config['training_data_params']['data_path_format']).parent,
batch_size_results_dir + '/training_data')
pipeline_config['training_data_params']['data_path_format'] = batch_size_results_dir + '/training_data/iter_{}.csv'
with open(batch_size_results_dir+'/'+pipeline_config['common']['params_set_config_csv'],'w') as f:
csv_w = csv.writer(f)
csv_w.writerow(list(next_batch_selector_params.keys()) + ['rnd_seed'])
csv_w.writerow(list(next_batch_selector_params.values()) + [rnd_seed])
try:
pipeline_config['common']['dissimilarity_memmap_filename']
except:
pipeline_config['common']['dissimilarity_memmap_filename'] = None
if precompute_dissimilarity_matrix:
if pipeline_config['common']['dissimilarity_memmap_filename'] is None:
pipeline_config['common']['dissimilarity_memmap_filename'] = '../datasets/dissimilarity_matrix.dat'
compute_dissimilarity_matrix(csv_file_or_dir=pipeline_config['unlabeled_data_params']['data_path_format'],
output_dir=pipeline_config['common']['dissimilarity_memmap_filename'])
# run iterations for this simulation
for iter_num in range(start_iter, iter_max):
iter_start_time = time.time()
print('---------------------------------------------------------------')
print('Processing iteration number: {}...'.format(iter_num))
#### Run single iteration of active learning pipeline ####
selection_start_time = time.time()
exploitation_df, exploration_df, exploitation_array, exploration_array = get_next_batch(training_loader_params=pipeline_config['training_data_params'],
unlabeled_loader_params=pipeline_config['unlabeled_data_params'],
model_params=pipeline_config['model'],
task_names=pipeline_config['common']['task_names'],
next_batch_selector_params=next_batch_selector_params,
dissimilarity_memmap_filename=pipeline_config['common']['dissimilarity_memmap_filename'])
selection_end_time = time.time()
total_selection_time = selection_end_time - selection_start_time
#### Evaluation ####
# save results
print('Evaluating selected batch...')
eval_start_time = time.time()
evaluate_selected_batch(exploitation_df, exploration_df,
exploitation_array, exploration_array,
batch_size_results_dir,
pipeline_config,
iter_num,
batch_size,
total_selection_time,
add_mean_medians=False)
eval_end_time = time.time()
print('Time it took to evaluate batch {} seconds.'.format(eval_end_time-eval_start_time))
if exploitation_df is not None or exploration_df is not None:
# finally save the exploitation, exploration dataframes to training data directory for next iteration
pd.concat([exploitation_df, exploration_df]).to_csv(pipeline_config['training_data_params']['data_path_format'].format(iter_num+1),
index=False)
iter_end_time = time.time()
print('Finished processing iteration {}. Took {} seconds.'.format(iter_num, iter_end_time-iter_start_time))
# terminate if both exploitation and exploration df are None
if exploitation_df is None and exploration_df is None:
print('Both exploitation and exploration selections are empty. Terminating program.')
break
# summarize the evaluation results into a single csv file
summarize_simulation(batch_size_results_dir,
pipeline_config) | StarcoderdataPython |
4826818 | <reponame>sundials-codes/PeleLM<filename>Testing/Scaling/extractWeakScalingData.py
#!/usr/bin/env python3
import sys
import os
import shutil
import argparse
import socket
import numpy as np
import subprocess
import fnmatch
# Script used to extract weak scaling data from PeleLM log files
# Simply ./extractWeakScalingData.py in the folder created using weakScaling.py
# Adapt the "logprefix" below to your case
if __name__ == "__main__":
# log file pattern
logprefix = "slurm*"
# get list of cases from folders
cases = [ os.path.relpath(f.path,"./") for f in os.scandir(".") if f.is_dir() ]
cases.sort()
# data holder
runTimes = []
diffusionTimes = []
reactionTimes = []
MacProjTimes = []
NodalProjTimes = []
VelAdvTimes = []
ScalAdvTimes = []
SyncTimes = []
for case in cases:
print(case)
folder = "./{}".format(case)
logfile = ""
for root, dirs, files in os.walk(folder):
for name in files:
if fnmatch.fnmatch(name, logprefix):
logfile = os.path.join(root, name)
break
if (logfile == ""):
print("WARNING ! Could not find logfile in {}".format(case))
continue
# Get runTime
cmd = "cat {}".format(logfile)+" | grep 'Run time =' | awk -F= '{print $2}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currRunTime = procRunTime.communicate()[0].decode("utf-8").strip()
runTimes.append(currRunTime)
# Get component times: reaction, diffusion, MacProj, NodalProj, ScalAdv, VelAdv, Sync
cmd = "cat {}".format(logfile)+" | grep 'PeleLM::advance::reaction' | awk 'NR%2==0' | awk '{print $4}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currReactTime = procRunTime.communicate()[0].decode("utf-8").strip()
reactionTimes.append(currReactTime)
cmd = "cat {}".format(logfile)+" | grep 'PeleLM::advance::diffusion' | awk 'NR%2==0' | awk '{print $4}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currDiffTime = procRunTime.communicate()[0].decode("utf-8").strip()
diffusionTimes.append(currDiffTime)
cmd = "cat {}".format(logfile)+" | grep 'PeleLM::advance::mac' | awk 'NR%2==0' | awk '{print $4}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currMACTime = procRunTime.communicate()[0].decode("utf-8").strip()
MacProjTimes.append(currMACTime)
cmd = "cat {}".format(logfile)+" | grep 'PeleLM::advance::project' | awk 'NR%2==0' | awk '{print $4}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currProjTime = procRunTime.communicate()[0].decode("utf-8").strip()
NodalProjTimes.append(currProjTime)
cmd = "cat {}".format(logfile)+" | grep 'PeleLM::advance::scalars_adv' | awk 'NR%2==0' | awk '{print $4}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currScalAdvTime = procRunTime.communicate()[0].decode("utf-8").strip()
ScalAdvTimes.append(currScalAdvTime)
cmd = "cat {}".format(logfile)+" | grep 'PeleLM::advance::velocity_adv' | awk 'NR%2==0' | awk '{print $4}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currVelAdvTime = procRunTime.communicate()[0].decode("utf-8").strip()
VelAdvTimes.append(currVelAdvTime)
cmd = "cat {}".format(logfile)+" | grep 'PLM::mac_sync()' | awk 'NR%2==0' | awk '{print $4}'"
procRunTime = subprocess.Popen(cmd,shell=True,stdout=subprocess.PIPE)
currSyncTime = procRunTime.communicate()[0].decode("utf-8").strip()
SyncTimes.append(currSyncTime)
fout = open("ScalingData.dat", "w")
fout.write(" NodeCount RunTime Reaction Diffusion MacProj NodalProj ScalAdv VelAdv Sync \n")
for n in range(len(cases)):
fout.write("{} {} {} {} {} {} {} {} {} \n".format(cases[n], runTimes[n], reactionTimes[n], diffusionTimes[n], MacProjTimes[n], NodalProjTimes[n], ScalAdvTimes[n], VelAdvTimes[n], SyncTimes[n]))
fout.close()
| StarcoderdataPython |
9774492 | # This file is MACHINE GENERATED! Do not edit.
# Generated by: tensorflow/python/tools/api/generator/create_python_api.py script.
"""Public API for tf.keras.applications.inception_v3 namespace.
"""
from __future__ import print_function as _print_function
import sys as _sys
from keras.applications.inception_v3 import InceptionV3
from keras.applications.inception_v3 import decode_predictions
from keras.applications.inception_v3 import preprocess_input
del _print_function
| StarcoderdataPython |
241098 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
## Project: SCRIPT - February 2018
## Contact: <NAME> - <EMAIL>
import sys
import os
import glob
from argparse import ArgumentParser
import imp
import numpy as np
from utils import spectrogram2wav
# from scipy.io.wavfile import write
import soundfile
import tqdm
from concurrent.futures import ProcessPoolExecutor
import tensorflow as tf
from architectures import SSRNGraph
from synthesize import make_mel_batch, split_batch, synth_mel2mag
from configuration import load_config
def synth_wave(hp, magfile):
mag = np.load(magfile)
#print ('mag shape %s'%(str(mag.shape)))
wav = spectrogram2wav(hp, mag)
outfile = magfile.replace('.mag.npy', '.wav')
outfile = outfile.replace('.npy', '.wav')
#print magfile
#print outfile
#print
# write(outfile, hp.sr, wav)
soundfile.write(outfile, wav, hp.sr)
def main_work():
#################################################
# ======== Get stuff from command line ==========
a = ArgumentParser()
a.add_argument('-c', dest='config', required=True, type=str)
a.add_argument('-ncores', type=int, default=1)
opts = a.parse_args()
# ===============================================
hp = load_config(opts.config)
### 1) convert saved coarse mels to mags with latest-trained SSRN
print('mel2mag: restore last saved SSRN')
g = SSRNGraph(hp, mode="synthesize")
with tf.Session() as sess:
sess.run(tf.global_variables_initializer())
## TODO: use restore_latest_model_parameters from synthesize?
var_list = tf.get_collection(tf.GraphKeys.TRAINABLE_VARIABLES, 'SSRN')
saver2 = tf.train.Saver(var_list=var_list)
savepath = hp.logdir + "-ssrn"
latest_checkpoint = tf.train.latest_checkpoint(savepath)
if latest_checkpoint is None: sys.exit('No SSRN at %s?'%(savepath))
ssrn_epoch = latest_checkpoint.strip('/ ').split('/')[-1].replace('model_epoch_', '')
saver2.restore(sess, latest_checkpoint)
print("SSRN Restored from latest epoch %s"%(ssrn_epoch))
filelist = glob.glob(hp.logdir + '-t2m/validation_epoch_*/*.npy')
filelist = [fname for fname in filelist if not fname.endswith('.mag.npy')]
batch, lengths = make_mel_batch(hp, filelist, oracle=False)
Z = synth_mel2mag(hp, batch, g, sess, batchsize=32)
print ('synthesised mags, now splitting batch:')
maglist = split_batch(Z, lengths)
for (infname, outdata) in tqdm.tqdm(zip(filelist, maglist)):
np.save(infname.replace('.npy','.mag.npy'), outdata)
### 2) GL in parallel for both t2m and ssrn validation set
print('GL for SSRN validation')
filelist = glob.glob(hp.logdir + '-t2m/validation_epoch_*/*.mag.npy') + \
glob.glob(hp.logdir + '-ssrn/validation_epoch_*/*.npy')
if opts.ncores==1:
for fname in tqdm.tqdm(filelist):
synth_wave(hp, fname)
else:
executor = ProcessPoolExecutor(max_workers=opts.ncores)
futures = []
for fpath in filelist:
futures.append(executor.submit(synth_wave, hp, fpath))
proc_list = [future.result() for future in tqdm.tqdm(futures)]
if __name__=="__main__":
main_work()
| StarcoderdataPython |
6568873 | from toontown.toonbase.ToontownGlobals import *
from otp.level import BasicEntities
class MintProduct(BasicEntities.NodePathEntity):
Models = {CashbotMintIntA: 'phase_10/models/cashbotHQ/MoneyBag',
CashbotMintIntB: 'phase_10/models/cashbotHQ/MoneyStackPallet',
CashbotMintIntC: 'phase_10/models/cashbotHQ/GoldBarStack'}
Scales = {CashbotMintIntA: 0.98,
CashbotMintIntB: 0.38,
CashbotMintIntC: 0.6}
def __init__(self, level, entId):
BasicEntities.NodePathEntity.__init__(self, level, entId)
self.model = None
self.mintId = self.level.mintId
self.loadModel()
def destroy(self):
if self.model:
self.model.removeNode()
del self.model
BasicEntities.NodePathEntity.destroy(self)
def loadModel(self):
if self.model:
self.model.removeNode()
self.model = None
self.model = loader.loadModel(self.Models[self.mintId])
self.model.setScale(self.Scales[self.mintId])
self.model.flattenStrong()
if self.model:
self.model.reparentTo(self)
if __dev__:
def setMintId(self, mintId):
self.mintId = mintId
self.loadModel()
| StarcoderdataPython |
257250 | from django.db import models
from pydis_site.apps.api.models.bot.user import User
from pydis_site.apps.api.models.utils import ModelReprMixin
class Nomination(ModelReprMixin, models.Model):
"""A helper nomination created by staff."""
active = models.BooleanField(
default=True,
help_text="Whether this nomination is still relevant."
)
actor = models.ForeignKey(
User,
on_delete=models.CASCADE,
help_text="The staff member that nominated this user.",
related_name='nomination_set'
)
reason = models.TextField(
help_text="Why this user was nominated."
)
user = models.ForeignKey(
User,
on_delete=models.CASCADE,
help_text="The nominated user.",
related_name='nomination'
)
inserted_at = models.DateTimeField(
auto_now_add=True,
help_text="The creation date of this nomination."
)
end_reason = models.TextField(
help_text="Why the nomination was ended.",
default=""
)
ended_at = models.DateTimeField(
auto_now_add=False,
help_text="When the nomination was ended.",
null=True
)
| StarcoderdataPython |
3410625 | #!/usr/bin/env python3
#This sample demonstrates turning relays on and off.
#Install Relay uHAT library with "pip3 install turta-relayuhat"
from time import sleep
from turta_relayuhat import Turta_Relay
#Initialize
relay = Turta_Relay.RelayController()
try:
while 1:
#Turn on relay 1
relay.write(1, True)
print("Relay 1 state: " + ("On" if relay.read(1) else "Off"))
sleep(2.0)
#Turn on relay 2
relay.write(2, True)
print("Relay 2 state: " + ("On" if relay.read(2) else "Off"))
sleep(2.0)
#Turn off relay 1
relay.write(1, False)
print("Relay 1 state: " + ("On" if relay.read(1) else "Off"))
sleep(2.0)
#Turn off relay 2
relay.write(2, False)
print("Relay 2 state: " + ("On" if relay.read(2) else "Off"))
sleep(2.0)
#Turn on all relays
relay.write_all(True)
print("Turn on all relays")
sleep(2.0)
#Turn off all relays
relay.write_all(False)
print("Turn off all relays")
sleep(2.0)
except KeyboardInterrupt:
print('Bye.') | StarcoderdataPython |
6599615 | # encoding: utf-8
'''🐱 PDS GitHub Utilities: Python Version determination'''
import logging, os, packaging.version, re, sys, subprocess
_logger = logging.getLogger(__name__)
_detectives = set()
# Classes
# -------
class NoVersionDetectedError(ValueError):
'''😦 Raised when we cannot detect a version from a Python workspace'''
class VersionDetective(object):
'''🕵️♀️ Abstract detective for a version of a Python package given its source code. You can
define your own classes by deriving from this class and implementing the ``detect`` method.
This package comes with several implementations, and you can register your own by calling
``registerDetective``.
'''
def __init__(self, workspace: str):
'''Initialize this detective by saving the given workspace (a path to a directory as a string)
into the instance of this object.
'''
self.workspace = workspace
def findFile(self, fn: str):
'''Utility method: Find the file named ``fn`` in the workspace and return its path,
or None if it's not found. Handy for subclasses.'''
path = os.path.join(self.workspace, fn)
return path if os.path.isfile(path) else None
def detect(self):
'''Detect the version of the Python package in the source code ``workspace`` and return it,
or None if we can't figure it out.
'''
raise NotImplementedError('Subclasses must implement ``VersionDetective.detect``')
class VersioneerDetective(VersionDetective):
'''Detective that uses Python Versioneer to tell what version we have'''
def detect(self):
if not sys.executable:
_logger.debug('🤷♂️ Cannot tell what my own Python executable is, so not bothering with versioneer')
return None
setupFile = self.findFile('setup.py')
if not setupFile:
_logger.debug('🤷♀️ No setup.py file, so cannot call versioneer command on it')
return None
expr = re.compile(r'^Version: (.+)$')
try:
completion = subprocess.run(
[sys.executable, setupFile, 'version'],
check=True, cwd=self.workspace, encoding='utf-8', stdin=subprocess.DEVNULL, stdout=subprocess.PIPE,
text=True
)
for line in completion.stdout.split('\n'):
match = expr.match(line)
if match: return match.group(1).strip()
except subprocess.CalledProcessError as ex:
_logger.debug('🚳 Could not execute ``version`` command on ``setup.py``, rc=%d', ex.returncode)
return None
class TextFileDetective(VersionDetective):
'''Detective that looks for a ``version.txt`` file of some kind for a version indication'''
@classmethod
def locate_file(cls, root_dir):
src_dir = os.path.join(root_dir, 'src')
if not os.path.isdir(src_dir):
raise ValueError('Unable to locate ./src directory in workspace.')
version_file = None
for dirpath, dirnames, filenames in os.walk(src_dir):
for fn in filenames:
if fn.lower() == 'version.txt':
version_file = os.path.join(dirpath, fn)
_logger.debug('🪄 Found a version.txt in %s', version_file)
break
return version_file
def detect(self):
version_file = self.locate_file(self.workspace)
if version_file is not None:
with open(version_file, 'r') as inp:
return inp.read().strip()
else:
return None
class ModuleInitDetective(VersionDetective):
'''Detective that parses ``__init__.py`` files for a version definition, using the first one matched;
this is typically the highest level one in the package, which is what you want.
'''
def detect(self):
expr = re.compile(r'^__version__\s*=\s*[\'"]([^\'"]+)[\'"]')
for dirpath, dirnames, filenames in os.walk(os.path.join(self.workspace, 'src')):
for fn in filenames:
if fn == '__init__.py':
init = os.path.join(dirpath, '__init__.py')
_logger.debug('🧞♀️ Found a potential module init in %s', init)
with open(init, 'r') as inp:
for line in inp:
match = expr.match(line)
if match:
version = match.group(1)
_logger.debug('🔍 Using version «%s» from %s', version, init)
return version
return None
class _SetupDetective(VersionDetective):
'''An abstract detective that refactors common behavior for detecting versions in both
``setup.py`` and ``setup.cfg`` files.
'''
def getFile(self):
'''Tell what file we're looking for'''
raise NotImplementedError('Subclasses must implement ``getFile``')
def getRegexp(self):
'''Give us a good regexp to use in the file; the regexp must provide one capture
group that contains the version string.
'''
raise NotImplementedError('Subclasses must implement ``getRegexp``')
def detect(self):
setupFile = self.findFile(self.getFile())
if not setupFile: return None
expr = self.getRegexp()
with open(setupFile, 'r') as inp:
for line in inp:
match = expr.search(line)
if match: return match.group(1).strip()
return None
class SetupConfigDetective(_SetupDetective):
    '''Detective that parses the ``setup.cfg`` file for a declarative version'''
def getFile(self):
return 'setup.cfg'
def getRegexp(self):
return re.compile(r'^version\s*=\s*([^#\s]+)')
class SetupModuleDetective(_SetupDetective):
'''Detective that parses the ``setup.py`` module for a programmatic version'''
def getFile(self):
return 'setup.py'
def getRegexp(self):
return re.compile(r'version\s*=\s*[\'"]([^\'"]+)[\'"]')
# Functions
# ---------
def registerDetective(detective: type):
    '''✍️ Register the given ``detective`` with the set of potential detectives to use to detect
version information in a Python source tree.
'''
if not issubclass(detective, VersionDetective):
raise ValueError('Only register ``VersionDetective`` classes/subclasses with this function')
_detectives.add(detective)
def getVersion(workspace=None):
'''🕵️ Get the version of a Python package in the given ``workspace``, or in the directory
given by the ``GITHUB_WORKSPACE`` environment variable if it's set and non-empty,
or the current working directory. Try several strategies to determine the version and
use the one that makes the "most valid" version string, or raise a ``NoVersionDetectedError``
if none of them look copacetic.
'''
_logger.info('🤔 Python getVersion called with workspace %s', workspace)
# Figure out where to work
gh = os.getenv('GITHUB_WORKSPACE')
workspace = os.path.abspath(workspace if workspace else gh if gh else os.getcwd())
_logger.debug('👣 The computed path is %s', workspace)
# Try each detective
versions = set()
for detectiveClass in _detectives:
detective = detectiveClass(workspace)
version = detective.detect()
_logger.debug('🔍 Detected version using %s is %r', detectiveClass.__name__, version)
if version:
# Validate it
try:
versionObj = packaging.version.parse(version)
if not isinstance(versionObj, packaging.version.LegacyVersion):
# A newer and therefore better version, so add it
versions.add(version)
except packaging.version.InvalidVersion:
# Invalid, we won't add it
pass
# What we're left with are all valid so go with the shortest I guess; i.e., if one detective
# said ``1.2.3`` but another said ``1.2.3.post4`` we prefer ``1.2.3``.
if len(versions) == 0:
raise NoVersionDetectedError()
versions = list(versions)
versions.sort(key=len)
version = versions[0]
_logger.debug('🏁 High confidence version is %s', version)
return version
# Register the "built in" detectives:
for d in (VersioneerDetective, SetupConfigDetective, SetupModuleDetective, TextFileDetective, ModuleInitDetective):
registerDetective(d)
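# Illustrative self-test: a minimal sketch, assuming this module is run directly from
# a checkout whose current directory may or may not contain a Python package.
if __name__ == '__main__':
    logging.basicConfig(level=logging.DEBUG)
    try:
        print('Detected version:', getVersion('.'))
    except (NoVersionDetectedError, ValueError):
        # TextFileDetective raises ValueError when no ./src directory exists.
        print('No version could be detected in the current directory')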
| StarcoderdataPython |
242986 | import setuptools
setuptools.setup(
name="liblora",
version="0.0.3",
author="<NAME>",
author_email="<EMAIL>",
description="A library with functions related to LoRaWAN, derived from Basic MAC",
url="https://github.com/mkuyper/liblora",
packages=setuptools.find_packages(),
install_requires=[ "pycryptodome" ],
classifiers=[
"Programming Language :: Python :: 3",
"License :: OSI Approved :: BSD License",
"Operating System :: OS Independent",
],
python_requires='>=3.8',
)
| StarcoderdataPython |
8014284 | <filename>tests/h/admin/views/nipsa_test.py
# -*- coding: utf-8 -*-
from pyramid import httpexceptions
import pytest
from h.admin.views import nipsa as views
@pytest.mark.usefixtures('nipsa_service', 'routes', 'users')
class TestNipsaIndex(object):
def test_lists_flagged_usernames(self, pyramid_request):
result = views.nipsa_index(pyramid_request)
assert set(result['usernames']) == set(['kiki', 'ursula', 'osono'])
def test_lists_flagged_usernames_no_results(self, nipsa_service, pyramid_request):
nipsa_service.flagged = set([])
result = views.nipsa_index(pyramid_request)
assert result['usernames'] == []
@pytest.mark.usefixtures('nipsa_service', 'routes', 'users')
class TestNipsaAddRemove(object):
def test_add_flags_user(self, nipsa_service, pyramid_request, users):
pyramid_request.params = {"add": "carl"}
views.nipsa_add(pyramid_request)
assert users['carl'] in nipsa_service.flagged
@pytest.mark.parametrize('user', ['', 'donkeys', '\x00'])
def test_add_raises_when_user_not_found(self, user, nipsa_service, pyramid_request):
pyramid_request.params = {"add": user}
with pytest.raises(views.UserNotFoundError):
views.nipsa_add(pyramid_request)
def test_add_redirects_to_index(self, pyramid_request):
pyramid_request.params = {"add": "carl"}
result = views.nipsa_add(pyramid_request)
assert isinstance(result, httpexceptions.HTTPSeeOther)
assert result.location == '/adm/nipsa'
def test_remove_unflags_user(self, nipsa_service, pyramid_request, users):
pyramid_request.params = {"remove": "kiki"}
views.nipsa_remove(pyramid_request)
assert users['kiki'] not in nipsa_service.flagged
@pytest.mark.parametrize('user', ['', 'donkeys', '\x00'])
def test_remove_raises_when_user_not_found(self, user, nipsa_service, pyramid_request):
pyramid_request.params = {"remove": user}
with pytest.raises(views.UserNotFoundError):
views.nipsa_remove(pyramid_request)
def test_remove_redirects_to_index(self, pyramid_request):
pyramid_request.params = {"remove": "kiki"}
result = views.nipsa_remove(pyramid_request)
assert isinstance(result, httpexceptions.HTTPSeeOther)
assert result.location == '/adm/nipsa'
class FakeNipsaService(object):
def __init__(self, users):
self.flagged = set([u for u in users if u.nipsa])
@property
def flagged_users(self):
return list(self.flagged)
def flag(self, user):
self.flagged.add(user)
def unflag(self, user):
self.flagged.remove(user)
@pytest.fixture
def nipsa_service(pyramid_config, users):
service = FakeNipsaService([u for u in users.values()])
pyramid_config.register_service(service, name='nipsa')
return service
@pytest.fixture
def routes(pyramid_config):
pyramid_config.add_route('admin_nipsa', '/adm/nipsa')
@pytest.fixture
def users(db_session, factories):
users = {
'carl': factories.User(username='carl'),
'kiki': factories.User(username='kiki', nipsa=True),
'ursula': factories.User(username='ursula', nipsa=True),
'osono': factories.User(username='osono', nipsa=True),
}
db_session.add_all([u for u in users.values()])
db_session.flush()
return users
| StarcoderdataPython |
11342341 | <gh_stars>1-10
class Solution:
def partitionLabels(self, S):
"""
:type S: str
:rtype: List[int]
"""
res, n = [], len(S)
idx_dict = {}
for i in range(n):
idx_dict[S[i]] = i
i = 0
while i < n:
start = j = i
while i <= j:
last_si_idx = idx_dict[S[i]]
if j < last_si_idx:
j = last_si_idx
i += 1
res.append(j-start+1)
return res | StarcoderdataPython |
6675760 | <reponame>zabir-nabil/data-viz<gh_stars>0
# coding: utf-8
# In[16]:
import pandas as pd
# In[17]:
excel_ds = 'dataset.xlsx'
subjects = pd.read_excel(excel_ds, index_col = 0)
# In[18]:
subjects.shape
# In[19]:
subjects.head()
# In[20]:
subjects.tail()
# In[21]:
best_composite_index = subjects.sort_values(['CompositeIndex'], ascending=False)
# In[22]:
best_composite_index[["CompositeIndex","CompositeIndicator","DistrictCode","UpazilaCode","Village","Sex",
"Disability","Religion","adolescent_girl","Lact_mother","Main_Sanitation_Option",
"Main_Water_Source","Beneficiary Type","Main_IGAName","FirstAssetYear",
"shp3_schoolAttending","shp3_cashSavings"]].head()
# In[23]:
best_composite_index[["CompositeIndex","CompositeIndicator","DistrictCode","UpazilaCode","Village","Sex",
"Disability","Religion","adolescent_girl","Lact_mother","Main_Sanitation_Option",
"Main_Water_Source","Beneficiary Type","Main_IGAName","FirstAssetYear",
"shp3_schoolAttending","shp3_cashSavings"]].tail()
# In[24]:
best_composite_indic = subjects.sort_values(['CompositeIndicator'], ascending=False)
# In[25]:
best_composite_indic[["CompositeIndex","CompositeIndicator","DistrictCode","UpazilaCode","Village","Sex",
"Disability","Religion","adolescent_girl","Lact_mother","Main_Sanitation_Option",
"Main_Water_Source","Beneficiary Type","Main_IGAName","FirstAssetYear",
"shp3_schoolAttending","shp3_cashSavings"]].head()
# In[26]:
best_composite_index[["CompositeIndex","CompositeIndicator","DistrictCode","UpazilaCode","Village","Sex",
"Disability","Religion","adolescent_girl","Lact_mother","Main_Sanitation_Option",
"Main_Water_Source","Beneficiary Type","Main_IGAName","FirstAssetYear",
"shp3_schoolAttending","shp3_cashSavings"]].tail()
# In[27]:
from bokeh.plotting import figure, show
from bokeh.io import output_notebook
output_notebook()
# In[28]:
sex1 = subjects.loc[subjects["Sex"]==1]
sex2 = subjects.loc[subjects["Sex"]==2]
TOOLS="resize,crosshair,pan,wheel_zoom,box_zoom,reset,box_select,lasso_select"
mp = figure(title=" Does Sex have an impact on CIndex and CIndicator?",
x_axis_label='CompositeIndex',
y_axis_label='CompositeIndicator', tools = TOOLS)
mp.circle(sex1["CompositeIndex"],sex1["CompositeIndicator"],legend="Sex 1", alpha=0.5, fill_color="blue",
fill_alpha=0.6, line_color="blue", size=6)
mp.circle(sex2["CompositeIndex"],sex2["CompositeIndicator"],legend="Sex 2", alpha=0.5, fill_color="red",
fill_alpha=0.6, line_color="red", size=6)
show(mp)
# In[29]:
from bokeh.charts import Bar, output_file, show
mp = Bar(subjects, 'Sex', values='CompositeIndex', agg = 'mean',
title=" Mean of cIndex by Sex", bar_width=0.4, color = 'navy')
show(mp)
# In[30]:
from bokeh.charts import Bar, output_file, show
mp = Bar(subjects, label='DistrictCode', values='CompositeIndex', agg='median', group = 'Beneficiary Type',
title="Median CompositeIndex by District Code grouped by Beneficiary Type", legend='top_right')
show(mp)
# In[31]:
from bokeh.charts import Histogram, output_file, show
mp = Histogram(subjects['CompositeIndicator'], title="Composite Indicator Distribution")
output_file("histogramCIndicator.html")
show(mp)
# In[32]:
from bokeh.charts import Histogram, output_file, show
mp = Histogram(subjects['CompositeIndex'], title="Composite Index Distribution")
output_file("histogramCIndex.html")
show(mp)
# In[33]:
from bokeh.charts import Bar, output_file, show
mp = Bar(subjects, label='UnionCode', values='CompositeIndicator', agg='mean', group = 'shp3_cashSavings',
title="Median CompositeIndicator by Union Code grouped by Cash Savings", legend='top_right')
show(mp)
# In[34]:
from bokeh.charts import Bar, output_file, show
mp = Bar(subjects, label='DistrictCode', values='CompositeIndex', agg='mean', group = 'shp3_cashSavings',
title="Median CompositeIndex by District Code grouped by Cash Savings", legend='top_right')
show(mp)
# In[60]:
from bokeh.plotting import figure, show
mp = figure(title="CompositeIndex and CompositeIndicator",
x_axis_label='CompositeIndex',
y_axis_label='CompositeIndicator')
mp.circle(subjects['CompositeIndex'], subjects['CompositeIndicator'], size = 5, alpha = 0.8,
fill_color="red", line_color="navy", line_width=3)
show(mp)
# In[36]:
cIdic = subjects.loc[:,"CompositeIndicator"].values
print(len(cIdic))
# In[37]:
cIdex = subjects.loc[:,"CompositeIndex"].values
print(len(cIdex))
# In[38]:
len(subjects.CompositeIndex.unique()) # 189 classes with certain compositeIndex
# In[39]:
subjects_selected_cols = subjects[["CompositeIndex","CompositeIndicator","DistrictCode","UpazilaCode","Village","Sex",
"Disability","Religion","adolescent_girl","Lact_mother","Main_Sanitation_Option",
"Main_Water_Source","Beneficiary Type","Main_IGAName","FirstAssetYear",
"shp3_schoolAttending","shp3_cashSavings"]]
# In[40]:
import matplotlib.pyplot as plt
ax = subjects_selected_cols.plot(figsize=(15,10),kind = 'density',title = 'KDE of variables')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5));
plt.show()
# In[41]:
ax = subjects_selected_cols.plot(figsize=(15,10),kind = 'line',title = 'Variance of Variables')
ax.legend(loc='center left', bbox_to_anchor=(1, 0.5));
plt.show()
# In[42]:
import numpy as np
X = subjects[["DistrictCode","UpazilaCode","Sex","femaleHeaded","OldAges",
"Disability","Religion","adolescent_girl","Lact_mother","Main_Sanitation_Option",
"Main_Water_Source","Total_IGDValue",
"shp3_schoolAttending","shp3_cashSavings"]]
X.replace([np.inf, -np.inf], np.nan).dropna(axis=1)
y = subjects[["CompositeIndex"]]
y.replace([np.inf, -np.inf], np.nan).dropna(axis=1)
# In[43]:
from sklearn.preprocessing import StandardScaler, Imputer
imp = Imputer(missing_values='NaN', strategy='mean', axis=0)
X = imp.fit_transform(X)
print(X)
X_std = StandardScaler().fit_transform(X)
# In[44]:
from sklearn.decomposition import PCA as sklearnPCA
sklearn_pca = sklearnPCA(n_components=2)
Y_sklearn = sklearn_pca.fit_transform(X_std)
# In[45]:
print(X_std.shape)
print(Y_sklearn.shape)
# In[46]:
print(sklearn_pca.components_)
# In[47]:
print(sklearn_pca.explained_variance_)
# In[68]:
plt.figure(figsize=(15,10))
plt.scatter(Y_sklearn[:, 0], Y_sklearn[:, 1],
c=y.CompositeIndex, edgecolor='none', alpha=1,
cmap=plt.cm.get_cmap('nipy_spectral', 10))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar().set_label('CompositeIndex');
plt.show()
# In[81]:
y2 = subjects[["Sex"]]
y2.replace([np.inf, -np.inf], np.nan).dropna(axis=1)
#print(y2)
hi = y2.iloc[:,0]
print(max(hi))
print(min(hi))
# ### Looking for clusters
#
# 1. *Todo*
#
# * A 3d scatter plot
# * k-means
# In[82]:
plt.figure(figsize=(15,10))
plt.scatter(Y_sklearn[:, 0], Y_sklearn[:, 1],
c=y2.Sex, edgecolor='blue', alpha=1,
cmap=plt.cm.get_cmap('nipy_spectral', 2))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar().set_label('Sex');
plt.show()
# In[77]:
y2 = subjects[["shp3_cashSavings"]]
y2.replace([np.inf, -np.inf], np.nan).dropna(axis=1)
#print(y2)
hi = y2.iloc[:,0]
print(max(hi))
print(min(hi))
# In[79]:
plt.figure(figsize=(15,10))
plt.scatter(Y_sklearn[:, 0], Y_sklearn[:, 1],
c=y2.shp3_cashSavings, edgecolor='red', alpha=1,
cmap=plt.cm.get_cmap('nipy_spectral', 2))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar().set_label('Cash Savings');
plt.show()
# In[86]:
# CompositeIndicator
y4 = subjects[["CompositeIndicator"]]
y4.replace([np.inf, -np.inf], np.nan).dropna(axis=1)
#print(y4)
hi = y4.iloc[:,0]
print(max(hi))
print(min(hi))
# In[88]:
plt.figure(figsize=(15,10))
plt.scatter(Y_sklearn[:, 0], Y_sklearn[:, 1],
c=y4.CompositeIndicator, edgecolor='orange', alpha=1,
cmap=plt.cm.get_cmap('nipy_spectral', 5))
plt.xlabel('component 1')
plt.ylabel('component 2')
plt.colorbar().set_label('CompositeIndicator');
plt.show()
# In[52]:
get_ipython().magic('load_ext py_d3')
# In[53]:
get_ipython().run_cell_magic('d3', '', '\n<g></g>\n\n<script>\nd3.select("g").text("Testing d3, ...")\n</script>')
# In[ ]:
| StarcoderdataPython |
1610611 | #from Python
import time
import csv
import os
import math
import numpy as np
import sys
from shutil import copyfile
#from Pytorch
import torch
import torchvision
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torchvision import datasets
from torchvision import transforms
from torchvision.utils import save_image
#from OpenCV
import cv2
#from this project
import param as p
import VisionOP
#local function
def to_var(x):
if torch.cuda.is_available():
x = x.cuda()
return Variable(x)
def denorm(x):
out = (x + 1) / 2
return out.clamp(0, 1)
def norm(x):
out = (x - 0.5) * 2
return out.clamp(-1,1)
version = '3-15l-7-5-3'
################ Hyper Parameters ################
maxDataNum = p.maxDataNum #in fact, 4206
batchSize = p.batchSize
imgCropWidth = p.imgCropWidth
imgCropHeight = p.imgCropHeight
imageWidth = p.imageWidth
imageHeight = p.imageHeight
NGF = p.NGF
NDF = p.NDF
ZLength = p.ZLength
# train
MaxEpoch = p.MaxEpoch
learningRateE = p.learningRateE
lrDecayRateE = p.lrDecayRateE
learningRateR = p.learningRateR # default:0.0003
lrDecayRateR = p.lrDecayRateR #0.999308
# save
numberSaveImage = p.numberSaveImage
############################################
class VisionOPLayer(nn.Module):
def __init__(self):
super(VisionOPLayer, self).__init__()
def forward(self, x, isDenorm = 1):
if isDenorm == 1:
x = denorm(x)
#x = VisionOP.RGB2HSI(x)
#x[:,1,:,:] = norm(torch.clamp((x[:,1,:,:] + 0.3),0,1))
x = norm(VisionOP.Laplacian(x,ksize=3))
return x
class SharpMSELoss(nn.Module):
def __init__(self):
super(SharpMSELoss, self).__init__()
#(N,C,H,W)
def forward(self, input, target):
# error < 0.5 -> error / error >= 0.5 -> error^2+0.25
input = torch.abs(input - target).cuda()
underth = VisionOP.Nonzero((VisionOP.Min(input,0.5*torch.ones(input.size()).cuda()) - 0.5))
input = input * underth + (input*input+0.25) * (1 - underth)
Loss = torch.mean(input) #torch.mean(input.view(input.size()[0],-1),1)
return Loss
class Util(nn.Module):
def __init__(self):
super(Util, self).__init__()
self.downSample = nn.AvgPool2d(4,stride=4)
self.upSample = nn.Upsample(size=(imageHeight,imageWidth),mode='bilinear')
def forward(self, x):
x = x.view(-1,3,imageHeight,imageWidth)
x = self.upSample(self.downSample(x))
return x
class Encoder_class(nn.Module):
def __init__(self):
super(Encoder_class, self).__init__()
self.conv1 = nn.Conv2d(3, NDF * 1, 4, 2, 1) # 256->128
self.conv2 = nn.Conv2d(NDF * 1, NDF * 2, 4, 2, 1) # 128->64
self.conv3 = nn.Conv2d(NDF * 2, NDF * 4, 4, 2, 1) # 64->32
self.conv4 = nn.Conv2d(NDF * 4, NDF * 8, 4, 2, 1) # 32->16
self.BN_conv2 = nn.BatchNorm2d(NDF * 2)
self.BN_conv3 = nn.BatchNorm2d(NDF * 4)
self.BN_conv4 = nn.BatchNorm2d(NDF * 8)
nn.init.xavier_normal_(self.conv1.weight)
nn.init.xavier_normal_(self.conv2.weight)
nn.init.xavier_normal_(self.conv3.weight)
nn.init.xavier_normal_(self.conv4.weight)
def forward(self, x):
x = x.view(-1, 3, imageHeight, imageWidth)
x = F.leaky_relu(self.conv1(x), 0.2)
x = F.leaky_relu(self.BN_conv2(self.conv2(x)), 0.2)
x = F.leaky_relu(self.BN_conv3(self.conv3(x)), 0.2)
x = F.leaky_relu(self.BN_conv4(self.conv4(x)), 0.2)
return x
class Decoder_class(nn.Module):
def __init__(self):
super(Decoder_class, self).__init__()
self.Deconv1 = nn.ConvTranspose2d(NGF * 8, NGF * 4, 4, 2, 1) # 16->32
self.Deconv2 = nn.ConvTranspose2d(NGF * 4, NGF * 2, 4, 2, 1) # 32->64
self.Deconv3 = nn.ConvTranspose2d(NGF * 2, NGF * 1, 4, 2, 1) # 64->128
self.Deconv4 = nn.ConvTranspose2d(NGF * 1, 3, 4, 2, 1) # 128->256
self.BN_conv1 = nn.BatchNorm2d(NGF * 4)
self.BN_conv2 = nn.BatchNorm2d(NGF * 2)
self.BN_conv3 = nn.BatchNorm2d(NGF * 1)
nn.init.xavier_normal_(self.Deconv1.weight)
nn.init.xavier_normal_(self.Deconv2.weight)
nn.init.xavier_normal_(self.Deconv3.weight)
nn.init.xavier_normal_(self.Deconv4.weight)
def forward(self, x):
x = F.relu(self.BN_conv1(self.Deconv1(x)))
x = F.relu(self.BN_conv2(self.Deconv2(x)))
x = F.relu(self.BN_conv3(self.Deconv3(x)))
x = F.tanh(self.Deconv4(x))
return x
class Res_class(nn.Module):
def __init__(self):
super(Res_class, self).__init__()
self.conv1 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 2) # I/O same size
self.conv2 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 1)
self.conv3 = nn.Conv2d(NDF * 8, NDF * 8, 3, 1, 1)
self.conv4 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 2)
self.conv5 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 1)
self.conv6 = nn.Conv2d(NDF * 8, NDF * 8, 3, 1, 1)
self.conv7 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 2)
self.conv8 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 1)
self.conv9 = nn.Conv2d(NDF * 8, NDF * 8, 3, 1, 1)
self.conv10 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 2)
self.conv11 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 1)
self.conv12 = nn.Conv2d(NDF * 8, NDF * 8, 3, 1, 1)
self.conv13 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 2)
self.conv14 = nn.Conv2d(NDF * 8, NDF * 8, 4, 1, 1)
self.conv15 = nn.Conv2d(NDF * 8, NDF * 8, 3, 1, 1)
self.BN_conv = nn.BatchNorm2d(NGF * 8)
nn.init.xavier_normal_(self.conv1.weight)
nn.init.xavier_normal_(self.conv2.weight)
nn.init.xavier_normal_(self.conv3.weight)
nn.init.xavier_normal_(self.conv4.weight)
nn.init.xavier_normal_(self.conv5.weight)
nn.init.xavier_normal_(self.conv6.weight)
nn.init.xavier_normal_(self.conv7.weight)
nn.init.xavier_normal_(self.conv8.weight)
nn.init.xavier_normal_(self.conv9.weight)
nn.init.xavier_normal_(self.conv10.weight)
nn.init.xavier_normal_(self.conv11.weight)
nn.init.xavier_normal_(self.conv12.weight)
nn.init.xavier_normal_(self.conv13.weight)
nn.init.xavier_normal_(self.conv14.weight)
nn.init.xavier_normal_(self.conv15.weight)
def forward(self, x):
res = x
x = F.leaky_relu(self.BN_conv(self.conv1(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv2(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv3(x)) + res, 0.2)
res = x
x = F.leaky_relu(self.BN_conv(self.conv4(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv5(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv6(x) + res), 0.2)
res = x
x = F.leaky_relu(self.BN_conv(self.conv7(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv8(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv9(x) + res), 0.2)
res = x
x = F.leaky_relu(self.BN_conv(self.conv10(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv11(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv12(x) + res), 0.2)
res = x
x = F.leaky_relu(self.BN_conv(self.conv13(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv14(x)), 0.2)
x = F.leaky_relu(self.BN_conv(self.conv15(x) + res), 0.2)
return x
class D_class(nn.Module):
def __init__(self):
super(D_class, self).__init__()
self.conv1 = nn.Conv2d(3, NDF * 1, 4, 2, 1) # 64->32
self.conv2 = nn.Conv2d(NDF * 1, NDF * 2, 4, 2, 1) # 32->16
self.conv3 = nn.Conv2d(NDF * 2, NDF * 4, 4, 2, 1) # 16->8
self.conv4 = nn.Conv2d(NDF * 4, NDF * 8, 4, 2, 1) # 8->4
self.conv5 = nn.Conv2d(NDF * 8, 1, 4, 1, 0) # 4->1
self.BN_conv2 = nn.BatchNorm2d(NDF * 2)
self.BN_conv3 = nn.BatchNorm2d(NDF * 4)
self.BN_conv4 = nn.BatchNorm2d(NDF * 8)
nn.init.xavier_normal_(self.conv1.weight)
nn.init.xavier_normal_(self.conv2.weight)
nn.init.xavier_normal_(self.conv3.weight)
nn.init.xavier_normal_(self.conv4.weight)
nn.init.xavier_normal_(self.conv5.weight)
def parallelPool(self, x):
xSize = x.size()
x = x.contiguous()
lrX = torch.chunk(x.view(xSize[0], xSize[1], xSize[2], -1, 2), 2, 4)
lubX = torch.chunk(lrX[0].contiguous().view(xSize[0], xSize[1], xSize[2], -1, 2), 2, 4)
rubX = torch.chunk(lrX[1].contiguous().view(xSize[0], xSize[1], xSize[2], -1, 2), 2, 4)
x1 = lubX[0].contiguous().view(xSize[0], xSize[1], xSize[2], round(xSize[3] / 2), round(xSize[4] / 2))
x2 = rubX[0].contiguous().view(xSize[0], xSize[1], xSize[2], round(xSize[3] / 2), round(xSize[4] / 2))
x3 = lubX[1].contiguous().view(xSize[0], xSize[1], xSize[2], round(xSize[3] / 2), round(xSize[4] / 2))
x4 = rubX[1].contiguous().view(xSize[0], xSize[1], xSize[2], round(xSize[3] / 2), round(xSize[4] / 2))
x = torch.cat((x1, x2, x3, x4), 1) # (N,C,D,H,W)->(N,C*4,D,H/2,W/2)
return x
def forward(self, x):
x = x.view(-1, 3, imageHeight, imageWidth)
x = F.leaky_relu(self.conv1(x), 0.2)
x = F.leaky_relu(self.BN_conv2(self.conv2(x)), 0.2)
x = F.leaky_relu(self.BN_conv3(self.conv3(x)), 0.2)
x = F.leaky_relu(self.BN_conv4(self.conv4(x)), 0.2)
x = F.sigmoid(self.conv5(x))
return x
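# Illustrative wiring sketch (commented out; assumes param.py provides NDF/NGF and an
# imageHeight/imageWidth divisible by 16, and note that SharpMSELoss calls .cuda() internally):
#     encoder, res, decoder = Encoder_class(), Res_class(), Decoder_class()
#     dummy = torch.randn(1, 3, imageHeight, imageWidth)
#     latent = res(encoder(dummy))        # (1, NDF*8, H/16, W/16)
#     restored = decoder(latent)          # (1, 3, H, W), in [-1, 1] because of tanh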
| StarcoderdataPython |
6643992 | <gh_stars>1-10
# -*- coding: utf-8 -*-
# Generated by Django 1.10.4 on 2017-02-06 09:49
from __future__ import unicode_literals
from django.db import migrations, models
import django.db.models.deletion
import django_extensions.db.fields
import modelcluster.contrib.taggit
import modelcluster.fields
import wagtail.wagtailcore.fields
class Migration(migrations.Migration):
initial = True
dependencies = [
('taggit', '0002_auto_20150616_2121'),
('wagtailcore', '0030_index_on_pagerevision_created_at'),
('wagtailimages', '0015_fill_filter_spec_field'),
]
operations = [
migrations.CreateModel(
name='Product',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
('description', wagtail.wagtailcore.fields.RichTextField()),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ProductImage',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('sort_order', models.IntegerField(blank=True, editable=False, null=True)),
('caption', models.CharField(blank=True, max_length=255)),
('image', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='+', to='wagtailimages.Image')),
('product', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='images', to='longclawproducts.Product')),
],
options={
'ordering': ['sort_order'],
'abstract': False,
},
),
migrations.CreateModel(
name='ProductIndex',
fields=[
('page_ptr', models.OneToOneField(auto_created=True, on_delete=django.db.models.deletion.CASCADE, parent_link=True, primary_key=True, serialize=False, to='wagtailcore.Page')),
],
options={
'abstract': False,
},
bases=('wagtailcore.page',),
),
migrations.CreateModel(
name='ProductTag',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('content_object', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='tagged_items', to='longclawproducts.Product')),
('tag', models.ForeignKey(on_delete=django.db.models.deletion.CASCADE, related_name='longclawproducts_producttag_items', to='taggit.Tag')),
],
options={
'abstract': False,
},
),
migrations.CreateModel(
name='ProductVariant',
fields=[
('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')),
('price', models.DecimalField(decimal_places=2, max_digits=12)),
('ref', models.CharField(max_length=32)),
('slug', django_extensions.db.fields.AutoSlugField(blank=True, editable=False, populate_from=('product', 'ref'), separator='')),
('description', wagtail.wagtailcore.fields.RichTextField()),
('stock', models.IntegerField(default=0)),
('product', modelcluster.fields.ParentalKey(on_delete=django.db.models.deletion.CASCADE, related_name='variants', to='longclawproducts.Product')),
],
options={
'abstract': False,
},
),
migrations.AddField(
model_name='product',
name='tags',
field=modelcluster.contrib.taggit.ClusterTaggableManager(blank=True, help_text='A comma-separated list of tags.', through='longclawproducts.ProductTag', to='taggit.Tag', verbose_name='Tags'),
),
]
| StarcoderdataPython |
3299666 | import tweepy
import os
import sys
ckey= ''
csecret= ''
atoken=''
asecret= ''
auth = tweepy.OAuthHandler(ckey,csecret) #Authenticating with twitter
auth.set_access_token(atoken,asecret) #Access tokens
api = tweepy.API(auth) #Performing Auth
filename = os.path.abspath(sys.argv[1])
status = sys.argv[2]
print "Posting Now"
api.update_with_media(filename,status=status)
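# Example invocation (illustrative; the script filename is hypothetical):
#     python tweet_image.py ./photo.jpg "Hello from the bot!"
# sys.argv[1] is resolved to an absolute image path and sys.argv[2] becomes the
# status text passed to api.update_with_media().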
| StarcoderdataPython |
12858800 | import configparser
import logging
from flask import Flask
from flask_pymongo import PyMongo
from Crypto.PublicKey import RSA
# Value mapping
LOG_LEVELS = {'INFO': logging.INFO, 'DEBUG': logging.DEBUG, 'WARN': logging.WARNING, 'ERROR': logging.ERROR}
# Create application
app = Flask(__name__)
# Read external config
config = configparser.ConfigParser()
config.read('auth-api.cfg')
app.config['MONGO_DBNAME'] = config['DATABASE']['dbName']
app.config['MONGO_URI'] = config['DATABASE']['dbURI']
logfile = config['LOGGING']['logFile']
loglevel = LOG_LEVELS[config['LOGGING']['logLevel']]
app.config['SERVER_NAME'] = config['APPLICATION']['serverName']
app.config['DEBUG'] = config['APPLICATION']['debug']
# Set up logging
fh = logging.FileHandler(logfile, mode='a', encoding='utf8', delay=False)
fmt = logging.Formatter('%(asctime)s %(levelname)s %(filename)s %(lineno)d %(message)s')
fh.setFormatter(fmt)
app.logger.addHandler(fh)
app.logger.setLevel(loglevel)
# Set up database
mongo = PyMongo(app)
# Get crypto
pubkeyfile = config['PKI']['pubkeyFile']
authpublickey = RSA.import_key(open(pubkeyfile).read()).exportKey()
keyfile = config['PKI']['keyFile']
passphrase = config['PKI']['passPhrase']
authprivatekey = RSA.import_key(open(keyfile).read(), passphrase=passphrase).exportKey()
# Get session secret
app.secret_key = config['SESSIONS']['secretKey']
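# Illustrative auth-api.cfg layout (values are hypothetical; the section and key
# names match the configparser lookups above):
#     [DATABASE]
#     dbName = auth
#     dbURI = mongodb://localhost:27017/auth
#     [LOGGING]
#     logFile = auth-api.log
#     logLevel = INFO
#     [APPLICATION]
#     serverName = localhost:5000
#     debug = False
#     [PKI]
#     pubkeyFile = keys/public.pem
#     keyFile = keys/private.pem
#     passPhrase = change-me
#     [SESSIONS]
#     secretKey = change-me-too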
| StarcoderdataPython |
4875293 | <gh_stars>0
from pipelining.pipe_root import ConfigRoot
from etl import maxqdata_manager, gold_data_transform_rules
from pipelining import data_flow_registry
from IPython import embed
import main
ConfigRoot.articles_xml_directory = data_flow_registry.maxqdata_data["md1"]["articles_xml_directory"]
ConfigRoot.annotations_xlsx_file_path = data_flow_registry.maxqdata_data["md1"]["annotations_xlsx_file_path"]
class ConfigTransformArticles(ConfigRoot):
maxqdata_gold_data_transform_function = data_flow_registry.gold_data["g1"]["maxqdata_specific_processing"]
gold_data_json_path = data_flow_registry.gold_data["g1"]["path"]
class ConfigTransformSentencesSm(ConfigRoot):
maxqdata_gold_data_transform_function = data_flow_registry.gold_data["g2"]["maxqdata_specific_processing"]
gold_data_json_path = data_flow_registry.gold_data["g2"]["path"]
spacy_base_model = data_flow_registry.gold_data["g2"]["spacy_base_model"]
class ConfigTransformSentencesLg(ConfigRoot):
maxqdata_gold_data_transform_function = data_flow_registry.gold_data["g3"]["maxqdata_specific_processing"]
gold_data_json_path = data_flow_registry.gold_data["g3"]["path"]
spacy_base_model = data_flow_registry.gold_data["g3"]["spacy_base_model"]
def run():
gdc_articles = main.load_from_maxqdata(ConfigTransformArticles)
gdc_sentences_sm = main.load_from_maxqdata(ConfigTransformSentencesSm)
gdc_sentences_lg = main.load_from_maxqdata(ConfigTransformSentencesLg)
main.persist_gold_data(ConfigTransformArticles, gdc_articles)
main.persist_gold_data(ConfigTransformSentencesSm, gdc_sentences_sm)
main.persist_gold_data(ConfigTransformSentencesLg, gdc_sentences_lg)
embed() | StarcoderdataPython |
1616327 | """
This module provides different kinds of iterators, all wrapped by the
DataIterator class which should generally be the only one used in practice.
The BaseIterator class allows "peeking" `checklines` lines into the data --
even if it's a consumable iterator -- in order to figure out what the dialect
is and therefore decide whether the data is GFF or GTF format, which is
important for figuring out how to construct the database.
"""
import os
import tempfile
import itertools
from gffutils.feature import feature_from_line
from gffutils.interface import FeatureDB
from gffutils import helpers
from textwrap import dedent
import six
from six.moves.urllib.request import urlopen
if six.PY3:
from urllib import parse as urlparse
else:
import urlparse
def peek(it, n):
_peek = []
for _ in range(n):
try:
_peek.append(six.next(it))
except StopIteration:
break
return _peek, itertools.chain(_peek, it)
class Directive(object):
def __init__(self, line):
self.info = line
class _BaseIterator(object):
def __init__(self, data, checklines=10, transform=None,
force_dialect_check=False, dialect=None):
"""
Base class for iterating over features. In general, you should use
DataIterator -- so see the docstring of class for argument
descriptions.
All subclasses -- _FileIterator, _URLIterator, _FeatureIterator,
_StringIterator -- gain the following behavior:
- self.current_item and self.current_item_number are set on every
iteration. This is very useful for debugging, or reporting to
the user exactly what item or line number caused the issue.
- transform a Feature before it gets yielded, filter out a Feature
- auto-detect dialect by peeking `checklines` items into the
iterator, and then re-reading those, applying the detected
dialect. If multiple dialects are found, use
helpers._choose_dialect to figure out the best one.
- keep track of directives
"""
self.data = data
self.checklines = checklines
self.current_item = None
self.current_item_number = None
self.dialect = None
self._observed_dialects = []
self.directives = []
self.transform = transform
self.warnings = []
if force_dialect_check and dialect is not None:
raise ValueError("force_dialect_check is True, but a dialect "
"is provided")
if force_dialect_check:
# In this case, self.dialect remains None. When
# parser._split_keyvals gets None as a dialect, it tries to infer
# a dialect.
self._iter = self._custom_iter()
elif dialect is not None:
self._observed_dialects = [dialect]
self.dialect = helpers._choose_dialect(self._observed_dialects)
self._iter = self._custom_iter()
else:
# Otherwise, check some lines to determine what the dialect should
# be
self.peek, self._iter = peek(self._custom_iter(), checklines)
self._observed_dialects = [i.dialect for i in self.peek]
self.dialect = helpers._choose_dialect(self._observed_dialects)
def _custom_iter(self):
raise NotImplementedError("Must define in subclasses")
def __iter__(self):
for i in self._iter:
i.dialect = self.dialect
if self.transform:
i = self.transform(i)
if i:
yield i
else:
yield i
def _directive_handler(self, directive):
self.directives.append(directive[2:])
class _FileIterator(_BaseIterator):
"""
Subclass for iterating over features provided as a filename
"""
def open_function(self, data):
data = os.path.expanduser(data)
if data.endswith('.gz'):
import gzip
return gzip.open(data)
return open(data)
def _custom_iter(self):
valid_lines = 0
for i, line in enumerate(self.open_function(self.data)):
if isinstance(line, six.binary_type):
line = line.decode('utf-8')
line = line.rstrip('\n\r')
self.current_item = line
self.current_item_number = i
if line == '##FASTA' or line.startswith('>'):
return
if line.startswith('##'):
self._directive_handler(line)
continue
if line.startswith(('#')) or len(line) == 0:
continue
# (If we got here it should be a valid line)
valid_lines += 1
yield feature_from_line(line, dialect=self.dialect)
class _UrlIterator(_FileIterator):
"""
Subclass for iterating over features provided as a URL
"""
def open_function(self, data):
response = urlopen(data)
# ideas from
# http://stackoverflow.com/a/17537107
# https://rationalpie.wordpress.com/2010/06/02/\
# python-streaming-gzip-decompression/
if data.endswith('.gz'):
import zlib
d = zlib.decompressobj(16 + zlib.MAX_WBITS)
READ_BLOCK_SIZE = 1024
def _iter():
last_line = ""
while True:
data = response.read(READ_BLOCK_SIZE)
if not data:
break
data = "".join((last_line, d.decompress(data).decode()))
lines = data.split('\n')
last_line = lines.pop()
for line in lines:
yield line + '\n'
yield last_line
return _iter()
else:
return response
class _FeatureIterator(_BaseIterator):
"""
Subclass for iterating over features that are already in an iterator
"""
def _custom_iter(self):
for i, feature in enumerate(self.data):
self.current_item = feature
self.current_item_number = i
yield feature
class _StringIterator(_FileIterator):
"""
Subclass for iterating over features provided as a string (e.g., from
file.read())
"""
def _custom_iter(self):
self.tmp = tempfile.NamedTemporaryFile(delete=False)
data = dedent(self.data)
if isinstance(data, six.text_type):
data = data.encode('utf-8')
self.tmp.write(data)
self.tmp.close()
self.data = self.tmp.name
for feature in super(_StringIterator, self)._custom_iter():
yield feature
os.unlink(self.tmp.name)
def is_url(url):
"""
Check to see if a URL has a valid protocol.
Parameters
----------
url : str or unicode
Returns
-------
True if `url` has a valid protocol False otherwise.
"""
try:
return urlparse.urlparse(url).scheme in urlparse.uses_netloc
except:
return False
def DataIterator(data, checklines=10, transform=None,
force_dialect_check=False, from_string=False, **kwargs):
"""
Iterate over features, no matter how they are provided.
Parameters
----------
data : str, iterable of Feature objs, FeatureDB
`data` can be a string (filename, URL, or contents of a file, if
from_string=True), any arbitrary iterable of features, or a FeatureDB
(in which case its all_features() method will be called).
checklines : int
Number of lines to check in order to infer a dialect.
transform : None or callable
If not None, `transform` should accept a Feature object as its only
argument and return either a (possibly modified) Feature object or
a value that evaluates to False. If the return value is False, the
feature will be skipped.
force_dialect_check : bool
If True, check the dialect of every feature. Thorough, but can be
slow.
from_string : bool
If True, `data` should be interpreted as the contents of a file rather
than the filename itself.
dialect : None or dict
Provide the dialect, which will override auto-detected dialects. If
provided, you should probably also use `force_dialect_check=False` and
`checklines=0` but this is not enforced.
"""
_kwargs = dict(data=data, checklines=checklines, transform=transform,
force_dialect_check=force_dialect_check, **kwargs)
if isinstance(data, six.string_types):
if from_string:
return _StringIterator(**_kwargs)
else:
if os.path.exists(data):
return _FileIterator(**_kwargs)
elif is_url(data):
return _UrlIterator(**_kwargs)
elif isinstance(data, FeatureDB):
_kwargs['data'] = data.all_features()
return _FeatureIterator(**_kwargs)
else:
return _FeatureIterator(**_kwargs)
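# Illustrative usage sketch (the GFF filename is hypothetical):
#     for feature in DataIterator('annotations.gff3', checklines=10):
#         print(feature.id, feature.featuretype)
# DataIterator also accepts URLs, iterables of Feature objects, a FeatureDB, or raw
# text with from_string=True, as described in the docstring above.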
| StarcoderdataPython |
3242399 | # Copyright (c) 2017, <NAME>, and <NAME>
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# * Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# * Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# * Neither the name of the Pontifical Catholic University of Rio Grande do Sul nor the
# names of its contributors may be used to endorse or promote products
# derived from this software without specific prior written permission.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <NAME>, and <NAME> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import os
# Settings
class ACSettings(object):
pass
# Paths
class ACPaths(object):
# Folders
work_folder = "work/ACRun/"
script_folder = os.environ["EDGE_ROOT"] + "/scripts/fixdelay/AC/"
# Files (Check Paths)
ACCheckPaths_script = work_folder + "ACCheckPaths.tcl"
available_paths_report = work_folder + "available_paths.rpt"
# Files (Functions)
ACSynth_functions = work_folder + "AC_functions.tcl"
ACSynth_warnings = work_folder + "AC_warnings.tcl"
# Files
ACInit = script_folder + "ACInit.py"
ACInit_script = work_folder + "ACInit.tcl"
ACStart = script_folder + "ACStart.py"
ACStart_script = work_folder + "ACStart.tcl"
# Temporary Files
ACSynth_temp = work_folder + "ACSynth.data"
# Commands
class ACCommand(object):
# AC Command Names
reportConstraints = "AC_report_constraints"
checkConstraints = "AC_check_constraints"
setConstraints = "AC_set_constraints"
# DC/ICC Commands
setMinDelay = "custom_set_min_delay"
# External Commands
getDelay = "custom_get_delay"
maxOfArray = "custom_max"
report_paths = "custom_report_existent"
# TCL Variables
class ACVar(object):
prefix = "AC_"
# Global Vars
maxIterations = prefix + "maxIterations"
constraintMet = prefix + "allConstOK"
# Shared Vars
sharedBase = prefix + "aux_base"
sharedEnforced = prefix + "aux_enforced"
sharedDelta = prefix + "aux_delta"
sharedAux = prefix + "aux"
# Path and Constraint
path = prefix + "path"
constraint = prefix + "cnst"
# Base Object
class ACObject(object):
def __init__(self):
super(ACObject, self).__init__()
self.id = ["ACObject"]
def getID(self):
return self.id[-1]
def isInstance(self, className):
return (className in self.id)
# Exception Classes
class ACException(Exception):
def __init__(self, origin, cause):
self.origin = origin
self.cause = cause
def description(self):
return self.origin + " - " + self.cause
class ACConstraintException(ACException):
def __init__(self, cause):
origin = "Constraint"
ACException.__init__(self, origin, cause)
class ACPathException(ACException):
def __init__(self, cause):
origin = "Path"
ACException.__init__(self, origin, cause)
class ACSetException(ACException):
def __init__(self, cause):
origin = "Set"
ACException.__init__(self, origin, cause)
class ACRelativeTimingException(ACException):
def __init__(self, cause):
origin = "Constraint"
ACException.__init__(self, origin, cause)
class ACDesignException(ACException):
def __init__(self, cause):
origin = "Design"
ACException.__init__(self, origin, cause)
class ACParserException(ACException):
def __init__(self, cause):
origin = "Parser"
ACException.__init__(self, origin, cause)
class ACSynthException(ACException):
def __init__(self, cause):
origin = "Synthesis"
ACException.__init__(self, origin, cause)
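# Illustrative usage sketch of the exception hierarchy above (assumes EDGE_ROOT is
# set in the environment so this module imports cleanly):
if __name__ == "__main__":
    try:
        raise ACConstraintException("min delay is larger than max delay")
    except ACException as exc:
        # Prints: Constraint - min delay is larger than max delay
        print(exc.description())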
| StarcoderdataPython |
3298179 | """
author: <NAME>
time:
link:
"""
import numpy as np
from scipy import stats
from sklearn.metrics import f1_score, classification_report
def select_threshold(X, Xval, yval):
"""
use CV data to find the best epsilon
"""
# create multivariate model using training data
mu = X.mean(axis=0)
cov = np.cov(X.T)
multi_normal = stats.multivariate_normal(mu, cov)
# this is key, use CV data for fine tuning hyper parameters
pval = multi_normal.pdf(Xval)
# set up epsilon candidates
epsilon = np.linspace(np.min(pval), np.max(pval), num=10000)
# calculate f-score
fs = []
for e in epsilon:
y_pred = (pval <= e).astype('int')
fs.append(f1_score(yval, y_pred))
# find the best f-score
argmax_fs = np.argmax(fs)
return epsilon[argmax_fs], fs[argmax_fs]
def predict(X, Xval, e, Xtest, ytest):
"""
with optimal epsilon, combine X, Xval and predict Xtest
"""
Xdata = np.concatenate((X, Xval), axis=0)
mu = Xdata.mean(axis=0)
cov = np.cov(Xdata.T)
multi_normal = stats.multivariate_normal(mu, cov)
# calculate probability of test data
pval = multi_normal.pdf(Xtest)
y_pred = (pval <= e).astype('int')
print(classification_report(ytest, y_pred))
return multi_normal, y_pred
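if __name__ == '__main__':
    # Minimal illustrative run on synthetic 2-D data (not a real dataset): a handful
    # of points are shifted far from the training distribution to act as anomalies.
    rng = np.random.RandomState(0)
    X = rng.normal(0, 1, size=(300, 2))
    Xval = rng.normal(0, 1, size=(100, 2))
    yval = np.zeros(100, dtype=int)
    Xval[:5] += 6
    yval[:5] = 1
    Xtest = rng.normal(0, 1, size=(100, 2))
    ytest = np.zeros(100, dtype=int)
    Xtest[:5] -= 6
    ytest[:5] = 1
    e, fs = select_threshold(X, Xval, yval)
    print('best epsilon: {0}, F1 on CV set: {1}'.format(e, fs))
    predict(X, Xval, e, Xtest, ytest)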
| StarcoderdataPython |
3869 | <reponame>sflippl/patches<gh_stars>0
"""Datasets of latent predictability tasks.
"""
from .pilgrimm import *
| StarcoderdataPython |
11229454 | #-*-coding:utf-8-*-
import os
from app import app
from models import db , User, Role, Device, Platforms_info, Ip, Project, App
from werkzeug.security import generate_password_hash
def build_first_db():
"""
Populate a small db with some example entries.
"""
db.drop_all()
db.create_all()
anonymous = Role(name = u'Anonymous', description = u'匿名用户')
admin = Role(name = u'Admin', description = u'管理员')
develop = Role(name = 'Develop', description = u'开发人员')
test = Role(name = 'Test', description = u'测试人员')
ops = Role(name = 'Ops', description = u'运维人员')
admin_user = User(real_name = u'admin',
email = u'<EMAIL>',
login=u"admin",
password=generate_password_hash(u"<PASSWORD>"),
roles=[admin]
)
anonymous_user = User(real_name = u'anonymous',
email = u'<EMAIL>',
login=u"anonymous",
password=generate_password_hash(u"<PASSWORD>"),
roles=[anonymous]
)
ip1 = Ip(isp = u'电信',
use = u'在用',
ip=u"1.1.1.1",
mask=(u"255.255.255.0"),
mac=(u"44a8-422a-20ff"),
route=(u"172.16.58.3"),
switch_port=(u"5F-U09 G1/0/32"),
)
ip2 = Ip(isp = u'电信',
use = u'在用',
ip=u"1.1.1.2",
mask=(u"255.255.255.0"),
mac=(u"44a8-422a-20ff"),
route=(u"172.16.58.3"),
switch_port=(u"5F-U09 G1/0/32"),
)
ip3 = Ip(isp = u'内网',
use = u'在用',
ip=u"1.1.1.3",
mask=(u"255.255.255.0"),
mac=(u"44a8-422a-20ff"),
route=(u"172.16.58.3"),
switch_port=(u"5F-U09 G1/0/32"),
)
ip4 = Ip(isp = u'联通',
use = u'在用',
ip=u"1.1.1.4",
mask=(u"255.255.255.0"),
mac=(u"44a8-422a-20ff"),
route=(u"172.16.58.3"),
switch_port=(u"5F-U09 G1/0/32"),
)
app1 = App(app = u'kf_scsa',
description=u"客服我也不知道",
ps=(u"没什么事"),
)
app2 = App(app = u'gamemanager',
description=u"游戏我也不知道",
ps=(u"没什么事"),
)
app3 = App(app = u'webPlatform',
description=u"公共我也不知道",
ps=(u"没什么事"),
)
app4 = App(app = u'wechat-server2',
description=u"wx我也不知道",
ps=(u"没什么事"),
)
project1 = Project(project = u'体彩项目', apps = [app1])
project2 = Project(project = u'福彩项目', apps = [app2])
project3 = Project(project = u'公共平台项目', apps = [app3])
project4 = Project(project = u'客服系统项目', apps = [app4])
device1 = Device(device_num = u'02-1331',
device_name = u'5F-U10',
idc=u"东莞",
location=(u"5F-U10"),
hardware_type=(u"DELL-2U"),
brand=(u"DELL"),
fast_repair_code=(u"没什么事"),
cpu=(u"没什么事"),
memory=(u"没什么事"),
disk=(u"没什么事"),
ips=[ip1],
apps = [app1],
)
device2 = Device(device_num = u'02-1331',
device_name = u'5F-U12',
idc=u"东莞",
location=(u"5F-U10"),
hardware_type=(u"DELL-2U"),
brand=(u"DELL"),
fast_repair_code=(u"没什么事"),
cpu=(u"没什么事"),
memory=(u"没什么事"),
disk=(u"没什么事"),
ips=[ip2],
apps = [app2],
)
platforms_info1 = Platforms_info(platform = u'阿里云管理控制台',
description = u'申请云服务器及域名解析',
url=u"http://www.aliyun.com/",
username=u"hhlyadmin",
password=(u"<PASSWORD>"),
ps=(u"登陆进入后,依次点击:\
订单管理-我的租用-最后面详细\
下方图标-进入之后\
点击IP即可查看流量图"
),
)
platforms_info2 = Platforms_info(platform = u'DNS盾',
description = u'13322.com域名A记录解析网站',
url=u"http://www.dnsdun.com",
username=u"<EMAIL>",
password=(u"<PASSWORD>3<PASSWORD>"),
ps=(u"登陆进入后"
),
)
db.session.add(anonymous)
db.session.add(admin)
db.session.add(develop)
db.session.add(test)
db.session.add(ops)
db.session.add(admin_user)
db.session.add(anonymous_user)
db.session.add(ip1)
db.session.add(ip2)
db.session.add(ip3)
db.session.add(ip4)
db.session.add(app1)
db.session.add(app2)
db.session.add(app3)
db.session.add(app4)
db.session.add(project1)
db.session.add(project2)
db.session.add(project3)
db.session.add(project4)
db.session.add(device1)
db.session.add(device2)
db.session.add(platforms_info1)
db.session.add(platforms_info2)
db.session.commit()
return
if __name__ == '__main__':
# Build a sample db on the fly, if one does not exist yet.
app_dir = os.path.realpath(os.path.dirname(__file__))
database_path = os.path.join(app_dir, app.config['DATABASE_FILE'])
if not os.path.exists(database_path):
build_first_db()
# Start app
app.run(debug=True) | StarcoderdataPython |
5164613 | import json
class ResponseOverride(Exception):
def __init__(self, status_code: int = None, url: str = None, link: str = None,
provider: str = None, message: str = None):
self.status_code = status_code
self.url = url
self.text = link
self.provider = provider
self.message = message
self.attributes = {"status_code": self.status_code, "url": self.url, "link": self.text,
"provider": self.provider, "message": self.message}
self.attributes_str = json.dumps(self.attributes)
def get_status_code(self):
return self.status_code
def get_url(self):
return self.url
def get_link(self):
return self.text
def get_provider(self):
return self.provider
def get_message(self):
return self.message
def json(self):
return json.loads(self.attributes_str)
def to_dict(self):
return {"status_code": self.status_code, "url": self.url, "link": self.text,
"provider": self.provider, "message": self.message}
| StarcoderdataPython |
3529514 | no_of_teams = int(input())
teams = {}
for i in range(no_of_teams):
team = input()
teams[team] = [0,0.0]
num_of_matches = int(input())
for i in range(num_of_matches):
pointsA,pointsB = 0,0
teamA,teamA_runrate, teamB,teamB_runrate = input().split(" ")
teamA_runrate,teamB_runrate = float(teamA_runrate),float(teamB_runrate)
if (teamA_runrate > teamB_runrate):
pointsA += 2
if (teamA_runrate < teamB_runrate):
pointsB += 2
if (teamA_runrate == teamB_runrate):
pointsA += 1
pointsB += 1
teams[teamA][0] += pointsA
teams[teamA][1] += teamA_runrate
teams[teamB][0] += pointsB
teams[teamB][1] += teamB_runrate
teams = sorted(teams.items(),key=lambda item:item[1][1],reverse=True)
for key,value in teams:
print('{} {} {}'.format(key,value[0],format(value[1],'.2f')))
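# Illustrative input (hypothetical values), matching the reads above:
#     2                             <- number of teams
#     India
#     Australia
#     1                             <- number of matches
#     India 8.50 Australia 7.25     <- teamA, teamA run rate, teamB, teamB run rate
# Each team is then printed with its total points and cumulative run rate (two
# decimal places), sorted by run rate in descending order.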
| StarcoderdataPython |
11274983 | # -*- coding: utf-8 -*-
"""Tests for the cli interface."""
import pytest
import shutil
import os
import stat
import tarfile
from pathlib import Path
from click.testing import CliRunner
from clitools import fs_lint
@pytest.fixture
def change_test_dir(request):
"""Bla."""
os.chdir(request.fspath.dirname)
yield
os.chdir(request.config.invocation_dir)
@pytest.fixture
def testfilesystem(tmp_path, request):
"""Generate a test filesystem."""
tarball = os.path.join(request.fspath.dirname, 'resources', 'test-fs.tgz')
os.chdir(tmp_path)
tar = tarfile.open(tarball)
tar.extractall()
tar.close()
yield tmp_path / 'test-fs'
shutil.rmtree(tmp_path / 'test-fs')
def test_fs(testfilesystem):
"""Test cli run on example fs."""
runner = CliRunner()
with runner.isolated_filesystem(temp_dir=str(testfilesystem)):
result = runner.invoke(fs_lint.fs_lint, [
'--color',
'never',
'--verbose',
'--fix',
'--experimental',
'--statistics',
str(testfilesystem)
]
)
assert 'FAIL' in result.output
assert 42 == len(result.output.splitlines())
assert 1 == result.exit_code
def test_empty_run():
"""Test empty cli run."""
runner = CliRunner()
result = runner.invoke(fs_lint.fs_lint)
assert 'Usage' in result.output
assert 1 == result.exit_code
result = runner.invoke(fs_lint.fs_lint, ['--help'])
assert 'Usage' in result.output
assert 0 == result.exit_code
result = runner.invoke(fs_lint.fs_lint, ['--list-tests'])
assert 'Test if' in result.output
assert 0 == result.exit_code
def test_TestPermissionsWorldWritable(tmp_path):
"""Test for TestPermissionsWorldWritable."""
# setup test case
test_path = tmp_path / 'testfile'
test_path.touch()
os.chmod(test_path, stat.S_IWOTH)
# run test case
test = fs_lint.TestPermissionsWorldWritable()
assert test(test_path, test_path.lstat()) is False
test.fix(test_path, test_path.lstat())
assert test(test_path, test_path.lstat()) is True
# cleanup
test_path.unlink()
def test_TestPermsWorldReadable(tmp_path):
"""Test for TestPermissionsWorldReadable."""
# setup test case
test_path = tmp_path / 'testfile'
test_path.touch()
os.chmod(test_path, stat.S_IROTH)
# run test case
test = fs_lint.TestPermissionsWorldReadable()
assert test(test_path, test_path.lstat()) is False
test.fix(test_path, test_path.lstat())
assert test(test_path, test_path.lstat()) is True
# cleanup
test_path.unlink()
def test_TestPermsWorldReadableDir(tmp_path):
"""Test for TestPermissionsWorldReadableDir."""
# setup test case
test_path = tmp_path / 'testdir'
test_path.mkdir()
test_path.chmod(0o755)
# run test case
test = fs_lint.TestPermissionsWorldReadableDir()
assert test(test_path, test_path.lstat()) is False
test.fix(test_path, test_path.lstat())
assert test(test_path, test_path.lstat()) is True
# cleanup
test_path.rmdir()
@pytest.mark.parametrize(
'name, perms, shouldpass',
[
('orphanex', 0o661, False),
('orphanex', 0o616, False),
('orphanex', 0o166, False),
('okex', 0o766, True),
('okex', 0o756, True),
('okex', 0o757, True),
('okex', 0o566, True),
('okex', 0o656, True),
('okex', 0o665, True),
('noex', 0o644, True),
('noex', 0o464, True),
('noex', 0o446, True),
]
)
def test_TestPermsOrphanExecutableBit(tmp_path, name, perms, shouldpass):
"""Test for TestPermissionsOrphanExecutableBit."""
# setup test case
test_path = Path(tmp_path / name)
test_path.touch()
test_path.chmod(perms)
# run test case
test = fs_lint.TestPermissionsOrphanExecutableBit()
assert test(test_path, test_path.lstat()) is shouldpass
# TODO: fix
# test.fix(test_path, test_path.lstat())
# assert test(test_path, test_path.lstat()) is True
# cleanup
test_path.unlink()
@pytest.mark.parametrize(
'bad, fixed, shouldpass',
[
('testfile', 'testfile', True),
('test file', 'test file', True),
('testfile ', 'testfile ', True),
(' testfile', 'testfile', False),
(' testfile', 'testfile', False),
(' testfile', 'testfile', False),
(' test file', 'test file', False),
]
)
def test_TestNameSpaceAtStart(tmp_path, bad, fixed, shouldpass):
"""Test for TestNameSpaceAtStart."""
# setup test case
test_path = Path(tmp_path / bad)
fixed_path = Path(tmp_path / fixed)
test_path.touch()
# run test case
test = fs_lint.TestNameSpaceAtStart()
assert test(test_path, test_path.lstat()) is shouldpass, 'Test should pass assertion'
test.fix(test_path, test_path.lstat())
assert shouldpass is test_path.exists(), 'Assert original file {0} was renamed according to expectation'.format(test_path)
assert fixed_path.exists(), 'Assert new file after rename exists "{0}"->"{1}"'.format(test_path, fixed_path)
assert test(fixed_path, fixed_path.lstat()) is True, 'Fixed file should pass test'
# cleanup
if test_path.exists():
test_path.unlink()
if fixed_path.exists():
fixed_path.unlink()
@pytest.mark.parametrize(
'bad, fixed, shouldpass',
[
('testfile', 'testfile', True),
('test file', 'test file', True),
(' testfile', ' testfile', True),
('testfile ', 'testfile', False),
('testfile ', 'testfile', False),
('testfile ', 'testfile', False),
('test file ', 'test file', False),
]
)
def test_TestNameSpaceAtEnd(tmp_path, bad, fixed, shouldpass):
"""Test for TestNameSpaceAtEnd."""
# setup test case
test_path = Path(tmp_path / bad)
fixed_path = Path(tmp_path / fixed)
test_path.touch()
# run test case
test = fs_lint.TestNameSpaceAtEnd()
assert test(test_path, test_path.lstat()) is shouldpass, 'Test should pass assertion for "{0}"'.format(test_path)
test.fix(test_path, test_path.lstat())
assert shouldpass is test_path.exists(), 'Assert original file {0} was renamed according to expectation'.format(test_path)
assert fixed_path.exists(), 'Assert new file after rename exists "{0}"->"{1}"'.format(test_path, fixed_path)
assert test(fixed_path, fixed_path.lstat()) is True, 'Fixed file should pass test'
# cleanup
if test_path.exists():
test_path.unlink()
if fixed_path.exists():
fixed_path.unlink()
@pytest.mark.parametrize(
'bad, fixed, shouldpass',
[
('testfile', 'testfile', True),
('test file', 'test file', True),
(' testfile', ' testfile', True),
('testfile ', 'testfile ', True),
('testfile ', 'testfile ', False),
('testfile ', 'testfile ', False),
('test file ', 'test file ', False),
('test file ', 'test file ', False),
(' test file ', ' test file ', False),
('test file', 'test file', False),
]
)
def test_TestNameSpaceDouble(tmp_path, bad, fixed, shouldpass):
"""Test for TestNameSpaceDouble."""
# setup test case
test_path = Path(tmp_path / bad)
fixed_path = Path(tmp_path / fixed)
test_path.touch()
# run test case
test = fs_lint.TestNameSpaceDouble()
assert test(test_path, test_path.lstat()) is shouldpass, 'Test should pass assertion for "{0}"'.format(test_path)
test.fix(test_path, test_path.lstat())
assert shouldpass is test_path.exists(), 'Assert original file {0} was renamed according to expectation'.format(test_path)
assert fixed_path.exists(), 'Assert new file after rename exists "{0}"->"{1}"'.format(test_path, fixed_path)
assert test(fixed_path, fixed_path.lstat()) is True, 'Fixed file should pass test'
# cleanup
if test_path.exists():
test_path.unlink()
if fixed_path.exists():
fixed_path.unlink()
@pytest.mark.parametrize(
'bad, fixed, shouldpass',
[
('testfile', 'testfile', True),
('test file', 'test file', True),
('test\tfile', 'testfile', False),
('testfile\x08', 'testfile', False),
]
)
def test_TestNameControlChars(tmp_path, bad, fixed, shouldpass):
"""Test for TestNameControlChars."""
# setup test case
test_path = Path(tmp_path / bad)
fixed_path = Path(tmp_path / fixed)
test_path.touch()
# run test case
test = fs_lint.TestNameControlChars()
assert test(test_path, test_path.lstat()) is shouldpass, 'Test should pass assertion'
test.fix(test_path, test_path.lstat())
assert shouldpass is test_path.exists(), 'Assert original file {0} was renamed according to expectation'.format(test_path)
assert fixed_path.exists(), 'Assert new file after rename exists'
assert test(fixed_path, fixed_path.lstat()) is True, 'Fixed file should pass test'
# cleanup
if test_path.exists():
test_path.unlink()
if fixed_path.exists():
fixed_path.unlink()
| StarcoderdataPython |
3440462 | <reponame>project-k-0-1/project-k
""" Font awesome """
def get_font_awesome():
""" Gdt fontawsome library """
f_a = '<link rel="stylesheet" href="https://use.fontawesome.com/releases/v5.6.3/css/all.css">'
return_data = f_a
return return_data
| StarcoderdataPython |
11373362 | <gh_stars>10-100
import torch.utils.data
import torch.backends.cudnn as cudnn
from src.configs.setters.set_enums import *
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")
cudnn.benchmark = True # set to True only if the model inputs are a fixed size; otherwise it adds a lot of computational overhead
# task {Retrieval,Classification,Captioning,Summarization}
"""----------------------------------------------- TASK -------------------------------------------------------------"""
TASK = 'Captioning'
# if using COLAB
COLAB = False
"""--------------------------------------------- FINE TUNED ---------------------------------------------------------"""
# if fine-tuning is enabled (True), change the checkpoint paths accordingly
FINE_TUNED_PATH = False
"""-------------------------------------------- SPECIAL TOKENS ------------------------------------------------------"""
# tokenization parameters for AUXLM
SPECIAL_TOKENS = {"bos_token": "<start>",
"eos_token": "<end>",
"unk_token": "<unk>",
"pad_token": "<pad>"}
"""-------------------------------------------- GLOBAL PARAMS -------------------------------------------------------"""
# GLOBAL PARAMETERS
TOKENIZER = TOKENIZATION.PEGASUS.value
ARCHITECTURE = ARCHITECTURES.FUSION.value
DATASET = DATASETS.RSICD.value
CUSTOM_VOCAB = True # True if creating a custom vocab in order to reduce the size.
"""------------------------------------------------ MODELS ----------------------------------------------------------"""
# MODELS
ENCODER_MODEL = ENCODERS.EFFICIENT_NET_V2_IMAGENET_FINETUNED_AUGMENTED_CONTRASTIVE.value # which encoder is currently in use
AUX_LM = AUX_LMs.PEGASUS.value if TOKENIZER == TOKENIZATION.PEGASUS.value else None # which auxiliary LM is currently in use
"""------------------------------------------- TRAINING PARAMETERS --------------------------------------------------"""
ATTENTION = ATTENTION_TYPE.pyramid_attention.value # type of attention
OPTIMIZER = OPTIMIZERS.Adam_W.value
LOSS = LOSSES.SupConLoss.value if TASK == 'Classification' else LOSSES.Cross_Entropy.value
"""----------------------------------------------- ABLATIONS --------------------------------------------------------"""
PYRAMID_REDUCTION_LAYER = False
SELF_CRITICAL = False # not working
VISUALIZATION = False # True only for attention map visualization
if ARCHITECTURE == ARCHITECTURES.FUSION.value:
    # True when using multi-input for the Pegasus encoder, otherwise False
MULTI_INPUT = False
"""--------Types of Fusion--------"""
"""SUPPORTED: {None, cold, simple}"""
FUSION = 'cold'
REDUCTION_LAYER = True if FUSION == 'cold' else False
# concat only
CONCAT_ONLY = True if FUSION is None else False
if TASK == 'Classification':
EXTRA_EPOCHS = True
"""------------------------------------------------- PATHS ----------------------------------------------------------"""
# PATHS
RSICD_PATH = '../data/images/RSICD_images'
UCM_PATH = '../data/images/UCM_images'
SYDNEY_PATH = '../data/images/Sydney_images'
# CAPTIONS PATH
RSICD_CAPTIONS_PATH = '../data/captions/dataset_rsicd_modified.json'
UCM_CAPTIONS_PATH = '../data/captions/dataset_ucm_modified.json'
SYDNEY_CAPTIONS_PATH = '../data/captions/dataset_sydney_modified.json'
# INPUT CLASSES PATH
RSICD_CLASSES_PATH = '../../data/classification/classes_rsicd'
UCM_CLASSES_PATH = '../data/classification/classes_ucm'
SYDNEY_CLASSES_PATH = '../data/classification/classes_sydney'
# CLASSIFICATION DATASET PATH
RSICD_CLASSIFICATION_DATASET_PATH = "../../data/classification/datasets/classification_dataset_rsicd.json"
UCM_CLASSIFICATION_DATASET_PATH = "../data/classification/datasets/classification_dataset_ucm.json"
SYDNEY_CLASSIFICATION_DATASET_PATH = "../data/classification/datasets/classification_dataset_sydney.json"
# FOR EVALUATION
JSON_refs_coco = 'test_coco_format'
bleurt_checkpoint = "metrics_files/bleurt/test_checkpoint" # uses Tiny
"""------------------------------------------------- LOADER ---------------------------------------------------------"""
# LOADERS
# which pre-trained encoder checkpoint to load from / save to;
# when doing classification pretraining, the loader path may differ from the
# current encoder (e.g. pretraining an EfficientNet on ImageNet)
ENCODER_LOADER = ENCODERS.EFFICIENT_NET_V2_IMAGENET_FINETUNED_AUGMENTED_CONTRASTIVE.value if TASK == 'Classification' else ENCODER_MODEL | StarcoderdataPython |
9773348 | <filename>tests/providers/aws/project_resources/key_pair.py<gh_stars>1-10
import unittest
import boto3
import os
from moto import mock_ec2
from spotty.providers.aws.deployment.project_resources.key_pair import KeyPairResource
class TestKeyPairResource(unittest.TestCase):
def test_key_path(self):
region = 'eu-central-1'
project_name = 'TEST_PROJECT'
provider_name = 'aws'
key_resource = KeyPairResource(project_name, region, provider_name)
# check key path
key_name = 'spotty-key-%s-%s' % (project_name.lower(), region)
key_path = os.path.join(os.path.expanduser('~'), '.spotty', 'keys', provider_name, key_name)
self.assertEqual(key_resource.key_path, key_path)
@mock_ec2
def test_create_and_delete_key(self):
region = 'eu-central-1'
project_name = 'TEST_PROJECT'
provider_name = 'aws'
ec2 = boto3.client('ec2', region_name=region)
key_resource = KeyPairResource(project_name, region, provider_name)
# key doesn't exist
self.assertFalse(key_resource._ec2_key_exists())
# create the key
key_name = key_resource.get_or_create_key()
self.assertTrue(key_resource._ec2_key_exists())
self.assertTrue(os.path.isfile(key_resource.key_path))
with open(key_resource.key_path) as f:
key_content = f.read()
# get the existing key
key_resource.get_or_create_key()
with open(key_resource.key_path) as f:
same_key_content = f.read()
self.assertEqual(key_content, same_key_content)
# create the key and rewrite the key file
ec2.delete_key_pair(KeyName=key_name)
self.assertFalse(key_resource._ec2_key_exists())
self.assertTrue(os.path.isfile(key_resource.key_path))
key_resource.get_or_create_key()
self.assertTrue(key_resource._ec2_key_exists())
self.assertTrue(os.path.isfile(key_resource.key_path))
with open(key_resource.key_path) as f:
new_key_content = f.read()
self.assertNotEqual(key_content, new_key_content)
# recreate the key if the key file doesn't exist
os.unlink(key_resource.key_path)
self.assertFalse(os.path.isfile(key_resource.key_path))
key_resource.get_or_create_key()
self.assertTrue(key_resource._ec2_key_exists())
self.assertTrue(os.path.isfile(key_resource.key_path))
# delete key
key_resource.delete_key()
self.assertFalse(key_resource._ec2_key_exists())
self.assertFalse(os.path.isfile(key_resource.key_path))
if __name__ == '__main__':
unittest.main()
| StarcoderdataPython |
8078148 | <reponame>koyo-jakanees/qgis-stac-browser
import os
import json
from ..models.api import API
class Config:
def __init__(self):
self._json = None
self.load()
def load(self):
if not os.path.exists(self.path):
self._json = {}
self.save()
else:
with open(self.path, 'r') as f:
self._json = json.load(f)
def save(self):
config = {
'apis': [api.json for api in self.apis],
'download_directory': self.download_directory,
'last_update': self.last_update,
'api_update_interval': self.api_update_interval
}
with open(self.path, 'w') as f:
f.write(json.dumps(config))
@property
def path(self):
return os.path.join(
os.path.split(os.path.dirname(__file__))[0],
'config.json'
)
@property
def apis(self):
apis = self._json.get('apis', None)
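        # fall back to the bundled default STAC API endpoints when none have been configured yet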
if apis is None:
apis = [
{
"id": "default-staccato",
"href": "https://stac.boundlessgeo.io",
},
{
"id": "default-sat-api",
"href": "https://sat-api.developmentseed.org/stac",
}
# {
# "id": "default-astraea",
# "href": "https://stac.astraea.earth/api/v2",
# }
]
return [API(api) for api in apis]
@apis.setter
def apis(self, apis):
self._json['apis'] = [api.json for api in apis]
@property
def last_update(self):
return self._json.get('last_update', None)
@property
def api_update_interval(self):
return self._json.get('api_update_interval', 60 * 60 * 24)
@last_update.setter
def last_update(self, value):
self._json['last_update'] = value
@property
def download_directory(self):
if self._json.get('download_directory', None) is None:
return os.environ.get('HOME', '')
return self._json.get('download_directory', '')
@download_directory.setter
def download_directory(self, value):
self._json['download_directory'] = value
| StarcoderdataPython |
6520714 | import json
import os
from collections import OrderedDict
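# object_pairs_hook=OrderedDict preserves the key order of the JSON files in the loaders below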
def load_characters():
fname = os.path.join(os.path.dirname(__file__), 'charactersdata.json')
with open(fname, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
def load_locations():
fname = os.path.join(os.path.dirname(__file__), 'locationsdata.json')
    with open(fname, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict)
def load_bending():
fname = os.path.join(os.path.dirname(__file__), 'bendingdata.json')
    with open(fname, 'r') as f:
return json.load(f, object_pairs_hook=OrderedDict) | StarcoderdataPython |
3292460 | <gh_stars>1-10
from google.cloud import language_v1
from google.cloud.language_v1 import enums
from concurrent.futures import ThreadPoolExecutor
from functools import partial
import csv, os, datetime, asyncio
# define only once
client = language_v1.LanguageServiceClient()
_executor = ThreadPoolExecutor()
async def run(content, mode, queryCSV, argCSV, sentenceCSV, failedCSV):
"""
Analyzing Sentiment
"""
# get content
if mode == "queries":
text_content = content
elif mode == "argument":
# doc, text_content = content[9], content[0]
doc, text_content = content[0], content[2]
# set options
type_ = enums.Document.Type.PLAIN_TEXT
language = "en"
document = {"content": text_content, "type": type_, "language": language}
encoding_type = enums.EncodingType.UTF8
    # the client call is blocking, so run it in the shared thread pool to keep the event loop responsive
loop = asyncio.get_running_loop()
response = False
try:
response = await loop.run_in_executor(
_executor,
partial(client.analyze_sentiment, document, encoding_type=encoding_type),
)
# fail
except Exception as err:
# put into failed.csv for later
with open(failedCSV, mode="a+", newline="") as failed:
failed_writer = csv.writer(
failed, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
if os.stat(failedCSV).st_size == 0:
failed_writer.writerow(["qid", "text", "err"])
failed_writer.writerow([doc, text_content, str(err)])
# dummy argument to csv
if mode == "argument":
with open(argCSV, mode="a+", newline="") as argument_sentiments_csv:
argument_sentiment_writer = csv.writer(
argument_sentiments_csv,
delimiter=",",
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
)
if os.stat(argCSV).st_size == 0:
argument_sentiment_writer.writerow(
["doc", "sentiment_score", "sentiment_magnitude"]
)
argument_sentiment_writer.writerow(
[doc, "XXX", "failed",]
)
# success
if mode == "queries" and response is not False:
# add queries to csv
with open(queryCSV, mode="w+", newline="") as sentiments_csv:
query_sentiment_writer = csv.writer(
sentiments_csv, delimiter=",", quotechar='"', quoting=csv.QUOTE_MINIMAL
)
if os.stat(queryCSV).st_size == 0:
query_sentiment_writer.writerow(
["qid", "text", "sentiment_score", "sentiment_magnitude"]
)
addition = ""
number = 1
for num, sentence in enumerate(response.sentences, start=1):
                # the query ids switch format partway through the data: plain 1-50, then zero-padded ids from the 51st sentence onward
if num == 51:
number = 1
addition = "000"
if addition == "000" and number == 10:
addition = "00"
if sentence.text.content[-1:] == ".":
sentence.text.content = sentence.text.content[:-1]
query_sentiment_writer.writerow(
[
addition + str(number),
sentence.text.content,
"{0:.4f}".format(sentence.sentiment.score),
"{0:.4f}".format(sentence.sentiment.magnitude),
]
)
number += 1
elif mode == "argument" and response is not False:
# add argument to csv
with open(argCSV, mode="a+", newline="") as argument_sentiments_csv:
argument_sentiment_writer = csv.writer(
argument_sentiments_csv,
delimiter=",",
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
)
if os.stat(argCSV).st_size == 0:
argument_sentiment_writer.writerow(
["doc", "sentiment_score", "sentiment_magnitude"]
)
argument_sentiment_writer.writerow(
[
doc,
"{0:.4f}".format(response.document_sentiment.score),
"{0:.4f}".format(response.document_sentiment.magnitude),
]
)
# add sentences to csv
with open(sentenceCSV, mode="a+", newline="") as sentence_sentiments_csv:
sentence_sentiment_writer = csv.writer(
sentence_sentiments_csv,
delimiter=",",
quotechar='"',
quoting=csv.QUOTE_MINIMAL,
)
if os.stat(sentenceCSV).st_size == 0:
sentence_sentiment_writer.writerow(
[
"doc",
"num",
"snippet",
"sentiment_score",
"sentiment_magnitude",
]
)
for num, sentence in enumerate(response.sentences, start=1):
sentence_sentiment_writer.writerow(
[
doc,
num,
sentence.text.content.split(" ", 1)[0],
"{0:.4f}".format(sentence.sentiment.score),
"{0:.4f}".format(sentence.sentiment.magnitude),
]
)
| StarcoderdataPython |
5176152 | #!/usr/bin/env python
import os
from os.path import join as pjoin, abspath
import torch
import numpy as np
import numpy.random as rand
from numpy import genfromtxt
import pandas as pd
from .dataset import Dataset
_default_folder = abspath(pjoin(__file__, "..", "..", "..", "datasets"))
DATASET_FOLDER = os.environ.get("DATASET_FOLDER", _default_folder)
class RealData(Dataset):
def __init__(self, X, Y, task, test_size, seed, X_test=None, Y_test=None,
labels=None, labels_test=None):
assert np.all(np.isfinite(X))
assert np.all(np.isfinite(Y))
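        # when no explicit test set is supplied, hold out a reproducible random fraction (test_size) of the rows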
if X_test is None and test_size:
mask = np.zeros(len(X), dtype=bool)
n_test = int(len(X) * test_size)
mask[:n_test] = True
np.random.RandomState(seed).shuffle(mask)
X_test = X[mask]
Y_test = Y[mask]
X = X[~mask]
Y = Y[~mask]
if labels is not None:
labels_test = labels[mask]
labels = labels[~mask]
else:
labels_test = None
        elif X_test is None:
            # neither an explicit test set nor a split was requested: use empty test tensors
            X_test = torch.empty((0, *X.shape[1:]))
            Y_test = torch.empty((0, *Y.shape[1:]))
self.X_train = torch.as_tensor(X)
self.Y_train = torch.as_tensor(Y)
self.X_test = torch.as_tensor(X_test)
self.Y_test = torch.as_tensor(Y_test)
self.labels_train = labels
self.labels_test = labels_test
super().__init__(task)
@classmethod
def uci_kin8nm(cls, test_size=0.2, seed=None):
data = genfromtxt(pjoin(DATASET_FOLDER, 'uci_kin8nm.csv'),
delimiter=',', skip_header=1)
X, Y = data[:, :8], data[:, 8:]
return cls(X, Y, 'regression', test_size, seed)
@classmethod
def uci_boston(cls, test_size=0.2, seed=None):
XY = pd.read_csv(pjoin(DATASET_FOLDER, 'uci_boston.csv'),
delim_whitespace=True).values
X, Y = XY[:, :-1], XY[:, -1:]
return cls(X, Y, 'regression', test_size, seed)
@classmethod
def uci_audit(cls, test_size=0.2, seed=None):
path = pjoin(DATASET_FOLDER, "uci_audit.csv")
XY = pd.read_csv(path, header=0).values
X, Y = XY[:, :-1], XY[:, -1:].astype(float)
# Remove location ID and risk columns
X = np.hstack((X[:, :1], X[:, 2:-1])).astype(float)
# Set NaN value to zero (index 642, column "Money_Value")
X[np.isnan(X)] = 0.0
assert np.all(np.isfinite(X))
return cls(X, Y, "binary_classification", test_size, seed)
@classmethod
def uci_cervical_cancer(cls, test_size=0.2, seed=None):
path = pjoin(DATASET_FOLDER, "uci_cervical_cancer.csv")
df = pd.read_csv(path)
Y = df["Dx:Cancer"].values.astype(float).reshape(-1, 1)
X = df.loc[:, df.columns != "Dx:Cancer"].values
        # FIXME: '?' marks missing values in this CSV; convert to NaN (zero-filled below)
X[X == "?"] = np.NAN
X = X.astype(float)
# Replace NaN's with 0 (although should be treated as uncertain)
X[np.isnan(X)] = 0.0
return cls(X, Y, "binary_classification", test_size, seed)
@classmethod
def uci_energy(cls, test_size=0.2, heat=True, seed=None):
path = pjoin(DATASET_FOLDER, "uci_energy.xlsx")
XY = pd.read_excel(path).values
X = XY[:, :-2]
j = -2 if heat else -1
Y = XY[:, j].reshape(-1, 1)
return cls(X, Y, 'regression', test_size, seed)
@classmethod
def uci_protein(cls, test_size=0.2, heat=True, seed=None):
path = pjoin(DATASET_FOLDER, "uci_protein.csv")
YX = pd.read_csv(path).values
Y, X = YX[:, :1], YX[:, 1:]
return cls(X, Y, 'regression', test_size, seed)
@classmethod
def uci_naval(cls, test_size=0.2, seed=None):
path = pjoin(DATASET_FOLDER, "uci_naval.txt")
data = np.loadtxt(path)
        X, Y = data[:, :16], data[:, 16:17] # the naval dataset has two target columns; only the first (column 16) is used here
return cls(X, Y, 'regression', test_size, seed)
@classmethod
def QPCR(cls, seed=None, test_size=0):
df = pd.read_csv(pjoin(DATASET_FOLDER, "qpcr.txt"))
X = df.values[:, 1:].astype(float)
labels = df.values[:, 0]
label_set = set(labels.tolist())
label_dict = {l: i for i, l in enumerate(label_set)}
Y = np.zeros((len(X), 1))
for i, l in enumerate(labels):
Y[i] = label_dict[l]
return cls(X, Y, "multi_classification", test_size, seed,
labels=labels)
@classmethod
def uci_concrete(cls, test_size=0.2, seed=None):
path = pjoin(DATASET_FOLDER, "uci_concrete.xls")
XY = pd.read_excel(path).values
X, Y = XY[:, :-1], XY[:, -1:]
return cls(X, Y, 'regression', test_size, seed)
@classmethod
def MNIST(cls, seed=None, test_size=0, digits=range(10),
n_observations=1000):
YX = pd.read_csv(pjoin(DATASET_FOLDER, "mnist_test.csv")).values
Y, X = YX[:, 0].astype(int), YX[:, 1:].astype(float)
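        # keep only the rows whose label is one of the requested digit classes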
bools = np.array([Y == i for i in digits])
bools = np.any(bools, axis=0)
X = X[bools]
Y = Y[bools]
if n_observations < len(X):
state = rand.RandomState(seed=seed)
idxs = state.choice(len(Y), n_observations, replace=False)
X = X[idxs]
Y = Y[idxs]
X /= X.max()
return cls(X, Y, "multi_classification", test_size, seed,
labels=Y)
| StarcoderdataPython |
11303862 | from django.urls import include, path
from rest_framework import routers
from . import views
router = routers.DefaultRouter()
urlpatterns = [
path('', include(router.urls)),
path('folders/', views.FolderList.as_view()),
path('folders/<int:pk>/', views.FolderDetail.as_view()),
path('files/', views.FileList.as_view()),
path('files/<int:pk>/', views.FileDetail.as_view()),
path('users/', views.UserList.as_view()),
path('users/<int:pk>/', views.UserDetail.as_view()),
path('api-auth/', include('rest_framework.urls', namespace='rest_framework'))
]
| StarcoderdataPython |
8016510 | __test__ = False
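# marker commonly used to tell test collectors (e.g. nose) not to collect this module; it is meant to be run directly as a script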
if __name__ == '__main__':
import MySQLdb as m
from eventlet import patcher
from eventlet.green import MySQLdb as gm
patcher.monkey_patch(all=True, MySQLdb=True)
patched_set = set(patcher.already_patched) - set(['psycopg'])
assert patched_set == frozenset(['MySQLdb', 'os', 'select', 'socket', 'thread', 'time'])
assert m.connect == gm.connect
print('pass')
| StarcoderdataPython |
3566650 | import os
import sys
import random
import math
import numpy as np
import skimage.io
import pickle
import matplotlib.pyplot as plt
import logging
logging.getLogger().setLevel(level=logging.INFO)
# Root directory of the project
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR) # To find local version of the library
# import mrcnn.model as modellib
# from mrcnn import visualize
import model as modellib
import visualize
import config as Configure
def group(lst1, n):
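    """Yield consecutive n-sized tuples from lst1; a trailing partial group is dropped."""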
for i in range(0, len(lst1), n):
val = lst1[i:i+n]
if len(val) == n:
yield tuple(val)
# Directory to save logs and trained model
MODEL_DIR = os.path.join(ROOT_DIR, "logs")
# Local path to trained weights file
COCO_MODEL_PATH = "/kaggle/input/model/mask_rcnn_coco.h5"
# Download COCO trained weights from Releases if needed
# if not os.path.exists(COCO_MODEL_PATH):
# utils.download_trained_weights(COCO_MODEL_PATH)
# Directory of images to run detection on
# IMAGE_DIR = '/kaggle/input/airbus-ship-detection/test/'
class InferenceConfig(Configure.Config):
# Set batch size to 1 since we'll be running inference on
# one image at a time. Batch size = GPU_COUNT * IMAGES_PER_GPU
GPU_COUNT = 1
NUM_CLASSES = 81
config = InferenceConfig()
batch_size = 15
config.IMAGES_PER_GPU = batch_size
config.BATCH_SIZE = config.IMAGES_PER_GPU * config.GPU_COUNT
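# Mask R-CNN builds each input batch from IMAGES_PER_GPU * GPU_COUNT images, so every call to model.detect() below must receive exactly batch_size images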
config.display()
# Create model object in inference mode.
model = modellib.MaskRCNN(mode="inference", model_dir=MODEL_DIR, config=config)
# Load weights trained on MS-COCO
model.load_weights(COCO_MODEL_PATH, by_name=True)
# COCO Class names
# Index of the class in the list is its ID. For example, to get ID of
# the teddy bear class, use: class_names.index('teddy bear')
class_names = ['BG', 'person', 'bicycle', 'car', 'motorcycle', 'airplane',
'bus', 'train', 'truck', 'boat', 'traffic light',
'fire hydrant', 'stop sign', 'parking meter', 'bench', 'bird',
'cat', 'dog', 'horse', 'sheep', 'cow', 'elephant', 'bear',
'zebra', 'giraffe', 'backpack', 'umbrella', 'handbag', 'tie',
'suitcase', 'frisbee', 'skis', 'snowboard', 'sports ball',
'kite', 'baseball bat', 'baseball glove', 'skateboard',
'surfboard', 'tennis racket', 'bottle', 'wine glass', 'cup',
'fork', 'knife', 'spoon', 'bowl', 'banana', 'apple',
'sandwich', 'orange', 'broccoli', 'carrot', 'hot dog', 'pizza',
'donut', 'cake', 'chair', 'couch', 'potted plant', 'bed',
'dining table', 'toilet', 'tv', 'laptop', 'mouse', 'remote',
'keyboard', 'cell phone', 'microwave', 'oven', 'toaster',
'sink', 'refrigerator', 'book', 'clock', 'vase', 'scissors',
'teddy bear', 'hair drier', 'toothbrush']
TEST_PATH = '/kaggle/input/airbus-ship-detection/test/'
im_names = os.listdir(TEST_PATH)
data = list(group(im_names, batch_size))
# leftover = im_names[len(im_names)-6:]
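# resume from batch index 1021, presumably where a previous run stopped; END covers the remaining batches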
START = 1021
END = len(data)
all_results = []
for idx, d in enumerate(data[START:END]):
logging.info('Processing batch {}...'.format(START+idx))
im_batch = []
for im in d:
im_batch.append(skimage.io.imread(os.path.join(TEST_PATH, im)))
results = model.detect(im_batch, verbose=0)
for i in range(len(results)):
results[i]['im_name'] = d[i]
all_results += results
with open('output.txt', 'w') as f:
f.write('Processing batch {}/{}...'.format(START+idx, len(data)))
with open('results/results_{}_{}.pkl'.format(START, END), 'wb') as f:
pickle.dump(all_results, f)
| StarcoderdataPython |