id stringlengths 1 7 | text stringlengths 6 1.03M | dataset_id stringclasses 1
value |
|---|---|---|
1744617 | <reponame>arj119/FedML
import logging
import torch
from torch import nn
import torch.nn.functional as F
import torchvision.transforms as tfs
from torchvision.utils import make_grid
import wandb
from torch.utils.data import TensorDataset, DataLoader
from itertools import cycle
from fedml_api.model.cv.generator import Generator
try:
from fedml_core.trainer.model_trainer import ModelTrainer
except ImportError:
from FedML.fedml_core.trainer.model_trainer import ModelTrainer
class FedSSGANModelTrainer(ModelTrainer):
    """Federated semi-supervised GAN trainer.

    Pairs a shared ``Generator`` (the knowledge-transfer vehicle exchanged
    between clients) with a heterogeneous local model chosen by the client.
    """

    def __init__(self, generator, local_model):
        """
        Args:
            generator: Homogeneous model shared between clients that acts as
                the knowledge-transfer vehicle (a ``Generator``).
            local_model: Heterogeneous model chosen by the client so it can
                better utilise its local resources.
        """
        super().__init__(generator)
        self.generator: Generator = generator
        self.local_model = local_model
        # Normalisation constants used both by the input pipeline and denorm().
        self.mean = torch.Tensor([0.5])
        self.std = torch.Tensor([0.5])
        # Fixed noise batch so generator samples are comparable across rounds.
        self.fixed_noise = generator.generate_noise_vector(16, device='cpu')
        self.resize = tfs.Resize(32)
        self.transforms = torch.nn.Sequential(
            tfs.Resize(32),
            tfs.Normalize(mean=self.mean, std=self.std),
        )
def denorm(self, x, channels=None, w=None, h=None, resize=False, device='cpu'):
    """Undo the mean/std normalisation applied by ``self.transforms``.

    Args:
        x: Batch of normalised images.
        channels, w, h: Target shape used only when ``resize`` is True.
        resize: When True, reshape the result to ``(batch, channels, w, h)``.
        device: Device on which the un-normalise transform runs.

    Returns:
        The de-normalised (and optionally reshaped) batch.

    Raises:
        ValueError: If ``resize`` is requested without channels/w/h.
    """
    unnormalize = tfs.Normalize((-self.mean / self.std).tolist(), (1.0 / self.std).tolist()).to(device)
    x = unnormalize(x)
    if resize:
        if channels is None or w is None or h is None:
            # Fixed: previously this only printed a warning and then crashed
            # in x.view() with None arguments; fail fast with a clear error.
            raise ValueError('Number of channels, width and height must be provided for resize.')
        x = x.view(x.size(0), channels, w, h)
    return x
def log_gan_images(self, caption, client_id, round_idx):
    """Render generator outputs on the fixed noise batch and log them to wandb."""
    grid = make_grid(
        self.denorm(self.generator(self.fixed_noise)),
        nrow=8, padding=2, normalize=False, range=None, scale_each=False, pad_value=0,
    )
    wandb.log({
        f"Generator Outputs {client_id}": wandb.Image(grid, caption=caption),
        'round': round_idx,
    })

def get_model_params(self):
    """Return the (CPU) state dict of the shared generator."""
    return self.generator.cpu().state_dict()

def set_model_params(self, model_parameters):
    """Load aggregated parameters into the shared generator."""
    self.generator.load_state_dict(model_parameters)
def train(self, train_data, device, args=None):
    """Run local semi-supervised GAN training.

    Args:
        train_data: Tuple ``(labelled_data, unlabelled_data)`` of data loaders.
        device: Device to perform training on.
        args: Namespace with ``client_optimizer``, ``lr``, ``wd`` and ``epochs``.
    """
    generator = self.generator.to(device)
    local_model = self.local_model.to(device)

    if args.client_optimizer == "sgd":
        optimiser_G = torch.optim.SGD(self.generator.parameters(), lr=args.lr)
        optimiser_D = torch.optim.SGD(self.local_model.parameters(), lr=args.lr)
    else:
        betas = (0.5, 0.999)

        def make_adam(params):
            # Only optimise trainable parameters.
            return torch.optim.Adam(
                (p for p in params if p.requires_grad),
                lr=args.lr,
                weight_decay=args.wd,
                amsgrad=True,
                betas=betas,
            )

        optimiser_G = make_adam(self.generator.parameters())
        optimiser_D = make_adam(self.local_model.parameters())

    labelled_data, unlabelled_data = train_data
    self._gan_training(generator, local_model, labelled_data, unlabelled_data,
                       args.epochs, optimiser_G, optimiser_D, device)
def _discriminator_output(self, logits):
    """Map classifier logits to a 'realness' score.

    Uses Z(x) = logsumexp(logits) and squashes Z/(Z+1) through a sigmoid.
    """
    z = torch.logsumexp(logits, dim=-1)
    return torch.sigmoid(z / (z + 1))
def _gan_training(self, generator, discriminator, labelled_data, unlabelled_data, epochs,
                  optimizer_G, optimizer_D, device):
    """Semi-supervised GAN training loop (SGAN-style).

    The discriminator doubles as the classifier: a "realness" score is derived
    from logsumexp of its class logits, so it is trained with (1) an
    unsupervised term on (pseudo-)real data, (2) a term pushing down generated
    data and (3) a supervised log-likelihood term on the labels.

    Args:
        generator: Generator network, already on ``device``.
        discriminator: Classifier/discriminator network, already on ``device``.
        labelled_data: DataLoader of (image, label) batches.
        unlabelled_data: Optional DataLoader of synthetic (image, label)
            batches mixed into each real batch; may be None.
        epochs: Number of passes over ``labelled_data``.
        optimizer_G: Optimiser for the generator.
        optimizer_D: Optimiser for the discriminator.
        device: Device on which training runs.
    """
    # Removed dead code from the original: unused real/fake label constants,
    # unused BCE/CrossEntropy criteria, and the debug-only
    # torch.autograd.set_detect_anomaly(True) left from development.
    generator.train()
    discriminator.train()
    transforms = self.transforms.to(device)

    epoch_loss_D, epoch_loss_G = [], []
    for epoch in range(epochs):
        batch_loss_D, batch_loss_G = [], []
        data_iter = (labelled_data if unlabelled_data is None
                     else zip(labelled_data, cycle(unlabelled_data)))
        for data in data_iter:
            if unlabelled_data is not None:
                (real, labels), (synth, synth_labels) = data
                real, labels = real.to(device), labels.to(device)
                synth, synth_labels = synth.to(device), synth_labels.to(device)
                # 1-channel datasets arrive without an explicit channel dim.
                if len(real.shape) < 4:
                    real = torch.unsqueeze(real, 1)
                real = transforms(real)
                with torch.no_grad():
                    real = torch.cat((real, synth), dim=0)
                    labels = torch.cat((labels, synth_labels), dim=0)
            else:
                real, labels = data
                real, labels = real.to(device), labels.to(device)
                real = transforms(real)

            b_size = real.size(0)
            generator.zero_grad()
            discriminator.zero_grad()

            # ---- Update discriminator (the classifier) ----
            optimizer_D.zero_grad()
            # 1. unsupervised realness term on real data
            logz_unlabel = torch.logsumexp(discriminator(real), dim=-1)
            lossUL = 0.5 * (-torch.mean(logz_unlabel) + torch.mean(F.softplus(logz_unlabel)))
            # 2. generated data (detach: G is not being trained here)
            fake = generator.generate(b_size, device)
            logz_fake = torch.logsumexp(discriminator(fake.detach()), dim=-1)
            lossD = 0.5 * torch.mean(F.softplus(logz_fake))
            # 3. supervised term on labelled data
            output = discriminator(real)
            logz_label = torch.logsumexp(output, dim=-1)
            prob_label = torch.gather(output, 1, labels.unsqueeze(1))
            labeled_loss = -torch.mean(prob_label) + torch.mean(logz_label)
            D_loss = labeled_loss + lossD + lossUL
            D_loss.backward()
            optimizer_D.step()

            # ---- Update generator ----
            optimizer_G.zero_grad()
            logz_gen = torch.logsumexp(discriminator(fake), dim=-1)
            G_loss = 0.5 * (-torch.mean(logz_gen) + torch.mean(F.softplus(logz_gen)))
            G_loss.backward()
            optimizer_G.step()

            # Fixed: the original appended D_loss to batch_loss_G and G_loss
            # to batch_loss_D, swapping the two loss curves.
            batch_loss_D.append(D_loss.item())
            batch_loss_G.append(G_loss.item())

        epoch_loss_G.append(sum(batch_loss_G) / len(batch_loss_G))
        epoch_loss_D.append(sum(batch_loss_D) / len(batch_loss_D))
        # Fixed: generator running mean previously divided by len(epoch_loss_D),
        # and the leading '\t' in the message had lost its backslash.
        logging.info(
            f'\tEpoch: {epoch}\t Gen Loss: {sum(epoch_loss_G) / len(epoch_loss_G):.6f}'
            f'\t Disc Loss: {sum(epoch_loss_D) / len(epoch_loss_D)}')
def _train_loop(self, model, train_data, criterion, epochs, optimizer, device):
    """Supervised training loop using a logsumexp-normalised log-likelihood loss.

    Args:
        model: Model to be trained.
        train_data: Training data loader of (x, labels) batches.
        criterion: Accepted for interface compatibility but IGNORED — the loss
            below is computed directly from the logits. (The original body
            overwrote it with an unused CrossEntropyLoss; that dead code is
            removed.)
        epochs: Epochs of training to be completed.
        optimizer: Optimiser used to update the model parameters.
        device: Device on which training occurs.

    Returns:
        List of per-epoch mean losses.
    """
    model.train()
    transforms = self.transforms.to(device)
    epoch_loss = []
    for epoch in range(epochs):
        batch_loss = []
        for x, labels in train_data:
            x, labels = x.to(device), labels.to(device)
            x = transforms(x)
            model.zero_grad()
            output = model(x)  # class logits
            # Loss: -log p(label) with the partition term logsumexp(output).
            logz_label = torch.logsumexp(output, dim=-1)
            prob_label = torch.gather(output, 1, labels.unsqueeze(1))
            loss = -torch.mean(prob_label) + torch.mean(logz_label)
            loss.backward()
            optimizer.step()
            batch_loss.append(loss.item())
        epoch_loss.append(sum(batch_loss) / len(batch_loss))
        # Fixed: the leading '\t' in the log message had lost its backslash.
        logging.info(f'\tEpoch: {epoch}\tLoss: {sum(epoch_loss) / len(epoch_loss):.6f}')
    return epoch_loss
def test(self, test_data, device, args=None):
    """Evaluate the local model on ``test_data`` and return aggregate metrics.

    stackoverflow_lr is a multi-label classification task, hence the BCE loss
    and the precision/recall accounting; see the tff research implementation:
    https://github.com/google-research/federated/blob/49a43456aa5eaee3e1749855eed89c0087983541/optimization/stackoverflow_lr/federated_stackoverflow_lr.py#L131
    and https://towardsdatascience.com/cross-entropy-for-classification-d98e7f974451
    """
    model = self.local_model.to(device)
    model.eval()
    transforms = self.transforms.to(device)

    metrics = {
        'test_correct': 0,
        'test_loss': 0,
        'test_precision': 0,
        'test_recall': 0,
        'test_total': 0,
    }

    multilabel = args.dataset == "stackoverflow_lr"
    criterion = (nn.BCELoss(reduction='sum') if multilabel else nn.CrossEntropyLoss()).to(device)

    with torch.no_grad():
        for x, target in test_data:
            x = transforms(x.to(device))
            target = target.to(device)
            pred = model(x)
            loss = criterion(pred, target)

            if multilabel:
                predicted = (pred > .5).int()
                # A sample counts as correct only when every label matches.
                correct = predicted.eq(target).sum(axis=-1).eq(target.size(1)).sum()
                true_positive = ((target * predicted) > .1).int().sum(axis=-1)
                metrics['test_precision'] += (true_positive / (predicted.sum(axis=-1) + 1e-13)).sum().item()
                metrics['test_recall'] += (true_positive / (target.sum(axis=-1) + 1e-13)).sum().item()
            else:
                _, predicted = torch.max(pred, 1)
                correct = predicted.eq(target).sum()

            metrics['test_correct'] += correct.item()
            metrics['test_loss'] += loss.item() * target.size(0)
            if len(target.size()) == 1:
                metrics['test_total'] += target.size(0)
            elif len(target.size()) == 2:  # tasks like next-word prediction
                metrics['test_total'] += target.size(0) * target.size(1)
    return metrics
def test_on_the_server(self, train_data_local_dict, test_data_local_dict, device, args=None) -> bool:
    """Server-side evaluation is not supported by this trainer."""
    return False
def pre_train(self, private_data, device, args):
    """FedMD-style pre-training: transfer-learn the local model on private data.

    Args:
        private_data: Private data only known to the client.
        device: Device to perform training on.
        args: Namespace with optimiser settings and ``pretrain_epochs_private``.
    """
    model = self.local_model
    model.to(device)

    if args.client_optimizer == "sgd":
        optimiser_D = torch.optim.SGD(self.local_model.parameters(), lr=args.lr)
    else:
        optimiser_D = torch.optim.Adam(
            (p for p in self.local_model.parameters() if p.requires_grad),
            lr=args.lr,
            weight_decay=args.wd,
            amsgrad=True,
            betas=(0.5, 0.999),
        )

    # Transfer learning on the private dataset (criterion is unused by the loop).
    self._train_loop(model, train_data=private_data, criterion=None,
                     epochs=args.pretrain_epochs_private,
                     optimizer=optimiser_D, device=device)
def _get_pseudo_labels_with_probability(self, disc_logits):
    """Return (confidence, pseudo-label) pairs derived from classifier logits."""
    class_probabilities = F.softmax(disc_logits, dim=-1)
    return class_probabilities.max(dim=-1)

def generate_synthetic_dataset(self, target_size, real_score_threshold=0.85, device='cpu'):
    """Sample ``target_size`` images from the generator and keep confident ones.

    Returns:
        ``(dataset, size)``: a TensorDataset of pseudo-labelled images and its
        size, or ``(None, 0)`` when no sample clears ``real_score_threshold``
        (i.e. the generator is not yet good enough).
    """
    generator = self.generator.to(device)
    discriminator = self.local_model.to(device)
    generator.eval()
    discriminator.eval()

    generated_images = generator.generate(target_size, device=device)
    # Filter by pseudo-label confidence to select the best generated images.
    label_probs, labels = self._get_pseudo_labels_with_probability(discriminator(generated_images))
    keep = label_probs >= real_score_threshold
    kept_images, kept_labels = generated_images[keep], labels[keep]

    size = kept_images.size(0)
    if size == 0:
        return None, 0
    return TensorDataset(kept_images, kept_labels), size
| StarcoderdataPython |
1787411 | <filename>code/P3.py
# P3: covariance/PCA of insurance data, then line fitting with Linear Least
# Squares, Total Least Squares and RANSAC, with matplotlib visualisations.
import numpy as np
from numpy import linalg as LA
import pandas as pd
import matplotlib.pyplot as plt
import os
from utils import covariance, LinearLeastSquare, TotalLeastSquare, RANSAC
np.set_printoptions(formatter={'all':lambda x: str(x)})
file =os.path.join("../data","insurance_data.csv")
# Read csv data into a dataframe
data = pd.read_csv(file)
x = data['age'].to_numpy()
y = data['charges'].to_numpy()
# Perform min-max normalization to bring data to the same scale
x_n = (x - np.min(x))/(np.max(x) - np.min(x))
y_n = (y - np.min(y))/(np.max(y) - np.min(y))
# Q1 _____________________________________________________________
# Compute Covariance matrix of original & normalized data
cov_mat = np.array([[covariance(x, x), covariance(x, y)], [covariance(x, y), covariance(y, y)]])
print("Covariance Matrix of original data: ")
print(cov_mat)
cov_mat = np.array([[covariance(x_n, x_n), covariance(x_n, y_n)], [covariance(x_n, y_n), covariance(y_n, y_n)]])
print("Covariance Matrix of normalized data: ")
print(cov_mat)
# Principal components = eigenvectors of the (normalized) covariance matrix.
eig_val, eig_vec = LA.eig(cov_mat)
eig_vec1 = eig_vec[:,np.argmax(eig_val)] # Eigen vector corresponding to max eigen value
eig_vec2 = eig_vec[:,np.argmin(eig_val)] # Eigen vector corresponding to min eigen value
# Dot product should be ~0: eigenvectors of a symmetric matrix are orthogonal.
print("Dot product of eigen vectors is: ", np.dot(eig_vec1, eig_vec2))
plt.figure(1)
plt.scatter(x, y)
plt.title("Principal Components")
plt.xlabel('age')
plt.ylabel('insurance_charges')
# Rescale each eigen vector to fit the graph and plot them at the mean of the data
plt.quiver(np.mean(x), np.mean(y), eig_vec1[0]*(np.max(x) - np.min(x))*np.max(eig_val) , eig_vec1[1]*(np.max(y) - np.min(y))*np.max(eig_val) , label = 'Principal Component 1', color = 'r', units='xy', angles='xy', scale_units='xy', scale=0.2)
plt.quiver(np.mean(x), np.mean(y), eig_vec2[0]*(np.max(x) - np.min(x))*np.min(eig_val) , eig_vec2[1]*(np.max(y) - np.min(y))*np.min(eig_val) , label = 'Principal Component 2', units='xy', angles='xy', scale_units='xy', scale =0.2)
plt.legend()
# __________________________________________________________________
# Q2________________________________________________________________
plt.figure(2)
plt.scatter(x, y)
plt.title("Line Fitting using LS & TLS")
plt.xlabel('age')
plt.ylabel('insurance_charges')
ls = LinearLeastSquare()
# Fit original data using LinearLeastSquare as it is scale invariant
m, c = ls.fit_line(x, y)
y1 = m*x + c
plt.plot(x, y1, label='LS', color='r')
# Fit normalized data using TotalLeastSquare
tls = TotalLeastSquare()
m, c = tls.fit_line(x_n, y_n)
# x2 spans normalized x; x_ps spans original x for plotting the rescaled fit.
x2 = np.linspace(np.min(x_n), np.max(x_n), x_n.size)
x_ps = np.linspace(np.min(x), np.max(x), x.size)
y2 = m*x2 + c
# Rescale data by performing inverse of min-max normalization
y2 = y2*(np.max(y) - np.min(y)) + np.min(y)
plt.plot(x_ps, y2, label="TLS", color='g')
plt.legend()
plt.figure(3)
# Fit normalized data using RANSAC
"""Optional : fit_line function of RANSAC class takes an optional boolean flag, "optimal_fit" while returns the slope and intercept after running Linear Least Square on only the inliers as a final step.
Set to False as default for comparison between LS, TLS & RANSAC
"""
ransac = RANSAC()
m, c, mask = ransac.fit_line(x_n, y_n, 0.001, p=0.99, optimal_fit = False)
# Plot outliers
plt.scatter(x[np.invert(mask)], y[np.invert(mask)], label='Outliers', color = 'tab:orange')
# Plot inliers
plt.scatter(x[mask], y[mask], label='Inliers', color='g')
plt.title("Line Fitting using RANSAC")
plt.xlabel('age')
plt.ylabel('insurance_charges')
y3 = m*x2 + c
# Rescale data by performing inverse of min-max normalization
y3 = y3*(np.max(y) - np.min(y)) + np.min(y)
plt.plot(x_ps, y3, color='r')
plt.legend()
plt.show()
1761354 | import testbase
from sqlalchemy import *
class CompileTest(testbase.AssertMixin):
    """test various mapper compilation scenarios"""
    # NOTE: legacy Python 2 code against an early SQLAlchemy API
    # (BoundMetaData, mapper/relation, `except X, e` syntax). Keep as-is
    # unless the whole suite is modernised.

    def tearDownAll(self):
        # Drop all mapper registrations so tests do not leak into each other.
        clear_mappers()

    def testone(self):
        # Polymorphic "order" mapper over a select-alias plus related mappers;
        # verifies that compiling a dependent mapper succeeds.
        global metadata, order, employee, product, tax, orderproduct
        metadata = BoundMetaData(testbase.db)
        order = Table('orders', metadata,
            Column('id', Integer, primary_key=True),
            Column('employee_id', Integer, ForeignKey('employees.id'), nullable=False),
            Column('type', Unicode(16)))
        employee = Table('employees', metadata,
            Column('id', Integer, primary_key=True),
            Column('name', Unicode(16), unique=True, nullable=False))
        product = Table('products', metadata,
            Column('id', Integer, primary_key=True),
            )
        orderproduct = Table('orderproducts', metadata,
            Column('id', Integer, primary_key=True),
            Column('order_id', Integer, ForeignKey("orders.id"), nullable=False),
            Column('product_id', Integer, ForeignKey("products.id"), nullable=False),
            )

        class Order(object):
            pass

        class Employee(object):
            pass

        class Product(object):
            pass

        class OrderProduct(object):
            pass

        order_join = order.select().alias('pjoin')
        order_mapper = mapper(Order, order,
            select_table=order_join,
            polymorphic_on=order_join.c.type,
            polymorphic_identity='order',
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='order')}
            )
        mapper(Product, product,
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='product')}
            )
        mapper(Employee, employee,
            properties={
                'orders': relation(Order, lazy=True, backref='employee')})
        mapper(OrderProduct, orderproduct)
        # this requires that the compilation of order_mapper's "surrogate mapper" occur after
        # the initial setup of MapperProperty objects on the mapper.
        class_mapper(Product).compile()

    def testtwo(self):
        """test that conflicting backrefs raises an exception"""
        global metadata, order, employee, product, tax, orderproduct
        metadata = BoundMetaData(testbase.db)
        order = Table('orders', metadata,
            Column('id', Integer, primary_key=True),
            Column('type', Unicode(16)))
        product = Table('products', metadata,
            Column('id', Integer, primary_key=True),
            )
        orderproduct = Table('orderproducts', metadata,
            Column('id', Integer, primary_key=True),
            Column('order_id', Integer, ForeignKey("orders.id"), nullable=False),
            Column('product_id', Integer, ForeignKey("products.id"), nullable=False),
            )

        class Order(object):
            pass

        class Product(object):
            pass

        class OrderProduct(object):
            pass

        order_join = order.select().alias('pjoin')
        # Both relations below claim the backref name 'product' — conflict.
        order_mapper = mapper(Order, order,
            select_table=order_join,
            polymorphic_on=order_join.c.type,
            polymorphic_identity='order',
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='product')}
            )
        mapper(Product, product,
            properties={
                'orderproducts': relation(OrderProduct, lazy=True, backref='product')}
            )
        mapper(OrderProduct, orderproduct)
        try:
            class_mapper(Product).compile()
            assert False
        except exceptions.ArgumentError, e:
            assert str(e).index("Backrefs do not match") > -1

    def testthree(self):
        # A freshly mapped row with no related names should yield an empty list.
        metadata = BoundMetaData(testbase.db)
        node_table = Table("node", metadata,
            Column('node_id', Integer, primary_key=True),
            Column('name_index', Integer, nullable=True),
            )
        node_name_table = Table("node_name", metadata,
            Column('node_name_id', Integer, primary_key=True),
            Column('node_id', Integer, ForeignKey('node.node_id')),
            Column('host_id', Integer, ForeignKey('host.host_id')),
            Column('name', String(64), nullable=False),
            )
        host_table = Table("host", metadata,
            Column('host_id', Integer, primary_key=True),
            Column('hostname', String(64), nullable=False,
                unique=True),
            )
        metadata.create_all()
        try:
            # NOTE(review): inserts `node_index` although the column is named
            # `name_index` — verify whether this mismatch is intentional.
            node_table.insert().execute(node_id=1, node_index=5)

            class Node(object):pass
            class NodeName(object):pass
            class Host(object):pass

            node_mapper = mapper(Node, node_table)
            host_mapper = mapper(Host, host_table)
            node_name_mapper = mapper(NodeName, node_name_table,
                properties = {
                    'node' : relation(Node, backref=backref('names')),
                    'host' : relation(Host),
                    }
                )
            sess = create_session()
            assert sess.query(Node).get(1).names == []
        finally:
            metadata.drop_all()


if __name__ == '__main__':
    testbase.main()
| StarcoderdataPython |
4806799 | <gh_stars>1-10
from django.shortcuts import render
from sitecampus.models import Autor, Post
# Create your views here.
def index(request):
    """Render the home page listing every blog post."""
    posts = Post.objects.all()
    return render(request, 'index.html', context={'posts': posts})
| StarcoderdataPython |
3293956 | import torch
import torch.nn as nn
from torchvision.models.resnet import resnet50
from torchvision.models.vgg import vgg16
import dino.vision_transformer as vits
#import moco.vits as vits_moco
def get_model(arch, patch_size, device):
    """Build a frozen, pretrained backbone for the requested architecture.

    Args:
        arch: Architecture name (e.g. "vit_small", "moco_vit_base",
            "mae_vit_base", "resnet50").
        patch_size: ViT patch size (8 or 16); selects the checkpoint URL.
        device: Device the model is moved to.

    Returns:
        The model in eval mode with all parameters frozen (requires_grad=False).
    """
    # Initialize model with pretraining
    url = None
    if "moco" in arch:
        if arch == "moco_vit_small" and patch_size == 16:
            url = "moco-v3/vit-s-300ep/vit-s-300ep.pth.tar"
        elif arch == "moco_vit_base" and patch_size == 16:
            url = "moco-v3/vit-b-300ep/vit-b-300ep.pth.tar"
        model = vits.__dict__[arch](num_classes=0)
    elif "mae" in arch:
        if arch == "mae_vit_base" and patch_size == 16:
            url = "mae/visualize/mae_visualize_vit_base.pth"
        model = vits.__dict__[arch](num_classes=0)
    elif "vit" in arch:
        if arch == "vit_small" and patch_size == 16:
            url = "dino/dino_deitsmall16_pretrain/dino_deitsmall16_pretrain.pth"
        elif arch == "vit_small" and patch_size == 8:
            url = "dino/dino_deitsmall8_300ep_pretrain/dino_deitsmall8_300ep_pretrain.pth"
        elif arch == "vit_base" and patch_size == 16:
            url = "dino/dino_vitbase16_pretrain/dino_vitbase16_pretrain.pth"
        elif arch == "vit_base" and patch_size == 8:
            url = "dino/dino_vitbase8_pretrain/dino_vitbase8_pretrain.pth"
        # NOTE(review): under this reconstruction the resnet50 case is
        # unreachable (the outer branch requires "vit" in arch); the original
        # indentation was lost — confirm placement against the upstream repo.
        elif arch == "resnet50":
            url = "dino/dino_resnet50_pretrain/dino_resnet50_pretrain.pth"
        model = vits.__dict__[arch](patch_size=patch_size, num_classes=0)
    else:
        raise NotImplementedError
    # Freeze the backbone: it is used as a fixed feature extractor.
    for p in model.parameters():
        p.requires_grad = False
    if url is not None:
        print(
            "Since no pretrained weights have been provided, we load the reference pretrained DINO weights."
        )
        state_dict = torch.hub.load_state_dict_from_url(
            url="https://dl.fbaipublicfiles.com/" + url
        )
        if "moco" in arch:
            state_dict = state_dict['state_dict']
            for k in list(state_dict.keys()):
                # retain only base_encoder up to before the embedding layer
                if k.startswith('module.base_encoder') and not k.startswith('module.base_encoder.head'):
                    # remove prefix
                    state_dict[k[len("module.base_encoder."):]] = state_dict[k]
                # delete renamed or unused k
                del state_dict[k]
        elif "mae" in arch:
            state_dict = state_dict['model']
            for k in list(state_dict.keys()):
                # drop decoder weights and the mask token: only the encoder is kept
                # (assumes the remaining keys match the model exactly, since the
                # load below uses strict=True — TODO confirm for MAE checkpoints)
                if k.startswith('decoder') or k.startswith('mask_token'):
                    del state_dict[k]
        msg = model.load_state_dict(state_dict, strict=True)
        print(
            "Pretrained weights found at {} and loaded with msg: {}".format(
                url, msg
            )
        )
    else:
        print(
            "There is no reference weights available for this model => We use random weights."
        )
    model.eval()
    model.to(device)
    return model
| StarcoderdataPython |
102902 | from baserow.contrib.database.formula.exceptions import BaserowFormulaException
class InvalidNumberOfArguments(BaserowFormulaException):
    """Raised when a formula function is called with the wrong arity."""

    def __init__(self, function_def, num_args):
        # Pluralise the message correctly when exactly one argument was given.
        given = "1 argument was" if num_args == 1 else f"{num_args} arguments were"
        super().__init__(
            f"{given} given to the {function_def}, it must instead "
            f"be given {function_def.num_args}"
        )


class MaximumFormulaSizeError(BaserowFormulaException):
    """Raised when a formula exceeds the maximum allowed size."""

    def __init__(self):
        super().__init__("it exceeded the maximum formula size")


class UnknownFieldByIdReference(BaserowFormulaException):
    """Raised when a formula directly references a field id that does not exist."""

    def __init__(self, unknown_field_id):
        super().__init__(
            f"there is no field with id {unknown_field_id} but the formula"
            f" included a direct reference to it"
        )


class UnknownOperator(BaserowFormulaException):
    """Raised when a formula uses an operator the parser does not recognise."""

    def __init__(self, operatorText):
        super().__init__(f"it used the unknown operator {operatorText}")


class BaserowFormulaSyntaxError(BaserowFormulaException):
    """Raised for general formula syntax errors."""
| StarcoderdataPython |
1789207 | import os
import re
from argparse import ArgumentParser
from argparse import ArgumentTypeError
def parse_arguments():
    """Build and parse the command-line arguments.

    Any validation needed is performed by the ``type=`` callables attached to
    each argument.

    Returns:
        argparse.Namespace with ``json_config`` and ``action`` attributes.
    """
    parser = ArgumentParser(
        description="Creates and removes data from a Hadoop system to use in Spark Training.")
    parser.add_argument("-j", "--json_config", dest="json_config",
                        help="The JSON Config File Location",
                        type=json_config_file, required=True)
    parser.add_argument("-a", "--action", dest="action",
                        help="Create/Delete data from Hadoop",
                        type=action, required=True)
    return parser.parse_args()
def json_config_file(value):
    """Argparse type-checker: ensure *value* looks like a path to a JSON file.

    Args:
        value: Candidate file path passed on the command line.

    Returns:
        The unchanged value when the file name ends in ``.json``.

    Raises:
        ArgumentTypeError: If the name does not end in ``.json``. (Fixed:
            previously raised with no message, and a file literally named
            "json" with no extension was wrongly accepted because the check
            compared only the last dot-separated token.)
    """
    if os.path.basename(value).endswith(".json"):
        return value
    raise ArgumentTypeError(f"'{value}' is not a .json file")
def action(value):
    """Argparse type-checker: ensure *value* is a supported action.

    Args:
        value: The action passed as an argument.

    Returns:
        The unchanged value when it is ``"create"`` or ``"delete"``.

    Raises:
        ArgumentTypeError: For any other value. (Fixed: previously raised
            with no message, so argparse gave the user no hint why the
            value was rejected.)
    """
    if value in ("create", "delete"):
        return value
    raise ArgumentTypeError(f"action must be 'create' or 'delete', got '{value}'")
| StarcoderdataPython |
3395639 | <reponame>dyning/AlexNet-Prod<gh_stars>10-100
import numpy as np
import torch
from torchvision.models.alexnet import alexnet
from reprod_log import ReprodLogger
if __name__ == "__main__":
    # The pretrained model weights are cached by torchvision under
    # ~/.cache/torch/hub/checkpoints/alexnet-owt-4df8aa71.pth
    reprod_logger = ReprodLogger()

    model = alexnet(pretrained=True, num_classes=1000)
    model.eval()

    # Load the shared fake input so torch and paddle runs are comparable.
    fake_data = torch.from_numpy(np.load("../../fake_data/fake_data.npy"))

    # Forward pass; record the logits for later diffing against paddle.
    out = model(fake_data)
    reprod_logger.add("logits", out.cpu().detach().numpy())
    reprod_logger.save("forward_torch.npy")
| StarcoderdataPython |
172172 | from pyflowline.algorithms.auxiliary.check_head_water import check_head_water
def remove_small_river(aFlowline_in, dThreshold_in):
    """Drop headwater flowlines shorter than ``dThreshold_in``.

    Dam-flagged flowlines and non-headwater flowlines are always kept; a
    single-flowline network is returned unchanged. Kept flowlines are
    re-indexed sequentially via ``lIndex``.

    Args:
        aFlowline_in: List of flowline objects.
        dThreshold_in: Minimum length for a headwater flowline to survive.

    Returns:
        The filtered list of flowlines.
    """
    if len(aFlowline_in) == 1:
        return [aFlowline_in[0]]

    aFlowline_out = []
    for pFlowline in aFlowline_in:
        if pFlowline.iFlag_dam != 1:
            is_headwater = check_head_water(aFlowline_in, pFlowline.pVertex_start) == 1
            if is_headwater and pFlowline.calculate_length() <= dThreshold_in:
                # Prune short headwater stream.
                continue
        pFlowline.lIndex = len(aFlowline_out)
        aFlowline_out.append(pFlowline)
    return aFlowline_out
86523 | # Copyright (c) 2012, GPy authors (see AUTHORS.txt).
# Licensed under the BSD 3-clause license (see LICENSE.txt)
from ....core.parameterization.parameter_core import Pickleable
from GPy.util.caching import Cache_this
from ....core.parameterization import variational
import rbf_psi_comp
import ssrbf_psi_comp
import sslinear_psi_comp
import linear_psi_comp
class PSICOMP_RBF(Pickleable):
    """Psi-statistics computations for the RBF kernel.

    Dispatches to the dense (NormalPosterior) or spike-and-slab
    (SpikeAndSlabPosterior) implementation; results are memoised via
    Cache_this. NOTE: legacy Python 2 raise syntax is used throughout.
    """

    @Cache_this(limit=2, ignore_args=(0,))
    def psicomputations(self, variance, lengthscale, Z, variational_posterior):
        # Choose the implementation matching the variational posterior family.
        if isinstance(variational_posterior, variational.NormalPosterior):
            return rbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior)
        elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
            return ssrbf_psi_comp.psicomputations(variance, lengthscale, Z, variational_posterior)
        else:
            raise ValueError, "unknown distriubtion received for psi-statistics"

    @Cache_this(limit=2, ignore_args=(0,1,2,3))
    def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior):
        # Gradients of the psi statistics w.r.t. kernel/inducing parameters.
        if isinstance(variational_posterior, variational.NormalPosterior):
            return rbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior)
        elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
            return ssrbf_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, lengthscale, Z, variational_posterior)
        else:
            raise ValueError, "unknown distriubtion received for psi-statistics"

    def _setup_observers(self):
        # Nothing to observe: the computations are stateless apart from caching.
        pass


class PSICOMP_Linear(Pickleable):
    """Psi-statistics computations for the Linear kernel (no lengthscale)."""

    @Cache_this(limit=2, ignore_args=(0,))
    def psicomputations(self, variance, Z, variational_posterior):
        # Choose the implementation matching the variational posterior family.
        if isinstance(variational_posterior, variational.NormalPosterior):
            return linear_psi_comp.psicomputations(variance, Z, variational_posterior)
        elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
            return sslinear_psi_comp.psicomputations(variance, Z, variational_posterior)
        else:
            raise ValueError, "unknown distriubtion received for psi-statistics"

    @Cache_this(limit=2, ignore_args=(0,1,2,3))
    def psiDerivativecomputations(self, dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior):
        # Gradients of the psi statistics w.r.t. kernel/inducing parameters.
        if isinstance(variational_posterior, variational.NormalPosterior):
            return linear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior)
        elif isinstance(variational_posterior, variational.SpikeAndSlabPosterior):
            return sslinear_psi_comp.psiDerivativecomputations(dL_dpsi0, dL_dpsi1, dL_dpsi2, variance, Z, variational_posterior)
        else:
            raise ValueError, "unknown distriubtion received for psi-statistics"

    def _setup_observers(self):
        # Nothing to observe: the computations are stateless apart from caching.
        pass
1652747 | #!/usr/bin/env python3
# -*- coding: utf-8 -*-
#Author:Winston.Wang
from collections import Iterable
# Demonstrate replacing a list comprehension with a generator to save memory.
print("---------使用生成器替换列表生成式节约空间----------")
# A list comprehension materialises every value at once.
print([v for v in range(10)])
# Swapping [] for () produces a lazy generator instead.
g = (v for v in range(10))
print(g)
# Pull the first four values off the generator with next().
for _ in range(4):
    print(next(g))
# A generator is iterable; the for-loop consumes the remaining values.
if isinstance(g, Iterable):
    for value in g:
        print(value)
print('---------斐波那契数列----------')

# Generate the Fibonacci sequence the eager way: print as we go.
def fib(max):
    """Print the first *max* Fibonacci numbers, then return 'done'."""
    a, b = 0, 1
    for _ in range(max):
        print(b)
        # Simultaneous assignment: the RHS tuple is built before rebinding,
        # so no temporary variable is needed.
        a, b = b, a + b
    return 'done'

fib(3)
# Adding `yield` turns the function into a generator.
def fibG(max):
    """Lazily yield the first *max* Fibonacci numbers."""
    a, b = 0, 1
    count = 0
    while count < max:
        yield b
        a, b = b, a + b
        count += 1
    return "done"

# Consume the generator one value at a time.
o = fibG(3)
print(next(o))
print(next(o))
print(next(o))
# Show that execution pauses at each yield and resumes on the next request.
def testYield():
    print('step 1')
    yield 1
    print('step 2')
    yield 2
    print('step 3')
    yield 3

y = testYield()
# Each loop turn resumes the generator, so 'step N' prints interleave with values.
for item in y:
    print(item)
# Print Pascal's (Yang Hui's) triangle.
print('----------------输出杨辉三角---------------')

def triangles():
    """Infinite generator of Pascal's triangle rows, starting with [1]."""
    row = [1]
    while True:
        yield row
        # Each interior element is the sum of the two elements above it.
        row = [1] + [row[i] + row[i + 1] for i in range(len(row) - 1)] + [1]

results = []
for idx, row in enumerate(triangles()):
    print(row)
    results.append(row)
    if idx == 9:  # collect exactly the first 10 rows
        break

expected = [
    [1],
    [1, 1],
    [1, 2, 1],
    [1, 3, 3, 1],
    [1, 4, 6, 4, 1],
    [1, 5, 10, 10, 5, 1],
    [1, 6, 15, 20, 15, 6, 1],
    [1, 7, 21, 35, 35, 21, 7, 1],
    [1, 8, 28, 56, 70, 56, 28, 8, 1],
    [1, 9, 36, 84, 126, 126, 84, 36, 9, 1]
]
if results == expected:
    print('测试通过!')
else:
    print('测试失败!')
3363091 | import json
import pytest
from share.tasks import ingest
from tests import factories
@pytest.mark.django_db
class TestIngestJobConsumer:
    """Exercise the ingest task's handling of raw data that maps to nothing."""

    def test_no_output(self):
        # A raw datum whose normalised graph is empty should end up flagged
        # no_output after ingestion.
        raw = factories.RawDatumFactory(datum=json.dumps({'@graph': []}))
        job = factories.IngestJobFactory(raw=raw)
        assert not raw.no_output

        ingest(job_id=job.id)

        raw.refresh_from_db()
        assert raw.no_output
| StarcoderdataPython |
1669981 | from .analyzer import Analyzer
| StarcoderdataPython |
1606077 | <reponame>NeCTAR-RC/manuka<gh_stars>0
"""make user_id unique
Revision ID: 4b8f295e23e2
Revises: 53c5ca8ba<PASSWORD>
Create Date: 2020-04-30 15:58:32.976300
"""
from alembic import op
# revision identifiers, used by Alembic.
revision = '4<PASSWORD>'
down_revision = '53<PASSWORD>ba<PASSWORD>'
branch_labels = None
depends_on = None
def upgrade():
    # Auto-generated by Alembic: enforce uniqueness of user.user_id.
    op.create_unique_constraint(None, 'user', ['user_id'])


def downgrade():
    # Revert the unique constraint added in upgrade().
    # NOTE(review): dropping a constraint by name None may fail on some
    # backends where the auto-generated name cannot be inferred — verify.
    op.drop_constraint(None, 'user', type_='unique')
| StarcoderdataPython |
113891 | <reponame>bhhaskin/bryans.website
from allauth.account.adapter import DefaultAccountAdapter
class ClosedAccountAdapter(DefaultAccountAdapter):
    """Account adapter that disables self-service signup site-wide."""

    def is_open_for_signup(self, request):
        # Registration is closed: reject every signup attempt.
        return False
3211445 | import pytest
from astropy import units as u
import numpy as np
from xrtpy.response.channel import Channel
import pkg_resources
import sunpy
import sunpy.map
from sunpy.data import manager
import scipy.io
import sunpy.io.special
# Every filter-channel name supported by the XRT instrument, including the
# combined (slash-separated) filter pairs; used to parametrize the tests below.
channel_names = [
    "Al-mesh",
    "Al-poly",
    "C-poly",
    "Ti-poly",
    "Be-thin",
    "Be-med",
    "Al-med",
    "Al-thick",
    "Be-thick",
    "Al-poly/Al-mesh",
    "Al-poly/Ti-poly",
    "Al-poly/Al-thick",
    "Al-poly/Be-thick",
    "C-poly/Ti-poly",
]
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_name(channel_name):
    """Constructing a Channel must preserve the requested name."""
    assert Channel(channel_name).name == channel_name
# Load the reference IDL ``.genx`` calibration file shipped with xrtpy.
# ``SAVEGEN0`` holds one record per channel; records are looked up via
# ``_channel_name_to_index_mapping`` throughout this module.
filename = pkg_resources.resource_filename(
    "xrtpy", "data/channels/xrt_channels_v0016.genx"
)
v6_genx = sunpy.io.special.genx.read_genx(filename)
v6_genx_s = v6_genx["SAVEGEN0"]
# Index of each channel's record within the genx SAVEGEN0 array; the order
# mirrors ``channel_names`` above.
_channel_name_to_index_mapping = {
    "Al-mesh": 0,
    "Al-poly": 1,
    "C-poly": 2,
    "Ti-poly": 3,
    "Be-thin": 4,
    "Be-med": 5,
    "Al-med": 6,
    "Al-thick": 7,
    "Be-thick": 8,
    "Al-poly/Al-mesh": 9,
    "Al-poly/Ti-poly": 10,
    "Al-poly/Al-thick": 11,
    "Al-poly/Be-thick": 12,
    "C-poly/Ti-poly": 13,
}
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_wavelength(channel_name):
channel_filter = Channel(channel_name)
ccd_wavelength_length = int(channel_filter.ccd.number_of_wavelengths)
ccd_wavelength = channel_filter.ccd.ccd_wavelength[:ccd_wavelength_length]
idl_ccd_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["LENGTH"]
)
idl_ccd_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["WAVE"][
:idl_ccd_array_length
]
* u.angstrom
)
assert u.allclose(idl_ccd_wavelength_auto, ccd_wavelength)
idl_ccd_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_ccd_wavelength_manu, ccd_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_quantum_efficiency(channel_name):
    """CCD quantum efficiency matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    ccd_array_length = int(channel_filter.ccd.number_of_wavelengths)
    ccd_quantum_efficiency = channel_filter.ccd.ccd_quantum_efficiency[
        :ccd_array_length
    ]
    idl_ccd_array_length = int(
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["LENGTH"]
    )
    idl_ccd_quantum_efficiency_auto = v6_genx_s[
        _channel_name_to_index_mapping[channel_name]
    ]["CCD"]["QE"][:idl_ccd_array_length]
    assert u.allclose(idl_ccd_quantum_efficiency_auto, ccd_quantum_efficiency)
    # Hand-transcribed reference values for the leading entries of the QE table.
    idl_ccd_quantum_efficiency_manu = [
        0.0573069,
        0.0751920,
        0.0960381,
        0.119867,
        0.146638,
        0.176252,
        0.208541,
        0.243277,
        0.280167,
        0.318879,
        0.359036,
        0.400219,
        0.441984,
        0.483898,
    ]
    # Bug fix: the original line was
    #     assert idl_ccd_quantum_efficiency_manu, ccd_quantum_efficiency[0:13]
    # which only asserts that the (always non-empty) list is truthy and uses
    # the array slice as the assertion *message* -- it checked nothing.  The
    # slice length is also derived from the list so the two operands match.
    assert u.allclose(
        idl_ccd_quantum_efficiency_manu,
        ccd_quantum_efficiency[: len(idl_ccd_quantum_efficiency_manu)],
    )
@pytest.mark.parametrize("channel_name", channel_names)
def test_CCD_pixel_size(channel_name):
    """CCD pixel size matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    ccd_pixel_size = channel_filter.ccd.ccd_pixel_size
    # Renamed from ``idl_ccd_quantum_efficiency_auto`` -- a copy-paste
    # leftover: this value is the pixel size, not the quantum efficiency.
    idl_ccd_pixel_size_auto = (
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["PIXEL_SIZE"]
        * u.micron
    )
    assert u.allclose(idl_ccd_pixel_size_auto, ccd_pixel_size)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_gain_left(channel_name):
channel_filter = Channel(channel_name)
ccd_gain_left = channel_filter.ccd.ccd_gain_left
idl_ccd_gain_left_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["GAIN_L"]
* u.electron
)
assert u.isclose(ccd_gain_left, idl_ccd_gain_left_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_gain_right(channel_name):
channel_filter = Channel(channel_name)
ccd_gain_right = channel_filter.ccd.ccd_gain_right
idl_ccd_gain_right_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["GAIN_R"]
* u.electron
)
assert u.isclose(ccd_gain_right, idl_ccd_gain_right_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_full_well(channel_name):
channel_filter = Channel(channel_name)
ccd_full_well = channel_filter.ccd.ccd_full_well
idl_ccd_full_well_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"]["FULL_WELL"]
* u.electron
)
assert u.isclose(ccd_full_well, idl_ccd_full_well_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_ev_ore_electron(channel_name):
channel_filter = Channel(channel_name)
ccd_full_well = channel_filter.ccd.ccd_ev_ore_electron
idl_ccd_full_well_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"CCD"
]["EV_PER_EL"] * (u.eV / u.electron)
assert u.isclose(ccd_full_well, idl_ccd_full_well_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_ccd_name(channel_name):
channel_filter = Channel(channel_name)
ccd_name = channel_filter.ccd.ccd_name
idl_ccd_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]]["CCD"][
"LONG_NAME"
]
assert ccd_name == idl_ccd_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_name(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_name = channel_filter.entrancefilter.entrancefilter_name
IDL_entrancefilter_name_AUTO = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["LONG_NAME"]
assert entrancefilter_name == IDL_entrancefilter_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_material(channel_name):
    """Entrance-filter materials match the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    entrancefilter_material = channel_filter.entrancefilter.entrancefilter_material
    idl_entrancefilter_material_auto = v6_genx_s[
        _channel_name_to_index_mapping[channel_name]
    ]["EN_FILTER"]["MATERIAL"]
    # Use a plain assert (consistent with test_filter1_material and the rest
    # of this module) instead of the original if/else + raise ValueError, so
    # pytest reports a normal assertion failure with introspection.
    assert np.all(entrancefilter_material == idl_entrancefilter_material_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_thickness(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_thickness = channel_filter.entrancefilter.entrancefilter_thickness
idl_entrancefilter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["THICK"]
* u.angstrom
)
assert u.allclose(entrancefilter_thickness, idl_entrancefilter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_density(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_density = channel_filter.entrancefilter.entrancefilter_density
idl_entrancefilter_density_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["DENS"] * (u.g * u.cm ** -3)
assert u.allclose(entrancefilter_density, idl_entrancefilter_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_wavelength(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_wavelength_length = int(
channel_filter.entrancefilter.number_of_wavelengths
)
entrancefilter_wavelength = channel_filter.entrancefilter.entrancefilter_wavelength[
:entrancefilter_wavelength_length
]
idl_entrancefilter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["LENGTH"]
)
idl_entrancefilter_wavelength_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["WAVE"][:idl_entrancefilter_array_length] * u.Unit(
"Angstrom"
) # wavelength_CCD_unit
assert u.allclose(idl_entrancefilter_wavelength_auto, entrancefilter_wavelength)
idl_entrancefilter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(
idl_entrancefilter_wavelength_manu, entrancefilter_wavelength[0:10]
)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_transmission(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_transmission_length = int(
channel_filter.entrancefilter.number_of_wavelengths
)
entrancefilter_transmission = (
channel_filter.entrancefilter.entrancefilter_transmission[
:entrancefilter_transmission_length
]
)
idl_entrancefilter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["EN_FILTER"]["LENGTH"]
)
idl_entrancefilter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["TRANS"][:idl_entrancefilter_array_length]
assert u.allclose(idl_entrancefilter_transmission_auto, entrancefilter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_mesh_transmission = (
channel_filter.entrancefilter.entrancefilter_mesh_transmission
)
idl_entrancefilter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["MESH_TRANS"]
assert entrancefilter_mesh_transmission == idl_entrancefilter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_entrancefilter_substrate(channel_name):
channel_filter = Channel(channel_name)
entrancefilter_substrate = channel_filter.entrancefilter.entrancefilter_substrate
idl_entrancefilter_substrate_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["EN_FILTER"]["SUBSTRATE"]
assert entrancefilter_substrate == idl_entrancefilter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_name(channel_name):
channel_filter = Channel(channel_name)
filter_name = channel_filter.filter_1.name
idl_filter_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["LONG_NAME"]
assert filter_name == idl_filter_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_material(channel_name):
channel_filter = Channel(channel_name)
filter_material = channel_filter.filter_1.material
idl_filter_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["MATERIAL"]
assert np.all(filter_material == idl_filter_material_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_thickness(channel_name):
channel_filter = Channel(channel_name)
filter_thickness = channel_filter.filter_1.thickness
idl_filter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["THICK"]
* u.angstrom
)
assert np.all(filter_thickness == idl_filter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_density(channel_name):
channel_filter = Channel(channel_name)
filter_density = channel_filter.filter_1.density
idl_filter_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["DENS"] * (u.g * u.cm ** -3)
assert u.allclose(filter_density, idl_filter_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_wavelength(channel_name):
channel_filter = Channel(channel_name)
filter_wavelength_length = int(channel_filter.filter_1.number_of_wavelengths)
filter_wavelength = channel_filter.filter_1.wavelength[:filter_wavelength_length]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["LENGTH"]
)
idl_filter_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["WAVE"][
:idl_filter_array_length
]
* u.angstrom
)
assert u.allclose(idl_filter_wavelength_auto, filter_wavelength)
idl_filter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(idl_filter_wavelength_manu, filter_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_transmission_length = int(channel_filter.filter_1.number_of_wavelengths)
filter_transmission = channel_filter.filter_1.transmission[
:filter_transmission_length
]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER1"]["LENGTH"]
)
idl_filter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER1"]["TRANS"][:idl_filter_array_length]
assert u.allclose(idl_filter_transmission_auto, filter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_mesh_transmission(channel_name):
    """Focal-plane filter 1 mesh transmission matches the IDL reference."""
    channel_filter = Channel(channel_name)
    # NOTE(review): this reaches into the private ``_filter_1`` attribute,
    # unlike test_filter2_mesh_transmission which uses the public
    # ``filter_2.mesh_trans``.  Presumably ``filter_1.mesh_trans`` would work
    # here too -- confirm against the Channel API and switch if so.
    filter_mesh_transmission = channel_filter._filter_1.mesh_trans
    idl_filter_mesh_transmission_auto = v6_genx_s[
        _channel_name_to_index_mapping[channel_name]
    ]["FP_FILTER1"]["MESH_TRANS"]
    assert filter_mesh_transmission == idl_filter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter1_substrate(channel_name):
channel_filter = Channel(channel_name)
filter_substrate = channel_filter.filter_1.substrate
idl_filter_substrate_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER1"
]["SUBSTRATE"]
assert filter_substrate == idl_filter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_name(channel_name):
channel_filter = Channel(channel_name)
filter_name = channel_filter.filter_2.name
IDL_filter_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["LONG_NAME"]
assert filter_name == IDL_filter_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_material(channel_name):
channel_filter = Channel(channel_name)
filter_material = channel_filter.filter_2.material
idl_filter_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["MATERIAL"]
assert np.all(filter_material == idl_filter_material_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_thickness(channel_name):
channel_filter = Channel(channel_name)
filter_thickness = channel_filter.filter_2.thickness
idl_filter_thick_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["THICK"]
* u.angstrom
)
assert u.allclose(filter_thickness, idl_filter_thick_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_density(channel_name):
    """Focal-plane filter 2 density matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    filter_density = channel_filter.filter_2.density
    IDL_filter_density_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
        "FP_FILTER2"
    ]["DENS"] * (u.g * u.cm ** -3)
    # Bug fix: the original called ``np.allclose(...)`` without ``assert``,
    # so the result was discarded and this test could never fail.
    # ``u.allclose`` matches test_filter1_density and handles the astropy
    # units carried by both operands.
    assert u.allclose(filter_density, IDL_filter_density_AUTO)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_wavelength(channel_name):
channel_filter = Channel(channel_name)
filter_wavelength_length = int(channel_filter.filter_2.number_of_wavelengths)
filter_wavelength = channel_filter.filter_2.wavelength[:filter_wavelength_length]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["LENGTH"]
)
idl_filter_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["WAVE"][
:idl_filter_array_length
]
* u.angstrom
)
assert u.allclose(idl_filter_wavelength_auto, filter_wavelength)
idl_filter_wavelength_manu = [
1.00000,
1.00802,
1.01610,
1.02424,
1.03245,
1.04073,
1.04907,
1.05748,
1.06595,
1.07450,
] * u.angstrom
assert u.allclose(idl_filter_wavelength_manu, filter_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_transmission_length = int(channel_filter.filter_2.number_of_wavelengths)
filter_transmission = channel_filter.filter_2.transmission[
:filter_transmission_length
]
idl_filter_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["FP_FILTER2"]["LENGTH"]
)
idl_filter_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER2"]["TRANS"][:idl_filter_array_length]
assert u.allclose(idl_filter_transmission_auto, filter_transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_mesh_transmission(channel_name):
channel_filter = Channel(channel_name)
filter_mesh_transmission = channel_filter.filter_2.mesh_trans
idl_filter_mesh_transmission_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["FP_FILTER2"]["MESH_TRANS"]
assert filter_mesh_transmission == idl_filter_mesh_transmission_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_filter2_substrate(channel_name):
channel_filter = Channel(channel_name)
filter_substrate = channel_filter.filter_2.substrate
idl_filter_substrate_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"FP_FILTER2"
]["SUBSTRATE"]
assert filter_substrate == idl_filter_substrate_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_name(channel_name):
channel_filter = Channel(channel_name)
geometry_name = channel_filter.geometry.name
IDL_geometry_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"GEOM"
]["LONG_NAME"]
assert geometry_name == IDL_geometry_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_focal_len(channel_name):
    """Geometry focal length agrees with the IDL genx reference data."""
    idl_entry = v6_genx_s[_channel_name_to_index_mapping[channel_name]]
    expected_focal_len = idl_entry["GEOM"]["FOC_LEN"] * u.cm
    measured_focal_len = Channel(channel_name).geometry.focal_len
    assert u.isclose(measured_focal_len, expected_focal_len)
@pytest.mark.parametrize("channel_name", channel_names)
def test_geometry_aperture_area(channel_name):
channel_filter = Channel(channel_name)
geometry_aperture_area = channel_filter.geometry.aperture_area
idl_geometry_aperture_area_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["GEOM"]["APERTURE_AREA"]
* u.cm ** 2
)
assert u.isclose(geometry_aperture_area, idl_geometry_aperture_area_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_name(channel_name):
channel_filter = Channel(channel_name)
mirror_name = channel_filter.mirror_1.name
IDL_mirror_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["LONG_NAME"]
assert mirror_name == IDL_mirror_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_material(channel_name):
channel_filter = Channel(channel_name)
mirror_material = channel_filter.mirror_1.material
IDL_mirror_material_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["MATERIAL"]
assert mirror_material == IDL_mirror_material_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_density(channel_name):
channel_filter = Channel(channel_name)
mirror_density = channel_filter.mirror_1.density
idl_mirror_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"MIRROR1"
]["DENS"] * (u.g * u.cm ** -3)
assert u.isclose(mirror_density, idl_mirror_density_auto)
# Renamed from ``test_mirro1_graze_angle`` -- typo in "mirror".
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_graze_angle(channel_name):
    """Mirror-1 graze angle matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    mirror_graze_angle = channel_filter.mirror_1.graze_angle
    idl_mirror_graze_angle_auto = (
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"][
            "GRAZE_ANGLE"
        ]
        * u.deg
    )
    assert u.isclose(mirror_graze_angle, idl_mirror_graze_angle_auto)
    # Hand-transcribed reference value (same for every channel).
    idl_mirror_graze_angle_manu = [0.910000] * u.deg
    assert u.isclose(idl_mirror_graze_angle_manu, mirror_graze_angle)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_wavelength(channel_name):
channel_filter = Channel(channel_name)
mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
mirror_wavelength = channel_filter.mirror_1.wavelength[:mirror_number_of_length]
idl_mirror_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["LENGTH"]
)
idl_mirror_wavelength_auto = v6_genx_s[
_channel_name_to_index_mapping[channel_name]
]["MIRROR1"]["WAVE"][:idl_mirror_array_length] * u.Unit("Angstrom")
assert u.allclose(idl_mirror_wavelength_auto, mirror_wavelength)
idl_mirror_wavelength_manu = [
1.00000,
1.10000,
1.20000,
1.30000,
1.40000,
1.50000,
1.60000,
1.70000,
1.80000,
1.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, mirror_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror1_reflection(channel_name):
    """Mirror-1 reflectivity matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    mirror_number_of_length = int(channel_filter.mirror_1.number_of_wavelengths)
    mirror_reflection = channel_filter.mirror_1.reflection[:mirror_number_of_length]
    idl_mirror_array_length = int(
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["LENGTH"]
    )
    # Renamed from ``idl_mirror_wavelength_auto``: this array holds the REFL
    # (reflectivity) column, not wavelengths.
    # NOTE(review): tagging reflectivity with ``u.angstrom`` looks wrong for
    # a dimensionless quantity; it only compares cleanly if
    # ``mirror_1.reflection`` carries the same unit -- confirm against the
    # Channel API before changing.
    idl_mirror_reflection_auto = (
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR1"]["REFL"][
            :idl_mirror_array_length
        ]
        * u.angstrom
    )
    assert u.allclose(idl_mirror_reflection_auto, mirror_reflection)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_name(channel_name):
    """Mirror-2 name matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    # Bug fix: this test was copy-pasted from the mirror-1 version and still
    # read ``mirror_1`` while comparing against the MIRROR2 reference record.
    mirror_name = channel_filter.mirror_2.name
    idl_mirror_name_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
        "MIRROR2"
    ]["LONG_NAME"]
    assert mirror_name == idl_mirror_name_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_material(channel_name):
    """Mirror-2 material matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    # Bug fix: copy-pasted from the mirror-1 test; it read ``mirror_1``
    # while comparing against the MIRROR2 reference record.
    mirror_material = channel_filter.mirror_2.material
    idl_mirror_material_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
        "MIRROR2"
    ]["MATERIAL"]
    assert mirror_material == idl_mirror_material_auto
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_density(channel_name):
    """Mirror-2 density matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    # Bug fix: copy-pasted from the mirror-1 test; it read ``mirror_1``
    # while comparing against the MIRROR2 reference record.
    mirror_density = channel_filter.mirror_2.density
    idl_mirror_density_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
        "MIRROR2"
    ]["DENS"] * (u.g * u.cm ** -3)
    assert u.isclose(mirror_density, idl_mirror_density_auto)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_graze_angle(channel_name):
    """Mirror-2 graze angle matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    # Bug fix: copy-pasted from the mirror-1 test; it read ``mirror_1``
    # while comparing against the MIRROR2 reference record.
    mirror_graze_angle = channel_filter.mirror_2.graze_angle
    idl_mirror_graze_angle_auto = (
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"][
            "GRAZE_ANGLE"
        ]
        * u.deg
    )
    assert u.isclose(mirror_graze_angle, idl_mirror_graze_angle_auto)
    # Hand-transcribed reference value (same for every channel).
    idl_mirror_graze_angle_manu = [0.910000] * u.deg
    assert u.isclose(idl_mirror_graze_angle_manu, mirror_graze_angle)
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_wavelength(channel_name):
    """Mirror-2 wavelength grid matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    # Bug fix: copy-pasted from the mirror-1 test; it read ``mirror_1``
    # while comparing against the MIRROR2 reference record.
    mirror_number_of_length = int(channel_filter.mirror_2.number_of_wavelengths)
    mirror_wavelength = channel_filter.mirror_2.wavelength[:mirror_number_of_length]
    idl_mirror_array_length = int(
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["LENGTH"]
    )
    idl_mirror_wavelength_auto = (
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["WAVE"][
            :idl_mirror_array_length
        ]
        * u.angstrom
    )
    assert u.allclose(idl_mirror_wavelength_auto, mirror_wavelength)
    # Hand-transcribed first ten wavelength samples.
    idl_mirror_wavelength_manu = [
        1.00000,
        1.10000,
        1.20000,
        1.30000,
        1.40000,
        1.50000,
        1.60000,
        1.70000,
        1.80000,
        1.90000,
    ] * u.angstrom
    assert u.allclose(idl_mirror_wavelength_manu, mirror_wavelength[0:10])
@pytest.mark.parametrize("channel_name", channel_names)
def test_mirror2_reflection(channel_name):
    """Mirror-2 reflectivity matches the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    # Bug fix: copy-pasted from the mirror-1 test; it read ``mirror_1``
    # while comparing against the MIRROR2 reference record.
    mirror_number_of_length = int(channel_filter.mirror_2.number_of_wavelengths)
    mirror_reflection = channel_filter.mirror_2.reflection[:mirror_number_of_length]
    idl_mirror_array_length = int(
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["LENGTH"]
    )
    # Renamed from ``idl_mirror_wavelength_auto``: this array holds the REFL
    # (reflectivity) column, not wavelengths.  See the unit caveat in
    # test_mirror1_reflection regarding ``u.angstrom`` on a reflectivity.
    idl_mirror_reflection_auto = (
        v6_genx_s[_channel_name_to_index_mapping[channel_name]]["MIRROR2"]["REFL"][
            :idl_mirror_array_length
        ]
        * u.angstrom
    )
    assert u.allclose(idl_mirror_reflection_auto, mirror_reflection)
# NOTE(review): this redefines ``test_channel_name`` from the top of this
# module, shadowing it so pytest only collects this version; one of the two
# should be renamed.
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_name(channel_name):
    """Channel name matches the NAME field of the IDL genx reference data."""
    channel_filter = Channel(channel_name)
    name = channel_filter.name
    IDL_mirror_name_AUTO = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
        "NAME"
    ]
    assert name == IDL_mirror_name_AUTO
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_wavelength(channel_name):
channel_filter = Channel(channel_name)
wavelength_length = int(channel_filter.number_of_wavelengths)
wavelength = channel_filter.wavelength[:wavelength_length]
idl_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
)
idl_wavelength_auto = (
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["WAVE"][
:idl_array_length
]
* u.angstrom
)
assert u.allclose(idl_wavelength_auto, wavelength)
idl_mirror_wavelength_manu = [
9.00000,
9.10000,
9.20000,
9.30000,
9.40000,
9.50000,
9.60000,
9.70000,
9.80000,
9.90000,
] * u.angstrom
assert u.allclose(idl_mirror_wavelength_manu, wavelength[80:90])
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_transmission(channel_name):
channel_filter = Channel(channel_name)
transmission_length = int(channel_filter.number_of_wavelengths)
transmission = channel_filter.transmission[:transmission_length]
idl_array_length = int(
v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
)
idl_transmission_auto = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"TRANS"
][:idl_array_length]
assert u.allclose(idl_transmission_auto, transmission)
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_number_of_wavelengths(channel_name):
channel_filter = Channel(channel_name)
channel_number_of_wavelengths = channel_filter.number_of_wavelengths
idl_array_length = v6_genx_s[_channel_name_to_index_mapping[channel_name]]["LENGTH"]
assert channel_number_of_wavelengths == idl_array_length
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_observatory(channel_name):
channel_filter = Channel(channel_name)
observatory = channel_filter.observatory
idl_observatory = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"OBSERVATORY"
]
assert observatory == idl_observatory
@pytest.mark.parametrize("channel_name", channel_names)
def test_channel_instrument(channel_name):
channel_filter = Channel(channel_name)
instrument = channel_filter.instrument
idl_instrument = v6_genx_s[_channel_name_to_index_mapping[channel_name]][
"INSTRUMENT"
]
assert instrument == idl_instrument
| StarcoderdataPython |
352 | # -*- coding: utf-8 -*-
from .__module__ import Module, dependency, source, version
from .tools import Tools
from .boost import Boost
from .python import Python
@dependency(Tools, Python, Boost)
@source('git')
@version('4.0.1')
class Opencv(Module):
    """Dockerfile module that builds OpenCV (plus opencv_contrib) from source.

    Builds with Qt/GTK, TBB, V4L and FFmpeg support and with CUDA and IPP
    disabled; the version to check out is injected via the ``@version``
    decorator.
    """

    def build(self):
        # Returns the Dockerfile RUN snippet; every ``{0}`` placeholder is
        # substituted with ``self.version`` by the .format() call below.
        # NOTE(review): the xenial-security apt source and libjasper-dev
        # presumably target Ubuntu 16.04/18.04 base images -- confirm before
        # reusing on newer bases.
        return r'''
RUN ln -fs /usr/share/zoneinfo/Asia/Hong_Kong /etc/localtime && \
    DEBIAN_FRONTEND=noninteractive \
    add-apt-repository "deb http://security.ubuntu.com/ubuntu xenial-security main" && \
    apt update && \
    $APT_INSTALL \
        libatlas-base-dev \
        libgflags-dev \
        libgoogle-glog-dev \
        libhdf5-serial-dev \
        libleveldb-dev \
        liblmdb-dev \
        libprotobuf-dev \
        libsnappy-dev \
        protobuf-compiler \
        libopencv-dev \
        yasm \
        libjpeg-dev \
        libjasper-dev \
        libavcodec-dev \
        libavformat-dev \
        libswscale-dev \
        libdc1394-22-dev \
        libv4l-dev \
        libtbb-dev \
        libqt4-dev \
        libgtk2.0-dev \
        libfaac-dev \
        libmp3lame-dev \
        libopencore-amrnb-dev \
        libopencore-amrwb-dev \
        libtheora-dev \
        libvorbis-dev \
        libxvidcore-dev \
        x264 \
        v4l-utils \
        ffmpeg \
        && \
    $GIT_CLONE --branch {0} https://github.com/opencv/opencv opencv && \
    $GIT_CLONE --branch {0} https://github.com/opencv/opencv_contrib.git opencv_contrib && \
    mkdir -p opencv/build && cd opencv/build && \
    cmake -D CMAKE_BUILD_TYPE=RELEASE \
          -D CMAKE_INSTALL_PREFIX=/usr/local \
          -D WITH_IPP=OFF \
          -D WITH_CUDA=OFF \
          -D WITH_TBB=ON \
          -D WITH_V4L=ON \
          -D WITH_QT=ON \
          -D WITH_OPENCL=ON \
          -D WITH_GTK=ON \
          -D WITH_LIBV4L=ON \
          -D BUILD_TESTS=OFF \
          -D BUILD_PERF_TESTS=OFF \
          -D WITH_FFMPEG=ON \
          -D OPENCV_EXTRA_MODULES_PATH=../../opencv_contrib/modules \
          .. && \
    make -j"$(nproc)" install && \
    ln -s /usr/local/include/opencv4/opencv2 /usr/local/include/opencv2
'''.format(self.version)
| StarcoderdataPython |
3235299 | from django.http import FileResponse
from django.shortcuts import render
def get_file_serve_view(filename: str):
    """Build a Django view that streams ``filename`` from the oss-web build dir.

    Returns a view function (taking the request) suitable for a URLconf entry.
    """
    # Compute the path once at view-creation time instead of formatting it on
    # every request; f-string replaces the older str.format idiom.
    path = f'web_interface/oss-web/build/{filename}'

    def get_file(request):
        # FileResponse takes ownership of the file handle and closes it after
        # streaming the response.
        return FileResponse(open(path, 'rb'))

    return get_file
| StarcoderdataPython |
3344 | import torch
import torchvision
import torchvision.transforms as transforms
import os.path
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
# Normalize each RGB channel from [0, 1] to [-1, 1] after tensor conversion.
transform = transforms.Compose(
    [transforms.ToTensor(),
     transforms.Normalize((0.5, 0.5, 0.5), (0.5, 0.5, 0.5))])
# Datasets are downloaded (if absent) into ../data relative to this file.
root = os.path.join(BASE_DIR, '../data/')
trainset = torchvision.datasets.CIFAR10(root=root, train=True,
                                        download=True, transform=transform)
# NOTE(review): no batch_size is passed, so the DataLoader default (1) is
# used -- confirm this is intended; it makes training very slow.
trainloader = torch.utils.data.DataLoader(trainset,
                                          shuffle=True, num_workers=2)
testset = torchvision.datasets.CIFAR10(root=root, train=False,
                                       download=True, transform=transform)
testloader = torch.utils.data.DataLoader(testset,
                                         shuffle=False, num_workers=2)
import torch.nn as nn
import torch.nn.functional as F
class Net(nn.Module):
    """Small LeNet-style CNN mapping 3x32x32 images to 10 class logits.

    Architecture: conv(3->6, k5) -> maxpool2 -> conv(6->16, k5) -> maxpool2
    -> flatten(400) -> fc(120) -> fc(84) -> fc(10).
    """

    def __init__(self):
        super(Net, self).__init__()
        self.conv1 = nn.Conv2d(3, 6, 5)
        # A single stateless pooling layer is reused after both convolutions.
        self.pool = nn.MaxPool2d(2, 2)
        self.conv2 = nn.Conv2d(6, 16, 5)
        self.fc1 = nn.Linear(16 * 5 * 5, 120)
        self.fc2 = nn.Linear(120, 84)
        self.fc3 = nn.Linear(84, 10)

    def forward(self, x):
        """Return raw class logits for a (N, 3, 32, 32) batch.

        (Commented-out shape-debug prints from the original were removed;
        the per-stage shapes are noted inline instead.)
        """
        x = self.pool(F.relu(self.conv1(x)))  # -> (N, 6, 14, 14)
        x = self.pool(F.relu(self.conv2(x)))  # -> (N, 16, 5, 5)
        x = x.view(-1, 16 * 5 * 5)            # flatten -> (N, 400)
        x = F.relu(self.fc1(x))               # -> (N, 120)
        x = F.relu(self.fc2(x))               # -> (N, 84)
        return self.fc3(x)                    # logits; CrossEntropyLoss applies softmax
# torch.Size([1, 3, 32, 32])
# torch.Size([1, 6, 14, 14])
# torch.Size([1, 16, 5, 5])
# torch.Size([1, 400])
# torch.Size([1, 120])
# torch.Size([1, 84])
# torch.Size([1, 100])
# Standard classification setup: cross-entropy loss with plain SGD + momentum.
model = Net()
import torch.optim as optim
criterion = nn.CrossEntropyLoss()
optimizer = optim.SGD(model.parameters(), lr=0.0002, momentum=0.9)
from util import train_eval
# Train for 5 epochs, evaluating on the test set after each epoch; the
# recorded per-epoch losses/accuracies are kept in the comment block below.
train_eval(model, criterion, trainloader, testloader, optimizer, epochs=5)
# [1, 5000] loss: 2.293
# [1, 10000] loss: 2.075
# [1, 15000] loss: 1.876
# [1, 20000] loss: 1.754
# [1, 25000] loss: 1.658
# [1, 30000] loss: 1.625
# [1, 35000] loss: 1.558
# [1, 40000] loss: 1.520
# [1, 45000] loss: 1.494
# [1, 50000] loss: 1.459
# 1/5 4456/10000 44.56% (107.18255376815796s)
# [2, 5000] loss: 1.413
# [2, 10000] loss: 1.398
# [2, 15000] loss: 1.386
# [2, 20000] loss: 1.379
# [2, 25000] loss: 1.358
# [2, 30000] loss: 1.324
# [2, 35000] loss: 1.333
# [2, 40000] loss: 1.280
# [2, 45000] loss: 1.296
# [2, 50000] loss: 1.304
# 2/5 5357/10000 53.56999999999999% (105.8866639137268s)
# [3, 5000] loss: 1.226
# [3, 10000] loss: 1.231
# [3, 15000] loss: 1.215
# [3, 20000] loss: 1.235
# [3, 25000] loss: 1.199
# [3, 30000] loss: 1.187
# [3, 35000] loss: 1.192
# [3, 40000] loss: 1.194
# [3, 45000] loss: 1.196
# [3, 50000] loss: 1.191
# 3/5 5729/10000 57.29% (105.63971090316772s)
# [4, 5000] loss: 1.117
# [4, 10000] loss: 1.096
# [4, 15000] loss: 1.121
# [4, 20000] loss: 1.123
# [4, 25000] loss: 1.107
# [4, 30000] loss: 1.120
# [4, 35000] loss: 1.124
# [4, 40000] loss: 1.094
# [4, 45000] loss: 1.105
# [4, 50000] loss: 1.102
# 4/5 5829/10000 58.29% (112.56915497779846s)
# [5, 5000] loss: 1.034
# [5, 10000] loss: 1.024
# [5, 15000] loss: 1.040
# [5, 20000] loss: 1.027
# [5, 25000] loss: 1.043
# [5, 30000] loss: 1.049
# [5, 35000] loss: 1.024
# [5, 40000] loss: 1.042
# [5, 45000] loss: 1.027
# [5, 50000] loss: 1.027
# 5/5 6178/10000 61.78% (109.75669193267822s)
# 61.0% (541.0347754955292s)
| StarcoderdataPython |
3383709 | <reponame>Holt59/Project-Matthew<filename>matlab/demos/UDPCommunication/PC/reception.py
from socket import *
# Minimal UDP receiver for the Matlab <-> PC UDP demo: bind to port 9000 on
# all interfaces and drain incoming datagrams forever.
s = socket(AF_INET, SOCK_DGRAM)
s.bind(('', 9000))
while True:
    # Read up to 4 bytes per datagram and discard them.  NOTE(review): any
    # payload beyond 4 bytes is truncated -- presumably the sender emits
    # 4-byte packets; confirm against the Matlab side of the demo.
    s.recv(4)
48030 | <gh_stars>0
#!/bin/bash/python
#
# Importation des librairies utiles
import json
import os
import pipes
import time
import datetime
import sys
import zipfile
from os.path import basename
import paramiko
import logging
import boto3
from botocore.exceptions import ClientError
# Declaration des variables
DATETIME = time.strftime('%Y-%m-%d_%H-%M-%S')
BACKUP_PATH = '/home/fanny/Documents/Sauvegardes/BACKUP_' + DATETIME
FILE_MYSQL = '/home/fanny/Documents/Sauvegardes/MySQL/'
FILE_POSTGRE = '/home/fanny/Documents/Sauvegardes/PostgreSQL/'
FILE_SQLITE = '/home/fanny/Documents/Sauvegardes/SQLite/'
BUCKET_S3 = 'backup-srv-bdd'
# Declaration des fonctions
def upload_file(file_name, bucket, object_name=None):
    """Upload ``file_name`` to the S3 ``bucket``.

    :param file_name: local path of the file to upload
    :param bucket: target S3 bucket name
    :param object_name: S3 key; defaults to the local file name
    :return: True on success, False when the upload raised a ClientError
    """
    # If no S3 object name is given, reuse the local file name
    if object_name is None:
        object_name = file_name
    # Re-read the credentials from the JSON config (module-level ``infoJS``)
    with open(infoJS, "r") as filejson:
        donnees = json.load(filejson)
    # Build the S3 client from the stored credentials
    s3_client = boto3.client('s3',
                             aws_access_key_id=donnees["aws_access_key_id"],
                             aws_secret_access_key=donnees["aws_secret_access_key"])
    # Upload the file to AWS.
    # NOTE(review): boto3's upload_file() returns None, so this print
    # always shows 'None' -- presumably just a progress marker; confirm.
    try:
        response = s3_client.upload_file(file_name, bucket, object_name)
        print (response)
    except ClientError as e:
        logging.error(e)
        return False
    return True
# Create each backup directory independently so one pre-existing directory
# does not prevent the others from being created.  The original wrapped all
# four os.mkdir() calls in a single bare try/except: if BACKUP_PATH already
# existed, the MySQL/PostgreSQL/SQLite folders were silently never created,
# and any unrelated error (permissions, missing parent) was swallowed too.
for _backup_dir, _created_msg in (
        (BACKUP_PATH, "Dossier créé."),
        (FILE_MYSQL, "Dossier MYSQL créé."),
        (FILE_POSTGRE, "Dossier POSTGRE créé."),
        (FILE_SQLITE, "Dossier SQLITE créé."),
):
    try:
        os.mkdir(_backup_dir)
        print(_created_msg)
    except FileExistsError:
        # Only "already exists" is expected; other OSErrors now propagate.
        print("Certains dossiers existent déjà")
# Load the server list and AWS credentials from the JSON configuration.
infoJS = "/home/fanny/Documents/Projet6/info_srv.json"
with open(infoJS, "r") as filejson:
    donnees = json.load(filejson)
# Main loop: back up every configured database according to its engine type.
for server in donnees["databases"]:
    # MariaDB / MySQL: dump remotely via mysqldump.
    if server["type"] == 'MariaDB':
        # Dump the database named in the configuration into BACKUP_PATH.
        os.system("mysqldump --column-statistics=0 -h " + server["ip"] + " -u " + server["user"] + " -p" + server["password"] + " " + server["BDD"] + " > " + pipes.quote(BACKUP_PATH) + "/" + server["BDD"] + ".sql")
        print("Sauvegarde de la base de donnée " + server["BDD"] + " a bien été effectuée.")
        # Zip the SQL dump of the database.
        zf = zipfile.ZipFile (FILE_MYSQL + server["BDD"] + ".zip", mode='w')
        try:
            print("Zipping " + server["BDD"])
            zf.write(BACKUP_PATH + "/" + server["BDD"] + ".sql", basename(BACKUP_PATH + "/" + server["BDD"] + ".sql",))
        finally:
            zf.close()
            print("Le fichier ZIP a bien été créé.")
        # Send the zipped backup to AWS S3.
        upload_file(FILE_MYSQL + "/" + server["BDD"] + ".zip", BUCKET_S3, "MYSQL/" + server["BDD"] + ".zip")
        print("Fichier zip envoyé sur AWS.")
    # PostgreSQL: dump remotely via pg_dump.
    elif server["type"] == 'PostgreSQL':
        # Dump the database named in the configuration into BACKUP_PATH.
        os.system("export PGPASSWORD=" + server["password"] + " && pg_dump --user=" + server["user"] + " --host=" + server["ip"] + " --port=" + server["port"] + " > " + pipes.quote(BACKUP_PATH) + "/" + server["BDD"] + ".sql")
        print("La sauvegarde de la base de donnée " + server["BDD"] + " a bien été effectuée.")
        zf = zipfile.ZipFile (FILE_POSTGRE + server["BDD"] + ".zip", mode='w')
        try:
            print("Zipping " + server["BDD"])
            zf.write(BACKUP_PATH + "/" + server["BDD"] + ".sql", basename(BACKUP_PATH + "/" + server["BDD"] + ".sql",))
        finally:
            zf.close()
            print("Le fichier ZIP a bien été créé.")
        upload_file(FILE_POSTGRE + "/" + server["BDD"] + ".zip", BUCKET_S3, "POSTGRE/" + server["BDD"] + ".zip")
    # SQLite: dump over SSH, then fetch the dump file via SFTP.
    elif server["type"] == 'SQLite':
        # Connect to the remote server to back up the database(s).
        ssh_client=paramiko.SSHClient()
        ssh_client.set_missing_host_key_policy(paramiko.AutoAddPolicy())
        ssh_client.connect(server["ip"], 22, server["user"], server["password"])
        print("Connexion au serveur réussie, lancement de la sauvegarde des bases de données du serveur.")
        # NOTE(review): each exec_command() runs in a fresh shell, so the
        # '.output' / '.dump' lines are not fed into the sqlite3 session
        # started above -- verify that /tmp/dump.sql is really produced.
        ssh_client.exec_command('sqlite3 bambou.db')
        ssh_client.exec_command('.output /tmp/dump.sql')
        ssh_client.exec_command('.dump')
        ftp_client=ssh_client.open_sftp()
        ftp_client.get("/tmp/dump.sql", "" + BACKUP_PATH + "/" + server["BDD"] + ".sql")
        ftp_client.close()
        ssh_client.close()
        print("La sauvegarde de la base de donnée " + server["BDD"] + " a bien été effectuée.")
        zf = zipfile.ZipFile (FILE_SQLITE + server["BDD"] + ".zip", mode='w')
        try:
            print("Zipping " + server["BDD"])
            zf.write(BACKUP_PATH + "/" + server["BDD"] + ".sql", basename(BACKUP_PATH + "/" + server["BDD"] + ".sql",))
        finally:
            zf.close()
            print("Le fichier ZIP a bien été créé.")
        upload_file(FILE_SQLITE+ "/" + server["BDD"] + ".zip", BUCKET_S3 + "", "SQLITE/" + server["BDD"] + ".zip")
    # End of the loop: any other engine type stops the run.
    # NOTE(review): `break` aborts the whole loop on the first unknown
    # type, so any remaining servers in the list are never backed up --
    # confirm whether `continue` was intended instead.
    else:
        print("Pus aucune base de donnée à sauvegarder.")
        break
1669842 | <filename>djangoFiles/logs/dbio.py<gh_stars>10-100
from base.dbio import AbstractBaseDbIO
from logs.models import AccessLog
class AccessLogDbIO(AbstractBaseDbIO):
    """Database I/O helper bound to the :class:`AccessLog` Django model."""

    def __init__(self):
        # ``model_name`` is presumably consumed by AbstractBaseDbIO's
        # helpers -- TODO confirm; note super().__init__() is not called.
        self.model_name = AccessLog
| StarcoderdataPython |
3343223 | import os
import cv2
from constants import MODULE_CLASSIFIER_DIR
from cv_helpers import get_classifier_directories, apply_offset_to_locations, show
from modules import Type, ModuleSolver
from modules.maze_cv import get_maze_params, get_button_locations
from modules.maze_solution import find_path_through_maze, UP, RIGHT, DOWN, LEFT
from mouse_helpers import MouseButton, click_pixels, post_click_delay
def solve_stored_mazes():
    """Solve every labelled maze screenshot on disk and print the result.

    For each image in the labelled 'maze' directory: extract the maze
    parameters, draw the detected button locations, display the annotated
    image, and print the sequence of moves through the maze.

    Fixes: the original used Python-2 ``print`` statements (a syntax error
    under Python 3, which the rest of this module targets) and carried
    dead commented-out debug code plus an unused ``files_to_test`` tuple.
    """
    vocab_path, unlabelled_dir, labelled_dir, features_dir, svm_data_dir = \
        get_classifier_directories(MODULE_CLASSIFIER_DIR)
    maze_dir = os.path.join(labelled_dir, "maze")
    for file_name in os.listdir(maze_dir):
        print(file_name)
        maze_image = cv2.imread(os.path.join(maze_dir, file_name))
        lookup_key, start_coordinates, end_coordinates = get_maze_params(maze_image)
        print("lookup", lookup_key)
        print("start (white)", start_coordinates)
        print("end (red)", end_coordinates)
        # Mark the four directional buttons on the image for inspection.
        top, right, bottom, left = get_button_locations(maze_image)
        for location in (top, right, bottom, left):
            cv2.circle(maze_image, location, 10, (255, 0, 0), 10)
        show(maze_image)
        moves = find_path_through_maze(lookup_key, start_coordinates, end_coordinates)
        print(" ".join(moves))
class MazeSolver(ModuleSolver):
    """Solver for the maze module: finds the path and clicks the arrows."""

    def get_type(self):
        """Identify this solver as handling maze modules."""
        return Type.maze

    def solve(self, image, offset, sides_info, screenshot_helper, current_module_position):
        """Extract the maze from ``image`` and click out the solution."""
        lookup_key, start_coordinates, end_coordinates = get_maze_params(image)
        # Button locations come back as (top, right, bottom, left);
        # shift them by the module's screen offset.
        buttons = apply_offset_to_locations(get_button_locations(image), offset)
        button_for_direction = dict(zip((UP, RIGHT, DOWN, LEFT), buttons))
        for direction in find_path_through_maze(lookup_key,
                                                start_coordinates,
                                                end_coordinates):
            x_raw, y_raw = button_for_direction[direction]
            click_pixels(MouseButton.left, x_raw, y_raw)
            post_click_delay()
# Script entry point: batch-solve all stored maze screenshots.
if __name__ == '__main__':
    solve_stored_mazes()
| StarcoderdataPython |
3231416 | # -*- coding: utf-8 -*-
"""
Created on Wed Jan 24 17:36:27 2018
@author: rantala2
"""
import mne
import sys
import subprocess
# import os
def createBem(subj):
    """Build BEM surfaces and solution for FreeSurfer subject ``subj``.

    Writes '<subj>-5120-5120-5120-bem-sol.fif' in the current directory
    and shows a BEM plot.
    """
    # NOTE(review): the source space is computed but never used or saved
    # here; createInv() later reads an oct-6 source space from disk --
    # confirm this call is needed (or save its result).
    src = mne.setup_source_space(subj, n_jobs=2)
    # Run the watershed algorithm to generate the BEM surfaces.
    subprocess.call(['mne', 'watershed_bem', '-s', subj])
    # Single-layer conductivity model (sufficient for MEG).
    model = mne.make_bem_model(subj, conductivity=[0.3])
    bem = mne.make_bem_solution(model)
    mne.write_bem_solution(subj+'-5120-5120-5120-bem-sol.fif', bem)
    mne.viz.plot_bem(subj)
def createInv(subj):
    """Compute and save the inverse operator for subject ``subj``.

    Reads the resting-state raw recording, its coregistration transform,
    the oct-6 source space, and the BEM solution written by createBem();
    writes '<subj>-inv.fif'.
    """
    # subjdir = os.environ['SUBJECTS_DIR']
    file_path = '/m/nbe/scratch/restmeg/data/camcan/cc700/mri/pipeline/release004/BIDSsep/megraw/sub-'+subj+'/meg/'
    raw = mne.io.fiff.Raw(file_path+'rest_raw.fif')
    fname_trans = file_path + 'rest_raw-trans.fif'
    src = mne.read_source_spaces('/m/nbe/scratch/restmeg/data/camcan/subjects/'+subj+'/bem/'+subj+'-oct-6-src.fif')
    bem_sol = mne.read_bem_solution(subj+'-5120-5120-5120-bem-sol.fif')
    fwd = mne.make_forward_solution(raw.info, fname_trans, src, bem_sol)
    # Noise covariance estimated from the raw recording itself.
    cov = mne.compute_raw_covariance(raw)
    inv = mne.minimum_norm.make_inverse_operator(raw.info, fwd, cov, loose=0.2)
    mne.minimum_norm.write_inverse_operator(subj+'-inv.fif', inv)
def createCov(subj):
    """Compute and save the noise covariance from the empty-room recording
    of subject ``subj`` ('emptyroom_<subj>-cov.fif' next to the raw file)."""
    file_path = '/m/nbe/scratch/restmeg/data/camcan/emptyroom/'+subj
    raw = mne.io.fiff.Raw(file_path+'/emptyroom_' + subj + '.fif')
    cov = mne.compute_raw_covariance(raw)
    cov.save(file_path+'/emptyroom_' + subj + '-cov.fif')
def doSSS(subj):
    """Apply Maxwell filtering (SSS, 20 s tSSS windows) to the raw
    resting-state recording of ``subj`` and save it under processed/."""
    file_path = '/m/nbe/scratch/restmeg/data/camcan/cc700/mri/pipeline/release004/BIDSsep/megraw/sub-'+subj+'/meg/'
    raw = mne.io.fiff.Raw(file_path+'rest_raw.fif')
    # Site-specific fine calibration and cross-talk correction files.
    sss_cal = '/m/nbe/scratch/restmeg/data/camcan/cc700/mri/pipeline/release004/BIDSsep/megraw/sss_cal.dat'
    ct = '/m/nbe/scratch/restmeg/data/camcan/cc700/mri/pipeline/release004/BIDSsep/megraw/ct_sparse.fif'
    raw_sss = mne.preprocessing.maxfilter(raw, calibration=sss_cal, cross_talk= ct, st_duration=20)
    raw_sss.save('/m/nbe/scratch/restmeg/data/camcan/processed/cc700/mri/pipeline/release004/BIDSsep/megraw/sub-'+subj+'/meg/rest_raw_sss.fif')
def shortTest(subj):
    """Estimate the rank of the raw resting-state recording and write it
    to 'rank_rest_raw.txt' under the processed data tree."""
    file_path = '/m/nbe/scratch/restmeg/data/camcan/cc700/mri/pipeline/release004/BIDSsep/megraw/sub-'+subj+'/meg/'
    raw = mne.io.fiff.Raw(file_path+'rest_raw.fif')
    rank = raw.estimate_rank()
    file_name = '/m/nbe/scratch/restmeg/data/camcan/processed/cc700/mri/pipeline/release004/BIDSsep/megraw/sub-'+subj+'/meg/rank_rest_raw.txt'
    with open(file_name, 'w') as f:
        f.write(str(rank))
# Entry point: with a subject argument, build that subject's BEM;
# otherwise run the empty-room covariance for a fixed debug subject.
if __name__ == '__main__':
    if len(sys.argv) > 1:
        createBem(sys.argv[1])
    else:
        subj = 'CC110033'
        # stc_bin = mne.read_source_estimate(fname=subj+'-bin')
        VALUE = '/m/nbe/scratch/restmeg/data/camcan/subjects/'
        mne.utils.set_config("SUBJECTS_DIR", VALUE, set_env=True)
        createCov(subj)
        #createInv('CC110033')
3231052 | <filename>utils/pymeta_helper.py
#/usr/bin/python
# Copyright 2014 <NAME>. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from __future__ import print_function
import argparse
import importlib
import os
import sys
class ParseError(Exception):
    """Raised when input text cannot be parsed by a generated parser."""
class ParserBase(object):
    """Wrap a PyMeta (OMeta) grammar as an importable, cached parser.

    On construction, the generated parser module is (re)built from
    ``self.grammar`` whenever it is missing or stale, imported, and its
    parser class exposed through :meth:`parse`.
    """

    # Subclasses may provide these as class attributes instead of
    # constructor arguments.
    name = None
    grammar = None

    def __init__(self, name=None, grammar=None,
                 src_dir=None, filename=None,
                 classname=None, force=False):
        """Derive module/class names from ``name`` and (re)generate the
        parser module if forced or out of date."""
        # Constructor arguments win over class-level defaults.
        self.grammar = grammar or self.grammar
        self.name = name or self.name
        self.src_dir = src_dir or os.path.dirname(os.path.abspath(__file__))
        self.basename = filename or self.name.lower() + '_parser.py'
        self.classname = classname or self.name.capitalize() + 'Parser'
        self.grammar_constant_name = self.name.upper() + '_GRAMMAR'
        self.filename = os.path.join(self.src_dir, self.basename)
        assert self.name
        assert self.grammar
        module_name = self.basename.replace('.py', '')
        # Regenerate when forced or not yet imported and the embedded
        # grammar no longer matches the current one; otherwise reuse the
        # already-imported module.
        if force or module_name not in sys.modules:
            if force or (self.generated_grammar() != self.grammar.strip()):
                self.generate_parser_module()
            self._module = importlib.import_module(module_name)
        else:
            self._module = sys.modules[module_name]
        self._cls = getattr(self._module, self.classname)
        # pylint: disable=W0212
        self._parse_error = self._module._MaybeParseError

    def parse(self, txt):
        """Parse ``txt`` with the generated parser class, re-raising the
        module's parse errors as this module's :class:`ParseError`."""
        # NOTE(review): this catches ``self._module.ParseError`` while
        # __init__ stores ``_MaybeParseError`` -- confirm both names exist
        # in the generated runtime.
        try:
            return self._cls.parse(txt)
        except self._module.ParseError as e:
            raise ParseError(str(e))

    def generated_grammar(self):
        """Return the grammar text embedded in the generated module, or
        None when the module file does not exist yet."""
        if not os.path.exists(self.filename):
            return None
        with open(self.filename) as fp:
            lines = fp.readlines()
        # The grammar lives between '<NAME>_GRAMMAR = """' and '"""'.
        start = lines.index('%s = """\n' % self.grammar_constant_name)
        end = lines[start:].index('"""\n')
        txt = ''.join(lines[start+1:start + end])
        return txt.strip()

    def generate_parser_module(self):
        """Compile the grammar with PyMeta and write a standalone parser
        module: runtime source + embedded grammar + generated class."""
        from pymeta.grammar import OMetaGrammar
        from pymeta import builder
        tree = OMetaGrammar(self.grammar).parseGrammar(self.classname,
                                                       builder.TreeBuilder)
        # Inline the PyMeta runtime so the generated module is standalone.
        with open(os.path.join(self.src_dir, 'pymeta', 'runtime.py')) as fp:
            runtime_str = fp.read()
        with open(os.path.join(self.src_dir, self.filename), 'w') as fp:
            fp.write('# pylint: disable=C0103,C0301,C0302,R0201,'
                     'R0903,R0904,R0912,R0914\n\n')
            fp.write(runtime_str)
            fp.write('\n\n')
            fp.write('%s = """\n%s\n"""\n\n' % (self.grammar_constant_name,
                                                self.grammar))
            fp.write('GrammarBase = OMetaBase\n')
            fp.write('\n\n')
            parser_cls_code = builder.writePython(tree)
            fp.write(parser_cls_code)
def make_parser(grammar_file, name=None, _output=None, force=False):
    """Read a .pymeta grammar file and return a ParserBase built from it.

    ``name`` defaults to the capitalized grammar file basename; ``_output``
    is accepted for CLI compatibility but unused.
    """
    with open(grammar_file) as grammar_fp:
        grammar_text = grammar_fp.read()
    stem = os.path.basename(grammar_file).replace('.pymeta', '')
    parser_name = name or stem.capitalize()
    return ParserBase(grammar=grammar_text, name=parser_name, force=force)
def main(argv=None):
    """Command-line entry point: generate a parser from a grammar file.

    :param argv: argument list (defaults to sys.argv via argparse)
    """
    parser = argparse.ArgumentParser(prog='pymeta_helper')
    parser.usage = '[options] grammar'
    parser.add_argument('grammar', nargs=1,
                        help=argparse.SUPPRESS)
    parser.add_argument('-o', metavar='FILE', dest='output',
                        help=('destination file (defaults to '
                              's/grammar.pymeta/grammar.py)'))
    parser.add_argument('-n', '--name',
                        help='base name of grammar')
    args = parser.parse_args(args=argv)
    # Force regeneration so the output always reflects the given grammar.
    try:
        make_parser(args.grammar[0], args.name, args.output, force=True)
    except IOError:
        print("Error: '%s' not found" % args.grammar, file=sys.stderr)
        sys.exit(1)


if __name__ == '__main__':
    main()
| StarcoderdataPython |
63481 | <gh_stars>0
"""
Tests for skcore baseclasses
"""
import pytest
from sksurgerycore.baseclasses.tracker import SKSBaseTracker
def test_tracker_baseclass():
    """SKSBaseTracker must reject incomplete subclasses and raise
    NotImplementedError from every base-class method."""

    class BadTracker(SKSBaseTracker):  # pylint: disable=abstract-method
        """Subclass missing most of the required abstract methods."""

        def close(self):
            pass

    # Instantiating a subclass that leaves abstract methods unimplemented
    # must fail.
    with pytest.raises(TypeError):
        _ = BadTracker()  # pylint: disable=abstract-class-instantiated

    class GoodTracker(SKSBaseTracker):
        """Subclass that delegates every method straight to the base."""

        def close(self):  # pylint: disable=useless-super-delegation
            super().close()

        def get_frame(self):  # pylint: disable=useless-super-delegation
            super().get_frame()

        def get_tool_descriptions(self):  # pylint: disable=useless-super-delegation
            super().get_tool_descriptions()

        def start_tracking(self):  # pylint: disable=useless-super-delegation
            super().start_tracking()

        def stop_tracking(self):  # pylint: disable=useless-super-delegation
            super().stop_tracking()

    tracker = GoodTracker()
    # Every base-class implementation must raise NotImplementedError.
    for base_call in (tracker.close,
                      tracker.get_frame,
                      tracker.get_tool_descriptions,
                      tracker.start_tracking,
                      tracker.stop_tracking):
        with pytest.raises(NotImplementedError):
            base_call()
| StarcoderdataPython |
# Package metadata for the ``ics`` distribution.
__title__ = 'ics'
__version__ = '0.5'
__author__ = '<NAME>'
__license__ = 'Apache License, Version 2.0'
__copyright__ = 'Copyright 2013-2019 <NAME> and individual contributors'
| StarcoderdataPython |
1672406 | <reponame>welch/sportsball
# -*- coding: utf-8 -*-
from .arrow import Arrow
from .factory import ArrowFactory
from .api import get, now, utcnow
| StarcoderdataPython |
1720098 | <gh_stars>1-10
from flask_restful import Api
from . import api_bp
from .controllers import UserController, UserList, TaskList, TaskController
# REST API wiring: register the user and task resources on the blueprint.
api = Api(api_bp)
api.add_resource(UserList, "/users/")
api.add_resource(UserController, "/users/<u_id>/")
api.add_resource(TaskList, "/users/<u_id>/tasks/")
api.add_resource(TaskController, "/users/<u_id>/tasks/<t_id>/")
1638846 | <gh_stars>0
#ss HelloWorld.py $($Env:SPARK_HOME + "\README.md")
import sys
from pyspark.sql import SparkSession
from pyspark.sql.functions import lit
if __name__ == '__main__':
    # Word/line statistics over Spark's README.md, passed as argv[1].
    numArgs = len(sys.argv)
    if numArgs > 1:
        print("----------------------------------------------------------------------")
        print(" HelloWorld")
        print("----------------------------------------------------------------------")
        filepath = sys.argv[1]
        spark = SparkSession.builder.appName("HelloWorld").getOrCreate()

        # Load the text file and cache it: the DataFrame is scanned
        # several times below.
        textFile = spark.read.format("text").load(filepath)
        df = textFile.cache()

        # Basic inspection.
        print(f"Number of lines: {df.count()}")
        print(f"First line: {df.first()}")

        # Count lines containing specific substrings.
        lines = df["value"]
        linesSpark = df.filter(lines.contains("Spark"))
        numLinesSpark = linesSpark.count()
        numLinesA = df.filter(lines.contains("a")).count()
        numLinesB = df.filter(lines.contains("b")).count()
        print(f"Number of lines with \"Spark\": {numLinesSpark}")
        print(f"Number of lines with \"a\": {numLinesA}")
        print(f"Number of lines with \"b\": {numLinesB}")

        # Split each line into words.  (Named function instead of the
        # original ``splitfunc = lambda ...`` assignment -- PEP 8 / E731.)
        def splitfunc(row):
            return row.value.split(" ")

        wordsMap = df.rdd.map(splitfunc)
        wordsFlatMap = df.rdd.flatMap(splitfunc)

        # Number of words in the longest line.
        numWordsPerLine = wordsMap.map(lambda ls: len(ls))
        numWordsMax = numWordsPerLine.reduce(max)
        print(f"Number of words in the line with most words: {numWordsMax}")

        # Word frequencies: group by word, count, and take the top five.
        grouped = wordsFlatMap.map(lambda word: (word, 1)).groupByKey()
        wordCounts = grouped.mapValues(len)
        wordCountsSorted = wordCounts.sortBy(lambda tpl: tpl[1], ascending = False)
        top5 = wordCountsSorted.take(5)
        for key, val in top5:
            print(f' {key}: {val}')

        spark.stop()
    else:
        print("Please provide path of Spark's README.md")
1709958 | <filename>RU_EN_examples/muse.py
from __future__ import absolute_import, division
import sys
import logging
import tensorflow_hub as hub
import tensorflow_text
# Paths to the SentEval package and its evaluation data.
PATH_TO_SENTEVAL = '../'
PATH_TO_DATA = '../data'
# Make SentEval importable from its relative location.
sys.path.insert(0, PATH_TO_SENTEVAL)
import senteval
# SentEval prepare and batcher
def prepare(params, samples):
    """SentEval ``prepare`` hook: load the multilingual USE model once.

    ``samples`` is required by the SentEval API but unused here.
    (Removed the original redundant bare ``return``.)
    """
    params.model = hub.load("https://tfhub.dev/google/universal-sentence-encoder-multilingual-large/3")
def batcher(params, batch):
    """SentEval ``batcher`` hook: embed a batch of tokenized sentences.

    Each sentence (a sequence of tokens) is joined into one string; empty
    sentences are replaced by '.' so the encoder always receives text.
    Returns the embeddings as a numpy array.

    Fix: use truthiness (``if sent``) instead of ``sent != []``, which
    failed to catch empty non-list sequences such as tuples.
    """
    sentences = [' '.join(sent) if sent else '.' for sent in batch]
    embeddings = params.model(sentences)
    return embeddings.numpy()
# SentEval evaluation parameters: 5-fold CV, batch size 128, and a simple
# logistic-regression classifier (nhid=0) on top of the embeddings.
params_senteval = {'task_path': PATH_TO_DATA, 'usepytorch': False, 'kfold': 5, 'batch_size': 128,
                   'classifier': {'nhid': 0, 'optim': 'rmsprop', 'tenacity': 3, 'epoch_size': 2}}
# Verbose logging for the evaluation run.
logging.basicConfig(format='%(asctime)s : %(message)s', level=logging.DEBUG)
if __name__ == "__main__":
se = senteval.engine.SE(params_senteval, batcher, prepare)
transfer_tasks = ['SICKEntailment', 'SST2', 'SST5', 'TREC', 'MRPC',
'SICKEntailment_RU', 'SST2_RU', 'SST3_RU', 'TREC_RU', 'MRPC_RU'
'STSBenchmark', 'SICKRelatedness',
'STSBenchmark_RU', 'SICKRelatedness_RU'
]
results = se.eval(transfer_tasks)
print(results)
| StarcoderdataPython |
1666314 | <filename>Lib/site-packages/sciplot/sciplotUI.py
# -*- coding: utf-8 -*-
"""
SciPlot-PyQt: Publication-ready scientific plotting for Python
==============================================================
SciPlot-PyQt (aka SciPlot) is a user-interface/matplotlib wrapper built with
PyQt5 that allows interactive plotting through an embedded matplotlib canvas.
It enables fast and easy publication-ready plots and images:
* Interactive plotting
* Theme and style editing (TODO)
* Figure saving and opening for later editing (TODO)
Supported Plot Types
---------------------
Line plots : plot
Bar plots : bar, hist
Polycollections : fill_between
Images : imshow
Notes
-----
SciPlot has a lot of advances/improvements to make. Feel free to contact me--
help is always welcome!
Usage
-----
import sciplot
sp = sciplot.SciPlotUI()
sp.show()
Example
-------
sp.plot((0,1),(2,3),label='Line', x_label='X', y_label='Y', ls='--')
sp.fill_between((0,1),(1,2),(3,4),label='Fill Between', color='r', alpha=0.25)
Authors
-------
* <NAME>. <<EMAIL>>
"""
import sys as _sys
import os as _os
import numpy as _np
import time as _time
# Generic imports for MPL-incorporation
import matplotlib as _mpl
_mpl.use('Qt5Agg')
import matplotlib.pyplot as _plt # Simply for it starting a QApplication
# Generic imports for QT-based programs
from PyQt5.QtWidgets import (QApplication as _QApplication,
QMainWindow as _QMainWindow,
QTableView as _QTableView,
QSizePolicy as _QSizePolicy,
QTabWidget as _QTabWidget,
QFileDialog as _QFileDialog,
QInputDialog as _QInputDialog)
from PyQt5.QtCore import pyqtSignal as _pyqtSignal
import PyQt5.QtCore as _QtCore
import sciplot
# Import from Designer-based GUI
from sciplot.ui.qt_Plotter import Ui_MainWindow as Ui_Plotter
from sciplot.ui.widget_mpl import MplCanvas as _MplCanvas
from sciplot.ui.dialogs import DualEntry
from sciplot.ui.models.lines import (TableModelLines as _TableModelLines,
EditDelegateLines as _EditDelegateLines)
from sciplot.ui.models.fillbetween import (TableModelFillBetween as
_TableModelFillBetween,
EditDelegateFillBetween as
_EditDelegateFillBetween)
from sciplot.ui.models.images import (TableModelImages as _TableModelImages,
EditDelegateImages as _EditDelegateImages)
from sciplot.ui.models.bars import (TableModelBars as _TableModelBars,
EditDelegateBars as _EditDelegateBars)
from sciplot.data.generic import DataGlobal as _DataGlobal
from sciplot.data.lines import DataLine as _DataLine
from sciplot.data.images import DataImages as _DataImages
from sciplot.data.bars import DataBar as _DataBar
from sciplot.data.special import DataFillBetween as _DataFillBetween
class SciPlotUI(_QMainWindow):
"""
Scientific plotting user-interface for creating publication-quality plots
and images
Parameters
----------
limit_to : list, optional (default = None)
Limit the application to implement only certain functionality. \
Default is all elements turned ON. See Notes for options.
show : bool, optional (default = True)
Whether to show the UI upon instantiation
Methods
-------
plot : MPL-like plotting functionality
imshow : MPL-like imshow
bar : MPL-like bar plot EXCEPT centered (rather than left-edge defined)
hist : MPL-like histogram
fill_between : MPL-like fill_between
Internal Methods
----------------
updatePlotDataStyle : Make updates to plots (lines) when a stylistic \
change is made within the model-table
updatePlotDataDelete : Remove a plot when deleted from model-table
updateFillBetweenDataStyle : Make updates to fill between's when a \
stylistic change is made within the model-table
updateFillBetweenDataDelete : Remove a fill between when deleted from \
model-table
updateImagesDataStyle : Make updates to images when a stylistic \
change is made within the model-table
updateImageDataDelete : Remove an image when deleted from model-table
updateBarsDataStyle : Make updates to bars plots when a stylistic \
change is made within the model-table
updateBarsDataDelete : Remove a bar plot when deleted from model-table
refreshAllPlots : Delete all plots and re-plot
updateAllLabels : Update all labels (x-, y-, title, etc) on MPL widget, \
in model, in data container, and in UI lineEdits
updateLineEditLabels : Update all labels (x-, y-, title, etc) in UI \
lineEdits
updateDataLabels : Update all labels (x-, y-, title, etc) in data \
container
updateMplLabels : Update all labels (x-, y-, title, etc) on MPL widget
updateLabelsFromLineEdit : Update all labels (x-, y-, title, etc) on MPL \
widget, in model, and in data container. Edits came from lineEdits.
axisAspect : Set MPL-axis aspect ratio setting
axisScaling : Set MPL-axis scaling ratio setting
axisVisible : Set MPL-axis on or off
axisLimits : Set MPL-axis limits
updateAxisParameters : Query and update UI lineEdits related to axis \
properties such as limits, visibility (on/off), scaling, and aspect \
ratio
Notes
-----
* limit_to options: 'lines', 'fill betweens', 'bars', images'
"""
# Signal emitted when clearAll is called
# Added for external programs
all_cleared = _pyqtSignal(int)
    def __init__(self, limit_to=None, parent=None, show=True):
        """Build the plotting UI, creating a QApplication if none exists.

        Parameters
        ----------
        limit_to : list, optional
            Restrict available element tabs (see class docstring).
        parent : QWidget, optional
            Parent widget passed to QMainWindow.
        show : bool
            Show the window immediately when True.
        """
        self.__version__ = sciplot.__version__
        self.list_ids = []
        self.list_all = []
        # Matplotlib v2/v3 changed and deprecated several APIs; track
        # whether we are on the v1 series so both can be supported.
        self._mpl_v1 = int(_mpl.__version__.rsplit('.')[0]) == 1
        # A QApplication must exist before any widget can be created; with
        # some Matplotlib versions none is running, so create one and tell
        # the user they must call app.exec_() themselves.
        self.app = None
        if _QApplication.instance() is None:
            print('\nNo QApplication instance (this is common with certain \
version of Matplotlib). Creating one.\n\r\
You will need to exec manually after you finish plotting.\n\
-----------Example---------------\n\
import sciplot\n\
sp = sciplot.main()\n\n\
# Plot a line\n\
sp.plot((0,1),(0,1))\n\n\
# Start the QApplication\n\
sp.app.exec_()')
            self.app = _QApplication(_sys.argv)
            self.app.setQuitOnLastWindowClosed(True)
        self.setup(limit_to=limit_to, parent=parent)
        if show:
            self.show()
    def closeEvent(self, event):
        # Intentionally a no-op override: accept Qt's default close
        # behaviour without extra cleanup.
        pass
def _tabAvailability(self, limit_to=None):
"""
If limit_to is provided, limits the tabs (elements) that are available.
May be useful for built-upon applications.
"""
if limit_to is None:
self.elements = ['lines', 'fill betweens', 'images', 'bars']
self._to_setup = [self.setupLines, self.setupFillBetweens,
self.setupImages, self.setupBars]
else:
self._to_setup = []
self.elements = []
if limit_to.count('lines'):
self.elements.append('lines')
self._to_setup.append(self.setupLines)
if limit_to.count('fill betweens'):
self.elements.append('fill betweens')
self._to_setup.append(self.setupFillBetweens)
if limit_to.count('images'):
self.elements.append('images')
self._to_setup.append(self.setupImages)
if limit_to.count('bars'):
self.elements.append('bars')
self._to_setup.append(self.setupBars)
def setupLines(self):
"""
Enable and setup line plotting
"""
# Enable line plotting
self.plot = self.__plot
self.updatePlotDataStyle = self.__updatePlotDataStyle
self.updatePlotDataDelete = self.__updatePlotDataDelete
# Initial and insert table view for line plots
self.tableViewLine = _QTableView()
self.ui.modelTabWidget.addTab(self.tableViewLine, 'Lines')
# Set model and delegates
# Lines
self.modelLine = _TableModelLines()
self.delegateLine = _EditDelegateLines()
self.tableViewLine.setModel(self.modelLine)
self.tableViewLine.setItemDelegate(self.delegateLine)
self.tableViewLine.show()
# RESIZE COLUMNS
header = self.tableViewLine.horizontalHeader()
# alpha
col = self.modelLine._COL_ALPHA
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewLine.setColumnWidth(col, new_width)
# linewidth
col = self.modelLine._COL_LINEWIDTH
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewLine.setColumnWidth(col, new_width)
# markersize
col = self.modelLine._COL_MARKERSIZE
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewLine.setColumnWidth(col, new_width)
# delete
col = self.modelLine._COL_DELETE
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewLine.setColumnWidth(col, new_width)
# SIGNALS AND SLOTS
# Make use of double-clicking within table
self.tableViewLine.doubleClicked.connect(
self.modelLine.doubleClickCheck)
# When a model (table) elements changes or is deleted
self.modelLine.dataChanged.connect(self.updatePlotDataStyle)
self.modelLine.dataDeleted.connect(self.updatePlotDataDelete)
# Export lines to csv
self.ui.actionExport_Lines_to_CSV.setVisible(True)
self.ui.actionExport_Lines_to_CSV.triggered.connect(self.export_lines_csv)
def setupFillBetweens(self):
"""
Enable and setup fill between plotting
"""
# Enable fill_between plotting
self.fill_between = self.__fill_between
self.updateFillBetweenDataStyle = self.__updateFillBetweenDataStyle
self.updateFillBetweenDataDelete = self.__updateFillBetweenDataDelete
# Initial and insert table view for fill_between plots
self.tableViewFillBetween = _QTableView()
self.ui.modelTabWidget.addTab(self.tableViewFillBetween,
'Fill Between')
# Fill Between
self.modelFillBetween = _TableModelFillBetween()
self.delegateFillBetween = _EditDelegateFillBetween()
self.tableViewFillBetween.setModel(self.modelFillBetween)
self.tableViewFillBetween.setItemDelegate(self.delegateFillBetween)
self.tableViewFillBetween.show()
# RESIZE COLUMNS
header = self.tableViewFillBetween.horizontalHeader()
# alpha
col = self.modelFillBetween._COL_ALPHA
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewFillBetween.setColumnWidth(col, new_width)
# linewidth
col = self.modelFillBetween._COL_LINEWIDTH
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewFillBetween.setColumnWidth(col, new_width)
# delete
col = self.modelFillBetween._COL_DELETE
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewFillBetween.setColumnWidth(col, new_width)
# SIGNALS AND SLOTS
# Make use of double-clicking within table
self.tableViewFillBetween.doubleClicked.connect(
self.modelFillBetween.doubleClickCheck)
# When a model (table) elements changes or is deleted
self.modelFillBetween.dataChanged.connect(self.updateFillBetweenDataStyle)
self.modelFillBetween.dataDeleted.connect(self.updateFillBetweenDataDelete)
# Export fillbetweens to csv
self.ui.actionExport_Fill_Between_to_CSV.setVisible(True)
self.ui.actionExport_Fill_Between_to_CSV.triggered.connect(self.export_fillbetweens_csv)
def setupImages(self):
"""
Enable and setup image plotting
"""
# Enable imaging
self.imshow = self.__imshow
self.updateImagesDataStyle = self.__updateImagesDataStyle
self.updateImagesDataDelete = self.__updateImagesDataDelete
# images data-- similar to plot_data above
# Initial and insert table view for images
self.tableViewImages = _QTableView()
self.ui.modelTabWidget.addTab(self.tableViewImages, 'Images')
# Images
self.modelImages = _TableModelImages()
self.delegateImages = _EditDelegateImages()
self.tableViewImages.setModel(self.modelImages)
self.tableViewImages.setItemDelegate(self.delegateImages)
self.tableViewImages.show()
# RESIZE COLUMNS
header = self.tableViewImages.horizontalHeader()
# alpha
col = self.modelImages._COL_ALPHA
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewImages.setColumnWidth(col, new_width)
# clim low
col = self.modelImages._COL_CLIM_LOW
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewImages.setColumnWidth(col, new_width)
# clim high
col = self.modelImages._COL_CLIM_HIGH
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewImages.setColumnWidth(col, new_width)
# delete
col = self.modelImages._COL_DELETE
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewImages.setColumnWidth(col, new_width)
# SIGNALS AND SLOTS
# Make use of double-clicking within table
self.tableViewImages.doubleClicked.connect(
self.modelImages.doubleClickCheck)
# When a model (table) elements changes or is deleted
self.modelImages.dataChanged.connect(self.updateImagesDataStyle)
self.modelImages.dataDeleted.connect(self.updateImagesDataDelete)
def setupBars(self):
"""
Enable and setup bar and histogram plotting
"""
# Enable bar plotting
self.bar = self.__bar
self.hist = self.__hist
self.updateBarsDataStyle = self.__updateBarsDataStyle
self.updateBarsDataDelete = self.__updateBarsDataDelete
# Initial and insert table view for bars
self.tableViewBars = _QTableView()
self.ui.modelTabWidget.addTab(self.tableViewBars, 'Bars')
# Bars/Bars
self.modelBars = _TableModelBars()
self.delegateBars = _EditDelegateBars()
self.tableViewBars.setModel(self.modelBars)
self.tableViewBars.setItemDelegate(self.delegateBars)
self.tableViewBars.show()
# RESIZE COLUMNS
header = self.tableViewBars.horizontalHeader()
# alpha
col = self.modelBars._COL_ALPHA
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewBars.setColumnWidth(col, new_width)
# linewidth
col = self.modelBars._COL_LINEWIDTH
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewBars.setColumnWidth(col, new_width)
# widthfactor
col = self.modelBars._COL_WIDTH_FACTOR
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewBars.setColumnWidth(col, new_width)
# delete
col = self.modelBars._COL_DELETE
new_width = int(1.1*header.sectionSizeHint(col))
self.tableViewBars.setColumnWidth(col, new_width)
# SIGNALS AND SLOTS
# Make use of double-clicking within table
self.tableViewBars.doubleClicked.connect(
self.modelBars.doubleClickCheck)
# When a model (table) elements changes or is deleted
self.modelBars.dataChanged.connect(self.updateBarsDataStyle)
self.modelBars.dataDeleted.connect(self.updateBarsDataDelete)
# Export bars to csv
self.ui.actionExport_Bars_to_CSV.setVisible(True)
self.ui.actionExport_Bars_to_CSV.triggered.connect(self.export_bars_csv)
    def setup(self, limit_to=None, parent=None):
        """
        Basic UI setup.

        Parameters
        ----------
        limit_to : list of str, optional
            Restricts which model tabs are created (forwarded to
            _tabAvailability); None enables everything.
        parent : QWidget, optional
            Parent widget forwarded to the Qt constructor.
        """
        # Generic start to any pyQT program
        super(SciPlotUI, self).__init__(parent)
        self.ui = Ui_Plotter()
        self.ui.setupUi(self)
        self.setSizePolicy(_QSizePolicy.Expanding,
                           _QSizePolicy.Expanding)
        # Global "data" i.e., title, x-label, y-label, etc
        self._global_data = _DataGlobal()
        # MPL plot widget
        self.mpl_widget = _MplCanvas(height=6, dpi=100)
        # Hold is deprecated in MPL2, so only call it on MPL1
        if self._mpl_v1:
            self.mpl_widget.ax.hold(True)
        # Insert MPL widget and toolbar; both are inserted at index 0, so the
        # toolbar (inserted second) ends up above the canvas
        self.ui.verticalLayout.insertWidget(0, self.mpl_widget, 0, _QtCore.Qt.AlignHCenter)
        self.ui.verticalLayout.insertWidget(0, self.mpl_widget.toolbar,0, _QtCore.Qt.AlignHCenter)
        self.updateAxisParameters()
        self.updateFigureParameters()
        self.mpl_widget.draw()
        # Insert TabWidget that will hold the per-plot-type model tables
        self.ui.modelTabWidget = _QTabWidget()
        self.ui.verticalLayout.insertWidget(-1, self.ui.modelTabWidget)
        # Setup what tabs are available:
        self._tabAvailability(limit_to)
        # _to_setup holds the per-tab setup callables selected above
        for count in self._to_setup:
            count()
        # SIGNALS AND SLOTS
        # Global labels
        self.ui.lineEditTitle.editingFinished.connect(self.updateLabelsFromLineEdit)
        self.ui.lineEditXLabel.editingFinished.connect(self.updateLabelsFromLineEdit)
        self.ui.lineEditYLabel.editingFinished.connect(self.updateLabelsFromLineEdit)
        # Non-tracked (not saved) properties
        self.ui.comboBoxAspect.currentIndexChanged.connect(self.axisAspect)
        self.ui.comboBoxAxisScaling.currentIndexChanged.connect(self.axisScaling)
        self.ui.checkBoxAxisVisible.stateChanged.connect(self.axisVisible)
        self.ui.lineEditXLimMin.editingFinished.connect(self.axisLimits)
        self.ui.lineEditXLimMax.editingFinished.connect(self.axisLimits)
        self.ui.lineEditYLimMin.editingFinished.connect(self.axisLimits)
        self.ui.lineEditYLimMax.editingFinished.connect(self.axisLimits)
        # Actions
        self.ui.pushButtonClearAll.pressed.connect(self.clearAll)
        self.ui.pushButtonDefaultView.pressed.connect(self.defaultView)
        # Formatting Features
        self.ui.actionFigureDPI.triggered.connect(self.figureDPI)
        self.ui.actionFigureSavedDPI.triggered.connect(self.figureSaveDPI)
        self.ui.actionFigure_Size_Display.triggered.connect(self.figureSizeDisplay)
        self.ui.pushButtonApplyFigParams.pressed.connect(self.applyFigProps)
def __plot(self, x, y, label=None, x_label=None, y_label=None, meta={},
**kwargs):
"""
MPL-like plotting functionality
Parameters
----------
x : ndarray (1D)
X-axis data
y : ndarray (1D, for now)
Y-axis data
label : str
Label of plot
x_label : str
X-axis label (units)
y_label : str
Y-axis label (units)
kwargs : dict
Other parameters sent directly to mpl-plot
"""
# Temporary plot-data
plot_data = _DataLine()
plot_data.x = x
plot_data.y = y
plot_data.label = label
plot_data.meta = meta
plot_data.id = _time.time()
# Plot outputs a line object
plot_data.mplobj = self.mpl_widget.ax.plot(x, y, label=label, **kwargs)
try:
self.mpl_widget.ax.legend(loc='best')
except:
pass
# If labels are provided, update the global data and the linEdits
if x_label is not None or y_label is not None:
self.updateAllLabels(x_label=x_label, y_label=y_label)
self.mpl_widget.fig.tight_layout()
self.axisAspect()
self.mpl_widget.draw()
# Since the plot was not fed style-info (unless kwargs were used)
# we rely on the mpl stylesheet to setup color, linewidth, etc.
# Thus, we plot, then retrieve what the style info was
plot_data.retrieve_style_from_line(plot_data.mplobj[0])
# Append this specific plot data to out list of all plots
self.list_ids.append(plot_data.id)
self.list_all.append(plot_data)
# Update model
self.modelLine._model_data.append(plot_data.model_style)
self.modelLine.layoutChanged.emit()
def updateMplLabels(self, x_label=None, y_label=None, title=None):
"""
Within the MPL widget, update the x- and y-labels and the title
"""
if x_label is not None:
self.mpl_widget.ax.set_xlabel(x_label)
if y_label is not None:
self.mpl_widget.ax.set_ylabel(y_label)
if title is not None:
self.mpl_widget.ax.set_title(title)
self.mpl_widget.fig.tight_layout()
self.mpl_widget.draw()
def updateDataLabels(self, x_label=None, y_label=None, title=None):
"""
Within the global data container, update the x- and y-labels and the \
title
"""
if x_label is not None:
self._global_data.labels['x_label'] = x_label
if y_label is not None:
self._global_data.labels['y_label'] = y_label
if title is not None:
self._global_data.labels['title'] = title
def updateLineEditLabels(self, x_label=None, y_label=None, title=None):
"""
Within the pyQT lineEdit widgets, update the x- and y-labels and the \
title
"""
if x_label is not None:
self.ui.lineEditXLabel.setText(x_label)
if y_label is not None:
self.ui.lineEditYLabel.setText(y_label)
if title is not None:
self.ui.lineEditTitle.setText(title)
def updateAllLabels(self, x_label=None, y_label=None, title=None):
"""
Update the x- and y-labels and the title in the MPL widget, the \
lineEdit boxes, and the global data container
"""
self.updateMplLabels(x_label=x_label, y_label=y_label, title=title)
self.updateDataLabels(x_label=x_label, y_label=y_label, title=title)
self.updateLineEditLabels(x_label=x_label, y_label=y_label,
title=title)
def updateLabelsFromLineEdit(self):
"""
From the linEdit widgets, update the x- and y-labels and the title \
in the MPL widget and the global data container
"""
title = None
x_label = None
y_label = None
sender = self.sender()
if sender == self.ui.lineEditTitle:
title = self.ui.lineEditTitle.text()
elif sender == self.ui.lineEditXLabel:
x_label = self.ui.lineEditXLabel.text()
elif sender == self.ui.lineEditYLabel:
y_label = self.ui.lineEditYLabel.text()
self.updateDataLabels(x_label=x_label, y_label=y_label, title=title)
self.updateMplLabels(x_label=x_label, y_label=y_label, title=title)
def __updatePlotDataStyle(self):
"""
Something style-related changed in the model; thus, need to change \
these elements in the plot data
"""
for num, style_info in enumerate(self.modelLine._model_data):
idx = self.list_ids.index(style_info['id'])
self.list_all[idx].model_style = style_info
self.refreshAllPlots()
def __updatePlotDataDelete(self, row, plt_id):
"""
A plot was deleted (likely from within the model); thus, need to \
remove the corresponding plot data
"""
try:
idx_to_remove = self.list_ids.index(plt_id)
self.list_ids.pop(idx_to_remove)
self.list_all.pop(idx_to_remove)
except:
print('Error in __updatePlotDataDelete: {}'.format(idx_to_remove))
self.refreshAllPlots()
    def refreshAllPlots(self):
        """
        Clear and re-plot all plot data of all types (lines, bars, images,
        fill-betweens), then restore the legend, labels, layout, axis
        parameters, and aspect before redrawing.
        """
        # Clear axis -- in the future, maybe clear figure and recreate axis
        self.mpl_widget.ax.clear()
        for itm in self.list_all:
            if isinstance(itm, _DataLine):
                # Hold is deprecated in MPL2, so only call it on MPL1
                if self._mpl_v1:
                    self.mpl_widget.ax.hold(True)
                # Hide label if alpha=0
                if itm.style_dict['alpha'] == 0:
                    label = None
                else:
                    label = itm.label
                # Re-plot from the stored style so model edits take effect
                itm.mplobj = self.mpl_widget.ax.plot(itm.x, itm.y,
                                                     label=label,
                                                     color=itm.style_dict['color'],
                                                     alpha=itm.style_dict['alpha'],
                                                     linewidth=itm.style_dict['linewidth'],
                                                     linestyle=itm.style_dict['linestyle'],
                                                     marker=itm.style_dict['marker'],
                                                     markersize=itm.style_dict['markersize'])
            elif isinstance(itm, _DataBar):
                if self._mpl_v1:
                    self.mpl_widget.ax.hold(True)
                # Hide label if alpha=0
                if itm.style_dict['alpha'] == 0:
                    label = None
                else:
                    label = itm.label
                itm.mplobj = self.mpl_widget.ax.bar(itm._left, itm.y,
                                                    bottom=itm.bottom,
                                                    width=itm._width,
                                                    label=label,
                                                    facecolor=itm.style_dict['facecolor'],
                                                    alpha=itm.style_dict['alpha'],
                                                    edgecolor=itm.style_dict['edgecolor'],
                                                    linewidth=itm.style_dict['linewidth'])
            elif isinstance(itm, _DataImages):
                if self._mpl_v1:
                    self.mpl_widget.ax.hold(True)
                # Hide label if alpha=0
                if itm.style_dict['alpha'] == 0:
                    label = None
                else:
                    label = itm.label
                # Drop the old colorbar before re-imshow'ing, otherwise each
                # refresh would stack an extra colorbar on the figure
                if itm.cbar['obj'] is not None:
                    try:  # Have had some unknown exceptions with .remove()
                        itm.cbar['obj'].remove()
                        itm.cbar['obj'] = None
                    except:
                        pass
                itm.mplobj = self.mpl_widget.ax.imshow(itm.img, label=label,
                                                       interpolation='none',
                                                       origin='lower',
                                                       cmap=_mpl.cm.cmap_d[itm.style_dict['cmap_name']],
                                                       alpha=itm.style_dict['alpha'],
                                                       clim=itm.style_dict['clim'])
                if itm.cbar['show']:
                    itm.cbar['obj'] = self.mpl_widget.fig.colorbar(itm.mplobj,
                                                                   use_gridspec=True)
            elif isinstance(itm, _DataFillBetween):
                if self._mpl_v1:
                    self.mpl_widget.ax.hold(True)
                # Hide label if alpha=0
                if itm.style_dict['alpha'] == 0:
                    label = None
                else:
                    label = itm.label
                itm.mplobj = self.mpl_widget.ax.fill_between(itm.x, itm.y_low, itm.y_high,
                                                             label=label,
                                                             facecolor=itm.style_dict['facecolor'],
                                                             edgecolor=itm.style_dict['edgecolor'],
                                                             alpha=itm.style_dict['alpha'],
                                                             linewidth=itm.style_dict['linewidth'])
            else:
                print('Unknown')
        # Only add legend if legend handles exist
        h,l = self.mpl_widget.ax.get_legend_handles_labels()
        if l:
            self.mpl_widget.ax.legend(loc='best')
        # Apply x- and y-labels and a title if they are set
        if self._global_data.labels['title'] is not None:
            self.mpl_widget.ax.set_title(self._global_data.labels['title'])
        if self._global_data.labels['x_label'] is not None:
            self.mpl_widget.ax.set_xlabel(self._global_data.labels['x_label'])
        if self._global_data.labels['y_label'] is not None:
            self.mpl_widget.ax.set_ylabel(self._global_data.labels['y_label'])
        self.mpl_widget.fig.tight_layout()
        self.updateAxisParameters()
        self.updateFigureParameters()
        self.axisAspect()
        self.mpl_widget.draw()
def __fill_between(self, x, y_low, y_high, label=None, meta={},
x_label=None, y_label=None, **kwargs):
"""
MPL-like fill_between plotting functionality
Parameters
----------
x : ndarray (1D)
X-axis data
y_low : ndarray (1D, for now)
Low Y-axis data
y_high : ndarray (1D, for now)
High Y-axis data
label : str
Label of plot
x_label : str
X-axis label (units)
y_label : str
Y-axis label (units)
kwargs : dict
Other parameters sent directly to mpl-fill_between
"""
# Temporary fill_between-data
fill_between_data = _DataFillBetween()
fill_between_data.x = x
fill_between_data.y_low = y_low
fill_between_data.y_high = y_high
fill_between_data.label = label
fill_between_data.meta = meta
fill_between_data.id = _time.time()
# Fill between outputs a polycollection
fill_between_data.mplobj = self.mpl_widget.ax.fill_between(x, y_low, y_high,
label=label, **kwargs)
self.mpl_widget.ax.legend(loc='best')
self.mpl_widget.fig.tight_layout()
self.axisAspect()
self.mpl_widget.draw()
# Since the fill_between was not fed style-info (unless kwargs were used)
# we rely on the mpl stylesheet to setup color, linewidth, etc.
# Thus, we plot, then retrieve what the style info was
fill_between_data.retrieve_style_from_polycollection(fill_between_data.mplobj)
# Append this specific plot data to out list of all plots
self.list_ids.append(fill_between_data.id)
self.list_all.append(fill_between_data)
# Update model
self.modelFillBetween._model_data.append(fill_between_data.model_style)
self.modelFillBetween.layoutChanged.emit()
def __updateFillBetweenDataStyle(self):
"""
Something style-related changed in the model; thus, need to change \
these elements in the fill_between data
"""
for num, style_info in enumerate(self.modelFillBetween._model_data):
idx = self.list_ids.index(style_info['id'])
self.list_all[idx].model_style = style_info
self.refreshAllPlots()
def __updateFillBetweenDataDelete(self, row, plt_id):
"""
A plot was deleted (likely from within the model); thus, need to \
remove the corresponding plot data
"""
idx_to_remove = self.list_ids.index(plt_id)
self.list_ids.pop(idx_to_remove)
self.list_all.pop(idx_to_remove)
self.refreshAllPlots()
def __imshow(self, img, x=None, y=None, label=None, meta={},
x_label=None, y_label=None, cbar=False, **kwargs):
"""
MPL-like plotting functionality
Parameters
----------
img : ndarray (2D)
Image data
x : ndarray (1D)
X-axis data
y : ndarray (1D, for now)
Y-axis data
label : str
Label of plot
x_label : str
X-axis label (units)
y_label : str
Y-axis label (units)
cbar : bool
Attach a colorbar to the img
kwargs : dict
Other parameters sent directly to mpl-imshow
"""
# Temporary plot-data
image_data = _DataImages()
image_data.img = img
image_data.x = x
image_data.y = y
image_data.label = label
image_data.meta = meta
image_data.id = _time.time()
image_data.cbar['show'] = cbar
# Imshow outputs an image object
image_data.mplobj = self.mpl_widget.ax.imshow(img, interpolation='None',
origin='lower',
label=label,
**kwargs)
if image_data.cbar['show']:
image_data.cbar['obj'] = self.mpl_widget.fig.colorbar(image_data.mplobj,
use_gridspec=True)
# self.mpl_widget.ax.legend(loc='best')
# If labels are provided, update the global data and the linEdits
if x_label is not None or y_label is not None:
self.updateAllLabels(x_label=x_label, y_label=y_label)
self.mpl_widget.fig.tight_layout()
self.axisAspect()
self.mpl_widget.draw()
# Since the image was not fed style-info (unless kwargs were used)
# we rely on the mpl stylesheet to setup cmap, etc.
# Thus, we plot, then retrieve what the style info was
image_data.retrieve_style_from_image(image_data.mplobj)
# Append this specific plot data to out list of all plots
self.list_ids.append(image_data.id)
self.list_all.append(image_data)
# Update model
self.modelImages._model_data.append(image_data.model_style)
self.modelImages.layoutChanged.emit()
def __updateImagesDataStyle(self):
"""
Something style-related changed in the model; thus, need to change \
these elements in the fill_between data
"""
for num, style_info in enumerate(self.modelImages._model_data):
idx = self.list_ids.index(style_info['id'])
self.list_all[idx].model_style = style_info
self.refreshAllPlots()
def __updateImagesDataDelete(self, row, plt_id):
"""
A plot was deleted (likely from within the model); thus, need to \
remove the corresponding plot data
"""
idx_to_remove = self.list_ids.index(plt_id)
self.list_ids.pop(idx_to_remove)
popd = self.list_all.pop(idx_to_remove)
if popd.cbar['obj'] is not None:
popd.cbar['obj'].remove()
# self.axisAspect()
self.refreshAllPlots()
def __bar(self, x, y, bottom=0, width_factor=1.0, use_real_width=False,
label=None, meta={}, x_label=None, y_label=None, **kwargs):
"""
MPL-like plotting functionality
Note
----
Unlike MPL bar, this method uses centered data. Thus, x is the center \
position of the bar
Parameters
----------
x : ndarray (1D)
X-axis data (center of bars)
y : ndarray (1D, for now)
Y-axis data (height)
bottom : float (for now)
Baseline of bars
width_factor: float
If legnth of y>1, fraction of space between bars taken up by bar \
(e.g. 1.0 leads to bars that tough). If y is a single-value OR \
use_real_width is True), is the width of the bar.
use_real_width : bool, optional (default=False):
If True, width_factor is the real width (in x-units)
label : str
Label of plot
x_label : str
X-axis label (units)
y_label : str
Y-axis label (units)
kwargs : dict
Other parameters sent directly to mpl-plot
"""
# Temporary plot-data
bar_data = _DataBar()
bar_data.x = x
bar_data.y = y
bar_data.bottom = bottom
bar_data.label = label
bar_data.meta = meta
bar_data.id = _time.time()
bar_data.style_dict['width_factor'] = width_factor
_multi_value = None
if isinstance(y, (int, float)):
_multi_value = False
if isinstance(y, _np.ndarray):
if y.size == 1:
_multi_value = False
else:
_multi_value = True
if isinstance(y, (list, tuple)):
if len(y) == 1:
_multi_value = False
else:
_multi_value = True
if _multi_value and use_real_width == False:
# Distance between bars
bar_data._gap = _np.abs(x[1]-x[0])
# Width of a bar is a fraction of the gap
bar_data._width = bar_data._gap*bar_data.style_dict['width_factor']
else:
# Single-valued: no gap
bar_data._gap = None
bar_data._width = width_factor
# MPL-bar uses left-edge rather than center
bar_data._left = bar_data.x - bar_data._width/2
# Plot outputs a list of patch objects
bar_data.mplobj = self.mpl_widget.ax.bar(bar_data._left, y,
bottom=bar_data.bottom,
width=bar_data._width,
label=label, **kwargs)
self.mpl_widget.ax.legend(loc='best')
# If labels are provided, update the global data and the linEdits
if x_label is not None or y_label is not None:
self.updateAllLabels(x_label=x_label, y_label=y_label)
self.mpl_widget.fig.tight_layout()
self.axisAspect()
self.mpl_widget.draw()
# Since the plot was not fed style-info (unless kwargs were used)
# we rely on the mpl stylesheet to setup color, linewidth, etc.
# Thus, we plot, then retrieve what the style info was
bar_data.retrieve_style_from_bar(bar_data.mplobj[0])
# Append this specific plot data to out list of all plots
self.list_ids.append(bar_data.id)
self.list_all.append(bar_data)
# Update model
self.modelBars._model_data.append(bar_data.model_style)
self.modelBars.layoutChanged.emit()
# Note: New in MPL2, edgecolor is RGBA with A defaulting to 0
# (ie transparent, which Sciplot does not currently support).
self.refreshAllPlots()
def __hist(self, data, bins=10, label=None, meta={}, x_label=None,
y_label='Counts', **kwargs):
"""
MPL-like histogram plotting
Parameters
----------
data : ndarray (1D, for now)
Data (center of bars)
bins : int
Number of histogram bins
label : str
Label of plot
x_label : str
X-axis label (units)
y_label : str
Y-axis label (units)
kwargs : dict
Other parameters sent directly to mpl-plot
"""
counts, lefts = _np.histogram(data, bins=bins)
gap = _np.abs(lefts[1] - lefts[0])
offset = gap/2
self.bar(lefts[:-1]+offset, counts, width_factor=1.0, label=label,
x_label=x_label, y_label=y_label, meta=meta, **kwargs)
def __updateBarsDataStyle(self):
"""
Something style-related changed in the model; thus, need to change \
these elements in the fill_between data
"""
for num, style_info in enumerate(self.modelBars._model_data):
idx = self.list_ids.index(style_info['id'])
self.list_all[idx].model_style = style_info
self.refreshAllPlots()
def __updateBarsDataDelete(self, row, plt_id):
"""
A plot was deleted (likely from within the model); thus, need to \
remove the corresponding plot data
"""
idx_to_remove = self.list_ids.index(plt_id)
self.list_ids.pop(idx_to_remove)
self.list_all.pop(idx_to_remove)
self.refreshAllPlots()
def axisAspect(self):
"""
Set axis aspect ratio property
"""
aspect = self.ui.comboBoxAspect.currentText()
self.mpl_widget.ax.set_aspect(aspect)
self.mpl_widget.fig.tight_layout()
self.updateAxisParameters()
self.updateFigureParameters()
self.mpl_widget.draw()
def axisScaling(self):
"""
Set axis scaling property
"""
ratio = self.ui.comboBoxAxisScaling.currentText()
self.mpl_widget.ax.axis(ratio)
self.mpl_widget.fig.tight_layout()
self.updateAxisParameters()
self.updateFigureParameters()
self.mpl_widget.draw()
def axisVisible(self):
"""
Set whether axis is on or off
"""
state = self.ui.checkBoxAxisVisible.isChecked()
if state:
state = 'on'
else:
state = 'off'
self.mpl_widget.ax.axis(state)
self.mpl_widget.fig.tight_layout()
self.updateAxisParameters()
self.updateFigureParameters()
self.mpl_widget.draw()
def axisLimits(self):
"""
Set axis limits
"""
if self.sender() == self.ui.lineEditXLimMin:
value = float(self.ui.lineEditXLimMin.text())
self.mpl_widget.ax.axis(xmin=value)
elif self.sender() == self.ui.lineEditXLimMax:
value = float(self.ui.lineEditXLimMax.text())
self.mpl_widget.ax.axis(xmax=value)
elif self.sender() == self.ui.lineEditYLimMin:
value = float(self.ui.lineEditYLimMin.text())
self.mpl_widget.ax.axis(ymin=value)
elif self.sender() == self.ui.lineEditYLimMax:
value = float(self.ui.lineEditYLimMax.text())
self.mpl_widget.ax.axis(ymax=value)
self.mpl_widget.fig.tight_layout()
self.updateAxisParameters()
self.updateFigureParameters()
self.mpl_widget.draw()
def updateAxisParameters(self):
"""
Query current state of axis settings and update appropriate lineEdit's
"""
axis_visible = self.mpl_widget.ax.axison
self.ui.checkBoxAxisVisible.setChecked(axis_visible)
xmin, xmax, ymin, ymax = self.mpl_widget.ax.axis()
self.ui.lineEditXLimMin.setText(str(round(xmin,5)))
self.ui.lineEditXLimMax.setText(str(round(xmax,5)))
self.ui.lineEditYLimMin.setText(str(round(ymin,5)))
self.ui.lineEditYLimMax.setText(str(round(ymax,5)))
def updateFigureParameters(self):
"""
Query current state of axis settings and update appropriate lineEdit's
"""
fig_dpi = self.mpl_widget.fig.get_dpi()
save_dpi = _mpl.rcParams['savefig.dpi']
if save_dpi is 'figure':
save_dpi = fig_dpi
fig_size = self.mpl_widget.fig.get_size_inches()
self.ui.spinBoxFigureDPI.setValue(fig_dpi)
self.ui.spinBoxFigureSavedDPI.setValue(save_dpi)
self.ui.spinBoxFigSizeWidth.setValue(fig_size[0])
self.ui.spinBoxFigSizeHeight.setValue(fig_size[1])
    def defaultView(self):
        """
        Set default and Home view to the current one.
        """
        # Older MPL toolbars keep explicit view/position stacks; newer
        # versions removed these attributes, so failures are deliberately
        # ignored to stay compatible across MPL versions.
        try:
            self.mpl_widget.toolbar._views.clear()
            self.mpl_widget.toolbar._positions.clear()
        except:
            pass
        self.mpl_widget.toolbar.update()
def clearAllBars(self):
try:
self.modelBars._model_data = []
ids = self.list_bar_ids
for i in ids:
self.clearID(i)
self.modelBars.layoutChanged.emit()
except:
print('Error in clearAllBars')
def clearID(self, clear_id):
idx_to_remove = self.list_ids.index(clear_id)
self.list_ids.pop(idx_to_remove)
self.list_all.pop(idx_to_remove)
    def clearAll(self):
        """
        Clear all plots and graphs and images.

        Each model type is cleared inside its own try-block so that a
        failure in one (e.g. a tab that was never set up because of
        limit_to) does not prevent the others from being cleared. Emits
        all_cleared with this widget's id() at the end.
        """
        try:
            self.modelLine._model_data = []
            self.modelLine.layoutChanged.emit()
        except:
            print('Error in clear all of plots/lines')
        try:
            self.modelBars._model_data = []
            self.modelBars.layoutChanged.emit()
        except:
            print('Error in clear all of bars')
        try:
            # Need to iterate as to check for colorbar existance
            for num, model_data in enumerate(self.modelImages._model_data):
                idx_to_remove = self.list_ids.index(model_data['id'])
                self.list_ids.pop(idx_to_remove)
                popd = self.list_all.pop(idx_to_remove)
                if popd.cbar['obj'] is not None:
                    popd.cbar['obj'].remove()
            self.modelImages._model_data = []
            self.modelImages.layoutChanged.emit()
        except:
            print('Error in clear all of images')
        try:
            self.modelFillBetween._model_data = []
            self.modelFillBetween.layoutChanged.emit()
        except:
            print('Error in clear all of fill-betweens')
        try:
            self.list_ids = []
            self.list_all = []
        except:
            print('Error in clear all')
        finally:
            # Always redraw and announce, even if one of the stages failed
            self.refreshAllPlots()
            self.all_cleared.emit(id(self))
    def export_bars_csv(self):
        """
        Prompt for a filename and export every bar plot to a CSV-like text
        file: one block per bar plot with its label, left edges, centers,
        and heights.
        """
        ret = _QFileDialog.getSaveFileName(filter="Comma-Separated Values (*.csv);;All Files (*.*)")
        # ret is (path, selected_filter); an empty path means the user cancelled
        if ret[0]:
            # pth, fname = _os.path.split(ret[0])
            with open(ret[0],'w') as f:
                for q in self.list_bar_objs:
                    f.write('{}\n'.format(q.label))
                    f.write('left,')
                    # ndarray.tofile with sep writes comma-separated text
                    q._left.tofile(f, sep=',')
                    f.write('\nx,')
                    q.x.tofile(f, sep=',')
                    f.write('\ny,')
                    q.y.tofile(f,sep=',')
                    f.write('\n\n')
    def export_lines_csv(self):
        """
        Prompt for a filename and export every line plot to a CSV-like text
        file: one block per plot with its label, x data, and y data.
        """
        ret = _QFileDialog.getSaveFileName(filter="Comma-Separated Values (*.csv);;All Files (*.*)")
        # ret is (path, selected_filter); an empty path means the user cancelled
        if ret[0]:
            # pth, fname = _os.path.split(ret[0])
            with open(ret[0],'w') as f:
                for q in self.list_line_objs:
                    f.write('{}\n'.format(q.label))
                    f.write('x,')
                    q.x.tofile(f, sep=',')
                    f.write('\ny,')
                    q.y.tofile(f,sep=',')
                    f.write('\n\n')
    def export_fillbetweens_csv(self):
        """
        Prompt for a filename and export every fill-between plot to a
        CSV-like text file: one block per plot with its label, x data, and
        low/high y data.
        """
        ret = _QFileDialog.getSaveFileName(filter="Comma-Separated Values (*.csv);;All Files (*.*)")
        # ret is (path, selected_filter); an empty path means the user cancelled
        if ret[0]:
            # pth, fname = _os.path.split(ret[0])
            with open(ret[0],'w') as f:
                for q in self.list_fillbetween_objs:
                    f.write('{}\n'.format(q.label))
                    f.write('x,')
                    q.x.tofile(f, sep=',')
                    f.write('\ny_low,')
                    q.y_low.tofile(f,sep=',')
                    f.write('\ny_high,')
                    q.y_high.tofile(f,sep=',')
                    f.write('\n\n')
    # --- Counts of each tracked plot type --------------------------------
    @property
    def n_lines(self):
        """Number of line plots currently tracked."""
        return sum(isinstance(x, _DataLine) for x in self.list_all)
    @property
    def n_bars(self):
        """Number of bar plots currently tracked."""
        return sum(isinstance(x, _DataBar) for x in self.list_all)
    @property
    def n_fillbetweens(self):
        """Number of fill-between plots currently tracked."""
        return sum(isinstance(x, _DataFillBetween) for x in self.list_all)
    @property
    def n_images(self):
        """Number of images currently tracked."""
        return sum(isinstance(x, _DataImages) for x in self.list_all)
    # --- Per-type views of list_all (objects and their ids) --------------
    @property
    def list_line_objs(self):
        """All tracked line-plot data records."""
        return [x for x in self.list_all if isinstance(x, _DataLine)]
    @property
    def list_line_ids(self):
        """Ids of all tracked line-plot records."""
        return [x.id for x in self.list_all if isinstance(x, _DataLine)]
    @property
    def list_bar_objs(self):
        """All tracked bar-plot data records."""
        return [x for x in self.list_all if isinstance(x, _DataBar)]
    @property
    def list_bar_ids(self):
        """Ids of all tracked bar-plot records."""
        return [x.id for x in self.list_all if isinstance(x, _DataBar)]
    @property
    def list_fillbetween_objs(self):
        """All tracked fill-between data records."""
        return [x for x in self.list_all if isinstance(x, _DataFillBetween)]
    @property
    def list_fillbetween_ids(self):
        """Ids of all tracked fill-between records."""
        return [x.id for x in self.list_all if isinstance(x, _DataFillBetween)]
    @property
    def list_image_objs(self):
        """All tracked image data records."""
        return [x for x in self.list_all if isinstance(x, _DataImages)]
    @property
    def list_image_ids(self):
        """Ids of all tracked image records."""
        return [x.id for x in self.list_all if isinstance(x, _DataImages)]
def figureDPI(self):
curr_dpi = self.mpl_widget.fig.get_dpi()
dpi, okPressed = _QInputDialog.getInt(self, "New Figure DPI","DPI:", curr_dpi, 10, 100000, 25)
if okPressed:
self.mpl_widget.fig.set_dpi(dpi)
self.mpl_widget.updateGeometry()
self.mpl_widget.draw()
self.updateFigureParameters()
def figureSaveDPI(self):
curr_dpi = _mpl.rcParams['savefig.dpi']
if not isinstance(curr_dpi, int):
# Savefig is set to 'figure'
curr_dpi = self.mpl_widget.fig.get_dpi()
dpi, okPressed = _QInputDialog.getInt(self, "New DPI for Saved Figures","DPI:", curr_dpi, 10, 100000, 25)
if okPressed:
_mpl.rcParams['savefig.dpi'] = dpi
self.updateFigureParameters()
    def figureSizeDisplay(self):
        """
        Ask the user for a new on-screen figure size (width x height, in
        inches) via a two-field dialog and apply it.
        """
        curr_size = self.mpl_widget.fig.get_size_inches()
        new_size, okPressed = DualEntry.getDualEntries(curr_size[0], curr_size[1], input_type=float, text="Figure Size (W x H inches)", parent=self)
        if okPressed:
            # NOTE(review): updateGeometry is called before set_size_inches
            # here, while figureDPI calls it after the change -- confirm the
            # intended ordering.
            self.mpl_widget.updateGeometry()
            self.mpl_widget.fig.set_size_inches(new_size[0], new_size[1], forward=True)
            self.mpl_widget.draw()
            self.updateFigureParameters()
def applyFigProps(self):
""" Apply manually-entered figure properties (e.g., dpi) """
new_fig_size = (self.ui.spinBoxFigSizeWidth.value(), self.ui.spinBoxFigSizeHeight.value())
new_fig_dpi = self.ui.spinBoxFigureDPI.value()
new_save_dpi = self.ui.spinBoxFigureSavedDPI.value()
self.mpl_widget.fig.set_dpi(new_fig_dpi)
self.mpl_widget.fig.set_size_inches(new_fig_size)
_mpl.rcParams['savefig.dpi'] = new_save_dpi
self.mpl_widget.updateGeometry()
self.mpl_widget.draw()
if __name__ == '__main__':
    # Quick manual demo: build a plotter limited to four tab types and add
    # one example of each plot kind, then hand control to the Qt event loop.
    app = _QApplication(_sys.argv)
    winPlotter = SciPlotUI(limit_to=['lines', 'bars', 'fill betweens',
                                    'images'])
    x = _np.arange(100)
    y = x**2
    winPlotter.plot(x, y, x_label='X', label='Plot')
    winPlotter.fill_between(x, y-1000, y+1000, label='Fill Between')
    winPlotter.imshow(_np.random.randn(100,100), label='Imshow', cbar=True)
    winPlotter.bar(x[::10], y[::10], label='Bar')
    app.exec_()
# Source: olist_data_warehouse/project/src/data_warehouse/create_data_warehouse.py
# This file contains the functions that create the data warehouse schema, fact table, dimensional tables, etc.
def create_data_warehouse_schema(cursor):
    """
    Summary: Creates the Olist Data Warehouse Database Schema.
    Args:
        : cursor (DataBase cursor): Cursor of the connection with the database
    """
    schema_name = "olist_data_warehouse"  # Data Warehouse Database Name
    # Execute the SQL command that creates the DW schema (idempotent)
    cursor.execute("CREATE SCHEMA IF NOT EXISTS {}".format(schema_name))
def create_data_warehouse_tables(cursor):
    """
    Summary: Creates the Olist Data Warehouse Database tables.
    Args:
        : cursor (DataBase cursor): Cursor of the connection with the database
    """
    # Customer dimension: one row per order-scoped customer id
    cursor.execute(""" CREATE TABLE IF NOT EXISTS olist_data_warehouse.Dim_customers (
                            customer_id VARCHAR(32) PRIMARY KEY,
                            customer_unique_id VARCHAR(32),
                            customer_zip_code_prefix VARCHAR(5) NOT NULL
                        )""")
    # Fact table: one row per order payment, referencing the customer
    # dimension. NOTE(review): no primary key is declared -- confirm whether
    # order_id should be unique here.
    cursor.execute(""" CREATE TABLE IF NOT EXISTS olist_data_warehouse.Orders_Fact (
                            order_id VARCHAR(32),
                            customer_id VARCHAR(32) REFERENCES olist_data_warehouse.Dim_customers (customer_id),
                            payment_value NUMERIC(12, 2) CHECK (payment_value >= 0) NOT NULL
                        )""")
    # Payment dimension: installments and payment method per order
    cursor.execute(""" CREATE TABLE IF NOT EXISTS olist_data_warehouse.Dim_payments (
                            order_id VARCHAR(32),
                            payment_installments VARCHAR(3) NOT NULL,
                            payment_type VARCHAR(20) NOT NULL
                        )""")
    # Geolocation dimension keyed (informally) by zip-code prefix
    cursor.execute(""" CREATE TABLE IF NOT EXISTS olist_data_warehouse.Dim_geolocation (
                            geolocation_zip_code_prefix VARCHAR(5),
                            geolocation_lat VARCHAR(30) NOT NULL,
                            geolocation_lng VARCHAR(30) NOT NULL,
                            geolocation_city VARCHAR(40) NOT NULL,
                            geolocation_state VARCHAR(2)
                        )""")
    # Order-items dimension: composite key (order, item position)
    cursor.execute(""" CREATE TABLE IF NOT EXISTS olist_data_warehouse.Dim_order_items (
                            order_id VARCHAR(32),
                            product_id VARCHAR(32) NOT NULL,
                            product_category_name VARCHAR(60),
                            order_item_id VARCHAR(2),
                            price NUMERIC(12, 2) CHECK (price > 0) NOT NULL,
                            freight_value NUMERIC(12, 2) CHECK (freight_value >= 0) NOT NULL,
                            PRIMARY KEY (order_id, order_item_id)
                        )""")
    # Date dimension: per-order status and lifecycle timestamps
    cursor.execute(""" CREATE TABLE IF NOT EXISTS olist_data_warehouse.Dim_date (
                            order_id VARCHAR(32),
                            order_status VARCHAR(20) NOT NULL,
                            order_purchase_timestamp TIMESTAMP NOT NULL,
                            order_delivered_customer_date TIMESTAMP,
                            order_estimated_delivery_date TIMESTAMP
                        )""")
    # Review dimension: review score per order
    cursor.execute(""" CREATE TABLE IF NOT EXISTS olist_data_warehouse.Dim_review (
                            order_id VARCHAR(32),
                            order_review_score NUMERIC(2)
                        )""")
| StarcoderdataPython |
from egpo_utils.egpo.egpo import EGPOTrainer
from egpo_utils.human_in_the_loop_env import HumanInTheLoopEnv
from egpo_utils.train.utils import initialize_ray
initialize_ray(test_mode=False)
def get_function(ckpt):
    """
    Restore an EGPOTrainer from the checkpoint path *ckpt* and return a
    policy function mapping a single observation to the trainer's
    compute_actions output (a dict keyed by policy id).
    """
    trainer = EGPOTrainer(dict(
        env=HumanInTheLoopEnv,

        # ===== Training =====
        takeover_data_discard=False,
        alpha=10.0,
        recent_episode_num=5,
        normalize=True,
        twin_cost_q=True,
        k_i=0.01,
        k_p=5,
        # search > 0
        k_d=0.1,
        # expected max takeover num
        cost_limit=300,
        optimization=dict(actor_learning_rate=1e-4, critic_learning_rate=1e-4, entropy_learning_rate=1e-4),
        prioritized_replay=False,
        horizon=400,
        target_network_update_freq=1,
        timesteps_per_iteration=100,
        metrics_smoothing_episodes=10,
        learning_starts=100,
        clip_actions=False,
        normalize_actions=True,
    ))
    trainer.restore(ckpt)

    def _f(obs):
        # compute_actions expects a dict of {policy_id: observation}
        ret = trainer.compute_actions({"default_policy": obs})
        return ret
    return _f
if __name__ == '__main__':
    def make_env(env_id=None):
        # Evaluation environment: no human takeover, no rendering
        return HumanInTheLoopEnv(dict(manual_control=False, use_render=False))

    from collections import defaultdict

    # {checkpoint index: [per-episode {'reward', 'success', 'cost'}, ...]}
    super_data = defaultdict(list)
    EPISODE_NUM = 50
    env = make_env()
    # Evaluate every 10th checkpoint in [12, 162]
    for ckpt_idx in range(12, 163, 10):
        ckpt = ckpt_idx
        compute_actions = get_function(
            "/home/liquanyi/corl_human_exp/EGPO/SACPIDSaverTrainer_HumanInTheLoopEnv_0689e_00000_0_seed=0_2021-08-24_20-01-33/checkpoint_{}/checkpoint-{}".format(
                ckpt, ckpt)
        )
        o = env.reset()
        epi_num = 0
        total_cost = 0
        total_reward = 0
        success_rate = 0
        ep_cost = 0
        ep_reward = 0
        success_flag = False
        horizon = 2000  # hard per-episode step cap
        step = 0
        while True:
            # action_to_send = compute_actions(w, [o], deterministic=False)[0]
            step += 1
            action_to_send = compute_actions(o)["default_policy"]
            o, r, d, info = env.step(action_to_send)
            total_reward += r
            ep_reward += r
            total_cost += info["cost"]
            ep_cost += info["cost"]
            # Episode boundary: env signalled done, or the step cap was hit
            if d or step > horizon:
                if info["arrive_dest"]:
                    success_rate += 1
                    success_flag = True
                epi_num += 1
                # NOTE(review): when the episode budget is exhausted we break
                # before appending the final episode's record -- confirm this
                # is intended.
                if epi_num > EPISODE_NUM:
                    break
                else:
                    o = env.reset()
                super_data[ckpt].append({"reward": ep_reward, "success": success_flag, "cost": ep_cost})
                ep_cost = 0.0
                ep_reward = 0.0
                success_flag = False
                step = 0
        print(
            "CKPT:{} | success_rate:{}, mean_episode_reward:{}, mean_episode_cost:{}".format(ckpt,
                                                                                             success_rate / EPISODE_NUM,
                                                                                             total_reward / EPISODE_NUM,
                                                                                             total_cost / EPISODE_NUM))
        del compute_actions
    env.close()

    import json
    try:
        with open("super_data_12_162_10.json", "w") as f:
            json.dump(super_data, f)
    except:
        # best-effort dump: results are still printed below on failure
        pass
    print(super_data)
| StarcoderdataPython |
#!/usr/bin/env python3
def solution(a: list) -> int:
    """
    Tape equilibrium: minimal |sum(left) - sum(right)| over all splits that
    leave both parts non-empty.

    >>> solution([3, 1, 2, 4, 3])
    1
    """
    # Start the split at 1, not 0: the original allowed an empty left part,
    # which e.g. made solution([-5, 5]) return 0 instead of 10.
    return min(abs(sum(a[:i]) - sum(a[i:])) for i in range(1, len(a)))
def solution(a: list) -> int:
    """
    >>> solution([3, 1, 2, 4, 3])
    1
    """
    total = sum(a)
    # Prefix sums for every split point that leaves the right part non-empty
    left_sums = []
    running = 0
    for value in a[:-1]:
        running += value
        left_sums.append(running)
    # |left - right| == |total - 2 * left|
    return min(abs(total - 2 * left) for left in left_sums)
if __name__ == '__main__':
    # Smoke test: the best split of [3, 1, 2, 4, 3] is (3+1+2) | (4+3) -> |6-7| = 1
    assert solution([3, 1, 2, 4, 3]) == 1
| StarcoderdataPython |
# gh_stars: 1-10
# Default time after which transaction processing should be aborted
# (duration string with explicit unit suffix: 3,600,000 ms = 1 hour;
# presumably parsed downstream -- TODO confirm).
DEFAULT_TX_TIME_TO_LIVE = "3600000ms"
# Default transaction gas price to apply.
DEFAULT_TX_GAS_PRICE = 10
# Default transaction fee to apply (in the chain's smallest currency unit --
# TODO confirm the unit).
DEFAULT_TX_FEE = int(1e11)
# Default transaction fee for native transfers.
DEFAULT_TX_FEE_NATIVE_TRANSFER = int(1e4)
| StarcoderdataPython |
98772 | import re
import copy
from inspect import getmembers, ismethod
from collections import OrderedDict
class Viewer:
    """
    Filterable view over an object's members (as returned by
    ``inspect.getmembers``).

    The class-level predicates (METHOD, FIELD, ATTRIBUTE, ...) each take a
    ``(name, value)`` pair and are meant to be passed to :meth:`get_data`.
    """
    # pre-compiled patterns (compiled once instead of on every predicate call)
    _attribute_regex = re.compile(r'^__.*__$')
    _private_regex = re.compile(r'^_{1}[^_].*$')
    _public_regex = re.compile(r'^[^_].*$')

    METHOD = lambda k, v: ismethod(v)
    FIELD = lambda k, v: not ismethod(v)
    ATTRIBUTE = lambda k, v: Viewer._attribute_regex.match(k)
    NOT_ATTRIBUTE = lambda k, v: not Viewer._attribute_regex.match(k)
    PRIVATE = lambda k, v: Viewer._private_regex.match(k)
    PUBLIC = lambda k, v: Viewer._public_regex.match(k)
    CONSTANT = lambda k, v: k.upper() == k

    def __init__(self, pyobject):
        # snapshot of the object's members, keyed by name
        self.members = OrderedDict(getmembers(pyobject))

    def _get_dict(self, parent_dict, conditional_callback):
        """Return the entries of `parent_dict` that satisfy the predicate."""
        return OrderedDict([
            (k, v) for k, v in parent_dict.items()
            if conditional_callback(k, v)
        ])

    def _get_both_dict(self, parent_dict, conditional_callback):
        """Split `parent_dict` into (matching, non-matching) OrderedDicts."""
        main, sub = [], []
        for k, v in parent_dict.items():
            if conditional_callback(k, v):
                main.append((k, v))
            else:
                sub.append((k, v))
        return OrderedDict(main), OrderedDict(sub)

    def get_data(self, conditional_callbacks=None, parent_dict=None):
        """
        Return the members that satisfy *all* given predicates.

        Fixes over the previous revision:
        * no mutable default argument (was ``conditional_callbacks=[]``);
        * the caller's callback list is no longer mutated (it used to be
          consumed with ``pop()``);
        * ``get_data()`` with no arguments now returns ``self.members``
          instead of ``None``.
        """
        if parent_dict is None:
            parent_dict = self.members
        if not conditional_callbacks:
            return parent_dict
        result = parent_dict
        # apply predicates last-to-first, matching the historical pop()-based
        # recursion order (the resulting entry set is the same either way)
        for callback in reversed(conditional_callbacks):
            result = self._get_dict(result, callback)
        return result

    @staticmethod
    def get_strlize_dict(odict):
        """Return a deep copy of `odict` with every value stringified."""
        newdict = copy.deepcopy(odict)
        for k, v in newdict.items():
            newdict[k] = str(v)
        return newdict
| StarcoderdataPython |
20917 | #!/usr/bin/env python3
# do not hesitate to debug
import pdb
# python computation modules and visualization
import numpy as np
import sympy as sy
import scipy as sp
import matplotlib.pyplot as plt
from sympy import Q as syQ
# configure sympy pretty-printing (LaTeX output, light foreground for dark themes)
sy.init_printing(use_latex=True,forecolor="White")
def Lyapunov_stability_test_linear(ev):
    ''' test if a linear homogeneous system with constant coefficients is stable
    in the sense of Lyapunov by checking the theorem conditions against the
    provided eigenvalues
    source https://www.math24.net/stability-theory-basic-concepts/
    TODO taking into account eigenvalue multiplicity '''
    real_parts = [sy.re(eigenvalue) for eigenvalue in ev]
    # asymptotically stable <=> every eigenvalue has a negative real part
    if all(sy.ask(syQ.negative(part)) for part in real_parts):
        return 'asymptotically stable'
    # stable <=> every eigenvalue has a nonpositive real part
    # TODO incorporate algebraic and geometric multiplicity criteria
    if all(sy.ask(syQ.nonpositive(part)) for part in real_parts):
        return 'stable'
    # unstable <= at least one eigenvalue has a positive real part
    # TODO incorporate algebraic and geometric multiplicity criteria
    if any(sy.ask(syQ.positive(part)) for part in real_parts):
        return 'unstable'
    # none of the criteria could be established
    return None
def Lyapunov_stability_test_nonlinear(ev):
    ''' test if the fixed point of a nonlinear structure stable system
    is stable, unstable, critical or impossible to determine using Lyapunov
    criteria of first order and thus other methods are needed
    TODO tests are only applicable for structurally stable systems, i.e.
    with purely imaginary eigenvalues are not taken into account
    source https://www.math24.net/stability-first-approximation/ '''
    real_parts = [sy.re(eigenvalue) for eigenvalue in ev]
    # asymptotically stable <=> all eigenvalues have negative real parts
    if all(sy.ask(syQ.negative(part)) for part in real_parts):
        return 'asymptotically stable'
    # unstable <= at least one eigenvalue has positive real part
    if any(sy.ask(syQ.positive(part)) for part in real_parts):
        return 'unstable'
    # critical: all real parts nonpositive with at least one exactly zero;
    # the first-order criteria cannot decide, other methods are needed.
    # BUGFIX: this branch previously used bare `Q`, which is never imported
    # (the module imports `from sympy import Q as syQ`), so reaching it
    # raised NameError instead of returning 'critical'.
    if (all(sy.ask(syQ.nonpositive(part)) for part in real_parts)
            and any(part == 0 for part in real_parts)):
        return 'critical'
    return 'not decided'
def RouthHurwitz_Criterion(p):
    ''' return principal minors of Hurwitz matrix as sympy polynomials, which if
    all are positive it is sufficient condition for asymptotic stability
    NOTE: if all n-1 principal minors are positive, and nth minor is zero,
    the system is at the boundary of stability, with two cases:
    a_n = 0 -- one of the root is zero and system is on the boundary of
    aperiodic stability
    n-1 minor is zero -- there are two complex conjugate imaginary roots and
    the system is at boundary of oscillatory stability
    source https://www.math24.net/routh-hurwitz-criterion/

    Parameters
    ----------
    p : polynomial object exposing degree(), all_coeffs() and LC()
        (a sympy Poly, judging by the methods used)

    Returns
    -------
    list
        element 0 is the leading coefficient, element k (k >= 1) is the
        k-th principal minor of the Hurwitz matrix
    '''
    # initial key and index pair needed to create Hurwitz matrix via sympy banded
    # each entry is of the type [ dictionary key, coefficient slice ]
    idxs = [ [ 1, 0 ] ]
    # generate next key by decrementing with 1
    genKey = lambda _: _ - 1
    # generate next index by incrementing with 1 if key was nonnegative
    # or with 2 if key is negative
    genSlice = lambda _, __: __ + 1 if _ >= 0 else __ + 2
    # fill the rest pairs w.r.t. the polynomial degree - 1, as we already have
    # one entry
    for _ in range(p.degree() - 1):
        key = genKey(idxs[-1][0])
        idxs.append( [ key, genSlice(key, idxs[-1][1] ) ] )
    # create the matrix itself: sy.banded places the coefficient slice `v:`
    # on the diagonal with offset `k`
    H = sy.banded({ k: p.all_coeffs()[v:] for k, v in idxs })
    # k-th leading principal minor for k = 1..degree; slot 0 carries the
    # leading coefficient instead of an (empty) 0x0 determinant
    return [ H[:_, :_].det() if _ > 0 else p.LC() for _ in range(0, p.degree()+1) ]
# define independent variable (time)
t = sy.symbols('t', real=True)
# define dependent variables individually and pack them in a variable
theta, omega = sy.symbols(r'\theta, \omega', real = True)
Y = theta, omega
# define free parameters of the system and pack them in a variable
g, L = sy.symbols('g, L', positive = True)
parms = g, L
# create rhs as sympy expressions
# (pendulum dynamics: theta' = omega, omega' = -(g/L) * sin(theta))
theta_dt = omega
omega_dt = -(g/L)*sy.sin(theta)
rhs = {}
rhs['sympy'] = sy.Matrix([theta_dt, omega_dt])
# convert the sympy matrix function to numpy function with usual signature
rhs['numpy'] = sy.lambdify((t, Y, *parms), rhs['sympy'], 'numpy')
# create Jacobian matrix as sympy expression
J = {}
J['sympy'] = rhs['sympy'].jacobian(Y)
# convert the sympy Jacobian expression to numpy function with usual signature
J['numpy'] = sy.lambdify((t, Y, *parms), J['sympy'])
# calculate rhs fixed points
fixed_points = sy.solve(rhs['sympy'], Y)
# substitute each fixed point in the Jacobian
# and calculate the eigenvalues
J_fixed = {}
for i, fp in enumerate(fixed_points):
    J_subs = J['sympy'].subs( [(y, v) for y, v in zip(Y, fp)])
    #J_eigenvals = J_subs.eigenvals(multiple=True)
    J_eigenvals = J_subs.eigenvals()
    # save the fixed point results in more details
    # most importantly the eigenvalues and their corresponding multiplicity
    J_fixed[i] = {
        'fixed point': fp,
        'subs': J_subs,
        'eigenvalues': list(J_eigenvals.keys()),
        'multiplicity': list(J_eigenvals.values())
    }
def plot_phase_portrait(ax, rhs, section, args=(), n_points=25):
    ''' plot section of phase space of a field defined via its rhs

    Parameters
    ----------
    ax : matplotlib axes to draw on
    rhs : callable with signature rhs(t, (x, y), *args)
    section : pair of (min, max) pairs giving the x and y ranges
    args : extra positional arguments forwarded to `rhs`
    n_points : grid resolution per axis
    '''
    # create section grid
    x_grid, y_grid = np.meshgrid(
        np.linspace( section[0][0], section[0][1], n_points ),
        np.linspace( section[1][0], section[1][1], n_points )
    )
    # calculate rhs on the grid
    xx, yy = rhs(None, ( x_grid, y_grid ), *args)
    # compute vector norms and make line width proportional to them
    # i.e. greater the vector length, the thicker the line
    # TODO not sure why rhs returns different shape
    # NOTE(review): the [0] indexing suggests the lambdified sympy Matrix
    # returns each component with an extra leading axis — confirm against
    # sy.lambdify output shape
    vector_norms = np.sqrt(xx[0]**2 + yy[0]**2)
    lw = 0.25 + 3*vector_norms/vector_norms.max()
    # plot the phase portrait
    ax.streamplot(
        x_grid, y_grid,
        xx[0], yy[0],
        linewidth = lw,
        arrowsize = 1.2,
        density = 1
    )
    return ax
def plot_main():
    '''Draw the pendulum phase portrait on the section
    theta in [-pi, pi], omega in [-2*pi, 2*pi] with (g, L) = (5, 1)
    (args follow the `parms = g, L` ordering used by sy.lambdify above).
    NOTE(review): the figure is created but never shown or saved here —
    presumably plt.show() happens elsewhere or in interactive use; confirm.'''
    fig, ax = plt.subplots()
    ax = plot_phase_portrait(
        ax,
        rhs['numpy'],
        (
            ( -np.pi, np.pi ),
            ( -2*np.pi, 2*np.pi)
        ),
        args = ( 5, 1 ),
    )
if __name__ == '__main__':
    # render the phase portrait when executed as a script
    plot_main()
| StarcoderdataPython |
176231 | <gh_stars>0
from io import BytesIO
from pathlib import Path
from typing import IO
from typing import List
from typing import Optional
from typing import Type
from typing import TypeVar
from typing import Union
from httpx import AsyncClient
from httpx import Response as HttpResponse
from consigliere.telegram import DeleteWebhookResponse
from consigliere.telegram import File
from consigliere.telegram import GetFileRequest
from consigliere.telegram import GetFileResponse
from consigliere.telegram import GetMeResponse
from consigliere.telegram import GetWebhookInfoResponse
from consigliere.telegram import Message
from consigliere.telegram import MessageEntity
from consigliere.telegram import SendMessageRequest
from consigliere.telegram import SendMessageResponse
from consigliere.telegram import SendPhotoRequest
from consigliere.telegram import SendPhotoResponse
from consigliere.telegram import SetWebhookRequest
from consigliere.telegram import SetWebhookResponse
from consigliere.telegram import User
from consigliere.telegram import WebhookInfo
from consigliere.telegram.base import Request
from consigliere.telegram.base import Response
from consigliere.telegram.entities import ReplyMarkupType
class Bot:
    """
    The class represents an entrypoint to communicate with Telegram Bot API.
    The methods are synchronized with those from official API doc.
    Usage: instantiate using bot token and go.
    If something is wrong, you'll get the `Bot.RequestError` exception raised.
    """
    TELEGRAM_BOT_API_URL = "https://api.telegram.org"

    class RequestError(RuntimeError):
        """
        A base exception for any internal error, including those
        caused by malformed requests and invalid data.
        """
        pass

    def __init__(self, token: str):
        """
        Sets up the new Bot instance.
        :param token: a bot token which BotFather gives to you.
        """
        self.__token = token

    @property
    def api_url(self) -> str:
        """
        An API URL to make requests to.
        :return: the completed API URL as a string
        """
        return f"{self.TELEGRAM_BOT_API_URL}/bot{self.__token}"

    @property
    def file_url(self) -> str:
        """
        An URL to download files from.
        :return: the completed URL as a string
        """
        return f"{self.TELEGRAM_BOT_API_URL}/file/bot{self.__token}"

    async def downloadFile(self, file: File) -> BytesIO:
        """
        Downloads the file's content into the BytesIO object.
        https://core.telegram.org/bots/api#getfile
        https://core.telegram.org/bots/api#file
        :param file: a File object
        :return: a BytesIO object with content of the file
        """
        if not file.file_path:
            raise self.RequestError(f"file {file} has no file_path set")
        return await self._download_file(file.file_path)

    async def getFile(self, file_id: str) -> File:
        """
        Use this method to get basic info about a file
        and prepare it for downloading.
        For the moment, bots can download files of up to 20MB in size.
        The file can then be downloaded
        via the link https://api.telegram.org/file/bot<token>/<file_path>,
        where <file_path> is taken from the response.
        It is guaranteed that the link will be valid for at least 1 hour.
        When the link expires,
        a new one can be requested by calling getFile again.
        https://core.telegram.org/bots/api#getfile
        :return: on success, a File object
        """
        request = GetFileRequest(file_id=file_id)
        return await self._call_api(
            "getFile",
            request,
            response_cls=GetFileResponse,
        )

    async def getMe(self) -> User:
        """
        A simple method for testing your bot's auth token.
        https://core.telegram.org/bots/api#getme
        :return: basic information about the bot in form of a User object
        """
        return await self._call_api(
            "getMe",
            response_cls=GetMeResponse,
        )

    async def getWebhookInfo(self) -> WebhookInfo:
        """
        Requests the current webhook status.
        https://core.telegram.org/bots/api#getwebhookinfo
        :return: a WebhookInfo object
        """
        return await self._call_api(
            "getWebhookInfo",
            response_cls=GetWebhookInfoResponse,
        )

    async def deleteWebhook(self) -> bool:
        """
        Removes the webhook integration.
        https://core.telegram.org/bots/api#deletewebhook
        :return: True on success
        """
        return await self._call_api(
            "deleteWebhook",
            response_cls=DeleteWebhookResponse,
        )

    async def setWebhook(self, *, url: str) -> bool:
        """
        Registers `url` as the webhook to receive incoming updates.
        https://core.telegram.org/bots/api#setwebhook
        :param url: the webhook URL to register
        :return: True on success
        """
        request = SetWebhookRequest(
            url=url,
        )
        return await self._call_api(
            "setWebhook",
            request,
            response_cls=SetWebhookResponse,
        )

    async def sendMessage(
        self,
        *,
        chat_id: Union[int, str],
        text: str,
        parse_mode: Optional[str] = None,
        entities: Optional[List[MessageEntity]] = None,
        disable_web_page_preview: Optional[bool] = None,
        disable_notification: Optional[bool] = None,
        reply_to_message_id: Optional[int] = None,
        allow_sending_without_reply: Optional[bool] = None,
        reply_markup: Optional[ReplyMarkupType] = None,
    ) -> Message:
        """
        Use this method to send text messages.
        https://core.telegram.org/bots/api#sendmessage
        :param chat_id: Unique identifier for the target chat
            or username of the target channel
            (in the format @channelusername).
        :param text: Text of the message to be sent,
            1-4096 characters after entities parsing.
        :param parse_mode: Mode for parsing entities in the message text.
            See formatting options for more details.
        :param entities: A JSON-serialized list of special entities
            that appear in message text,
            which can be specified instead of parse_mode.
        :param disable_web_page_preview: Disables link previews
            for links in this message.
        :param disable_notification: Sends the message silently.
            Users will receive a notification with no sound.
        :param reply_to_message_id: If the message is a reply,
            ID of the original message.
        :param allow_sending_without_reply: Pass True,
            if the message should be sent
            even if the specified replied-to message is not found.
        :param reply_markup: Additional interface options.
            A JSON-serialized object for an inline keyboard,
            custom reply keyboard,
            instructions to remove reply keyboard
            or to force a reply from the user.
        :return: on success, the sent Message.
        """
        request = SendMessageRequest(
            allow_sending_without_reply=allow_sending_without_reply,
            chat_id=chat_id,
            disable_notification=disable_notification,
            disable_web_page_preview=disable_web_page_preview,
            entities=entities,
            parse_mode=parse_mode,
            reply_markup=reply_markup,
            reply_to_message_id=reply_to_message_id,
            text=text,
        )
        return await self._call_api(
            "sendMessage",
            request,
            response_cls=SendMessageResponse,
        )

    async def sendPhoto(
        self,
        *,
        chat_id: Union[int, str],
        photo: Union[str, Path, IO],
        caption: Optional[str] = None,  # 0-1024
        parse_mode: Optional[str] = None,
        caption_entities: Optional[List[MessageEntity]] = None,
        disable_notification: Optional[bool] = None,
        reply_to_message_id: Optional[int] = None,
        allow_sending_without_reply: Optional[bool] = None,
        reply_markup: Optional[ReplyMarkupType] = None,
    ) -> Message:
        """
        Use this method to send photos.
        https://core.telegram.org/bots/api#sendphoto
        :param chat_id: Unique identifier for the target chat
            or username of the target channel
            (in the format @channelusername).
        :param photo: Photo to send.
            Pass a file_id as String to send a photo
            that exists on the Telegram servers (recommended),
            pass an HTTP URL as a String for Telegram
            to get a photo from the Internet,
            or upload a new photo using multipart/form-data.
            The photo must be at most 10 MB in size.
            The photo's width and height must not exceed 10000 in total.
            Width and height ratio must be at most 20.
        :param caption: Photo caption
            (may also be used when resending photos by file_id),
            0-1024 characters after entities parsing.
        :param parse_mode: Mode for parsing entities in the message text.
            See formatting options for more details.
        :param caption_entities: A JSON-serialized list of special entities
            that appear in the caption,
            which can be specified instead of parse_mode.
        :param disable_notification: Sends the message silently.
            Users will receive a notification with no sound.
        :param reply_to_message_id: If the message is a reply,
            ID of the original message.
        :param allow_sending_without_reply: Pass True,
            if the message should be sent
            even if the specified replied-to message is not found.
        :param reply_markup: Additional interface options.
            A JSON-serialized object for an inline keyboard,
            custom reply keyboard,
            instructions to remove reply keyboard
            or to force a reply from the user.
        :return: on success, the sent Message.
        """
        request = SendPhotoRequest(
            allow_sending_without_reply=allow_sending_without_reply,
            caption=caption,
            caption_entities=caption_entities,
            chat_id=chat_id,
            disable_notification=disable_notification,
            parse_mode=parse_mode,
            photo=photo,
            reply_markup=reply_markup,
            reply_to_message_id=reply_to_message_id,
        )
        return await self._call_api(
            "sendPhoto", request, response_cls=SendPhotoResponse
        )

    # ties `response_cls` to the type returned by `_call_api`
    _T = TypeVar(
        "_T"
    )  # don't worry about this: used as a generic type var in `_call_api`

    async def _call_api(  # noqa: CCR001
        self,
        method: str,
        request: Optional[Request] = None,
        *,
        response_cls: Type[Response[_T]] = Response[_T],
    ) -> _T:
        """
        Performs the call to the Bot API returning a value of proper type.
        In case of error raises `Bot.RequestError`.
        https://core.telegram.org/bots/api#making-requests
        :param method: name of the supported Telegram Bot API method
        :param request: request object,
            composed from input params of public method
        :param response_cls: desired response class with actual result type
        :return: object of response class' result type
        """
        try:
            url = f"{self.api_url}/{method}"
            request = (
                request or Request()
            )  # for methods which do not need request at all
            client: AsyncClient
            async with AsyncClient() as client:
                with request.files() as files:
                    # if files, data must be of multipart/form-data
                    # otherwise JSON bytes with Content-Type=application/json
                    data = request.dict() if files else request.json()
                    headers = (
                        {} if files else {"Content-Type": "application/json"}
                    )
                    http_response: HttpResponse = await client.post(
                        url,
                        # mypy can't into X if P else Y
                        data=data,  # type: ignore
                        files=files,
                        headers=headers,
                    )
            # non-200 HTTP status: surface the raw body as the error message
            if http_response.status_code != 200:
                raise self.RequestError(http_response.content)
            payload = http_response.json()
            if not payload:
                raise self.RequestError(
                    f"unexpected empty payload on /{method}"
                )
            # actual&valid Telegram response
            response = response_cls.parse_obj(payload)
            if not response.ok:
                raise self.RequestError(response.description)
            if response.result is None:
                raise self.RequestError(
                    f"unexpected null result on /{method} -> {response}"
                )
            return response.result
        except Exception as err:
            # NOTE: this also re-wraps RequestError instances raised above
            # in another RequestError; callers only catch RequestError anyway
            raise self.RequestError(err) from err

    async def _download_file(
        self,
        file_path: str,
    ) -> BytesIO:
        """
        Downloads a file using file path from API.
        https://core.telegram.org/bots/api#getfile
        https://core.telegram.org/bots/api#file
        :param file_path: File path.
            Use https://api.telegram.org/file/bot<token>/<file_path>
            to get the file.
        :return: a BytesIO object with file content.
        """
        try:
            url = f"{self.file_url}/{file_path}"
            client: AsyncClient
            async with AsyncClient() as client:
                http_response: HttpResponse = await client.get(url)
            if http_response.status_code != 200:
                raise self.RequestError(http_response.content)
            # buffer the whole response body and rewind for the caller
            buffer = BytesIO()
            buffer.write(http_response.content)
            buffer.seek(0)
            return buffer
        except Exception as err:
            raise self.RequestError(err) from err
| StarcoderdataPython |
130263 | """
Parsing text file into Experiment instance using strictyaml
(github.com/crdoconnor/strictyaml/)
The aim here is to make config:
* possible to use even for non-programmers
* hard to misuse
* easy debuggable
Hence, the process of parsing config is a bit more complicated than
it could be, but it produces more useful error messages. For example:
File $YOUR_CONFIG.yaml, line 42
topic_names: 10
^ this value should be a 'list' instead of 'int'
YAMLValidationError: 'int' passed instead of 'list'
instead of:
File $SOME_FILE.py, line 666, in $SOME_FUNCTION
for topic_name in topic_names:
TypeError: 'int' object is not iterable
To achieve this, strictyaml makes use of various validators which
keep track of individual line numbers and which fragments are already
checked and which aren't quite here yet.
Our process consists of three stages:
1) we check the high-level structure using `BASE_SCHEMA`.
The presence of each required key is ensured.
After this stage we could be sure than we can create a valid model
using specified parameters.
2) we make a second pass and revalidate 'regularizers' and 'stages'
This step is performed semi-automatically: using `inspect`,
we extract everything from `__init__` method signature.
For example:
def __init__(self, num_iters: int = 5)
allows us to infer that num_iters parameter should be int,
but it isn't strictly required.
3) we construct instances of classes required, convert types manually
and implement some shortcuts.
Ideally, this stage should be performed using revalidate() as well,
but it's a work-in-progress currently.
""" # noqa: W291
from inspect import signature, Parameter
from typing import (
Callable,
Type,
)
from .cubes import (
CubeCreator,
RegularizersModifierCube,
GreedyStrategy,
PerplexityStrategy,
)
from .experiment import Experiment
from .dataset import Dataset
from .models import scores as tnscores
from .models import TopicModel
from .model_constructor import (
create_default_topics,
init_simple_default_model,
)
from .rel_toolbox_lite import (
count_vocab_size,
handle_regularizer,
)
import artm
from strictyaml import Map, Str, Int, Seq, Float, Bool
from strictyaml import Any, Optional, EmptyDict, EmptyNone, EmptyList
from strictyaml import dirty_load
# cube and strategy classes that a YAML config is allowed to reference
SUPPORTED_CUBES = [CubeCreator, RegularizersModifierCube]
SUPPORTED_STRATEGIES = [PerplexityStrategy, GreedyStrategy]
# maps docstring type names to strictyaml validators
TYPE_VALIDATORS = {
    'int': Int(), 'bool': Bool(), 'str': Str(), 'float': Float()
}
def choose_key(param):
    """
    Map a constructor parameter to a strictyaml schema key.

    Parameters
    ----------
    param : inspect.Parameter

    Returns
    -------
    str or strictyaml.Optional
        Optional(name) when the parameter has a default value,
        the bare (required) name otherwise.
    """
    has_default = param.default is not Parameter.empty
    return Optional(param.name) if has_default else param.name
def choose_validator(param):
    """
    Pick a strictyaml validator for a constructor parameter.

    Parameters
    ----------
    param : inspect.Parameter

    Returns
    -------
    instance of strictyaml.Validator
    """
    # plainly annotated parameters get the matching scalar validator
    for annotation_type, validator_factory in (
        (int, Int),
        (float, Float),
        (bool, Bool),
        (str, Str),
    ):
        if param.annotation is annotation_type:
            return validator_factory()
    # well-known ARTM parameter names have hand-written validators
    if param.name in ARTM_TYPES:
        return ARTM_TYPES[param.name]
    return Any()
# TODO: maybe this is cool, but do we really need this?
def build_schema_from_function(func: Callable) -> dict:
    """Build a {schema key: validator} dict from a function's docstring params."""
    from docstring_parser import parse as docstring_parse
    func_params = signature(func).parameters
    return {
        choose_key(func_params[doc_param.arg_name]): TYPE_VALIDATORS[doc_param.type_name]
        for doc_param in docstring_parse(func.__doc__).params
        if doc_param.arg_name in func_params
    }
# TODO: use stackoverflow.com/questions/37929851/parse-numpydoc-docstring-and-access-components
# for now just hardcode most common / important types
# validators for well-known ARTM constructor parameters, keyed by parameter
# name; consulted by `choose_validator` when the annotation alone is not enough
ARTM_TYPES = {
    "tau": Float(),
    "topic_names": Str() | Seq(Str()) | EmptyNone(),
    # TODO: handle class_ids in model and in regularizers separately
    "class_ids": Str() | Seq(Str()) | EmptyNone(),
    "gamma": Float() | EmptyNone(),
    "seed": Int(),
    "num_document_passes": Int(),
    "num_processors": Int(),
    "cache_theta": Bool(),
    "reuse_theta": Bool(),
    "theta_name": Str()
}
# placeholder validator: entry structure is checked in a later revalidation pass
_ELEMENT = Any()
# TODO: maybe better _DICTIONARY_FILTER_SCHEMA = build_schema_from_function(artm.Dictionary.filter)
# TODO: modalities, filter params - these all are dataset's options, not model's
# maybe make separate YML block for dataset?
# first-pass schema: validates only the coarse structure of the config;
# 'regularizers', 'scores' and 'stages' entries are revalidated in detail later
BASE_SCHEMA = Map({
    'regularizers': Seq(_ELEMENT),
    Optional('scores'): Seq(_ELEMENT),
    'stages': Seq(_ELEMENT),
    'model': Map({
        "dataset_path": Str(),
        Optional("dictionary_filter_parameters"): Map({
            Optional("class_id"): Str(),
            Optional("min_df"): Float(),
            Optional("max_df"): Float(),
            Optional("min_df_rate"): Float(),
            Optional("max_df_rate"): Float(),
            Optional("min_tf"): Float(),
            Optional("max_tf"): Float(),
            Optional("max_dictionary_size"): Float(),
            Optional("recalculate_value"): Bool(),
        }),
        Optional("keep_in_memory"): Bool(),
        Optional("internals_folder_path"): Bool(),
        Optional("modalities_to_use"): Seq(Str()),
        Optional("modalities_weights"): Any(),
        "main_modality": Str(),
    }),
    'topics': Map({
        "background_topics": Seq(Str()) | Int() | EmptyList(),
        "specific_topics": Seq(Str()) | Int() | EmptyList(),
    })
})
# config key holding the kwargs passed to artm.Dictionary.filter
KEY_DICTIONARY_FILTER_PARAMETERS = 'dictionary_filter_parameters'
def build_schema_from_signature(class_of_object, use_optional=True):
    """
    Build a validation schema from a class constructor's signature.

    Parameters
    ----------
    class_of_object : class

    Returns
    -------
    dict
        each element is either str -> Validator or Optional(str) -> Validator
    """
    schema = {}
    for param in signature(class_of_object.__init__).parameters.values():
        if param.name == 'self':
            continue
        key = choose_key(param) if use_optional else param.name
        schema[key] = choose_validator(param)
    return schema
def wrap_in_map(dictionary):
    """Wrap a schema dict in Map; also accept an empty mapping when every
    key is Optional (i.e. the whole section may legitimately be empty)."""
    for key in dictionary:
        if not isinstance(key, Optional):
            return Map(dictionary)
    return Map(dictionary) | EmptyDict()
def build_schema_for_scores():
    """
    Returns
    -------
    dict
        maps each score class name to the strictyaml schema used for
        validation and type-coercion of its config entry
    """
    schemas = {}
    # both ARTM built-in scores and topicnet's own scores are supported;
    # they share the same signature-derived schema construction
    for module in (artm.scores, tnscores):
        for member_name in module.__all__:
            if "Score" not in member_name:
                continue
            class_of_object = getattr(module, member_name)
            # TODO: check if every key is Optional. If it is, then "| EmptyDict()"
            # otherwise, just Map()
            # TODO: support custom score names (a "name" schema key)
            signature_schema = wrap_in_map(build_schema_from_signature(class_of_object))
            schemas[class_of_object.__name__] = Map(
                {class_of_object.__name__: signature_schema}
            )
    return schemas
def build_schema_for_regs():
    """
    Returns
    -------
    dict
        maps each regularizer class name to the strictyaml schema used for
        validation and type-coercion of its config entry
    """
    # regularizers whose tau may be given relative to vocabulary statistics
    relative_aware = (
        "SmoothSparseThetaRegularizer",
        "SmoothSparsePhiRegularizer",
        "DecorrelatorPhiRegularizer",
    )
    schemas = {}
    for member_name in artm.regularizers.__all__:
        if "Regularizer" not in member_name:
            continue
        class_of_object = getattr(artm.regularizers, member_name)
        signature_schema = build_schema_from_signature(class_of_object)
        if member_name in relative_aware:
            signature_schema[Optional("relative", default=None)] = Bool()
        signature_schema = wrap_in_map(signature_schema)
        schemas[class_of_object.__name__] = Map(
            {class_of_object.__name__: signature_schema}
        )
    return schemas
def is_key_in_schema(key, schema):
    """Return True when `key` appears in `schema` either directly or wrapped
    in a strictyaml Optional."""
    if key in schema:
        return True
    optional_keys = (
        schema_key.key for schema_key in schema
        if isinstance(schema_key, Optional)
    )
    return any(candidate == key for candidate in optional_keys)
def build_schema_for_cubes():
    """
    Returns
    -------
    dict
        each element is str -> strictyaml.Map
        where key is name of cube,
        value is a schema used for validation and type-coercion
    """
    schemas = {}
    for class_of_object in SUPPORTED_CUBES:
        res = build_schema_from_signature(class_of_object)
        # "selection" isn't used in __init__, but we will need it later
        res["selection"] = Seq(Str())
        # shortcut for strategy initialization: accept a flat
        # "strategy_params" mapping combining all supported strategies' params
        if is_key_in_schema("strategy", res):
            signature_validation = {}
            for strategy_class in SUPPORTED_STRATEGIES:
                local_signature_validation = build_schema_from_signature(strategy_class)
                signature_validation.update(local_signature_validation)
            res[Optional("strategy_params")] = Map(signature_validation)
        # we will deal with "values" later, but we can check at least some simple things already
        if class_of_object.__name__ == "CubeCreator":
            element = Map({"name": Str(), "values": Seq(Any())})
            res["parameters"] = Seq(element)
        if class_of_object.__name__ == "RegularizersModifierCube":
            element = Map({
                Optional("name"): Str(),
                Optional("regularizer"): Any(),
                Optional("tau_grid"): Seq(Float())
            })
            res["regularizer_parameters"] = element | Seq(element)
        res = Map(res)
        specific_schema = Map({class_of_object.__name__: res})
        schemas[class_of_object.__name__] = specific_schema
    return schemas
def preprocess_parameters_for_cube_creator(elem_args):
    """
    This function does two things:
        1) convert class_ids from
            name: class_ids@text, values: [0, 1, 2, 3]
        to
            name: class_ids, values: {"@text": [0, 1, 2, 3]}
        2) type conversion for "values" field.

    Parameters
    ----------
    elem_args: strictyaml.YAML object
        (contains dict inside)

    Returns
    -------
    None
        `elem_args` is revalidated (and thus type-coerced) in place.
    """
    for param_portion in elem_args["parameters"]:
        name = str(param_portion["name"])
        if name.startswith("class_ids"):
            # per-modality weights: scalar or list of floats
            validator = Float() | Seq(Float())
        else:
            # known ARTM parameter: a list of values of its declared type
            validator = Seq(ARTM_TYPES[name])
        param_schema = Map({
            "name": Str(),
            "values": validator
        })
        param_portion.revalidate(param_schema)
def handle_special_cases(elem_args, kwargs):
    """
    In-place fixes kwargs, handling special cases and shortcuts
    (only strategy for now)

    Parameters
    ----------
    elem_args: dict
    kwargs: dict
    """
    # special case: shortcut for strategy — the config names a strategy class
    # and supplies its constructor parameters under "strategy_params"
    if "strategy" in elem_args:
        strategy = None
        for strategy_class in SUPPORTED_STRATEGIES:
            if strategy_class.__name__ == elem_args["strategy"]:
                # validate each provided param against the strategy signature
                strat_schema = build_schema_from_signature(strategy_class, use_optional=False)
                strat_kwargs = {}
                for key, value in elem_args["strategy_params"].items():
                    key = str(key)
                    value.revalidate(strat_schema[key])
                    strat_kwargs[key] = value.data
                strategy = strategy_class(**strat_kwargs)
        kwargs["strategy"] = strategy  # or None if failed to identify it
def build_score(elemtype, elem_args, is_artm_score):
    """
    Construct a score object by class name.

    Parameters
    ----------
    elemtype : str
        name of score class
    elem_args: dict
        constructor keyword arguments
    is_artm_score: bool
        whether the class lives in artm.scores (otherwise topicnet scores)

    Returns
    -------
    instance of artm.scores.BaseScore or topicnet.cooking_machine.models.base_score
    """
    source_module = artm.scores if is_artm_score else tnscores
    score_class = getattr(source_module, elemtype)
    constructor_kwargs = dict(elem_args.items())
    return score_class(**constructor_kwargs)
def build_regularizer(elemtype, elem_args, specific_topic_names, background_topic_names):
    """
    Construct a regularizer object by class name, expanding the
    "specific_topics"/"background_topics" shortcuts for `topic_names`.

    Parameters
    ----------
    elemtype : str
        name of regularizer class
    elem_args: dict
        constructor keyword arguments (parsed config section)

    Returns
    -------
    instance of artm.Regularizer
    """
    regularizer_class = getattr(artm.regularizers, elemtype)
    kwargs = dict(elem_args.items())
    # shortcut: a literal shortcut string stands for the corresponding
    # generated topic-name list
    shortcut_names = {
        "background_topics": background_topic_names,
        "specific_topics": specific_topic_names,
    }
    requested_topics = kwargs.get("topic_names")
    for shortcut, replacement in shortcut_names.items():
        if requested_topics == shortcut:
            kwargs["topic_names"] = replacement
    return regularizer_class(**kwargs)
def build_cube_settings(elemtype, elem_args):
    """
    Parameters
    ----------
    elemtype : str
        name of the cube class
    elem_args: strictyaml.YAML object
        (contains dict inside)

    Returns
    -------
    dict
        {cube class name: constructor kwargs, "selection": list of str}
    """
    if elemtype == "CubeCreator":
        preprocess_parameters_for_cube_creator(elem_args)
    # everything except the reserved keys becomes a constructor kwarg
    kwargs = {name: value
              for name, value in elem_args.data.items()
              if name not in ['selection', 'strategy', 'strategy_params']}
    handle_special_cases(elem_args, kwargs)
    return {elemtype: kwargs,
            "selection": elem_args['selection'].data}
def _add_parsed_scores(parsed, topic_model):
    """Attach every score declared in the config to `topic_model` (in place).

    ARTM built-in scores go into the underlying artm model; topicnet's own
    scores are stored in `topic_model.custom_scores`.
    """
    for score in parsed.data.get('scores', []):
        for elemtype, elem_args in score.items():
            is_artm_score = elemtype in artm.scores.__all__
            score_object = build_score(elemtype, elem_args, is_artm_score)
            if is_artm_score:
                topic_model._model.scores.add(score_object, overwrite=True)
            else:
                topic_model.custom_scores[elemtype] = score_object
def _add_parsed_regularizers(
    parsed, model, specific_topic_names, background_topic_names, data_stats
):
    """Attach every regularizer declared in the config to `model` (in place)
    and return the list of the attached regularizer objects.
    """
    regularizers = []
    for stage in parsed.data['regularizers']:
        for elemtype, elem_args in stage.items():
            # "relative" is a config-level flag consumed by handle_regularizer,
            # not a regularizer constructor kwarg — strip it first
            should_be_relative = None
            if "relative" in elem_args:
                should_be_relative = elem_args["relative"]
                elem_args.pop("relative")
            regularizer_object = build_regularizer(
                elemtype, elem_args, specific_topic_names, background_topic_names
            )
            handle_regularizer(should_be_relative, model, regularizer_object, data_stats)
            regularizers.append(model.regularizers[regularizer_object.name])
    return regularizers
def parse_modalities_data(parsed):
    """Return either the modality weights dict ('modalities_weights' given)
    or the plain list of modalities ('modalities_to_use' given).

    Exactly one of the two keys must be present in the 'model' section;
    the main modality is always included.
    """
    has_modalities_to_use = is_key_in_schema("modalities_to_use", parsed["model"])
    has_weights = is_key_in_schema("modalities_weights", parsed["model"])
    main_modality = parsed["model"]["main_modality"]
    # exactly one should be specified
    if has_modalities_to_use == has_weights:
        raise ValueError("Either 'modalities_to_use' or 'modalities_weights' should be specified")
    if has_weights:
        modalities_to_use = list(parsed["model"]["modalities_weights"].data)
        if main_modality not in modalities_to_use:
            modalities_to_use.append(main_modality)
        # now that the exact key set is known, revalidate the weights as floats
        local_schema = Map({
            key: Float() for key in modalities_to_use
        })
        parsed["model"]["modalities_weights"].revalidate(local_schema)
        modalities_weights = parsed["model"]["modalities_weights"].data
        return modalities_weights
    else:
        modalities_to_use = parsed.data["model"]["modalities_to_use"]
        return modalities_to_use
def parse(
    yaml_string: str,
    force_separate_thread: bool = False,
    dataset_class: Type[Dataset] = Dataset
):
    """
    Parse a YAML experiment description into ready-to-use objects.

    Parameters
    ----------
    yaml_string : str
        experiment config (YAML text, validated against BASE_SCHEMA)
    force_separate_thread : bool
        propagated into every cube's settings as ``separate_thread``
    dataset_class : class
        class used to instantiate the dataset (defaults to Dataset)
    Returns
    -------
    cube_settings: list of dict
    regularizers: list
    topic_model: TopicModel
    dataset: Dataset
    """
    parsed = dirty_load(yaml_string, BASE_SCHEMA, allow_flow_style=True)
    specific_topic_names, background_topic_names = create_default_topics(
        parsed.data["topics"]["specific_topics"],
        parsed.data["topics"]["background_topics"]
    )
    # coerce/validate the polymorphic sections in place
    revalidate_section(parsed, "stages")
    revalidate_section(parsed, "regularizers")
    if "scores" in parsed:
        revalidate_section(parsed, "scores")
    dataset = dataset_class(
        data_path=parsed.data["model"]["dataset_path"],
        keep_in_memory=parsed.data["model"].get("keep_in_memory", True),
        internals_folder_path=parsed.data["model"].get("internals_folder_path", None),
    )
    filter_parameters = parsed.data["model"].get(
        KEY_DICTIONARY_FILTER_PARAMETERS, dict()
    )
    if len(filter_parameters) > 0:
        filtered_dictionary = dataset.get_dictionary().filter(**filter_parameters)
        dataset._cached_dict = filtered_dictionary
    modalities_to_use = parse_modalities_data(parsed)
    data_stats = count_vocab_size(dataset.get_dictionary(), modalities_to_use)
    model = init_simple_default_model(
        dataset=dataset,
        modalities_to_use=modalities_to_use,
        main_modality=parsed.data["model"]["main_modality"],
        specific_topics=parsed.data["topics"]["specific_topics"],
        background_topics=parsed.data["topics"]["background_topics"],
    )
    regularizers = _add_parsed_regularizers(
        parsed, model, specific_topic_names, background_topic_names, data_stats
    )
    topic_model = TopicModel(model)
    _add_parsed_scores(parsed, topic_model)
    cube_settings = list()
    for stage in parsed['stages']:
        for elemtype, elem_args in stage.items():
            settings = build_cube_settings(elemtype.data, elem_args)
            settings[elemtype]["separate_thread"] = force_separate_thread
            cube_settings.append(settings)
    return cube_settings, regularizers, topic_model, dataset
def revalidate_section(parsed, section):
    """
    Performs in-place type coercion and validation
    Parameters
    ----------
    parsed : strictyaml.YAML object
        (half-parsed, half-validated chunk of config)
    section: str
        one of "stages", "regularizers" or "scores"
    """
    if section == "stages":
        schemas = build_schema_for_cubes()
    elif section == "regularizers":
        schemas = build_schema_for_regs()
    elif section == "scores":
        schemas = build_schema_for_scores()
    else:
        raise ValueError(f"Unknown section name '{section}'")
    for i, stage in enumerate(parsed[section]):
        # each entry is a single-key mapping: {element_type: arguments}
        assert len(stage) == 1
        name = list(stage.data)[0]
        if name not in schemas:
            raise ValueError(f"Unsupported {section} value: {name} at line {stage.start_line}")
        local_schema = schemas[name]
        stage.revalidate(local_schema)
def build_experiment_environment_from_yaml_config(
    yaml_string,
    experiment_id,
    save_path,
    force_separate_thread=False,
):
    """
    Wraps up parameter extraction and class instances creation
    from yaml formatted string
    together with the method that builds experiment pipeline from
    given experiment parameters (model, cubes, regularizers, etc)
    Parameters
    ----------
    yaml_string: str
        config that contains the whole experiment pipeline description
        with its parameters
    save_path: str
        path to the folder to save experiment logs and models
    experiment_id: str
        name of the experiment folder
    force_separate_thread: bool default = False
        experimental feature that packs model training into
        separate process which is killed upon training completion
        by default is not used
    Returns
    -------
    tuple experiment, dataset instances of corresponding classes from topicnet
    """
    settings, regs, model, dataset = parse(yaml_string, force_separate_thread)
    # TODO: handle dynamic addition of regularizers
    experiment = Experiment(experiment_id=experiment_id, save_path=save_path, topic_model=model)
    # build the cube pipeline from the parsed stage settings
    experiment.build(settings)
    return experiment, dataset
| StarcoderdataPython |
1623187 | # @Author: <NAME> <varoon>
# @Date: 18-08-2017
# @Filename: kernel_convolution_ex.py
# @Last modified by: varoon
# @Last modified time: 18-08-2017
import cv2
import numpy as np
#GOAL: Apply the following kernel convolution to an image: [-1,0,1||-1,5,-1||0,-1,0]
#applying a sharpening kernel convolution manually. Bad way.
def sharpen(image):
    """
    Sharpen an image by manually applying the 3x3 kernel
    [[0,-1,0],[-1,5,-1],[0,-1,0]] with per-pixel loops (the slow,
    didactic way; see cv2.filter2D below for the easy way).

    Parameters
    ----------
    image : numpy.ndarray
        H x W x C array of intensities in [0, 255].

    Returns
    -------
    numpy.ndarray
        Sharpened uint8 image of the same shape; the one-pixel border is
        left at zero because the kernel is not applied there.
    """
    # BUGFIX: the original called cv2.cvtColor(image, c2.CV_8U), which raised
    # NameError ('c2' undefined) and passed a depth constant where a color
    # conversion code is expected.  Clamping to uint8 is what was intended.
    image = np.clip(image, 0, 255).astype(np.uint8)
    height, width, num_channels = image.shape
    result = np.zeros(image.shape, image.dtype)
    for i in range(1, height - 1):
        for j in range(1, width - 1):
            for k in range(0, num_channels):
                # BUGFIX: cast to Python int so 5*pixel cannot wrap around
                # in uint8 arithmetic before the clamp is applied.
                acc = 5 * int(image[i, j, k]) - int(image[i + 1, j, k]) \
                    - int(image[i - 1, j, k]) - int(image[i, j + 1, k]) - int(image[i, j - 1, k])
                result[i, j, k] = min(max(acc, 0), 255)
    return result
#THE EASY WAY:
kernel = np.array([[0,-1,0],[-1,5,-1],[0,-1,0]],np.float32) #KERNEL NEEDS TO BE A FLOAT MATRIX
# NOTE(review): `I` is never defined in this script -- an image must be
# loaded first (e.g. I = cv2.imread(...)) or this line raises NameError.
res = cv2.filter2D(I,-1,kernel) #ddepth = -1 means res image has same depth as I
| StarcoderdataPython |
1789539 | from utils.log import Log
from falcon import testing
from api import api
from reader.arg import Arg_Reader
from about import title, version
from extra.lcp_config import LCPConfig
from extra.clients_starter import end_client_threads
class LCPTestBase(testing.TestCase):
    """Base class for LCP API tests: resets the config, initialises logging
    once per session and builds a fresh falcon app for each test."""
    # class-level logger shared across all tests; initialised lazily in setUp
    log = None
    def setUp(self):
        super(LCPTestBase, self).setUp()
        # start every test from a pristine example configuration
        LCPConfig.__drop_it__("examples/LCPConfig.yaml")
        lcp = LCPConfig()
        lcp.reset()
        lcp.testing = True
        self.db = Arg_Reader.read()
        # initialise logging only once for the whole test session
        if LCPTestBase.log is None:
            Log.init(config="../"+self.db.log_config)
            LCPTestBase.log = Log.get('api')
        self.app = api(title=title, version=version)
    def tearDown(self) -> None:
        # stop any client threads spawned during the test
        end_client_threads()
164693 | """This module provides configuration values used by the application."""
import logging
import os
from collections.abc import Mapping, Sequence
from logging import config as lc
from typing import Any, Optional, Union, final
import jinja2
import yaml
from pydantic import AnyHttpUrl, BaseModel, BaseSettings, EmailStr, HttpUrl, validator
from document.domain import model
@final
class Settings(BaseSettings):
    """
    BaseSettings subclasses like this one allow values of constants to
    be overridden by environment variables like those defined in env
    files, e.g., ../../.env
    """

    REPO_URL_DICT_KEY: str = "../download-scripture?repo_url"
    RESOURCE_TYPES_JSONPATH: str = "$[*].contents[*].code"
    RESOURCE_TYPES_FOR_LANG_JSONPATH: str = "$[?code='{}'].contents[*].code"
    RESOURCE_CODES_JSONPATH: str = "$[*].contents[*].subcontents[*].code"
    RESOURCE_CODES_FOR_LANG_JSONPATH: str = (
        "$[?code='{}'].contents[*].subcontents[*].code"
    )
    # HTML fragment templates used when assembling the generated document
    LANGUAGE_FMT_STR: str = "<h1>Language: {}</h1>"
    RESOURCE_TYPE_NAME_FMT_STR: str = "<h2>{}</h2>"
    RESOURCE_TYPE_NAME_WITH_REF_FMT_STR: str = "<h3>{} {}:{}</h3>"
    TN_RESOURCE_TYPE_NAME_WITH_ID_AND_REF_FMT_STR: str = (
        '<h3 id="{}-{}-tn-ch-{}-v-{}">{} {}:{}</h3>'
    )
    HTML_ROW_BEGIN: str = model.HtmlContent("<div class='row'>")
    HTML_ROW_END: str = model.HtmlContent("</div>")
    HTML_COLUMN_BEGIN: str = model.HtmlContent("<div class='column'>")
    HTML_COLUMN_END: str = model.HtmlContent("</div>")
    BOOK_FMT_STR: str = "<h2>Book: {}</h2>"
    BOOK_AS_GROUPER_FMT_STR: str = "<h1>Book: {}</h1>"
    VERSE_FMT_STR: str = "<h3>Verse {}:{}</h3>"
    TRANSLATION_NOTE_FMT_STR: str = "<h3>Translation note {}:{}</h3>"
    CHAPTER_HEADER_FMT_STR: str = '<h2 class="c-num" id="{}-{}-ch-{}">Chapter {}</h2>'
    TRANSLATION_QUESTION_FMT_STR: str = "<h3>Translation question {}:{}</h3>"
    TRANSLATION_ACADEMY_FMT_STR: str = "<h3>Translation academy {}:{}</h3>"
    UNORDERED_LIST_BEGIN_STR: model.HtmlContent = model.HtmlContent("<ul>")
    UNORDERED_LIST_END_STR: model.HtmlContent = model.HtmlContent("</ul>")
    TRANSLATION_WORD_LIST_ITEM_FMT_STR: model.HtmlContent = model.HtmlContent(
        '<li><a href="#{}-{}">{}</a></li>'
    )
    TRANSLATION_WORDS_FMT_STR: str = "<h3>Translation words {}:{}</h3>"
    TRANSLATION_WORDS_SECTION_STR: str = "<h2>Translation words</h2>"
    TRANSLATION_WORD_VERSE_SECTION_HEADER_STR: model.HtmlContent = model.HtmlContent(
        "<h4>Uses:</h4>"
    )
    TRANSLATION_WORD_VERSE_REF_ITEM_FMT_STR: str = (
        '<li><a href="#{}-{}-ch-{}-v-{}">{} {}:{}</a></li>'
    )
    FOOTNOTES_HEADING: model.HtmlContent = model.HtmlContent("<h3>Footnotes</h3>")
    OPENING_H3_FMT_STR: str = "<h3>{}"
    OPENING_H3_WITH_ID_FMT_STR: str = '<h3 id="{}-{}">{}'
    TRANSLATION_WORD_ANCHOR_LINK_FMT_STR: str = "[{}](#{}-{})"
    TRANSLATION_WORD_PREFIX_ANCHOR_LINK_FMT_STR: str = "({}: [{}](#{}-{}))"
    TRANSLATION_NOTE_ANCHOR_LINK_FMT_STR: str = "[{}](#{}-{}-tn-ch-{}-v-{})"
    # FIXME Tighten up the '.' usage in the following regex
    VERSE_ANCHOR_ID_FMT_STR: str = 'id="(.+?)-ch-(.+?)-v-(.+?)"'
    VERSE_ANCHOR_ID_SUBSTITUTION_FMT_STR: str = r"id='{}-\1-ch-\2-v-\3'"
    LOGGING_CONFIG_FILE_PATH: str = "backend/document/logging_config.yaml"
    DOCKER_CONTAINER_PDF_OUTPUT_DIR: str = "/pdf_output"
    USFM_RESOURCE_TYPES: Sequence[str] = [
        "cuv",
        "f10",
        "nav",
        "reg",
        "udb",
        "udb-wa",
        "ulb",
        "ulb-wa",
        "usfm",
    ]
    TN_RESOURCE_TYPES: Sequence[str] = ["tn", "tn-wa"]
    TQ_RESOURCE_TYPES: Sequence[str] = ["tq", "tq-wa"]
    TW_RESOURCE_TYPES: Sequence[str] = ["tw", "tw-wa"]
    def logger(self, name: str) -> logging.Logger:
        """
        Return a Logger for scope named by name, e.g., module, that can be
        used for logging.
        """
        # NOTE: the YAML logging config is re-read and re-applied on every call
        with open(self.LOGGING_CONFIG_FILE_PATH, "r") as fin:
            logging_config = yaml.safe_load(fin.read())
            lc.dictConfig(logging_config)
        return logging.getLogger(name)
    def api_test_url(self) -> str:
        """Non-secure local URL for running the Fastapi server for testing."""
        return "http://localhost:{}".format(self.API_LOCAL_PORT)
    # Get API prefix. Useful to have a prefix for versioning of the API.
    # TODO Consider using API_ROOT in router prefix
    API_ROOT: str
    API_LOCAL_PORT: int
    API_REMOTE_PORT: int
    # FIXME HTTPS shouldn't be hardcoded. fastapi will have a sane way
    # to deal with this that I've yet to research.
    def api_url(self) -> str:
        """Return the full base URL of the Fastapi server."""
        host = os.environ.get("API_HOST", "localhost")
        port = self.API_LOCAL_PORT if host == "localhost" else self.API_REMOTE_PORT
        root = self.API_ROOT
        return "https://{}:{}{}".format(host, port, root)
    # Location where resource assets will be downloaded.
    RESOURCE_ASSETS_DIR: str = "/working/temp"
    # Indicate whether running in Docker container.
    IN_CONTAINER: bool = False
    def working_dir(self) -> str:
        """
        The directory where the resources will be placed once
        acquired.
        """
        if self.IN_CONTAINER:
            return self.RESOURCE_ASSETS_DIR
        else:
            # outside the container, strip the leading '/' so the path is
            # relative to the current working directory
            return self.RESOURCE_ASSETS_DIR[1:]
    # Location where generated PDFs will be written to.
    DOCUMENT_OUTPUT_DIR: str = "/working/output"
    def output_dir(self) -> str:
        """The directory where the generated documents are placed."""
        dirname = ""
        if self.IN_CONTAINER:
            dirname = self.DOCUMENT_OUTPUT_DIR
        else:
            # same leading-'/' stripping as working_dir above
            dirname = self.DOCUMENT_OUTPUT_DIR[1:]
        return dirname
    # For options see https://wkhtmltopdf.org/usage/wkhtmltopdf.txt
    WKHTMLTOPDF_OPTIONS: Mapping[str, Optional[str]] = {
        "page-size": "Letter",
        # 'margin-top': '0.75in',
        # 'margin-right': '0.75in',
        # 'margin-bottom': '0.75in',
        # 'margin-left': '0.75in',
        "encoding": "UTF-8",
        "load-error-handling": "ignore",
        "outline": None,  # Produce an outline
        "outline-depth": "3",  # Only go depth of 3 on the outline
        "enable-internal-links": None,  # enable internal links
        "header-left": "[section]",
        "header-right": "[subsection]",
        "header-line": None,  # Produce a line under the header
        "footer-center": "[page]",
        "footer-line": None,  # Produce a line above the footer
    }
    # Return the message to show to user on successful generation of
    # PDF.
    SUCCESS_MESSAGE: str = "Success! Please retrieve your generated document using a GET REST request to /pdf/{document_request_key} where document_request_key is the finished_document_request_key in this payload."
    # Return the message to show to user on failure generating PDF.
    FAILURE_MESSAGE: str = "The document request could not be fulfilled either because the resources requested are not available either currently or at all or because the system does not yet support the resources requested."
    # The location where the JSON data file that we use to lookup
    # location of resources is located.
    TRANSLATIONS_JSON_LOCATION: HttpUrl
    # The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
    # individual USFM files (per bible book) may be found.
    INDIVIDUAL_USFM_URL_JSONPATH: str = "$[?code='{}'].contents[?code='{}'].subcontents[?code='{}'].links[?format='usfm'].url"
    # The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
    # resource URL, e.g., tn, tq, tw, ta, obs, ulb, udb, etc., may normally
    # be found.
    RESOURCE_URL_LEVEL1_JSONPATH: str = (
        "$[?code='{}'].contents[?code='{}'].links[?format='zip'].url"
    )
    # The json path to the language's name.
    RESOURCE_LANG_NAME_JSONPATH: str = "$[?code='{}'].name"
    # The json path to the resource type's name.
    RESOURCE_TYPE_NAME_JSONPATH: str = "$[?code='{}'].contents[?code='{}'].name"
    # The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
    # resource URL, e.g., tn, tq, tw, ta, obs, ulb, udb, etc., may
    # additionally/alternatively be found.
    RESOURCE_URL_LEVEL2_JSONPATH: str = (
        "$[?code='{}'].contents[*].subcontents[?code='{}'].links[?format='zip'].url"
    )
    # The jsonpath location in TRANSLATIONS_JSON_LOCATION file where
    # resource git repo may be found.
    RESOURCE_DOWNLOAD_FORMAT_JSONPATH: str = "$[?code='{}'].contents[?code='{}'].subcontents[?code='{}'].links[?format='Download'].url"
    # BACKEND_CORS_ORIGINS is a JSON-formatted list of origins
    # e.g: '["http://localhost", "http://localhost:4200",
    # "http://localhost:8000"]'
    BACKEND_CORS_ORIGINS: list[AnyHttpUrl] = []
    @validator("BACKEND_CORS_ORIGINS", pre=True)
    def assemble_cors_origins(cls, v: str | list[str]) -> list[str] | str:
        # accept either a comma-separated string or an already-parsed list
        if isinstance(v, str) and not v.startswith("["):
            return [i.strip() for i in v.split(",")]
        elif isinstance(v, (list, str)):
            return v
        raise ValueError(v)
    # Return the file names, excluding suffix, of files that do not
    # contain content but which may be in the same directory or
    # subdirectories of a resource's acquired files.
    MARKDOWN_DOC_FILE_NAMES: list[str] = ["readme", "license"]
    ENGLISH_GIT_REPO_MAP: Mapping[str, str] = {
        "ulb-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_ulb",
        "udb-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_udb",
        "tn-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_tn",
        "tw-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_tw",
        "tq-wa": "https://content.bibletranslationtools.org/WycliffeAssociates/en_tq",
    }
    ENGLISH_RESOURCE_TYPE_MAP: Mapping[str, str] = {
        "ulb-wa": "Unlocked Literal Bible (ULB)",
        "udb-wa": "Unlocked Dynamic Bible (UDB)",
        "tn-wa": "ULB Translation Helps",
        "tq-wa": "ULB Translation Questions",
        "tw-wa": "ULB Translation Words",
    }
    TEMPLATE_PATHS_MAP: Mapping[str, str] = {
        "book_intro": "backend/templates/tn/book_intro_template.md",
        "header_enclosing": "backend/templates/html/header_enclosing.html",
        "footer_enclosing": "backend/templates/html/footer_enclosing.html",
        "cover": "backend/templates/html/cover.html",
        "email-html": "backend/templates/html/email.html",
        "email": "backend/templates/text/email.txt",
    }
    # Return boolean indicating if caching of generated documents should be
    # cached.
    ASSET_CACHING_ENABLED: bool = True
    # Caching window of time in which asset
    # files on disk are considered fresh rather than re-acquiring (in
    # the case of resource asset files) or re-generating them (in the
    # case of the final PDF). In hours.
    ASSET_CACHING_PERIOD: int
    # Get the path to the logo image that will be used on the PDF cover,
    # i.e., first, page.
    LOGO_IMAGE_PATH: str = "icon-tn.png"
    # It doesn't yet make sense to offer the (high level)
    # assembly strategy _and_ the assembly sub-strategy to the end user
    # as a document request parameter so we'll just choose an arbitrary
    # sub-strategy here. This means that we can write code for multiple
    # sub-strategies and choose one to put in play at a time here.
    DEFAULT_ASSEMBLY_SUBSTRATEGY: model.AssemblySubstrategyEnum = (
        model.AssemblySubstrategyEnum.VERSE
    )
    # Return a list of the Markdown section titles that our
    # Python-Markdown remove_section_processor extension should remove.
    MARKDOWN_SECTIONS_TO_REMOVE: list[str] = [
        "Examples from the Bible stories",
        "Links",
    ]
    # Return the from email to use for sending email with generated PDF
    # attachment to document request recipient. Look for the value to
    # use in FROM_EMAIL environment variable, use default if it doesn't
    # exist.
    FROM_EMAIL_ADDRESS: EmailStr
    # The to-email address to use for sending email with generated
    # PDF attachment to document request recipient during testing - in
    # production the to-email address is supplied by the user.
    TO_EMAIL_ADDRESS: EmailStr
    EMAIL_SEND_SUBJECT: str
    # Return boolean representing if the system should execute the
    # action of sending an email when appropriate to do so.
    SEND_EMAIL: bool
    @validator("SEND_EMAIL")
    def send_email(cls, v: bool) -> bool:
        return bool(v)
    SMTP_PASSWORD: str
    SMTP_HOST: str
    SMTP_PORT: int
    # Example fake user agent value required by domain host to allow serving
    # files. Other values could possibly work. This value definitely
    # works.
    USER_AGENT: str = "Mozilla/5.0 (X11; Linux x86_64) AppleWebKit/537.11 (KHTML, like Gecko) Chrome/23.0.1271.64 Safari/537.11"
    # Pydantic uses this inner class convention to configure the
    # Settings class.
    class Config:
        env_file = ".env"
        case_sensitive = True
# Module-level singleton read once at import time.
settings: Settings = Settings()
| StarcoderdataPython |
1659159 | # -*- coding: utf-8 -*-
"""Dynamic inventories of Docker containers, served up fresh just for Ansible."""
import click
import json
import requests
import sys
if sys.version_info.major == 3:
import docker_dynamic_inventory.docker_dynamic_inventory as ddi
else:
import docker_dynamic_inventory as ddi
@click.command()
@click.option('--list', flag_value=True, help="Match all containers. This is the default behaviour.")
@click.option('--host', default=None, help="Only match containers with this name.")
@click.option('--metadata/--no-metadata', default=False, help="Include container metadata.")
@click.option('--pretty/--ugly', default=False, help="Pretty print JSON for output.")
@click.option('--docker_tls', default=True, help="Use TLS for Docker connections.")
@click.option('--docker_host', default='unix:///var/run/docker.sock',
              help="Docker host to connect to.")
def main(list, host, metadata, pretty, docker_tls, docker_host):
    """Dynamic inventories of Docker containers, served up fresh just for Ansible."""
    # NOTE(review): the `list` parameter shadows the builtin; the name is
    # derived from the click `--list` flag, so renaming it would change the
    # CLI mapping -- left as-is deliberately.
    docker_opts = {'base_url': docker_host, 'tls': docker_tls}
    # get container data
    try:
        if host:
            containers = ddi.containers_by_host(host, metadata, docker_opts)
        else:
            containers = ddi.containers(metadata, docker_opts)
    except requests.exceptions.ConnectionError as e:
        raise click.BadParameter('Unable to connect to the Docker daemon. Check status, or use --docker_host.')
    # output
    data = ddi.format_containers(containers, False)
    if pretty:
        print(json.dumps(data, indent=4, sort_keys=True))
    else:
        print(json.dumps(data))
    return 0
if __name__ == "__main__":
    # CLEANUP: removed a redundant `import sys` -- sys is already imported
    # at module level.
    sys.exit(main())
| StarcoderdataPython |
1720722 | <reponame>03b8/TEfy<filename>tefy/__init__.py
from .tefy import OxGaWrap
__version__ = '0.1.3'
| StarcoderdataPython |
3293121 | <gh_stars>0
"""
目录结构:
├── ready_train_img.py # 脚本
├── big # 大图集合
│ └── 20220304-170511.jpeg
├── small # 小图集合
│ ├── 1
│ ├── 2
│ ├── 3
│ ├── 4
│ └── 5
├── suture # 导出集合
│ └── 20220304-170511.jpeg
运行:
python ready_train_img.py --img_path "./big" --simg_path "./small"
test某方法输出:
python ready_train_img.py --test get_left_up_points
"""
import os
import cv2
import random
import argparse
from labelImgsByTemplate import trans2shapes, save_yolo_fromat
def get_left_up_points(big_shape, small_shape, num=1):
    """
    Grid placement: split the big image into a grid of small-image-sized
    cells, then randomly pick up to ``num`` of them.

    Parameters
    ----------
    big_shape : tuple
        shape of the big image, (rows, cols, ...)
    small_shape : tuple
        shape of the small image, (rows, cols, ...)
    num : int
        number of paste positions wanted

    Returns
    -------
    list of (x, y) tuples
        top-left paste positions; fewer than ``num`` when the grid has
        fewer cells, empty when either shape has fewer than 2 dims.
    """
    points = []
    if len(big_shape) < 2 or len(small_shape) < 2:
        return points
    bx, by = big_shape[:2]
    sx, sy = small_shape[:2]
    x_times = bx // sx
    y_times = by // sy
    xs = [i * sx for i in range(x_times)]
    ys = [i * sy for i in range(y_times)]
    for x in xs:
        for y in ys:
            # .shape is (rows, cols) while paste positions are (x, y),
            # so the coordinates are swapped here
            points.append((y, x))
    if len(points) <= num:
        return points
    # BUGFIX: removed a leftover `import ipdb; ipdb.set_trace()` debugger
    # breakpoint that halted every call, and replaced the rejection-sampling
    # while-loop with random.sample, which picks `num` distinct points directly.
    return random.sample(points, num)
def put_img2img(small, big, left_up_x, left_up_y):
    """
    Paste ``small`` into ``big`` at the given top-left corner.

        small + big ==> new
                111     111
          2  +  111 ==> 121
                111     111

    Parameters
    ----------
    small : numpy.ndarray
    big : numpy.ndarray
        modified in place
    left_up_x, left_up_y : int
        column / row of the paste position

    Returns
    -------
    numpy.ndarray
        ``big`` itself (same object) with the region overwritten.
    """
    # CLEANUP: removed dead code that built a `smallB` canvas for a
    # commented-out cv2.addWeighted blend; the function simply overwrites
    # the target region in place.
    big[left_up_y:left_up_y + small.shape[0], left_up_x:left_up_x + small.shape[1]] = small
    return big
def chg_img_name2txt_name(img_name):
    """
    Map an image filename to its label-file (.txt) name.

    BUGFIX: the original used str.replace, which substitutes ".png"/".jpg"/
    ".jpeg" anywhere in the path (e.g. inside a directory name); only the
    trailing extension is swapped now.
    """
    for ext in (".png", ".jpg", ".jpeg"):
        if img_name.endswith(ext):
            return img_name[:-len(ext)] + ".txt"
    return img_name
def getimg_from_dir(dir_path):
    """
    Read every image (.png/.jpg/.jpeg) in a directory with cv2.

    Returns
    -------
    dict
        {filename: image array}; empty when dir_path is not a directory.
    """
    dict_img = {}
    if os.path.isdir(dir_path):
        for ff in os.listdir(dir_path):
            # BUGFIX: match the extension at the end of the name instead of
            # anywhere in it (`".png" in ff` also matched e.g. "x.png.bak").
            if ff.endswith((".png", ".jpg", ".jpeg")):
                img = cv2.imread(os.path.join(dir_path, ff))
                # BUGFIX: cv2.imread returns None on unreadable files; the
                # original stored the None, crashing later .shape access.
                if img is not None:
                    dict_img[ff] = img
    return dict_img
def run(big_path, small_path_father):
    """
    For every big image, pick one random small image from each class
    directory under *small_path_father*, paste them at random grid
    positions, save the composite to ./suture and write its label file.
    """
    labels = {}
    dict_big = getimg_from_dir(big_path)
    max_x = 0
    max_y = 0
    for big_name, big_img in dict_big.items():
        dict_small = {}
        labels[big_name] = {}
        for dir_small in os.listdir(small_path_father):
            if dir_small == ".DS_Store":
                # skip macOS folder metadata junk
                continue
            _dict_small = getimg_from_dir(os.path.join(small_path_father, dir_small))
            list_s_keys = list(_dict_small.keys())
            rand_choise = random.randint(0, len(list_s_keys)-1)
            _small_name = list_s_keys[rand_choise]
            img_choise_you = _dict_small[_small_name]
            # track the largest small-image size so every grid cell can
            # hold any of the chosen images
            max_x = img_choise_you.shape[0] if max_x < img_choise_you.shape[0] else max_x
            max_y = img_choise_you.shape[1] if max_y < img_choise_you.shape[1] else max_y
            dict_small[dir_small] = img_choise_you
        list_small_name = list(dict_small.keys())
        list_left_up_points = get_left_up_points(big_img.shape, (max_x, max_y), num=len(list_small_name))
        put_num = 0
        for left_up_x, left_up_y in list_left_up_points:
            small_name = list_small_name[put_num]
            small_img = dict_small[small_name]
            right_down_x, right_down_y = left_up_x+small_img.shape[0], left_up_y+small_img.shape[1]
            big_img = put_img2img(small_img, big_img, left_up_x, left_up_y)
            # print("nani????", left_up_x, left_up_y, right_down_x, right_down_y, "\t\t\t", small_img.shape)
            # cv2.imshow("", big_img)
            # cv2.waitKey(0)
            # cv2.destroyAllWindows()
            labels[big_name][small_name] = [(left_up_x, left_up_y), (right_down_x, right_down_y)]
            put_num += 1
        # save chged big img && positions
        cv2.imwrite(os.path.join("suture", big_name), big_img)
        big_img_path = os.path.join(big_path, big_name)
        save_label(big_img_path, labels[big_name])
        print(labels[big_name])
def save_label(img_path, label_info):
    """
    Write a YOLO-format label file for *img_path* from the pasted-region
    rectangles in *label_info* ({class_name: [(x1, y1), (x2, y2)]}).
    """
    all_label = ["1", "2", "3", "4", "5"]
    filename = chg_img_name2txt_name(img_path)
    # TODO BUG POINT
    # NOTE(review): the label path is derived by substituting "big" ->
    # "suture" anywhere in the path, which breaks if "big" occurs in any
    # other path component -- confirm the directory layout before reuse.
    filename = filename.replace("big", "suture")
    dict_rst = {}
    for label_name, v in label_info.items():
        (left_up_x, left_up_y), (right_down_x, right_down_y) = v
        dict_rst[label_name] = [
            # [(top-left), (bottom-left), (top-right), (bottom-right)]
            {"rectangle": [(left_up_x, left_up_y), (left_up_x, right_down_y), (right_down_x, left_up_y), (right_down_x, right_down_y)]}
        ]
    shapes, all_label = trans2shapes(dict_rst, all_label)
    save_yolo_fromat(filename, shapes, img_path, all_label)
if __name__ == "__main__":
    parser = argparse.ArgumentParser()
    parser.add_argument("--test", type=str, help="test func name")
    parser.add_argument("--img_path", type=str, help="big img path")
    parser.add_argument("--simg_path", type=str, help="small img path")
    args = parser.parse_args()
    PATH = args.img_path
    sPATH = args.simg_path
    dict_func = {
        # BUGFIX: the argument key must match run()'s parameter name
        # (small_path_father); "small_path" raised TypeError via **kwargs.
        "run": {"func": run, "args": {"big_path": "./", "small_path_father": ""}},
        "get_left_up_points": {
            "func": get_left_up_points,
            "args": {"big_shape": (100, 100), "small_shape": (10, 10), "num": 1}
        },
    }
    dict_func = dict_func.get(args.test)
    if dict_func:
        ret = dict_func["func"](**dict_func["args"])
        print(f"ret:\n {ret}")
    # BUGFIX: the original tested PATH twice and never validated sPATH,
    # so a missing --simg_path crashed inside run().
    if PATH and os.path.exists(PATH) and sPATH and os.path.exists(sPATH):
        run(PATH, sPATH)
    else:
        print(f"输入合法路径")
31002 | <gh_stars>0
from io import BytesIO
import requests
from celery import Celery
from api import send_message, send_photo
from imdb2_api import get_movie_by_imdb_id
from imdb_api import IMDBAPIClient
# celery -A tasks worker --log-level INFO
# Celery application: both the result backend and the broker are the local
# Redis instance, database 0.
app = Celery(
    "tasks", backend="redis://localhost:6379/0", broker="redis://localhost:6379/0"
)
@app.task
def hello():
    """Smoke-test task: returns a constant greeting."""
    return "Hello"
@app.task
def reply(token: str, chat_id: int, text: str):
    """Send a plain Telegram message to *chat_id* and return the API response."""
    return send_message(token, chat_id, text)
@app.task
def search_movie(token: str, chat_id: int, rapidapi_key: str, movie_title: str):
    """Search IMDB by title and send the formatted result list to the chat."""
    c = IMDBAPIClient(rapidapi_key)
    results = c.search_movies_by_title(movie_title)
    # one "- Title (year) [imdb_id]" line per hit
    result_message = "Movies found for search:\n"
    result_message += "".join(
        [f"- {result.title} ({result.year}) [{result.imdb_id}]\n" for result in results]
    )
    send_message(token, chat_id, result_message)
# Template for the movie-details message; filled in by show_movie/show_movie2.
DETAILS_MESSAGE = """
{title}
{description}
- "{tagline}"
- Year: {year}
- Rating: {rating} ({vote_count})
"""
def show_movie(token: str, chat_id: int, rapidapi_key: str, imdb_id: str):
    """
    Fetch a movie's poster and details from IMDB (via RapidAPI) and send
    both to the Telegram chat.  The poster transits through ./poster.jpg.
    """
    c = IMDBAPIClient(rapidapi_key)
    details = c.get_movie_details(imdb_id)
    image = c.get_movie_images(imdb_id)
    i = image.poster_image
    i.save("poster.jpg", "JPEG")
    # Send photo
    # BUGFIX: the file handle was opened and never closed; `with` closes it
    # once the upload has finished.
    with open("poster.jpg", "rb") as poster:
        send_photo(token, chat_id, poster)
    # Send details
    send_message(
        token,
        chat_id,
        DETAILS_MESSAGE.format(
            title=details.title,
            description=details.description,
            tagline=details.tagline,
            year=details.year,
            rating=details.imdb_rating,
            vote_count=details.vote_count,
        ),
    )
def show_movie2(token: str, chat_id: int, imdb_api_key: str, imdb_id: str):
    """
    Fetch movie details and banner image via the imdb-api service and send
    both to the Telegram chat.

    NOTE(review): tagline ("No") and vote_count (100) are hardcoded
    placeholders -- presumably the imdb-api payload lacks those fields;
    confirm before relying on them.
    """
    # CLEANUP: removed dead commented-out code left over from the
    # RapidAPI-based show_movie implementation.
    movie = get_movie_by_imdb_id(imdb_api_key, imdb_id)
    details = movie["results"]
    banner = details["banner"]
    banner_response = requests.get(banner)
    banner_response.raise_for_status()
    # Send photo
    send_photo(token, chat_id, banner_response.content)
    # Send details
    send_message(
        token,
        chat_id,
        DETAILS_MESSAGE.format(
            title=details["title"],
            description=details["description"],
            tagline="No",
            year=details["year"],
            rating=details["rating"],
            vote_count=100,
        ),
    )
| StarcoderdataPython |
4838413 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
# Preparing to run it:
# brew install pipenv # or other installation method
# pipenv install
# generate a personal access token at https://github.com/settings/tokens
# Running it:
# GITHUB_TOKEN=<PASSWORD> pipenv run python add-metadata.py < template.md > README.md
import fileinput
import os
import re
import sys
from datetime import datetime, timedelta
from github import Github
g = Github(os.environ['GITHUB_TOKEN'])
gh_repo_regex = re.compile('\[([\w\._-]+\/[\w\._-]+)\]\(@ghRepo\)')
def github_table_row(repo):
    """Render one markdown table row for *repo*; names with >= 500 stars
    are shown in bold."""
    display_name = f"**{repo.name}**" if repo.stargazers_count >= 500 else repo.name
    link = f"[{display_name}]({repo.html_url})"
    stars_shield = ""
    commit_shield = ""
    badges = f"{stars_shield} {commit_shield}"
    return " | ".join([link, repo.description, badges])
def warn(message):
    """Print a warning to stderr, keeping stdout clean for the generated markdown."""
    # BUGFIX: the parameter was named `str`, shadowing the builtin.
    print(f"Warn: {message}", file=sys.stderr)
def retrieve_repo(name):
    """Fetch ``owner/repo`` from the GitHub API, print a progress dot on
    stderr and warn if the repo looks stale."""
    repo = g.get_repo(name)
    # progress indicator goes to stderr so stdout stays valid markdown
    print('.', file=sys.stderr, end='', flush=True)
    check_freshness(repo)
    return repo
def check_freshness(repo):
    """Warn when *repo* is archived or has had no push in ~6 months (180 days)."""
    if repo.archived:
        warn(f"Repo {repo.full_name} is archived")
    elif repo.pushed_at < datetime.utcnow() - timedelta(days=180):
        warn(f"Repo {repo.full_name} has not been pushed to since {repo.pushed_at}")
def parse(line):
    """Expand a ``[owner/repo](@ghRepo)`` template line into a markdown
    table row; any other line is returned with its trailing whitespace
    stripped."""
    m = gh_repo_regex.search(line)
    if m:
        [repo_name] = m.groups()
        return github_table_row(retrieve_repo(repo_name))
    else:
        return line.rstrip()
def run():
    """Read the template from stdin/argv via fileinput and print the README."""
    print('<!--- This file is automatically generated. Do not edit directly. -->')
    for line in fileinput.input():
        print(parse(line))
run()
| StarcoderdataPython |
129447 | <reponame>Opendigitalradio/ODR-StaticPrecorrection
#!/usr/bin/env python
import numpy as np
from scipy import signal, optimize
import sys
import matplotlib.pyplot as plt
import dab_util as du
def gen_omega(length):
    """
    Return the angular-frequency vector for an FFT of the given (even)
    length: 2*pi*i/length for the first half, wrapped to the negative
    frequencies 2*pi*(i-length)/length for the second half (standard FFT
    bin ordering).

    Raises
    ------
    ValueError
        if ``length`` is odd.
    """
    if (length % 2) == 1:
        raise ValueError("Needs an even length array.")
    # BUGFIX: `dtype=np.float` raises AttributeError on NumPy >= 1.24
    # (the alias was removed).  The per-bin loop is replaced by fftfreq,
    # which produces exactly the same [0..N/2-1, -N/2..-1]/N layout.
    return 2.0 * np.pi * np.fft.fftfreq(length)
def subsample_align(sig, ref_sig):
    """Do subsample alignment for sig relative to the reference signal
    ref_sig. The delay between the two must be less than one sample.

    Works by searching, in the frequency domain, for the linear phase
    ramp (i.e. the fractional delay tau) that maximises the correlation
    with ref_sig, then applying that ramp.

    Returns the aligned signal as complex64, or an empty complex64 array
    if the optimizer did not converge."""
    n = len(sig)
    if (n % 2) == 1:
        raise ValueError("Needs an even length signal.")
    halflen = int(n/2)
    fft_sig = np.fft.fft(sig)
    omega = gen_omega(n)
    def correlate_for_delay(tau):
        # A subsample offset between two signals corresponds, in the frequency
        # domain, to a linearly increasing phase shift, whose slope
        # corresponds to the delay.
        #
        # Here, we build this phase shift in rotate_vec, and multiply it with
        # our signal.
        rotate_vec = np.exp(1j * tau * omega)
        # zero-frequency is rotate_vec[0], so rotate_vec[N/2] is the
        # bin corresponding to the [-1, 1, -1, 1, ...] time signal, which
        # is both the maximum positive and negative frequency.
        # I don't remember why we handle it differently.
        rotate_vec[halflen] = np.cos(np.pi * tau)
        corr_sig = np.fft.ifft(rotate_vec * fft_sig)
        # TODO why do we only look at the real part? Because it's faster than
        # a complex cross-correlation? Clarify!
        return -np.sum(np.real(corr_sig) * np.real(ref_sig.real))
    optim_result = optimize.minimize_scalar(correlate_for_delay, bounds=(-1,1), method='bounded', options={'disp': True})
    if optim_result.success:
        #print("x:")
        #print(optim_result.x)
        best_tau = optim_result.x
        #print("Found subsample delay = {}".format(best_tau))
        # Prepare rotate_vec = fft_sig with rotated phase
        rotate_vec = np.exp(1j * best_tau * omega)
        rotate_vec[halflen] = np.cos(np.pi * best_tau)
        return np.fft.ifft(rotate_vec * fft_sig).astype(np.complex64)
    else:
        #print("Could not optimize: " + optim_result.message)
        return np.zeros(0, dtype=np.complex64)
if __name__ == "__main__":
    # Demo: delay the reference signal by a sub-sample amount (delay/n_up
    # of one sample) via 32x upsampling, then realign it.
    # NOTE(review): hardcoded absolute path -- adjust for your machine.
    phaseref_filename = "/home/andreas/dab/ODR-StaticPrecorrection/data/samples/sample_orig_0.iq"
    phase_ref = np.fromfile(phaseref_filename, np.complex64)
    delay = 15
    n_up = 32
    print("Generate signal with delay {}/{} = {}".format(delay, n_up, float(delay)/n_up))
    phase_ref_up = signal.resample(phase_ref, phase_ref.shape[0] * n_up)
    phase_ref_up_late = np.append(np.zeros(delay, dtype=np.complex64), phase_ref_up[:-delay])
    phase_ref_late = signal.resample(phase_ref_up_late, phase_ref.shape[0])
    phase_ref_realigned = subsample_align(phase_ref_late, phase_ref)
| StarcoderdataPython |
21368 | <reponame>dev1farms2face/f2f
from django.shortcuts import render
# Create your views here.
def subscribe(request):
    """Render the subscription page with an (empty) context payload."""
    return render(request, "subscribe.html",
                  {'data': {}})
| StarcoderdataPython |
189542 | <reponame>Vinicius-Tanigawa/Undergraduate-Research-Project
#Runway.py
#
# Created: Mar, 2014, SUAVE Team
# Modified: Jan, 2016, <NAME>
# ----------------------------------------------------------------------
# Imports
# ----------------------------------------------------------------------
from SUAVE.Core import Data
# ----------------------------------------------------------------------
# Runway Data Class
# ----------------------------------------------------------------------
class Runway(Data):
    """ SUAVE.Attributes.Airport.Runway
        Data object used to hold runway data
    """
    def __defaults__(self):
        """Set the default attributes of the runway data object."""
        self.tag = 'Runway'
| StarcoderdataPython |
# Advent of Code 2021, day 2: pilot the submarine.
# Part 1 ("1 star") treats down/up as direct depth changes (simple_depth);
# part 2 ("2 star") treats them as aim changes, with "forward" moving
# horizontally and descending by aim * distance.
with open('input', 'r') as file:
    aim = 0
    horizontal = 0
    depth = 0
    simple_depth=0
    for line in file:
        # each line is "<command> <amount>"
        [com, n] = line.split(' ')
        n = int(n)
        if com == 'forward':
            horizontal += n
            depth += aim * n
        elif com == 'down':
            aim += n
            simple_depth += n
        elif com == 'up':
            aim -= n
            simple_depth -= n
print("1 star:", horizontal * simple_depth)
print("2 star:", horizontal * depth)
| StarcoderdataPython |
1602140 | <reponame>Nuullll/llvm-test-suite<gh_stars>10-100
"""Test module to collect compile time metrics. This just finds and summarizes
the *.time files generated by the build."""
from litsupport.modules import timeit
import os
def _getCompileTime(context):
    """Collect compile and link user-time totals from *.time files.

    Walks the test's directory summing user time from ``*.o.time`` (compile)
    and ``*.link.time`` (link) files produced by the build.
    """
    # In SingleSource mode several benchmarks share a directory; restrict the
    # scan to time files whose name starts with this test's executable name.
    prefix = ""
    if context.config.single_source:
        prefix = os.path.basename(context.executable) + "."
    compile_seconds = 0.0
    link_seconds = 0.0
    test_dir = os.path.dirname(context.test.getFilePath())
    for root, _dirs, filenames in os.walk(test_dir):
        for name in filenames:
            if not name.startswith(prefix):
                continue
            fullpath = os.path.join(root, name)
            if name.endswith('.o.time'):
                compile_seconds += timeit.getUserTime(fullpath)
            elif name.endswith('.link.time'):
                link_seconds += timeit.getUserTime(fullpath)
    return {
        'compile_time': compile_seconds,
        'link_time': link_seconds,
    }
def mutatePlan(context, plan):
    """Register the compile-time metric collector on the test plan."""
    plan.metric_collectors.append(_getCompileTime)
| StarcoderdataPython |
3298742 | <reponame>ramsuthar305/MIT-research-and-consultancy
import hashlib
from app import *
from flask import session
import os
#from pyresparser import ResumeParser
from bson import ObjectId
class Users:
    """MongoDB/session-backed account handling for the four user roles
    (Research Scholar, Research Supervisor, Research Co-Supervisor,
    Special User)."""

    def __init__(self):
        # Handle to the application's Mongo database (imported from `app`).
        self.mongo = mongo.db

    def check_user_exists(self, username):
        """Return True if a user document matches *username* as username or phone."""
        result = self.mongo.users.find_one({"$or": [{"username": username}, {"phone": username}]})
        if result:
            return True
        else:
            return False

    def temp_user(self, user):
        """Insert *user* into the temporary-user collection; returns the insert result."""
        try:
            result = mongo.db.tempuser.insert_one(user)
            return result
        except Exception as error:
            print(error)

    def save_user(self, user, user_type):
        """Persist a new user plus an authentication record and start a session.

        Returns True on success, False on failure, or an error string when the
        user already exists.
        NOTE(review): assumes user["password"] was already hashed by the caller.
        """
        try:
            if user_type == "Research Scholar":
                result = mongo.db.researcher.insert_one(user)
            else:
                # NOTE(review): 'USERTYPE' looks like a placeholder collection
                # name rather than a real collection — confirm.
                result = mongo.db.USERTYPE.insert_one(user)
            if result:
                # FIX: this payload previously contained the redacted
                # placeholder `<PASSWORD>["password"]` (a syntax error);
                # store the user's password hash.
                result = mongo.db.authentication.insert_one({
                    "uid": user["_id"],
                    "email": user["email"],
                    "password": user["password"],
                    "user_type": user["user_type"],
                    "status": user["status"]
                })
                if result:
                    print(result)
                    session["email"] = user["email"]
                    session["name"] = user["first_name"] + " " + user["last_name"]
                    session["logged_in"] = True
                    session["user_type"] = user["user_type"]
                    session['id'] = str(user["email"])
                    session['department'] = str(user["department"])
                    session['batch'] = str(user["batch"])
                    return True
                else:
                    print("\nSomething went wrong: ", result)
                    return False
            else:
                print("\nSomething went wrong: ", result)
                return False
        except Exception as error:
            print(error)
            # Mongo duplicate-key error => the account already exists.
            if error.code == 11000:
                return "User already exists"

    def login_user(self, username, password):
        """Validate credentials and populate the session for the matched role.

        Returns True on success, an explanatory string when no active
        credential matches, or the raised exception on unexpected failure.
        """
        try:
            h = hashlib.md5(password.encode())
            # FIX: previously `password = <PASSWORD>()` (redacted placeholder,
            # a syntax error); hash the supplied password the same way
            # check_old_pass does before comparing.
            password = h.hexdigest()
            login_result = self.mongo.authentication.find_one(
                {"$and": [{"$or": [{"uid": username}, {"email": username}]},
                          {"password": password}, {"status": "1"}]})
            print(login_result)
            if login_result is not None:
                if login_result["user_type"] == "Research Scholar":
                    user_info = self.mongo.researcher.find_one({"_id": login_result["uid"]})
                    session["email"] = user_info["email"]
                    session["name"] = user_info["first_name"] + " " + user_info["last_name"]
                    session["logged_in"] = True
                    session["user_type"] = user_info['user_type']
                    session['id'] = str(user_info["_id"])
                    session['department'] = str(user_info["department"])
                    session['batch'] = str(user_info["batch"])
                    return True
                elif login_result["user_type"] == "Research Supervisor":
                    user_info = self.mongo.supervisor.find_one({"_id": login_result["uid"]})
                    session["email"] = user_info["email"]
                    session["name"] = user_info["first_name"] + " " + user_info["last_name"]
                    session["logged_in"] = True
                    session["user_type"] = user_info['user_type']
                    session['id'] = str(user_info["_id"])
                    session['department'] = str(user_info["department"])
                    return True
                elif login_result["user_type"] == "Research Co-Supervisor":
                    user_info = self.mongo.cosupervisor.find_one({"_id": login_result["uid"]})
                    session["email"] = user_info["email"]
                    session["name"] = user_info["first_name"] + " " + user_info["last_name"]
                    session["logged_in"] = True
                    session["user_type"] = user_info['user_type']
                    session['id'] = str(user_info["_id"])
                    session['department'] = str(user_info["department"])
                    return True
                elif login_result["user_type"] == "Special User":
                    user_info = self.mongo.specialuser.find_one({"_id": login_result["uid"]})
                    session["email"] = user_info["email"]
                    session["name"] = user_info["title"] + " " + user_info["first_name"] + " " + user_info["last_name"]
                    session["logged_in"] = True
                    session["user_type"] = user_info['user_type']
                    session['id'] = str(user_info["_id"])
                    return True
            else:
                # NOTE(review): with the original flat indentation this `else`
                # could also have belonged to the inner user_type chain; it is
                # attached to the None check here (no matching credentials).
                return "User does not exist"
        except Exception as error:
            return error

    def get_user_profile(self):
        """Fetch the profile document for the user in the current session.

        NOTE(review): reads session["username"], which no method in this class
        sets (login stores "email"/"id") — confirm the intended key.
        """
        try:
            user_profile = self.mongo.users.find_one({"username": session["username"]})
            return (user_profile)
        except Exception as error:
            print(error)

    def check_old_pass(self, val):
        """Return True if *val* (plain text) matches the session user's stored
        password hash in the role-specific collection."""
        try:
            h = hashlib.md5(val.encode())
            val = h.hexdigest()
            if session['user_type'] == "Research Scholar":
                result = mongo.db.researcher.find({"$and": [{"email": session['email']}, {"password": val}]})
            if session['user_type'] == "Research Supervisor":
                result = mongo.db.supervisor.find({"$and": [{"email": session['email']}, {"password": val}]})
            if session['user_type'] == "Research Co-Supervisor":
                result = mongo.db.cosupervisor.find({"$and": [{"email": session['email']}, {"password": val}]})
            if session['user_type'] == "Special User":
                result = mongo.db.specialuser.find({"$and": [{"email": session['email']}, {"password": val}]})
            # NOTE(review): Cursor.count() is removed in modern pymongo;
            # kept for compatibility with the pinned driver version.
            if result.count() > 0:
                return True
            else:
                return False
        except Exception as error:
            print(error)

    def update_pass(self, val):
        """Hash *val* and write it to the session user's role collection and
        the authentication collection."""
        try:
            h = hashlib.md5(val.encode())
            val = h.hexdigest()
            if session['user_type'] == "Research Scholar":
                result = mongo.db.researcher.update({"email": session['email']}, {"$set": {"password": val}})
            if session['user_type'] == "Research Supervisor":
                result = mongo.db.supervisor.update({"email": session['email']}, {"$set": {"password": val}})
            if session['user_type'] == "Research Co-Supervisor":
                result = mongo.db.cosupervisor.update({"email": session['email']}, {"$set": {"password": val}})
            if session['user_type'] == "Special User":
                result = mongo.db.specialuser.update({"email": session['email']}, {"$set": {"password": val}})
            # NOTE(review): filters on {"uid": email} whereas set_pass filters
            # on {"email": ...}; confirm which field authentication keys on.
            result = mongo.db.authentication.update({"uid": session['email']}, {"$set": {"password": val}})
        except Exception as error:
            print(error)

    def check_email(self, email):
        """Return the user_type for *email* if registered, else False.

        NOTE(review): looks up {"uid": email} — see the note in update_pass.
        """
        try:
            result = mongo.db.authentication.find({"uid": email})
            print(result)
            print(result[0])
            if result.count() > 0:
                result = result[0]
                return result['user_type']
            else:
                return False
        except Exception as error:
            print(error)

    def set_pass(self, usertype, email, password):
        """Hash *password* and store it for *email* in the authentication
        collection and the role-specific collection for *usertype*."""
        try:
            h = hashlib.md5(password.encode())
            password = h.hexdigest()
            result = mongo.db.authentication.update({"email": email}, {"$set": {"password": password}})
            if usertype == "Research Scholar":
                result = mongo.db.researcher.update({"email": email}, {"$set": {"password": password}})
            if usertype == "Research Supervisor":
                result = mongo.db.supervisor.update({"email": email}, {"$set": {"password": password}})
            if usertype == "Research Co-Supervisor":
                result = mongo.db.cosupervisor.update({"email": email}, {"$set": {"password": password}})
            if usertype == "Special User":
                result = mongo.db.specialuser.update({"email": email}, {"$set": {"password": password}})
        except Exception as error:
            print(error)

    # def upload_file(self, file_data, file, file_type):
    #     try:
    #         print('called')
    #         if not os.path.exists(os.path.join(app.config['UPLOAD_FOLDER'] + file_data["directory"])):
    #             os.makedirs(os.path.join(app.config['UPLOAD_FOLDER'] + file_data["directory"]))
    #         file_path = os.path.join(app.config['UPLOAD_FOLDER'] + file_data["directory"])
    #         file.save(file_path + file_data["filename"])
    #         file_result = os.path.exists(file_path + file_data["filename"])
    #         print(file_result)
    #         if file_result:
    #             file_data["file_path"] = file_path.split("static/")[1]
    #             if file_type=="pic":
    #                 if os.path.exists(file_data["file_path"]):
    #                     os.remove(file_data["file_path"])
    #                 result = self.mongo.users.update_one({"_id": session["id"]}, {"$set": {"profile_picture":file_data["file_path"] + file_data["filename"]}})
    #             if file_type=="resume":
    #                 if os.path.exists(file_data["file_path"]):
    #                     os.remove(file_data["file_path"])
    #                 print(file_data['file_path'])
    #                 data = ResumeParser(os.path.join(app.config['UPLOAD_FOLDER'] + file_data["directory"]+file_data["filename"])).get_extracted_data()
    #                 result = self.mongo.users.update_one({"_id": session["id"]}, {"$set": {"resume":file_data["file_path"] + file_data["filename"],"skills":data["skills"]}})
    #         return True
    #     except Exception as error:
    #         print(error)
    #         return True
class Extract_Data:
    """Read-only queries over the various role and resource collections.

    NOTE(review): most methods use `if result:` on a pymongo find() cursor;
    cursors are typically always truthy, so the False branches are likely
    never taken — confirm against the pinned driver version.
    """
    def __init__(self):
        self.mongo =mongo.db
    def get_active_batch(self):
        """Return the batch_name of the batch whose status is "1"."""
        try:
            result=mongo.db.batch.find({"status":"1"})
            # IndexError here (no active batch) is caught below.
            result=result[0]['batch_name']
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def get_batches(self):
        """Return a cursor over non-expired batches."""
        try:
            result=mongo.db.batch.find({"expire":"0"})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def get_researcher(self):
        """Return the current session user's document from their role collection."""
        try:
            if session["user_type"]=="Research Scholar":
                result=mongo.db.researcher.find({"_id":session["id"]})
            elif session["user_type"]=="Research Supervisor":
                result=mongo.db.supervisor.find({"_id":session["id"]})
            elif session["user_type"]=="Research Co-Supervisor":
                result=mongo.db.cosupervisor.find({"_id":session["id"]})
            elif session["user_type"]=="Special User":
                result=mongo.db.specialuser.find({"_id":session["id"]})
            result=result[0]
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def get_supervisors(self):
        """Return a cursor over all supervisors."""
        try:
            result=mongo.db.supervisor.find()
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def get_cosupervisors(self):
        """Return a cursor over all co-supervisors."""
        try:
            result=mongo.db.cosupervisor.find()
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def get_specialuser(self):
        """Return a cursor over all special users."""
        try:
            result=mongo.db.specialuser.find()
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def get_users_by_id(self, email, usertype):
        """Return a cursor over documents matching *email* in the collection
        that corresponds to *usertype*."""
        try:
            if usertype=="Research Scholar":
                result = mongo.db.researcher.find({"email":email})
                return result
            if usertype=="Research Supervisor":
                result = mongo.db.supervisor.find({"email":email})
                return result
            if usertype=="Research Co-Supervisor":
                result = mongo.db.cosupervisor.find({"email":email})
                return result
            if usertype=="Special User":
                result = mongo.db.specialuser.find({"email":email})
                return result
        except Exception as error:
            print(error)
    def get_resource(self):
        """Return a cursor over all e-resources."""
        try:
            result=mongo.db.resource.find()
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "Something went wrong"
    def search(self, search_text):
        """Full-text search ($text index) over the resource collection."""
        try:
            result=mongo.db.resource.find({"$text": {"$search": search_text}})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "Something went wrong"
class Submissions:
    """CRUD helpers for question/answer submissions plus PDF upload handling."""

    def __init__(self):
        self.mongo = mongo.db

    def add_submission(self, data):
        """Insert a new submission document; returns the insert result or False."""
        try:
            result = mongo.db.submissions.insert_one(data)
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def get_questions_by_author(self, data):
        """Return a cursor over questions authored by *data* (an author id)."""
        try:
            result = mongo.db.submissions.find({"author": data})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def get_all_questions(self):
        """Return a cursor over every submission."""
        try:
            result = mongo.db.submissions.find()
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def get_question_by_id(self, check):
        """Return the single question document whose qid equals *check*."""
        try:
            result = mongo.db.submissions.find({"qid": check})
            # IndexError (no match) falls through to the except below.
            result = result[0]
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def get_questions_answered_by_user(self):
        """Return questions whose solution was written by the session user."""
        try:
            result = mongo.db.submissions.find({"solution.email": session['email']})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def get_questions_answered(self):
        """Return questions with at least one answer (string-compared)."""
        try:
            result = mongo.db.submissions.find({"answers": {"$gt": "0"}})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def update_subs(self, check, sol, up):
        """Store solution *sol* and answer-count *up* on question *check*."""
        try:
            result = mongo.db.submissions.update_one({"qid": check}, {"$set": {"solution": sol}})
            result1 = mongo.db.submissions.update_one({"qid": check}, {"$set": {"answers": up}})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def update_eval(self, check, sol):
        """Replace the solution (e.g. after evaluation) on question *check*."""
        try:
            result = mongo.db.submissions.update_one({"qid": check}, {"$set": {"solution": sol}})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def delete_question(self, check):
        """Remove the question whose qid equals *check*."""
        try:
            result = mongo.db.submissions.remove({"qid": check})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"

    def upload_file(self, file_data, file, file_type, title):
        """Save an uploaded file under the configured upload folder and record
        its path on the submission matching *title* and the session user.

        Returns True in all cases (errors are only printed).
        """
        try:
            print('called')
            if not os.path.exists(os.path.join(app.config['UPLOAD_FOLDER'] + file_data["directory"])):
                os.makedirs(os.path.join(app.config['UPLOAD_FOLDER'] + file_data["directory"]))
            file_path = os.path.join(app.config['UPLOAD_FOLDER'] + file_data["directory"])
            # FIX: the file was saved to the bare filename (current working
            # directory) while the existence check below looks inside
            # file_path, so the Mongo update never ran; save into the upload
            # folder as the commented-out original line intended.
            file.save(file_path + file_data["filename"])
            file_result = os.path.exists(file_path + file_data["filename"])
            print(file_result)
            if file_result:
                file_data["file_path"] = file_path.split("static/")[1]
                if file_type == "pic":
                    if os.path.exists(file_data["file_path"]):
                        os.remove(file_data["file_path"])
                    # FIX: the $set payload was a set literal containing dicts
                    # ({{...},{...}}), which raises TypeError (dicts are
                    # unhashable) and is not valid Mongo update syntax; use a
                    # single field->value mapping.
                    result = self.mongo.submissions.update_one(
                        {"$and": [{"title": title}, {"author": session['id']}]},
                        {"$set": {
                            "pdf": file_data["file_path"] + file_data["filename"],
                            "pdfname": file_data["filename"]
                        }})
                if file_type == "pdf":
                    if os.path.exists(file_data["file_path"]):
                        os.remove(file_data["file_path"])
                    print(file_data['file_path'])
                    # FIX: same malformed $set payload as above.
                    result = self.mongo.submissions.update_one(
                        {"$and": [{"title": title}, {"author": session['id']}]},
                        {"$set": {"pdf": file_data["file_path"] + file_data["filename"]}})
            return True
        except Exception as error:
            print(error)
            return True
class Student_Resources:
    """CRUD helpers for student-submitted resources (`mainresource` collection)."""
    def __init__(self):
        self.mongo =mongo.db
    def add_student_resource(self, data):
        """Insert a new student resource; returns the insert result or False."""
        try:
            result=mongo.db.mainresource.insert_one(data)
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def fetch_resources_by_guide(self):
        """Return resources supervised by the session user (matched by name)."""
        try:
            result=mongo.db.mainresource.find({"supervisor":session['name']})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def fetch_resource(self):
        """Return a cursor over all student resources."""
        try:
            result=mongo.db.mainresource.find()
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
    def update_resource_by_id(self, rid, temp):
        """Set the status of resource *rid* to *temp*."""
        try:
            result=mongo.db.mainresource.update_one({"rid":rid},{"$set":{"status":temp}})
            if result:
                return result
            else:
                return False
        except Exception as error:
            print(error)
            return "something went wrong"
class Collaborations:
    """Supervisor/researcher collaboration records (`collaborations` collection)."""
    def __init__(self):
        self.mongo =mongo.db
    def add_col(self, data):
        """Insert a new collaboration document."""
        try:
            result = mongo.db.collaborations.insert_one(data)
            return result
        except Exception as error:
            return "Something went wrong"
    def fetch_col_supervisor(self):
        """Return collaborations where the session user is the supervisor."""
        try:
            result = mongo.db.collaborations.find({"supervisor_email":session['email']})
            if result:
                return result
            else:
                return False
        except Exception as error:
            return "Something went wrong"
    def fetch_col_researcher(self):
        """Return collaborations where the session user is the student."""
        try:
            result = mongo.db.collaborations.find({"student_email":session['email']})
            if result:
                return result
            else:
                return False
        except Exception as error:
            return "Something went wrong"
    def delete_col(self, cid):
        """Remove the collaboration whose cid equals *cid*."""
        try:
            result = mongo.db.collaborations.remove({'cid':cid})
            return result
        except Exception as error:
            return "Something went wrong"
class Eresources:
    """Read access to the `resource` collection of e-resources."""

    def __init__(self):
        self.mongo = mongo.db

    def get_data(self):
        """Return every e-resource document as a list; False when the
        collection is empty, an error string on failure."""
        try:
            documents = list(mongo.db.resource.find())
            return documents if documents else False
        except Exception as error:
            print(error)
            return "something went wrong"
class Resource_model:
    """Full-text search over the `resource` collection."""

    def __init__(self):
        self.mongo = mongo.db

    def get_search_data(self, text):
        """Return documents matching *text* via a MongoDB $text search;
        an empty list on any error."""
        try:
            return list(self.mongo.resource.find({"$text": {"$search": text}}))
        except Exception as error:
            print('In exception:', error)
            return []
# NOTE(review): dead code — an older copy of Resource_model kept in a
# throwaway module-level string literal; it has no runtime effect and could
# be deleted.
'''
class Resource_model:
    def get_search_data(self, text):
        try:
            posts = list(self.mongo.resource.find({"$text": {"$search": text}}))
            return posts
        except Exception as error:
            print('In exception:', error)
            return []
'''
| StarcoderdataPython |
55559 | <gh_stars>1-10
import time
from dronekit import connect, TimeoutError
# Connect to the local SITL/autopilot MAVLink endpoint over UDP and block
# until vehicle parameters are ready (60 s connection timeout).
vehicle = connect('127.0.0.1:14551', wait_ready=True, timeout=60)
# vehicle = connect('tcp:127.0.0.1:5762', wait_ready=True, timeout=60)
try:
    vehicle.wait_for_mode("GUIDED")
    vehicle.wait_for_armable()
    vehicle.arm()
    time.sleep(1)
    # Climb to 100 m; wait_simple_takeoff raises TimeoutError after 60 s.
    vehicle.wait_simple_takeoff(100, timeout=60)
    # vehicle.wait_simple_takeoff(20,0.5,15)
except TimeoutError as takeoffError:
    print("Takeoff is timeout!!!")
    # Failsafe code goes here (translated from the original Japanese comment).
| StarcoderdataPython |
1607061 | import unittest # Targeting Python 3
import footoe.helpers as h
class TestHelpers(unittest.TestCase):
    """Unit tests for footoe.helpers: footnote extraction, validation,
    renumbering and formatting."""
    def test_sanity(self):
        self.assertEqual(sum([7, 7, 7, 7, 7, 7]), 42, "Should be 42")
    def test_get_prefootnotes(self):
        # Pre-footnotes are inline references like [^1].
        sample_text = "Here is one [^1] and another [^another]"
        expected = ["1", "another"]
        actual = h.getAllPreFootnotes(sample_text)
        error_message = f"Should be a list {expected}"
        self.assertEqual(expected, actual, error_message)
    def test_get_prefootnotes_without_postfootnotes(self):
        # Definitions ([^1]: ...) must not be picked up as references.
        sample_text = "Here is one [^1] and two [^2] \n\n [^1]: hello"
        expected = ["1", "2"]
        actual = h.getAllPreFootnotes(sample_text)
        error_message = f"Should be a list {expected}"
        self.assertEqual(expected, actual, error_message)
    def test_get_postfootnotes_without_prefootnotes(self):
        # Post-footnotes are the trailing definitions like [^1]: hello.
        sample_text = "Here is one [^1] and two [^2] \n\n [^1]: hello"
        expected = ["1"]
        actual = h.getAllPostFootnotes(sample_text)
        error_message = f"Should be the following list: {expected} but got: {actual}"
        self.assertEqual(expected, actual, error_message)
    def test_get_multiple_postfootnotes(self):
        sample_text = "Hola mundo \n\n [^first]: hey there \n\n [^2]: hi"
        expected = ["first", "2"]
        actual = h.getAllPostFootnotes(sample_text)
        error_message = f"Should be the following list: {expected} but got: {actual}"
        self.assertEqual(expected, actual, error_message)
    def test_has_no_duplicates(self):
        # Expect the following test to fail
        sample_list = ["1", "1"]
        with self.assertRaises(AssertionError) as cm:  # cm probably stands for Context Manager
            h.ensureAllUnique(sample_list)
        expected_message = h.ERROR_MESSAGE_NO_DUPLICATES
        actual_message = cm.exception.args[0]
        self.assertEqual(expected_message, actual_message)
        # Expect the following test to pass
        sample_list_2 = ["1", "2"]
        try:
            h.ensureAllUnique(sample_list_2)
        except:
            self.fail("Shouldn't have an Exception")
    def test_prefootnotes_have_counterpart_in_postfootnotes(self):
        expected_message = h.ERROR_MESSAGE_SHOULD_HAVE_COUNTERPART
        # Expect the following test to fail
        list_of_pre = ["1"]
        list_of_postfootnotes = ["2"]
        with self.assertRaises(AssertionError) as cm:
            h.ensureAllPreHasCounterpartAmongPostFootnotes(list_of_pre, list_of_postfootnotes)
        actual_message = cm.exception.args[0]
        self.assertEqual(expected_message, actual_message)
        # Expect the following test to fail
        # NOTE(review): the helper apparently also enforces ordering, since
        # these two lists have equal contents in different order.
        list2_of_pre = ["2", "1", "4"]
        list2_of_post = ["1", "2", "4"]  # post is short for Post Footnotes, as you might have guessed
        with self.assertRaises(AssertionError) as cm2:
            h.ensureAllPreHasCounterpartAmongPostFootnotes(list2_of_pre, list2_of_post)
        actual_message = cm2.exception.args[0]
        self.assertEqual(expected_message, actual_message)
        # Expect the following test to pass
        list3_of_post = ["2", "1", "4"]
        try:
            # note that we compare list *two* with list *three*
            h.ensureAllPreHasCounterpartAmongPostFootnotes(list2_of_pre, list3_of_post)
        except:
            self.fail("Shouldn't have an Exception")
    def test_map_of_replacements_is_accurate(self):
        # Footnote labels are mapped to 1-based position numbers.
        fn = ["first", "second"]
        expected = {
            "first": "1",
            "second": "2"
        }
        actual = h.mapFootnotesToNumbers(fn)
        self.assertEqual(expected, actual)
        # just out of curiosity
        fn2 = []
        expected2 = {}
        actual2 = h.mapFootnotesToNumbers(fn2)
        self.assertEqual(expected2, actual2)
    def test_replace(self):
        # Named footnotes are rewritten to their numeric equivalents in both
        # the references and the definitions.
        sample_text = "Here is one [^alpha] and two [^beta] \n\n [^alpha]: hello \n\n [^beta]: world"
        sample_map = {
            "alpha": "1",
            "beta": "2"
        }
        expected = "Here is one [^1] and two [^2] \n\n [^1]: hello \n\n [^2]: world"
        actual = h.replaceFootnotesWithNumbers(sample_text, sample_map)
        self.assertEqual(expected, actual)
    def test_build_footnote(self):
        fn = "foo"
        expected = "[^foo]"
        actual = h.buildFootnote(fn)
        self.assertEqual(expected, actual)
if __name__ == "__main__":
    # Allow running this test module directly (e.g. `python test_helpers.py`).
    unittest.main()
4818852 | from typing import List, Union
from stability_label_algorithm.modules.argumentation.argumentation_theory.literal import Literal
from stability_label_algorithm.modules.argumentation.argumentation_theory.queryable import Queryable
def queryable_set_is_consistent(queryable_list: List[Union[Literal, Queryable]]) -> bool:
    """Return True iff no two elements of *queryable_list* are contraries.

    Checks every unordered pair once via ``is_contrary_of``; an empty or
    single-element list is trivially consistent.
    """
    for pos, first in enumerate(queryable_list):
        for second in queryable_list[pos + 1:]:
            if first.is_contrary_of(second):
                return False
    return True
| StarcoderdataPython |
import numpy as np

from snakeskin.pheremones import BasePheremoneModel
from snakeskin.pheremones import Max
# this is incomplete and needs some thought
# Field layout for one deposited pheremone coefficient: amplitude (decays via
# evaporation), epoch (deposit time), width (temporal spread of the Gaussian).
TRAIL_DTYPE = [
    ("amplitude", "float32"),
    ("epoch", "float32"),
    ("width", "float32")
]

class PheremoneTrail(object):
    """A time-dependent pheremone trail: the sum of one Gaussian bump per
    deposit, evaluated at a given epoch."""

    def __init__(self):
        # Each entry is a mutable [amplitude, epoch, width] triple; lists (not
        # tuples) so evaporate() can scale amplitudes in place.
        self.coefficients = []

    def evaluate(self, epoch):
        """Return the trail strength at *epoch* (sum of Gaussian contributions;
        0.0 for an empty trail)."""
        # Structured arrays require tuples, so convert the mutable rows here.
        ar = np.array([tuple(c) for c in self.coefficients], dtype=TRAIL_DTYPE)
        return (ar['amplitude'] * np.e ** (-(epoch - ar['epoch']) ** 2 / ar['width'])).sum()

    def evaporate(self, rate):
        """Scale every stored amplitude by *rate*.

        FIX: coefficients were previously appended as tuples, so
        ``coeff[0] *= rate`` raised TypeError; __add__ now stores lists.
        """
        for coeff in self.coefficients:
            coeff[0] *= rate

    def __add__(self, coeff):
        """Deposit a new (amplitude, epoch, width) coefficient; returns self."""
        self.coefficients.append(list(coeff))
        return self
class TimeDependentMaxMinPheremoneModel(BasePheremoneModel):
    """Max-min pheremone model whose trails vary with time (epoch).

    NOTE(review): the module header says this file is incomplete; several
    unresolved names are flagged below rather than guessed at.
    """

    def __init__(self, params):
        # FIX: was `super(MaxMinPheremoneModel, self)` — a NameError, since no
        # class of that name exists in this module.
        super(TimeDependentMaxMinPheremoneModel, self).__init__(DEFAULT_MAXMIN_PHEREMONE_MODEL)
        # NOTE(review): DEFAULT_MAXMIN_PHEREMONE_MODEL is not defined in this
        # module — confirm where the default parameter set should come from.
        self.parameters.update(params)

    def __missing__(self, key):
        # Lazily create an empty trail for unseen edges.
        trail = PheremoneTrail()
        self.__setitem__(key, trail)
        return trail

    def evaporate(self):
        """Apply the configured evaporation rate to every trail."""
        rate = self.parameters['evaporation_rate']
        for key in self.iterkeys():
            self[key].evaporate(rate)

    def deposit(self, tour, cost):
        """Deposit pheremone along each path of *tour*, scaled by 1/cost."""
        value = self.parameters['value_multiplier'] / cost
        width = self.parameters['temporal_width']
        for path in tour:
            key = path.origin, path.target
            # NOTE(review): `epoch` is undefined here and `self.pheremones`
            # does not exist on this class (other methods use self[key]);
            # this method looks unfinished — confirm the intended epoch
            # source before relying on it.
            self.pheremones[key] += (value, epoch, width)

    def __setitem__(self, key, val):
        # Clamp stored values into [min_value, max_value] (max-min ant system).
        # NOTE(review): __missing__ stores PheremoneTrail objects, which
        # cannot be compared by min/max — part of the acknowledged
        # incompleteness.
        max_val = self.parameters['max_value']
        min_val = self.parameters['min_value']
        val = min(max(min_val, val), max_val)
        # FIX: was `super(MaxMinPheremoneModel, self)` (NameError), as above.
        super(TimeDependentMaxMinPheremoneModel, self).__setitem__(key, val)
| StarcoderdataPython |
1706655 | <reponame>valq7711/bottlefly
import json as json_mod
import cgi
from tempfile import TemporaryFile
from io import BytesIO
from functools import partial
from ..common_helpers import touni
from .helpers import (
parse_qsl,
cache_in,
FileUpload,
FormsDict
)
from .errors import RequestError, BodyParsingError, BodySizeError
# fix bug cgi.FieldStorage context manager bug https://github.com/python/cpython/pull/14815
def _cgi_monkey_patch():
    """Patch cgi.FieldStorage.__exit__ so leaving the context manager closes
    the underlying temp file (upstream bug, CPython PR 14815)."""
    def patch_exit(self, *exc):
        if self.file is not None:
            self.file.close()
    cgi.FieldStorage.__exit__ = patch_exit
# Apply the patch once at import time.
_cgi_monkey_patch()
def _iter_body(read, buff_size, *, content_length):
rest_len = content_length
while rest_len > 0:
part_size = min(rest_len, buff_size)
part = read(part_size)
if not part:
break
yield part
rest_len -= part_size
def _iter_chunked(read, buff_size):
    """Yield body data from a 'Transfer-Encoding: chunked' stream.

    Parses each chunk header byte-by-byte (hex size, optional ';extension',
    terminated by CRLF), yields the chunk payload in pieces of at most
    *buff_size* bytes, and stops at the zero-length terminal chunk. Raises
    BodyParsingError on malformed input or premature EOF.
    """
    r, n, rn, sem = b'\r', b'\n', b'\r\n', b';'
    header_size_buff = []
    parsing_err = BodyParsingError()
    while True:
        # read header to get chunk size
        header_size_buff.clear()
        read_len = 0
        seen_r = False
        seen_sem = False
        while True:  # header[-2:] != rn:
            c = read(1)
            read_len += 1
            # An over-long header also counts as a parse error (DoS guard).
            if not c or read_len > buff_size:
                raise parsing_err
            # catch `\r\n`
            if seen_r and c == n:
                break
            seen_r = c == r
            # maybe behind `;` (chunk extra info)
            if seen_sem:
                # continue reading until `\r\n`
                continue
            seen_sem = c == sem
            if seen_r or seen_sem:
                continue
            header_size_buff.append(c)
        chunk_size = b''.join(header_size_buff)
        try:
            rest_len = int(chunk_size.strip(), 16)
        except ValueError:
            raise parsing_err
        # A zero-size chunk marks the end of the body.
        if rest_len == 0:
            break
        # read chunk body
        while rest_len > 0:
            part_size = min(rest_len, buff_size)
            part = read(part_size)
            if not part:
                raise parsing_err
            yield part
            rest_len -= part_size
        # Each chunk payload must be followed by CRLF.
        if read(2) != rn:
            raise parsing_err
def _body_read(read, buff_size, *, content_length = None, chunked = None, max_body_size = None):
    """Read the whole request body into a file-like object.

    Uses the chunked or fixed-length iterator as appropriate, enforces
    *max_body_size* (raising BodySizeError), and spills from an in-memory
    BytesIO to a temporary file once the body exceeds *buff_size*.
    """
    if chunked:
        source = _iter_chunked(read, buff_size)
    else:
        source = _iter_body(read, buff_size, content_length=content_length)
    buff = BytesIO()
    total = 0
    spilled = False
    for piece in source:
        buff.write(piece)
        total += len(piece)
        if max_body_size is not None and total > max_body_size:
            raise BodySizeError()
        if not spilled and total > buff_size:
            # Too big for memory: move what we have into a temp file and
            # keep appending there.
            overflow = TemporaryFile(mode='w+b')
            overflow.write(buff.getvalue())
            buff = overflow
            spilled = True
    return buff
class BodyMixin:
    """Request mixin exposing body/query/form/file accessors over a WSGI
    environ. Results are cached in the environ via the `cache_in` decorator
    keys, so repeated access is cheap.

    NOTE(review): assumes the host class provides `self.environ`,
    `self._env_get`, `self._raise` and item assignment — confirm against the
    Request class this is mixed into.
    """
    @cache_in('environ[ ombott.request.content_length ]', read_only=True)
    def content_length(self):
        ''' The request body length as an integer. The client is responsible to
            set this header. Otherwise, the real length of the body is unknown
            and -1 is returned. In this case, :attr:`body` will be empty. '''
        return int(self.environ.get('CONTENT_LENGTH') or -1)
    @cache_in('environ[ ombott.request.content_type ]', read_only=True)
    def content_type(self):
        ''' The Content-Type header as a lowercase-string (default: empty). '''
        return self.environ.get('CONTENT_TYPE', '').lower()
    @cache_in('environ[ ombott.request.ctype ]', read_only=True)
    def ctype(self):
        ''' The Content-Type header split on ';' into stripped segments;
            ctype[0] is the bare media type, e.g. 'multipart/form-data'. '''
        ctype = self.content_type.split(';')
        return [t.strip() for t in ctype]
    @property
    def chunked(self):
        ''' True if Chunked transfer encoding was used. '''
        return 'chunked' in self.environ.get('HTTP_TRANSFER_ENCODING', '').lower()
    @property
    def body(self):
        ''' The cached body stream, rewound to the start on every access. '''
        ret = self._body
        ret.seek(0)
        return ret
    @cache_in('environ[ ombott.request.query ]', read_only=True)
    def query(self):
        ''' The :attr:`query_string` parsed into a :class:`FormsDict`. These
            values are sometimes called "URL arguments" or "GET parameters", but
            not to be confused with "URL wildcards" as they are provided by the
            :class:`Router`. '''
        ret = FormsDict()
        qs = self._env_get('QUERY_STRING', '')
        if qs:
            parse_qsl(qs, setitem = ret.__setitem__)
        # Also publish under the 'get' key so the GET alias is cached too.
        self.environ['ombott.request.get'] = ret
        return ret
    #: An alias for :attr:`query`.
    GET = query
    @cache_in('environ[ ombott.request.json ]', read_only=True)
    def json(self):
        ''' If the ``Content-Type`` header is ``application/json``, this
            property holds the parsed content of the request body. Only requests
            smaller than :attr:`MEMFILE_MAX` are processed to avoid memory
            exhaustion. '''
        if self.ctype[0] == 'application/json':
            b = self._get_body_string()
            if not b:
                return None
            return json_mod.loads(b)
        return None
    @cache_in('environ[ ombott.request.post ]', read_only=True)
    def POST(self):
        """ The values of :attr:`forms` and :attr:`files` combined into a single
            :class:`FormsDict`. Values are either strings (form values) or
            instances of :class:`cgi.FieldStorage` (file uploads).
        """
        env = self.environ
        files = env['ombott.request.files'] = FormsDict()
        post = FormsDict()
        # We default to application/x-www-form-urlencoded for everything that
        # is not multipart and take the fast path
        ctype = self.content_type
        if not ctype.startswith('multipart/'):
            if ctype.startswith('application/json'):
                post.update(self.json)
            else:
                # urlencoded bodies are decoded as latin1 per the WSGI
                # byte-string convention.
                parse_qsl(
                    touni(self._get_body_string(), 'latin1'),
                    setitem = post.__setitem__
                )
            env['ombott.request.forms'] = post
            return post
        # Multipart: delegate parsing to cgi.FieldStorage over a minimal
        # sanitized environ.
        forms = env['ombott.request.forms'] = FormsDict()
        safe_env = {'QUERY_STRING': ''}  # Build a safe environment for cgi
        for key in ('REQUEST_METHOD', 'CONTENT_TYPE', 'CONTENT_LENGTH'):
            if key in env:
                safe_env[key] = env[key]
        args = dict(
            fp=self.body,
            environ=safe_env,
            keep_blank_values=True,
            encoding='utf8'
        )
        # Track which keys have already been converted to lists (repeated
        # form fields accumulate into a list under the same key).
        listified = set()
        with cgi.FieldStorage(**args) as data:
            self['_cgi.FieldStorage'] = data  # http://bugs.python.org/issue18394#msg207958
            data = data.list or []
            for item in data:
                if item.filename:
                    it = FileUpload(
                        item.file, item.name,
                        item.filename, item.headers
                    )
                    dct = files
                else:
                    it = item.value
                    dct = forms
                key = item.name
                if key in post:
                    el = post[key]
                    if key not in listified:
                        el = post[key] = dct[key] = [el]
                        listified.add(key)
                    el.append(it)
                else:
                    post[key] = dct[key] = it
        return post
    @cache_in('environ[ ombott.request.forms ]', read_only=True)
    def forms(self):
        """ Form values parsed from an `url-encoded` or `multipart/form-data`
            encoded POST or PUT request body. The result is returned as a
            :class:`FormsDict`. All keys and values are strings. File uploads
            are stored separately in :attr:`files`.
        """
        # Parsing POST populates the forms cache as a side effect.
        self.POST
        return self.environ['ombott.request.forms']
    @cache_in('environ[ ombott.request.files ]', read_only=True)
    def files(self):
        """ File uploads parsed from `multipart/form-data` encoded POST or PUT
            request body. The values are instances of :class:`FileUpload`.
        """
        # Parsing POST populates the files cache as a side effect.
        self.POST
        return self.environ['ombott.request.files']
    @cache_in('environ[ ombott.request.body ]', read_only=True)
    def _body(self):
        """ Read and cache the raw body stream, replacing wsgi.input so later
            readers see the buffered copy. """
        try:
            body = _body_read(
                self.environ['wsgi.input'].read,
                self.config.max_memfile_size,
                content_length = self.content_length,
                chunked = self.chunked,
                max_body_size = self.config.max_body_size
            )
        except RequestError as err:
            self._raise(err, RequestError)
        self.environ['wsgi.input'] = body
        body.seek(0)
        return body
    def _get_body_string(self):
        ''' read body until content-length or MEMFILE_MAX into a string. Raise
            BodySizeError on requests that are to large.
        '''
        self._body.seek(0)
        read = self._body.read
        max_content_length = self.config.max_memfile_size
        content_length = self.content_length
        # NOTE(review): `raise self._raise(...)` here differs from the bare
        # `self._raise(...)` call in _body above; confirm whether _raise
        # raises itself or returns the exception to raise.
        if content_length > max_content_length:
            raise self._raise(BodySizeError(), RequestError)
        if content_length < 0:
            content_length = max_content_length + 1
        data = read(content_length)
        if len(data) > max_content_length:  # Fail fast
            raise self._raise(BodySizeError(), RequestError)
            # raise HTTPError(413, 'Request to large')
        return data
| StarcoderdataPython |
4806805 | <filename>blender/arm/props_collision_filter_mask.py
import bpy
class ARM_PT_RbCollisionFilterMaskPanel(bpy.types.Panel):
    """Physics-properties sub-panel exposing the Armory rigid-body collision
    filter mask as a row of toggles plus its integer value."""
    bl_label = "Collections Filter Mask"
    bl_space_type = "PROPERTIES"
    bl_region_type = "WINDOW"
    bl_context = "physics"
    bl_parent_id = "ARM_PT_PhysicsPropsPanel"
    @classmethod
    def poll(self, context):
        # Only show the panel for objects that have a rigid body.
        # (First argument is the class despite being named `self`.)
        obj = context.object
        if obj is None:
            return False
        return obj.rigid_body is not None
    def draw(self, context):
        layout = self.layout
        layout.use_property_split = False
        layout.use_property_decorate = False
        obj = context.object
        layout.prop(obj, 'arm_rb_collision_filter_mask', text="", expand=True)
        col_mask = ''
        # Build a binary string most-significant-bit first: each toggle is
        # prepended, so mask index 0 becomes the least significant bit.
        for b in obj.arm_rb_collision_filter_mask:
            col_mask = ('1' if b else '0') + col_mask
        col = layout.column()
        row = col.row()
        row.alignment = 'RIGHT'
        row.label(text=f'Integer Mask Value: {str(int(col_mask, 2))}')
def register():
    # Called by Blender when the add-on loads.
    bpy.utils.register_class(ARM_PT_RbCollisionFilterMaskPanel)
def unregister():
    # Called by Blender when the add-on unloads.
    bpy.utils.unregister_class(ARM_PT_RbCollisionFilterMaskPanel)
| StarcoderdataPython |
1785129 | # This part allows to import from main directory
import os
import sys
# NOTE(review): os.path.dirname('__file__') operates on the literal string
# "__file__" and always returns '' (i.e. the current working directory), not
# this file's directory. That matches the stated intent when tests are run
# from the repo root, but os.path.dirname(__file__) may have been intended —
# confirm before changing, since the fix would alter the import path.
sys.path.insert(0, os.path.dirname('__file__'))
from unittest.mock import patch, call, Mock
import lib.nonogram as nonogram
def test_ModeData_initialisation_empty():
    # Default construction: no figure/image, no wait, verbosity sentinel -1.
    mode_data = nonogram.ModeData()
    assert mode_data.fig == None
    assert mode_data.image == None
    assert mode_data.wait == None
    assert mode_data.verbosity == -1
def test_ModeData_initialisation_wait():
    mode_data = nonogram.ModeData(wait=5)
    assert mode_data.fig == None
    assert mode_data.image == None
    assert mode_data.wait == 5
    assert mode_data.verbosity == -1
def test_ModeData_initialisation_full():
    mode_data = nonogram.ModeData(wait=5, verbosity=2)
    assert mode_data.fig == None
    assert mode_data.image == None
    assert mode_data.wait == 5
    assert mode_data.verbosity == 2
def test_ModeData_copy():
    # copy() keeps verbosity but resets plotting state and wait.
    mode_data = nonogram.ModeData(wait=5, verbosity=2)
    mode_data_copy = mode_data.copy()
    assert mode_data_copy.image == None
    assert mode_data_copy.fig == None
    assert mode_data_copy.wait == None
    assert mode_data_copy.verbosity == 2
@patch('lib.nonogram.plot')
def test_ModeData_plot_simple(mocked_plot):
    """plot() stores the (fig, image) pair and defaults to non-interactive."""
    mode_data = nonogram.ModeData()
    mocked_plot.return_value = ('fig', 'im')
    mode_data.plot('data')
    assert mode_data.fig == 'fig'
    assert mode_data.image == 'im'
    assert mocked_plot.mock_calls == [call('data', interactive=False)]
@patch('lib.nonogram.plot')
def test_ModeData_plot_interactive(mocked_plot):
    """plot(data, True) forwards the interactive flag to the plotting helper."""
    mode_data = nonogram.ModeData()
    mocked_plot.return_value = ('fig', 'im')
    mode_data.plot('data', True)
    assert mode_data.fig == 'fig'
    assert mode_data.image == 'im'
    assert mocked_plot.mock_calls == [call('data', interactive=True)]
@patch('lib.nonogram.update_plot')
def test_ModeData_update_plot(mocked_uplot):
    """update_plot() forwards data, fig, image and wait to the helper."""
    mode_data = nonogram.ModeData(wait=4.2)
    mode_data.fig = 'fig'
    mode_data.image = 'im'
    mode_data.update_plot('data')
    assert mocked_uplot.mock_calls == [call('data', 'fig', 'im', 4.2)]
@patch('lib.nonogram.end_iplot')
def test_ModeData_end_iplot(mocked_eiplot):
    """end_iplot() does a final update_plot, then ends interactive plotting."""
    mode_data = nonogram.ModeData(wait=4.2)
    mode_data.fig = 'fig'
    mode_data.image = 'im'
    mode_data.update_plot = Mock()
    mode_data.end_iplot('data')
    assert mode_data.update_plot.mock_calls == [call('data')]
    assert mocked_eiplot.mock_calls == [call()]
def test_ModeData_is_interactive_plot_active():
    """is_interactive_plot_active() reflects whether a figure is present."""
    mode_data = nonogram.ModeData()
    # PEP 8: don't compare booleans with ==; assert truthiness directly
    assert not mode_data.is_interactive_plot_active()
    # To fake existence of a figure
    mode_data.fig = True
    assert mode_data.is_interactive_plot_active()
def test_ModeData_get_verbosity():
    """get_verbosity() returns the verbosity set at construction."""
    mode_data = nonogram.ModeData(verbosity=2)
    assert mode_data.get_verbosity() == 2
def test_ModeData_set_verbosity():
    """set_verbosity() overwrites the verbosity attribute."""
    mode_data = nonogram.ModeData(verbosity=2)
    mode_data.set_verbosity(3)
    assert mode_data.verbosity == 3
| StarcoderdataPython |
136887 | <filename>chrome/common/extensions/docs/server2/api_models_test.py
#!/usr/bin/env python
# Copyright 2013 The Chromium Authors. All rights reserved.
# Use of this source code is governed by a BSD-style license that can be
# found in the LICENSE file.
import json
import unittest
from api_models import APIModels
from compiled_file_system import CompiledFileSystem
from extensions_paths import API_PATHS, CHROME_API, CHROME_EXTENSIONS
from features_bundle import FeaturesBundle
from file_system import FileNotFoundError
from mock_file_system import MockFileSystem
from object_store_creator import ObjectStoreCreator
from test_file_system import TestFileSystem
from test_util import ReadFile
from future import Future
from schema_processor import SchemaProcessorFactoryForTest
# In-memory file-system fixture: a few real Chrome API schema files (read
# from the source tree) plus minimal _api_features/_manifest/_permission
# JSON used to exercise APIModels name resolution below.
_TEST_DATA = {
  'api': {
    'devtools': {
      'inspected_window.json': ReadFile(
          CHROME_API, 'devtools', 'inspected_window.json'),
    },
    '_api_features.json': json.dumps({
      'alarms': {},
      'app': {},
      'app.runtime': {'noparent': True},
      'app.runtime.foo': {},
      'declarativeWebRequest': {},
      'devtools.inspectedWindow': {},
      'input': {},
      'input.ime': {},
      'storage': {},
    }),
    '_manifest_features.json': '{}',
    '_permission_features.json': '{}',
    'alarms.idl': ReadFile(CHROME_API, 'alarms.idl'),
    'declarative_web_request.json': ReadFile(
        CHROME_API, 'declarative_web_request.json'),
    'input_ime.json': ReadFile(CHROME_API, 'input_ime.json'),
    'page_action.json': ReadFile(CHROME_API, 'page_action.json'),
  },
  'docs': {
    'templates': {
      'json': {
        'manifest.json': '{}',
        'permissions.json': '{}',
      }
    }
  },
}
class APIModelsTest(unittest.TestCase):
  """Tests for APIModels: name listing, model lookup by several spellings
  of an API name, missing-API errors, and single-file read behaviour."""
  def setUp(self):
    # Build an APIModels over the mock file system so reads/stats can be
    # counted by testSingleFile.
    object_store_creator = ObjectStoreCreator.ForTest()
    compiled_fs_factory = CompiledFileSystem.Factory(object_store_creator)
    self._mock_file_system = MockFileSystem(
        TestFileSystem(_TEST_DATA, relative_to=CHROME_EXTENSIONS))
    features_bundle = FeaturesBundle(self._mock_file_system,
                                     compiled_fs_factory,
                                     object_store_creator,
                                     'extensions')
    self._api_models = APIModels(features_bundle,
                                 compiled_fs_factory,
                                 self._mock_file_system,
                                 object_store_creator,
                                 'extensions',
                                 SchemaProcessorFactoryForTest())
  def testGetNames(self):
    # Both 'app' and 'app.runtime' appear here because 'app.runtime' has
    # noparent:true, but 'app.runtime.foo' etc doesn't so it's a sub-feature of
    # 'app.runtime' not a separate API. 'devtools.inspectedWindow' is an API
    # because there is no 'devtools'.
    self.assertEqual(
        ['alarms', 'app', 'app.runtime', 'declarativeWebRequest',
         'devtools.inspectedWindow', 'input', 'storage'],
        sorted(self._api_models.GetNames()))
  def testGetModel(self):
    """Each API resolves by API name, bare file name, or full path."""
    def get_model_name(api_name):
      return self._api_models.GetModel(api_name).Get().name
    self.assertEqual('devtools.inspectedWindow',
                     get_model_name('devtools.inspectedWindow'))
    self.assertEqual('devtools.inspectedWindow',
                     get_model_name('devtools/inspected_window.json'))
    self.assertEqual('devtools.inspectedWindow',
                     get_model_name(CHROME_API +
                                    'devtools/inspected_window.json'))
    self.assertEqual('alarms', get_model_name('alarms'))
    self.assertEqual('alarms', get_model_name('alarms.idl'))
    self.assertEqual('alarms', get_model_name(CHROME_API + 'alarms.idl'))
    self.assertEqual('declarativeWebRequest',
                     get_model_name('declarativeWebRequest'))
    self.assertEqual('declarativeWebRequest',
                     get_model_name('declarative_web_request.json'))
    self.assertEqual('declarativeWebRequest',
                     get_model_name(CHROME_API +
                                    'declarative_web_request.json'))
    self.assertEqual('input.ime', get_model_name('input.ime'))
    self.assertEqual('input.ime', get_model_name('input_ime.json'))
    self.assertEqual('input.ime',
                     get_model_name(CHROME_API + 'input_ime.json'))
    self.assertEqual('pageAction', get_model_name('pageAction'))
    self.assertEqual('pageAction', get_model_name('page_action.json'))
    self.assertEqual('pageAction', get_model_name(CHROME_API +
                                                  'page_action.json'))
  def testGetNonexistentModel(self):
    """Unknown names, wrong extensions and schema-less APIs all raise."""
    self.assertRaises(FileNotFoundError,
                      self._api_models.GetModel('notfound').Get)
    self.assertRaises(FileNotFoundError,
                      self._api_models.GetModel('notfound.json').Get)
    self.assertRaises(FileNotFoundError,
                      self._api_models.GetModel(CHROME_API +
                                                'notfound.json').Get)
    self.assertRaises(FileNotFoundError,
                      self._api_models.GetModel(CHROME_API +
                                                'alarms.json').Get)
    self.assertRaises(FileNotFoundError,
                      self._api_models.GetModel('storage').Get)
    self.assertRaises(FileNotFoundError,
                      self._api_models.GetModel(CHROME_API +
                                                'storage.json').Get)
    self.assertRaises(FileNotFoundError,
                      self._api_models.GetModel(CHROME_API +
                                                'storage.idl').Get)
  def testSingleFile(self):
    # 2 stats (1 for JSON and 1 for IDL) for each available API path.
    # 1 read (for IDL file which existed).
    future = self._api_models.GetModel('alarms')
    self.assertTrue(*self._mock_file_system.CheckAndReset(
        read_count=1, stat_count=len(API_PATHS)*2))
    # 1 read-resolve (for the IDL file).
    #
    # The important part here and above is that it's only doing a single read;
    # any more would break the contract that only a single file is accessed -
    # see the SingleFile annotation in api_models._CreateAPIModel.
    future.Get()
    self.assertTrue(*self._mock_file_system.CheckAndReset(
        read_resolve_count=1))
    # 2 stats (1 for JSON and 1 for IDL) for each available API path.
    # No reads (still cached).
    future = self._api_models.GetModel('alarms')
    self.assertTrue(*self._mock_file_system.CheckAndReset(
        stat_count=len(API_PATHS)*2))
    future.Get()
    self.assertTrue(*self._mock_file_system.CheckAndReset())
# Allow running this test module directly.
if __name__ == '__main__':
  unittest.main()
| StarcoderdataPython |
3376269 | """
mingprovider Module
This contains the class which allows sprox to interface with any database.
Copyright © 2009 <NAME>
Original Version by <NAME> 2009
Released under MIT license.
"""
from bson.errors import InvalidId
import itertools
from sprox.iprovider import IProvider
from sprox.util import timestamp
import datetime, inspect
# Ming renamed its ORM package from ``ming.orm`` to ``ming.odm``; try the
# modern name first and fall back for older Ming installations.
try:
    from ming.odm import mapper, ForeignIdProperty, FieldProperty, RelationProperty
    from ming.odm.declarative import MappedClass
    from ming.odm.property import OneToManyJoin, ManyToOneJoin, ORMProperty
    from ming.odm.icollection import InstrumentedObj
except ImportError: #pragma nocover
    from ming.orm import mapper, ForeignIdProperty, FieldProperty, RelationProperty
    from ming.orm.declarative import MappedClass
    from ming.orm.property import OneToManyJoin, ManyToOneJoin, ORMProperty
    from ming.orm.icollection import InstrumentedObj
from ming import schema as S
import bson
from bson import ObjectId
import re
from .widgetselector import MingWidgetSelector
from .validatorselector import MingValidatorSelector
from pymongo import ASCENDING, DESCENDING
from sprox._compat import string_type, zip_longest
class MingProvider(IProvider):
    """Sprox data provider backed by the Ming ODM (MongoDB).

    Implements the :class:`sprox.iprovider.IProvider` interface on top of
    Ming mapped classes: field and relationship introspection, CRUD
    helpers, and query support used by sprox widgets and CRUD controllers.
    """
    # Field names tried, in order, when picking a human-readable "view"
    # field for dropdowns and related-record displays.
    default_view_names = ['_name', 'name', 'description', 'title']
    default_widget_selector_type = MingWidgetSelector
    default_validator_selector_type = MingValidatorSelector
    def __init__(self, hint, **hints):
        # ``hint`` is the Ming session; extra ``hints`` are accepted for
        # interface compatibility with other providers but currently unused.
        self.session = hint
    def get_field(self, entity, name):
        """Get a field with the given field name.

        Dotted names (``a.b``, ``a.$.b``) walk into embedded Object/Array
        schemas and return a synthesized :class:`FieldProperty` for the
        innermost field.
        """
        if '.' in name:
            # Nested field
            path = name.split('.')
            name = path.pop(0)
            field = mapper(entity).property_index[name]
            while path:
                name = path.pop(0)
                if name == '$':
                    # Array element, the real entry was the parent
                    continue
                field_schema = field.field.schema
                field_type = field_schema
                if isinstance(field_schema, S.Array):
                    field_type = field_schema.field_type
                if isinstance(field_type, S.Object):
                    field_type = field_type.fields.get(name)
                field = FieldProperty(name, field_type)
            return field
        return mapper(entity).property_index[name]
    def get_fields(self, entity):
        """Get all of the fields for a given entity."""
        if inspect.isfunction(entity):
            entity = entity()
        return [prop.name for prop in mapper(entity).properties if isinstance(prop, ORMProperty)]
    @property
    def _entities(self):
        # Lazily-built map of mapped-class name -> mapper, cached on first use.
        entities = getattr(self, '__entities', None)
        if entities is None:
            entities = dict(((m.mapped_class.__name__, m) for m in MappedClass._registry.values()))
            self.__entities = entities
        return entities
    def get_entity(self, name):
        """Get an entity with the given name."""
        return self._entities[name].mapped_class
    def get_entities(self):
        """Get all entities available for this provider."""
        return iter(self._entities.keys())
    def get_primary_fields(self, entity):
        """Get the fields in the entity which uniquely identifies a record."""
        return [self.get_primary_field(entity)]
    def get_primary_field(self, entity):
        """Get the single primary field for an entity (always ``_id`` on MongoDB)."""
        return '_id'
    def _get_meta(self, entity, field_name, metaprop):
        """Returns the value of the given sprox meta property for the field."""
        field = self.get_field(entity, field_name)
        return getattr(field, "sprox_meta", {}).get(metaprop, None)
    def get_view_field_name(self, entity, possible_names, item=None):
        """Get the name of the field which first matches the possible columns.

        :Arguments:
          entity
            the entity where the field is located
          possible_names
            candidate names, in preference order; the first field whose name
            equals (or, failing that, contains) one of them is returned
          item
            optional subdocument instance whose keys are used when ``entity``
            is an embedded ``InstrumentedObj``

        A field flagged with the sprox ``title`` meta property wins outright;
        otherwise the first entity field is the fallback.
        """
        if entity is InstrumentedObj:
            # Cope with subdocuments
            if item is not None:
                fields = list(item.keys())
            else:
                fields = ['_impl']
        else:
            fields = self.get_fields(entity)
        for field in fields:
            if self._get_meta(entity, field, 'title'):
                return field
        view_field = None
        for column_name in possible_names:
            # exact match first ...
            for actual_name in fields:
                if column_name == actual_name:
                    view_field = actual_name
                    break
            if view_field:
                break
            # ... then substring match
            for actual_name in fields:
                if column_name in actual_name:
                    view_field = actual_name
                    break
            if view_field:
                break
        if view_field is None:
            view_field = fields[0]
        return view_field
    def get_dropdown_options(self, entity_or_field, field_name, view_names=None):
        """Get all dropdown options for a given entity field.

        :Arguments:
          entity_or_field
            either the entity where the field is located, or the field itself
          field_name
            if the entity is specified, name of the field in the entity;
            otherwise None
          view_names
            candidate names for the human-readable view field of the related
            class (defaults to :attr:`default_view_names`)

        :Returns:
          A list of tuples with (id, view_value) as items.

        ``S.OneOf`` fields yield their options directly; relation fields
        yield ``(str(_id), view_field)`` for every related document.
        """
        if view_names is None:
            view_names = self.default_view_names
        if field_name is not None:
            field = self.get_field(entity_or_field, field_name)
        else:
            field = entity_or_field
        if isinstance(field, FieldProperty):
            field_type = getattr(field, 'field_type', None)
            if field_type is None:
                f = getattr(field, 'field', None)
                if f is not None:
                    field = field.field
                    field_type = field.type
            schemaitem = field_type
            if isinstance(schemaitem, S.OneOf):
                return [ (opt,opt) for opt in schemaitem.options ]
            raise NotImplementedError("get_dropdown_options doesn't know how to get the options for field %r of type %r" % (field, schemaitem))
        if not isinstance(field, RelationProperty):
            raise NotImplementedError("get_dropdown_options expected a FieldProperty or RelationProperty field, but got %r" % field)
        try:
            join = field.join
            iter = join.rel_cls.query.find()
            rel_cls = join.rel_cls
        #this seems like a work around for a bug in ming.
        except KeyError: # pragma: no cover
            join = field.related
            iter = join.query.find()
            rel_cls = join
        view_field = self.get_view_field_name(rel_cls, view_names)
        return [ (str(obj._id), getattr(obj, view_field)) for obj in iter ]
    def get_relations(self, entity):
        """Get all of the field names in an entity which are related to other entities."""
        return [prop.name for prop in mapper(entity).properties if isinstance(prop, RelationProperty)]
    def is_entity(self, entity):
        # An object is considered a mapped entity if Ming can list fields for it.
        try:
            return bool(self.get_fields(entity))
        except:
            return False
    def is_subdocument(self, entity):
        # Embedded documents are exposed by Ming as InstrumentedObj instances.
        return isinstance(entity, InstrumentedObj)
    def is_relation(self, entity, field_name):
        """Determine if a field is related to a field in another entity."""
        return isinstance(self.get_field(entity, field_name), RelationProperty)
    def is_query(self, entity, value):
        """determines if a field is a query instead of actual list of data"""
        #Currently not supported in MING
        return False
    def is_nullable(self, entity, field_name):
        """Determine if a field is nullable."""
        fld = self.get_field(entity, field_name)
        if isinstance(fld, RelationProperty):
            # check the required attribute on the corresponding foreign key field
            fld = fld.join.prop
        fld = fld.field
        schema = fld.schema
        return not getattr(schema, 'required', False)
    def get_field_default(self, field):
        """Return ``(True, default)`` if the field declares an ``if_missing``
        default, ``(False, None)`` otherwise."""
        field = getattr(field, 'field', None)
        if field is not None:
            if_missing = field.schema.if_missing
            if if_missing is not None:
                return (True, if_missing)
        return (False, None)
    def get_field_provider_specific_widget_args(self, viewbase, field, field_name):
        """No Ming-specific widget arguments are required."""
        widget_args = {}
        return widget_args
    def _build_subfields(self, viewbase, field):
        """Build child-widget arguments for the sub-fields of an embedded
        Array/Object field; returns ``{}`` for plain scalar fields."""
        subfields_widget_args = {}
        field = getattr(field, 'field', None)
        if field is None:
            return subfields_widget_args
        schema = getattr(field, 'schema', None)
        if isinstance(schema, (S.Array, S.Object)):
            if isinstance(schema, S.Array):
                field_type = schema.field_type
            else:
                field_type = schema
            if isinstance(field_type, S.Object):
                subfields = [FieldProperty('.'.join((field.name, n)), t) for n, t in field_type.fields.items()]
                direct = False
            else:
                # Array of scalars: a single '$' pseudo-field addresses the items.
                subfields = [FieldProperty('.'.join((field.name, '$')), field_type)]
                direct = True
            subfields_widget_args = {'children': [],
                                     'direct': direct}
            for subfield in subfields:
                widget = viewbase._do_get_field_widget(subfield.name, subfield)
                widget_args = {
                    'key': subfield.name.rsplit('.', 1)[-1],
                    'id': 'sx_' + subfield.name.replace('$', '-').replace('.', '_'),
                }
                if subfields_widget_args.get('direct', False):
                    widget_args['label'] = None
                subfields_widget_args['children'].append(widget(**widget_args))
        return subfields_widget_args
    def get_default_values(self, entity, params):
        """No provider-specific default values: return ``params`` unchanged."""
        return params
    def _related_object_id(self, value):
        # Accept either a mapped object or a raw id/string and return an ObjectId.
        if isinstance(value, MappedClass):
            return value._id
        return ObjectId(value)
    def _cast_value_for_type(self, type, value):
        """Coerce ``value`` (typically a request string) to the given Ming
        schema type, recursing into Object and Array schemas.  ``None``
        passes through unchanged."""
        if value is None:
            return None
        def _check(*types):
            # The schema may be given as a class or an instance; accept both.
            return type in types or isinstance(type, types)
        if _check(S.DateTime, datetime.datetime):
            if isinstance(value, string_type):
                return datetime.datetime.strptime(value, "%Y-%m-%d %H:%M:%S")
            else:
                return value
        elif _check(S.Binary):
            return bson.Binary(value)
        elif _check(S.Int, int):
            return int(value)
        elif _check(S.Bool, bool):
            if value in ('true', 'false'):
                return value == 'true' and True or False
            else:
                return bool(value)
        elif _check(S.Object, dict):
            if isinstance(type, S.Object):
                type = type.fields
            return dict((k, self._cast_value_for_type(type[k], v)) for k, v in value.items())
        elif _check(S.Array, list):
            if not isinstance(value, (list, tuple)):
                value = [value]
            if isinstance(type, S.Array):
                type = [type.field_type]
            return [self._cast_value_for_type(type[0], v) for v in value]
        else:
            return value
    def _cast_value(self, entity, key, value):
        """Cast one incoming value to what ``entity.key`` expects, resolving
        relation ids (or lists of ids) into mapped objects."""
        # handles the case where an record with no id is being created
        if key == '_id' and value == '':
            value = ObjectId()
        if value is None:
            # Let none pass as is as it actually means a "null" on mongodb
            return value
        field = getattr(entity, key)
        relations = self.get_relations(entity)
        if key in relations:
            related = field.related
            if isinstance(value, list):
                return related.query.find({'_id':{'$in':[self._related_object_id(i) for i in value]}}).all()
            else:
                return self.get_obj(related, {'_id':self._related_object_id(value)})
        field = getattr(field, 'field', None)
        if field is not None:
            value = self._cast_value_for_type(field.type, value)
        return value
    def create(self, entity, params):
        """Create an entry of type entity with the given params."""
        values = {}
        fields = self.get_fields(entity)
        for key, value in params.items():
            # Silently ignore params that are not entity fields or cast to None.
            if key not in fields:
                continue
            value = self._cast_value(entity, key, value)
            if value is not None:
                values[key] = value
        obj = entity(**values)
        self.flush()
        return obj
    def flush(self):
        """Flush pending changes to MongoDB and close all Ming sessions."""
        self.session.flush_all()
        self.session.close_all()
    def get_obj(self, entity, params, fields=None, omit_fields=None):
        """Fetch one object: by ``_id`` when present in ``params`` (returning
        ``None`` for a malformed id), otherwise the first ``find_by`` match."""
        try:
            return entity.query.get(_id=ObjectId(params['_id']))
        except InvalidId:
            return None
        except KeyError:
            return entity.query.find_by(**params).first()
    def get(self, entity, params, fields=None, omit_fields=None):
        """Like :meth:`get_obj`, but returns a plain dict (see :meth:`dictify`)."""
        return self.dictify(self.get_obj(entity, params), fields, omit_fields)
    def update(self, entity, params, omit_fields=None):
        """Update an entry of type entity which matches the params.

        NOTE: ``params`` is mutated (``_id``/``sprox_id``/``_method`` are
        popped) before the remaining entries are cast and assigned.
        """
        obj = self.get_obj(entity, params)
        params.pop('_id')
        try:
            params.pop('sprox_id')
        except KeyError:
            pass
        try:
            params.pop('_method')
        except KeyError:
            pass
        fields = self.get_fields(entity)
        for key, value in params.items():
            if key not in fields:
                continue
            if omit_fields and key in omit_fields:
                continue
            value = self._cast_value(entity, key, value)
            setattr(obj, key, value)
        self.flush()
        return obj
    def delete(self, entity, params):
        """Delete an entry of type entity which matches the params."""
        obj = self.get_obj(entity, params)
        if obj is not None:
            obj.delete()
        return obj
    def _modify_params_for_related_searches(self, entity, params, view_names=None, substrings=()):
        """Replace text filters on relation fields with the related objects
        whose view field matches the text (exactly, or as a case-insensitive
        substring for relations listed in ``substrings``)."""
        if view_names is None:
            view_names = self.default_view_names
        relations = self.get_relations(entity)
        for relation in relations:
            if relation in params:
                value = params[relation]
                if not isinstance(value, string_type):
                    # When not a string consider it the related class primary key
                    params.update({relation: value})
                    continue
                if not value:
                    # As we use ``contains``, searching for an empty text
                    # will just lead to all results so we just remove the filter.
                    del params[relation]
                    continue
                relationship = getattr(entity, relation)
                target_class = relationship.related
                view_name = self.get_view_field_name(target_class, view_names)
                if relation in substrings:
                    filter = {view_name: {'$regex': re.compile(re.escape(value), re.IGNORECASE)}}
                else:
                    filter = {view_name: value}
                value = target_class.query.find(filter).all()
                params[relation] = value
        return params
    def _modify_params_for_relationships(self, entity, params):
        """Rewrite relation filters into raw MongoDB conditions on the
        appropriate foreign-key column (on this side or the related side)."""
        relations = self.get_relations(entity)
        for relation in relations:
            if relation in params:
                relationship = getattr(entity, relation)
                value = params[relation]
                if not isinstance(value, list):
                    value = [value]
                # Resolve raw ids into mapped objects; keep objects as-is.
                adapted_value = []
                for v in value:
                    if isinstance(v, ObjectId) or isinstance(v, string_type):
                        obj = self.get_obj(relationship.related, dict(_id=v))
                        if obj is not None:
                            adapted_value.append(obj)
                    else:
                        adapted_value.append(v)
                value = adapted_value
                join = relationship.join
                my_foreign_key = relationship._detect_foreign_keys(relationship.mapper,
                                                                   join.rel_cls,
                                                                   False)
                rel_foreign_key = relationship._detect_foreign_keys(mapper(relationship.related),
                                                                    join.own_cls,
                                                                    False)
                params.pop(relation)
                if my_foreign_key:
                    # FK lives on this entity: filter it by the related ids.
                    my_foreign_key = my_foreign_key[0]
                    params[my_foreign_key.name] = {'$in': [r._id for r in value]}
                elif rel_foreign_key:
                    # FK lives on the related entity: filter our _id by its values.
                    rel_foreign_key = rel_foreign_key[0]
                    value = [getattr(r, rel_foreign_key.name) for r in value]
                    if rel_foreign_key.uselist:
                        value = list(itertools.chain(*value))
                    params['_id'] = {'$in': value}
        return params
    def query(self, entity, limit=None, offset=0, limit_fields=None,
              order_by=None, desc=False, filters={},
              substring_filters=[], search_related=False, related_field_names=None,
              **kw):
        """Run a filtered, sorted and paginated find over ``entity``.

        Returns ``(count, results)`` where ``count`` is the total number of
        matches (before limit/offset) and ``results`` the fetched documents.

        NOTE(review): ``filters={}`` and ``substring_filters=[]`` are mutable
        defaults; in practice they are never mutated when left at their
        default (all mutation paths require matching keys), but the usual
        ``None`` sentinel would be safer.
        """
        if '_id' in filters:
            try:
                filters['_id'] = ObjectId(filters['_id'])
            except InvalidId:
                pass
        if search_related:
            # Values for related fields contain the text to search
            filters = self._modify_params_for_related_searches(entity, filters,
                                                               view_names=related_field_names,
                                                               substrings=substring_filters)
        filters = self._modify_params_for_relationships(entity, filters)
        for field in substring_filters:
            if self.is_string(entity, field):
                filters[field] = {'$regex': re.compile(re.escape(filters[field]), re.IGNORECASE)}
        iter = entity.query.find(filters)
        if offset:
            iter = iter.skip(int(offset))
        if limit is not None:
            iter = iter.limit(int(limit))
        if order_by is not None:
            if not isinstance(order_by, (tuple, list)):
                order_by = [order_by]
            if not isinstance(desc, (tuple, list)):
                desc = [desc]
            sorting = [(field, DESCENDING if sort_descending else ASCENDING) for field, sort_descending in
                       zip_longest(order_by, desc)]
            iter.sort(sorting)
        count = iter.count()
        return count, iter.all()
    def is_string(self, entity, field_name):
        """Return True if the (possibly relation-backed) field stores a string."""
        fld = self.get_field(entity, field_name)
        if isinstance(fld, RelationProperty):
            # check the required attribute on the corresponding foreign key field
            fld = fld.join.prop
        fld = getattr(fld, 'field', None)
        return isinstance(fld.schema, S.String)
    def is_binary(self, entity, field_name):
        """Return True if the (possibly relation-backed) field stores binary data."""
        fld = self.get_field(entity, field_name)
        if isinstance(fld, RelationProperty):
            # check the required attribute on the corresponding foreign key field
            fld = fld.join.prop
        fld = getattr(fld, 'field', None)
        return isinstance(fld.schema,S.Binary)
    def relation_fields(self, entity, field_name):
        """Return the foreign-key field names backing a relation field."""
        field = self.get_field(entity, field_name)
        if not isinstance(field, RelationProperty):
            raise TypeError("The field %r is not a relation field" % field)
        #This is here for many-to-many turbogears-ming relations
        if not field.join.prop:
            return []
        return [field.join.prop.name]
    def relation_entity(self, entity, field_name):
        """If the field in the entity is a relation field, then returns the
        entity which it relates to.

        :Returns:
          Related entity for the field
        """
        field = self.get_field(entity, field_name)
        return field.related
    def get_field_widget_args(self, entity, field_name, field):
        """Extra keyword arguments sprox passes to the widget for this field."""
        args = {}
        args['provider'] = self
        args['nullable'] = self.is_nullable(entity, field_name)
        return args
    def is_unique(self, entity, field_name, value):
        """Return True if no document of ``entity`` already holds ``value``."""
        iter = entity.query.find({ field_name: value })
        return iter.count() == 0
    def is_unique_field(self, entity, field_name):
        """Return True if the field has a single-column unique index declared."""
        for idx in getattr(entity.__mongometa__, "unique_indexes", ()):
            if idx == (field_name,):
                return True
        return False
    def dictify(self, obj, fields=None, omit_fields=None):
        """Convert a mapped object to a plain dict, flattening relation
        values to their primary key(s).  Returns ``{}`` for ``None``."""
        if obj is None:
            return {}
        r = {}
        for prop in self.get_fields(obj.__class__):
            if fields and prop not in fields:
                continue
            if omit_fields and prop in omit_fields:
                continue
            value = getattr(obj, prop)
            if value is not None:
                if self.is_relation(obj.__class__, prop):
                    klass = self.relation_entity(obj.__class__, prop)
                    pk_name = self.get_primary_field(klass)
                    if isinstance(value, list):
                        #joins
                        value = [getattr(value, pk_name) for value in value]
                    else:
                        #fks
                        value = getattr(value, pk_name)
                r[prop] = value
        return r
| StarcoderdataPython |
151203 | #A function for randomly generating prime numbers, including very large primes. The function does this by generating random odd numbers
#and testing their primality using the Fermat primality test. Note that the Fermat test is a probabilistic test which incorrectly labels
#some composite numbers ("pseudoprimes") as prime; in particular, the test as implemented here will falsely label "Poulet numbers" (which
#pass the Fermat test with 2 as a base) and "Carmichael numbers" (which pass the Fermat test for all bases) as prime. Therefore this
#function has a chance of generating a composite number; but only a very small chance, as while there are infinitely many pseudoprimes,
#they are quite rare. Note also that the function will never generate 2, even though 2 is a prime, but that doesn't really matter.
import random
#generates a random prime number with n digits. If no argument is passed, defaults to 30 digits.
def randPrime(n: int = 30):
    """Return a random (probable) prime with exactly n digits.

    Candidates are random odd numbers in the n-digit range, tested with the
    Fermat primality test to base 2 (pow() uses fast modular
    exponentiation).  The test is probabilistic: base-2 pseudoprimes
    (Poulet numbers) and Carmichael numbers can slip through, but they are
    extremely rare.  The prime 2 itself is never returned because only odd
    candidates are drawn.

    Prints an error and returns None for non-positive n (preserving the
    original error-reporting behaviour rather than raising).
    """
    if n <= 0:
        print("Error: non-positive integer passed as number of digits. Please only use positive integers.")
        return None
    if n == 1:
        # 1 and 9 fail the Fermat test, so only 3, 5 or 7 can be returned.
        lowerBound, upperBound = 1, 10
    else:
        # Smallest odd n-digit number; step 2 keeps every candidate odd.
        lowerBound, upperBound = 10 ** (n - 1) + 1, 10 ** n
    while True:
        candidate = random.randrange(lowerBound, upperBound, 2)
        if pow(2, candidate - 1, candidate) == 1:
            return candidate
1658267 | <filename>deeprank/utils/get_h5subset.py
#!/usr/bin/env python
"""Extract first N groups of a hdf5 to a new hdf5 file.
Usage: python {0} <hdf5 input file> <hdf5 output file> <number of groups to write>
Example: python {0} ./001_1GPW.hdf5 ./001_1GPW_sub10.hdf5 10
"""
import sys
import h5py
# Usage text: the module docstring with this script's filename substituted in.
USAGE = __doc__.format(__file__)
def check_input(args):
    """Exit with the usage text unless exactly three CLI arguments were given."""
    if len(args) == 3:
        return
    sys.stderr.write(USAGE)
    sys.exit(1)
def get_h5subset(fin, fout, n):
    """Copy the first ``n`` top-level groups of one hdf5 file into a new one.

    Args:
        fin (hdf5): input hdf5 file.
        fout (hdf5): output hdf5 file.
        n (int): first n groups to write.
    """
    count = int(n)
    source = h5py.File(fin, "r")
    target = h5py.File(fout, "w")
    print(f"First {count} groups in {fin}:")
    # Copy each of the leading groups while echoing its name.
    for group_name in list(source)[:count]:
        print(group_name)
        source.copy(source[group_name], target)
    print()
    print(f"Groups in {fout}:")
    print(list(target))
    source.close()
    target.close()
    print()
    print(f"{fout} generated.")
# CLI entry point: validate the arguments, then extract the subset.
if __name__ == "__main__":
    check_input(sys.argv[1:])
    fin, fout, n = sys.argv[1:]
    get_h5subset(fin, fout, n)
| StarcoderdataPython |
3220261 | <filename>Recursion/tower_of_hanoi.py
def towerofhanoi(n, source, aux, dest):
    """Print the moves that transfer ``n`` disks from ``source`` to ``dest``.

    Classic recursion with an empty-tower base case: move n-1 disks onto the
    auxiliary peg, move the largest disk, then move the n-1 disks on top of it.
    """
    if n == 0:
        return
    towerofhanoi(n - 1, source, dest, aux)
    print(source, "", dest)
    towerofhanoi(n - 1, aux, source, dest)
# Read the number of disks from stdin and print the full move sequence.
n=int(input())
towerofhanoi(n, 'a', 'b', 'c')
| StarcoderdataPython |
3259087 | <filename>fdp/services.py<gh_stars>0
from pathlib import Path
import requests
from data_pipeline_api.registry.download import download_from_config_file
def registry_installed():
    """Return True when the ``~/.scrc`` directory (registry install marker) exists."""
    return Path.home().joinpath(".scrc").exists()
def registry_running():
    """Return True when a local data registry responds with HTTP 200 on port 8000.

    Any request failure (connection refused, timeout, etc.) is treated as
    "not running" rather than propagating; the explicit timeout prevents
    the check from hanging indefinitely on a filtered port.
    """
    try:
        r = requests.get("http://localhost:8000/api?", timeout=5)
    except requests.exceptions.RequestException:
        return False
    return r.status_code == 200
def token():
    """Return the API token stored in ``token.txt``.

    TODO: Use the registry's get_token endpoint for this
    """
    return Path("token.txt").read_text()
def download_data(config):
    """
    Download any data required by read: from the remote data store.
    """
    # Delegate to the data pipeline API, authenticating with the local token.
    # (The dead trailing ``pass`` statement was removed.)
    download_from_config_file(config, token())
| StarcoderdataPython |
53555 | <reponame>marcinbodnar/debugger<filename>api/src/near/debugger_api/web/blueprint.py
import json
from flask import current_app, Blueprint, jsonify, request
from near.debugger_api.models import (
BeaconBlock, ContractInfo, ListBeaconBlockResponse, ListShardBlockResponse,
PaginationOptions, ShardBlock, TransactionInfo,
)
# Flask blueprint under which all debugger API endpoints are registered.
blueprint = Blueprint('api', __name__)
def _get_pagination_options_from_args(args):
    """Build a validated PaginationOptions from query-string arguments.

    Recognised keys: ``page_size``, ``page`` and ``sort_options`` (the
    latter being JSON-encoded).  Propagates whatever validation error
    ``options.validate()`` raises for invalid combinations.
    """
    options = PaginationOptions()
    if 'page_size' in args:
        options.page_size = args['page_size']
    if 'page' in args:
        # Direct indexing is safe after the membership test; this also makes
        # the access style consistent with the other keys.
        options.page = args['page']
    if 'sort_options' in args:
        options.sort_options = json.loads(args['sort_options'])
    options.validate()
    return options
@blueprint.route(
    '/list-beacon-blocks',
    methods=['GET'],
    output_schema=ListBeaconBlockResponse,
)
def list_beacon_blocks():
    """Return one page of beacon blocks as JSON, honouring pagination args."""
    pagination_options = _get_pagination_options_from_args(request.args)
    response = current_app.api.list_beacon_blocks(pagination_options)
    return jsonify(response.to_primitive())
@blueprint.route(
    '/get-beacon-block-by-index/<int:block_index>',
    methods=['GET'],
    output_schema=BeaconBlock,
)
def get_beacon_block_by_index(block_index):
    """Return the beacon block at ``block_index`` as JSON."""
    response = current_app.api.get_beacon_block_by_index(block_index)
    return jsonify(response.to_primitive())
@blueprint.route(
    '/list-shard-blocks',
    methods=['GET'],
    output_schema=ListShardBlockResponse,
)
def list_shard_blocks():
    """Return one page of shard blocks as JSON, honouring pagination args."""
    pagination_options = _get_pagination_options_from_args(request.args)
    response = current_app.api.list_shard_blocks(pagination_options)
    return jsonify(response.to_primitive())
@blueprint.route(
    '/get-shard-block-by-index/<int:block_index>',
    methods=['GET'],
    output_schema=ShardBlock,
)
def get_shard_block_by_index(block_index):
    """Return the shard block at ``block_index`` as JSON."""
    response = current_app.api.get_shard_block_by_index(block_index)
    return jsonify(response.to_primitive())
@blueprint.route(
    '/get-transaction-info/<transaction_hash>',
    methods=['GET'],
    output_schema=TransactionInfo,
)
def get_transaction_info(transaction_hash):
    """Return transaction details for ``transaction_hash`` as JSON."""
    response = current_app.api.get_transaction_info(transaction_hash)
    return jsonify(response.to_primitive())
@blueprint.route(
    '/get-contract-info/<name>',
    methods=['GET'],
    output_schema=ContractInfo,
)
def get_contract_info(name):
    """Return contract details for ``name`` as JSON.

    NOTE(review): unlike the sibling endpoints, this one validates the
    response before serialising — confirm whether that difference is
    intentional.
    """
    response = current_app.api.get_contract_info(name)
    response.validate()
    return jsonify(response.to_primitive())
| StarcoderdataPython |
3270030 | import streamlit as st
import pandas as pd
import numpy as np
# read cleaned population_total data
pop_total = pd.read_csv("../data/clean/population_total.csv", sep=",", na_values='')
pop_total.set_index(['Year', 'Country'], inplace=True)
st.write(pop_total)
#print(pop_total[pop_total['PopTotal']<0])
# read cleaned population_per_age group data
pop_per_age = pd.read_csv("../data/clean/population_per_age.csv", sep=",", na_values='')
pop_per_age.set_index(['Year', 'Country'], inplace=True)
# drop code since it's not available for merges later anyway
pop_per_age.drop(columns="Code", inplace=True)
# create 3 df's for each group and sum the values for each year and country
pop_per_age_young = pop_per_age.query('AgeGrp in ["0-4", "5-9", "10-14", "15-19"]').sum(level=['Year', 'Country'])
pop_per_age_young.rename(columns={"PopMale": "PopMale_0-19", "PopFemale": "PopFemale_0-19", "PopTotal" : "PopTotal_0-19"}, inplace=True)
pop_per_age_mid = pop_per_age.query('AgeGrp in ["20-24", "25-29", "30-34", "35-39", "40-44", "45-49","50-54", "55-59"]').sum(level=['Year', 'Country'])
pop_per_age_mid.rename(columns={"PopMale": "PopMale_20-59", "PopFemale": "PopFemale_20-59", "PopTotal" : "PopTotal_20-59"}, inplace=True)
pop_per_age_old = pop_per_age.query('AgeGrp in ["60-64", "65-69", "70-74", "75-79", "80-84", "85-89", "90-94", "95-99", "100+"]').sum(level=['Year', 'Country'])
pop_per_age_old.rename(columns={"PopMale": "PopMale_60+", "PopFemale": "PopFemale_60+", "PopTotal" : "PopTotal_60+"}, inplace=True)
# merge all groups to one big df
pop_with_groups = pop_per_age_young.merge(pop_per_age_mid, left_index=True, right_index=True)
pop_with_groups = pop_with_groups.merge(pop_per_age_old, left_index=True, right_index=True)
pop_total_with_groups = pop_total.merge(pop_with_groups, left_index=True, right_index=True)
# load indicators dataset
indicators = pd.read_csv("../data/raw/WPP2019_Period_Indicators_Medium.csv", sep=",", na_values=''
    # ,dtype={"Deaths":"int64", "DeathsMale":"int64", "DeathsFemale":"int64", "NetMigrations":"int64"}
    )
# convert readable per 1000 values to ints
# (the source values are per-1000 over 5-year periods, hence *1000/5)
indicators[["Births", "Deaths", "DeathsMale", "DeathsFemale", "NetMigrations"]] = (indicators[["Births", "Deaths", "DeathsMale", "DeathsFemale", "NetMigrations"]].fillna(0) * 1000 / 5).astype("int64")
#indicators['RelMigrations'] = indicators[]
indicators_pop = indicators.merge(pop_total_with_groups, left_on=['MidPeriod','Location'], right_on=['Year','Country'])
indicators_pop["RelMigrations"] = indicators_pop["NetMigrations"]/indicators_pop["PopTotal"]
indicators_pop.drop(columns=['VarID', 'Variant'], inplace=True)
indicators_pop.rename(columns={"Location" : "Country"}, inplace=True)
# normalise absolute counts into fractions of the total population
scale_cols = ["Births", "Deaths", "DeathsMale", "DeathsFemale", "PopMale", "PopFemale", "PopMale_0-19", "PopFemale_0-19",
              "PopTotal_0-19", "PopMale_20-59", "PopFemale_20-59", "PopTotal_20-59", "PopMale_60+", "PopFemale_60+", "PopTotal_60+"]
indicators_pop[scale_cols] = indicators_pop[scale_cols].div(indicators_pop["PopTotal"], axis=0)
st.write(indicators_pop)
# save df
indicators_pop.to_csv(index=False, path_or_buf="../data/clean/population_indicators.csv")
# read fragile states data
fragile_states = pd.read_csv("../data/clean/fragile_states_index.csv", sep=",", na_values='')
st.write(fragile_states.shape)
st.write(fragile_states)
st.write(indicators_pop.shape)
full_set = indicators_pop.merge(fragile_states, left_on=['Country','MidPeriod'], right_on=['country','year'])
query_result = full_set.query("Time == '2005-2010'")
full_set.loc[query_result.index, "change_from_previous_year"] = pd.Series(0, index=query_result.index)
previous = query_result
# year-over-year change of the fragile-states 'total' score per country,
# seeded with 0 for the first period above
for years in ["2010-2015", "2015-2020"]:
    query_result = full_set.query("Time == '" + years + "'")
    full_set.loc[query_result.index, "change_from_previous_year"] = \
        (query_result.set_index("Country")["total"] - previous.set_index("Country")["total"]).round(1).fillna(0).to_numpy()
    previous = full_set.loc[query_result.index, :]
st.write(full_set.shape)
st.write(full_set)
full_set.to_csv(index=False, path_or_buf="../data/clean/full_set.csv")
| StarcoderdataPython |
3346577 | <reponame>brettkoonce/fairscale
from .auto_wrap import auto_wrap, default_auto_wrap_policy, enable_wrap, wrap
| StarcoderdataPython |
class GenericObject(object):
    """Base class for objects persisted in a simple key/value storage backend.

    The storage backend must expose ``create(key, value)``, ``read(key)``,
    ``delete(key)`` and ``ttl(key, time)``. Subclasses can override
    ``STORAGE_PREFIX`` and the ``PROPERTIES_IGNORE_*`` tuples to control how
    instances are serialized.
    """

    # Namespace prefix for storage keys; keys look like "<prefix>:<id>".
    STORAGE_PREFIX = "object"
    # Attribute names skipped when dumping the object to storage.
    PROPERTIES_IGNORE_DUMP = ()
    # Attribute names skipped when loading the object from storage.
    PROPERTIES_IGNORE_LOAD = ()

    def __init__(self, storage, *args, **kwargs):
        """Bind the object to *storage* under the key "<STORAGE_PREFIX>:<id>".

        The id is taken from the ``id`` keyword argument; "changeme" is used
        as a placeholder when it is missing.
        """
        self._storage = storage
        self._storage_id = ":".join((
            self.STORAGE_PREFIX,
            kwargs.get("id", "changeme")
        ))

    def _dump(self):
        """Return a dict of persistable attributes (internal fields excluded)."""
        return {
            k: v
            for k, v
            in self.__dict__.items()
            if k not in self.PROPERTIES_IGNORE_DUMP and k not in ("_storage", "_storage_id",)
        }

    def _load(self, **kwargs):
        """Set attributes from *kwargs*, skipping ignored/internal fields."""
        # Plain loop instead of a side-effect list comprehension.
        for k, v in kwargs.items():
            if k not in self.PROPERTIES_IGNORE_LOAD and k not in ("_storage", "_storage_id",):
                setattr(self, k, v)

    def _load_self(self):
        """Refresh this instance's attributes from the storage backend."""
        self._load(**self.read())

    def _write(self):
        """Persist the current state under this object's storage key."""
        return self._storage.create(self._storage_id, self._dump())

    def create(self):
        """Create (persist) the object in storage."""
        return self._write()

    def read(self):
        """Return the raw stored representation of the object."""
        return self._storage.read(self._storage_id)

    def update(self):
        """Overwrite the stored representation with the current state."""
        return self._write()

    def delete(self):
        """Remove the object from storage."""
        return self._storage.delete(self._storage_id)

    def ttl(self, key, time: int):
        """Set a time-to-live of *time* on *key* via the storage backend."""
        # Bug fix: this previously referenced the non-existent attribute
        # `self.storage` (always raising AttributeError); it is `self._storage`.
        return self._storage.ttl(key, time)
| StarcoderdataPython |
1747047 | <reponame>mdietrichstein/xai-statlog-heart
def create_rf_estimator(verbose, random_state, n_jobs):
    """Return a RandomForestClassifier with pre-tuned hyperparameters."""
    from sklearn.ensemble import RandomForestClassifier
    return RandomForestClassifier(n_estimators=50, min_samples_split=2, min_samples_leaf=2,
                                  max_features='auto', max_depth=30, bootstrap=False,
                                  criterion='gini',
                                  random_state=random_state, verbose=verbose, n_jobs=n_jobs)
def create_xgb_estimator(verbose, random_state, n_jobs):
    """Return an XGBClassifier with pre-tuned hyperparameters."""
    from xgboost import XGBClassifier
    return XGBClassifier(n_estimators=30, max_depth=20, n_jobs=n_jobs, random_state=random_state, verbose=verbose)
def create_knn_estimator(verbose, random_state, n_jobs):
    """Return a 5-neighbour KNeighborsClassifier.

    `verbose` and `random_state` are accepted for interface uniformity with
    the other factories but KNN has no use for them.
    """
    from sklearn.neighbors import KNeighborsClassifier
    return KNeighborsClassifier(n_neighbors=5, n_jobs=n_jobs)
def create_nb_estimator(verbose, random_state, n_jobs):
    """Return a GaussianNB classifier (parameters kept for interface uniformity)."""
    from sklearn.naive_bayes import GaussianNB
    return GaussianNB()
def create_qda_estimator(verbose, random_state, n_jobs):
    """Return a QuadraticDiscriminantAnalysis classifier (parameters kept for uniformity)."""
    from sklearn.discriminant_analysis import QuadraticDiscriminantAnalysis
    return QuadraticDiscriminantAnalysis()
def create_ada_estimator(verbose, random_state, n_jobs):
    """Return an AdaBoostClassifier with default hyperparameters."""
    from sklearn.ensemble import AdaBoostClassifier
    return AdaBoostClassifier(random_state=random_state)
def create_mlp_estimator(verbose, random_state, n_jobs):
    """Return a single-hidden-layer MLPClassifier (alpha=1, up to 200 iterations)."""
    from sklearn.neural_network import MLPClassifier
    return MLPClassifier(alpha=1, max_iter=200, random_state=random_state, verbose=verbose)
def create_fcn_estimator(verbose, random_state, n_jobs):
    """Return a small fully-connected Keras network wrapped as a sklearn classifier.

    The network has two hidden ReLU layers (32 and 64 units, multiplied by
    `scale_factor`) over 13 input features and a single sigmoid output for
    binary classification. `random_state` and `n_jobs` are accepted for
    interface uniformity with the other factories but are not used here.
    """
    import keras
    from keras import layers
    from keras import optimizers
    from keras.wrappers.scikit_learn import KerasClassifier

    def create_model(optimizer='adam', kernel_initializer='glorot_uniform', scale_factor=1):
        model = keras.Sequential([
            layers.Dense(32 * scale_factor, input_dim=13, activation='relu', kernel_initializer=kernel_initializer),
            layers.Dense(64 * scale_factor, activation='relu', kernel_initializer=kernel_initializer),
            layers.Dense(1, activation='sigmoid')
        ])
        # Bug fix: the `optimizer` argument was previously ignored (compile
        # hard-coded 'adam'). The default is now 'adam', so default behavior
        # is unchanged, but an explicitly passed optimizer is honored.
        model.compile(
            loss='binary_crossentropy',
            optimizer=optimizer,
            metrics=['accuracy'])
        return model

    return KerasClassifier(build_fn=create_model, epochs=10, batch_size=256, verbose=verbose)
# Registry mapping human-readable model names to factory functions.
# Every factory shares the signature (verbose, random_state, n_jobs) -> estimator.
classifier_factories = {
    'Random Forest': create_rf_estimator,
    'XGBoost': create_xgb_estimator,
    'K-Nearest Neighbour': create_knn_estimator,
    'Naive Bayes': create_nb_estimator,
    'Quadratic Discriminant Analysis': create_qda_estimator,
    'Adaptive Boosting': create_ada_estimator,
    'Multilayer Perceptron': create_mlp_estimator,
    'Deep Neural Network': create_fcn_estimator,
}
1749331 | # Generated by the protocol buffer compiler. DO NOT EDIT!
# sources: frontend_pb.proto
# plugin: python-betterproto
from dataclasses import dataclass
import betterproto
class Unit(betterproto.Enum):
    """Unit system used for displayed values.

    TODO: should metric be 0? (i.e. the default)
    """

    imperial = 0
    metric = 1
class ThemeVariant(betterproto.Enum):
    """Display theme variant (dark or light)."""

    dark = 0
    light = 1
@dataclass
class RotaryEncoder(betterproto.Message):
    """Rotary-encoder state: step position, button state, and event timestamps.

    Timestamps are floats; presumably seconds, matching SystemSettings.date —
    TODO confirm units.
    """

    step: int = betterproto.int32_field(1)
    last_step_change: float = betterproto.float_field(2)
    button_pressed: bool = betterproto.bool_field(3)
    last_button_down: float = betterproto.float_field(4)
    last_button_up: float = betterproto.float_field(5)
@dataclass
class FrontendDisplaySetting(betterproto.Message):
    """Display preferences for the frontend (theme and unit system).

    TODO: we also need a request version of this message,
    FrontendDisplaySettingsRequest TODO: rename this to FrontendDisplaySettings
    (with the "s" at the end)
    """

    theme: "ThemeVariant" = betterproto.enum_field(1)
    unit: "Unit" = betterproto.enum_field(2)
@dataclass
class SystemSettings(betterproto.Message):
    """System settings: clock date, display brightness, and a sequence number."""

    # We use a double and units of sec because otherwise we'd use uint64 and
    # units of ms, but then mcu_pb.LogEvent needs to have oldUint64 and newUint64
    # instead of oldUint32 and newUint32, and the frontend runs into problems
    # because it can only have 53 bits of precision in the number type (in which
    # case the frontend would then need to use BigInt), which prevents the
    # frontend from working correctly on other things which require
    # LogEvent.oldUint32/newUint32.
    date: float = betterproto.double_field(1)
    # TODO: move display_brightness into FrontendDisplaySetting
    display_brightness: int = betterproto.uint32_field(2)
    seq_num: int = betterproto.uint32_field(3)
@dataclass
class SystemSettingsRequest(betterproto.Message):
    """Request counterpart of SystemSettings (same fields and field numbers)."""

    date: float = betterproto.double_field(1)
    display_brightness: int = betterproto.uint32_field(2)
    seq_num: int = betterproto.uint32_field(3)
| StarcoderdataPython |
1631671 | <filename>reactivex/operators/_sequenceequal.py
from typing import Callable, Iterable, List, Optional, TypeVar, Union
import reactivex
from reactivex import Observable, abc, typing
from reactivex.disposable import CompositeDisposable
from reactivex.internal import default_comparer
_T = TypeVar("_T")
def sequence_equal_(
    second: Union[Observable[_T], Iterable[_T]],
    comparer: Optional[typing.Comparer[_T]] = None,
) -> Callable[[Observable[_T]], Observable[bool]]:
    """Create the `sequence_equal` operator.

    Args:
        second: Observable (or plain iterable) to compare the source against.
        comparer: Optional pairwise equality comparer; defaults to `==`.

    Returns:
        An operator function mapping a source observable to an observable
        that emits a single bool.
    """
    comparer_ = comparer or default_comparer
    # Plain iterables are lifted into an observable so both sides stream.
    second_ = (
        reactivex.from_iterable(second) if isinstance(second, Iterable) else second
    )

    def sequence_equal(source: Observable[_T]) -> Observable[bool]:
        """Determines whether two sequences are equal by comparing the
        elements pairwise using a specified equality comparer.

        Examples:
            >>> res = sequence_equal([1,2,3])
            >>> res = sequence_equal([{ "value": 42 }], lambda x, y: x.value == y.value)
            >>> res = sequence_equal(reactivex.return_value(42))
            >>> res = sequence_equal(
                reactivex.return_value({ "value": 42 }),
                lambda x, y: x.value == y.value
            )

        Args:
            source: Source observable to compare.

        Returns:
            An observable sequence that contains a single element which
            indicates whether both sequences are of equal length and their
            corresponding elements are equal according to the specified
            equality comparer.
        """
        first = source

        def subscribe(
            observer: abc.ObserverBase[bool],
            scheduler: Optional[abc.SchedulerBase] = None,
        ):
            # Completion flags for the left (first) and right (second)
            # sequences; single-element lists so the closures can mutate them.
            donel = [False]
            doner = [False]
            # Buffers of elements emitted by one side and not yet matched
            # against the other side.
            ql: List[_T] = []
            qr: List[_T] = []

            def on_next1(x: _T) -> None:
                # Left element: match against a buffered right element if
                # available; if the right side already completed with nothing
                # buffered, the sequences have different lengths.
                if len(qr) > 0:
                    v = qr.pop(0)
                    try:
                        equal = comparer_(v, x)
                    except Exception as e:
                        observer.on_error(e)
                        return

                    if not equal:
                        observer.on_next(False)
                        observer.on_completed()

                elif doner[0]:
                    observer.on_next(False)
                    observer.on_completed()

                else:
                    ql.append(x)

            def on_completed1() -> None:
                donel[0] = True

                if not ql:
                    # Left finished with nothing pending: any unmatched right
                    # element means "not equal"; both done means "equal".
                    if qr:
                        observer.on_next(False)
                        observer.on_completed()
                    elif doner[0]:
                        observer.on_next(True)
                        observer.on_completed()

            def on_next2(x: _T):
                # Mirror of on_next1 for the right sequence.
                if len(ql) > 0:
                    v = ql.pop(0)
                    try:
                        equal = comparer_(v, x)
                    except Exception as exception:
                        observer.on_error(exception)
                        return

                    if not equal:
                        observer.on_next(False)
                        observer.on_completed()

                elif donel[0]:
                    observer.on_next(False)
                    observer.on_completed()

                else:
                    qr.append(x)

            def on_completed2():
                # Mirror of on_completed1 for the right sequence.
                doner[0] = True

                if not qr:
                    if len(ql) > 0:
                        observer.on_next(False)
                        observer.on_completed()
                    elif donel[0]:
                        observer.on_next(True)
                        observer.on_completed()

            subscription1 = first.subscribe(
                on_next1, observer.on_error, on_completed1, scheduler=scheduler
            )
            subscription2 = second_.subscribe(
                on_next2, observer.on_error, on_completed2, scheduler=scheduler
            )
            return CompositeDisposable(subscription1, subscription2)

        return Observable(subscribe)

    return sequence_equal
__all__ = ["sequence_equal_"]
| StarcoderdataPython |
191241 | <reponame>kamyabdesign/DRF_Django
from django.db import models
from rest_framework import serializers
from blog.models import Article
class ArticleSerializer(serializers.ModelSerializer):
    """DRF serializer exposing every field of the blog `Article` model."""

    class Meta:
        model = Article
        fields = '__all__'
| StarcoderdataPython |
1653282 | <reponame>RvstFyth/discordballz
"""
Manages the selection phase.
--
Author : DrLarck
Last update : 19/10/19 (DrLarck)
"""
# dependancies
import asyncio
# utils
# translation
from utility.translation.translator import Translator
# displayer
from utility.cog.displayer.character import Character_displayer
from utility.cog.displayer.team import Team_displayer
# wait for
from utility.cog.fight_system.wait_for.player_choice import Player_choice
# selection phase manager
class Selection_phase:
    """
    Manages the selection phase of a fight: for each living character of the
    player's team, display the available actions (sequence, ki charge, defend,
    abilities, check, flee), collect the player's (or NPC's) choice, and
    return the resulting move list.

    - Parameter :

    `client`, `ctx`, `player` : discord.py client / command context / player.

    `turn` : current turn number (turn 1 has a restricted action set).
    """

    # attribute
    def __init__(self, client, ctx, player, turn):
        self.client = client
        self.player = player
        self.ctx = ctx
        self.turn = turn

    # method
    async def start_selection(self, player_team, team):
        """
        `coroutine`

        Start the selection phase.

        - Parameter :

        `player_team` : index (0 or 1) of the player's team in `team`.

        `team` : list of the two team lists.

        --

        Return : list of move dicts ({"move", "target"}), or "flee".
        """
        # init
        translation = Translator(self.client.db, self.player)
        #_ = await translation.translate()

        # define the bot's team : the bot plays the team the player is not in
        if(player_team == 0):
            bot_team = team[1]
            bot_enemy = team[0]

        elif(player_team == 1):
            # Bug fix: both branches previously assigned bot_team = team[1]
            # and bot_enemy = team[0]; when the player is team 1 the bot must
            # control team 0 and target team 1.
            bot_team = team[0]
            bot_enemy = team[1]

        # NOTE(review): enemy_team is computed but never used below.
        if(player_team == 0):
            enemy_team = 1

        else:
            enemy_team = 0

        player_team = team[player_team]
        possible_action = []  # list of possible actions (str)
        all_character = team[0]+team[1]
        order = 1

        # stores the move in it.
        move_list = []  # stores the move_choice

        # choice
        choice = Player_choice(self.client, self.player, True)

        for character in player_team:
            await asyncio.sleep(0)
            # init
            move_choice = {
                "move" : None,
                "target" : None
            }

            if(character != None):
                if(character.health.current > 0 and character.posture.stunned == False):
                    if(character.is_npc == False):
                        # displying the character
                        displayer = Character_displayer(self.client, self.ctx, self.player)
                        displayer.character = character
                        await displayer.display(combat_format = True)
                        await asyncio.sleep(2)

                        # displaying the kit
                        if(self.turn == 1): # first turn : no sequence / ki charge
                            kit = "`1. Skip the turn ⏩` | "
                            kit += "`3. Defend 🏰`\n"

                            # init the possible actions
                            possible_action = ["check", "flee", "1", "3"]

                        else:
                            kit = "`1. Sequence 👊` | "
                            kit += "`2. Ki charge 🔥` | "
                            kit += "`3. Defend 🏰`"

                            # init the possible actions
                            possible_action = ["check", "flee", "1", "2", "3"]

                        if(len(character.ability) > 0): # if the character has an ability
                            # init
                            kit += "\n\n__Abilities__ :\n\n"
                            ability_index = 4
                            new_ability_list = []

                            for ability in character.ability:
                                await asyncio.sleep(0)

                                if(character.ability_sorted == False):
                                    # create a fake instance of the ability if not sorted
                                    ability = ability(
                                        self.client,
                                        self.ctx,
                                        character,
                                        None,
                                        player_team,
                                        team[1]
                                    )
                                    await ability.init()

                                    new_ability_list.append(ability)

                                await ability.init()

                                # reduce the cd by one
                                if(ability.cooldown > 0):
                                    ability.cooldown -= 1

                                # add a new possible action
                                possible_action.append(str(ability_index))

                                # check if the character could use the ability
                                if(character.ki.current >= ability.cost and ability.cooldown <= 0):
                                    kit += f"`{ability_index}. {ability.name}`{ability.icon} ({character.ki.current} / {ability.cost:,} :fire:)"

                                    if(ability.tooltip != None): # add the tooltip after the ability
                                        kit += f" : *{ability.tooltip}*"

                                else:
                                    # unusable ability : show it struck through
                                    if(ability.cooldown > 0):
                                        kit += f"**Cooldown** : **{ability.cooldown}** :hourglass:"

                                    kit += f"~~`{ability_index}. {ability.name}`{ability.icon} ({character.ki.current} / {ability.cost:,} :fire:)~~ "

                                    if(ability.tooltip != None): # add the tooltip after the ability
                                        kit += f"~~ : *{ability.tooltip}*~~"

                                kit += "\n--\n"
                                ability_index += 1

                            if(character.ability_sorted == False):
                                # replace the current character ability list by the new one
                                character.ability = new_ability_list
                                character.ability_sorted = True

                        else:
                            kit += "\n"

                        kit += f"\nTo **flee** the fight, type `flee`, to **take a look at** a specific unit, type `check [unit index]`."

                        # ask for the player's action
                        decision = False

                        # main loop : repeat until a valid decision is made
                        while(decision == False):
                            await asyncio.sleep(0)
                            # init
                            target_display = ""
                            unit_index = 1

                            # display the actions
                            actions = f"<@{self.player.id}> Please select an action among the following for #{order} {character.image.icon}**{character.info.name}**{character.type.icon} - {character.ki.current} :fire:\n{kit}"
                            await self.ctx.send(actions)

                            if(self.turn == 1): # manages the first turn possible actions
                                move = await choice.wait_for_choice(possible_action, all_character)

                                if(type(move) == str):
                                    if(move.lower() == "flee"):
                                        return("flee")

                                    elif(move.lower() == "1"):
                                        move_choice["move"] = "skip"
                                        move_list.append(move_choice)
                                        decision = True

                                    elif(move.lower() == "3"):
                                        move_choice["move"] = 3
                                        move_list.append(move_choice)
                                        decision = True

                                elif(type(move) == list):
                                    if(move[0].lower() == "check" and move[1].isdigit()): # manages the check option
                                        index = int(move[1]) - 1
                                        to_display = all_character[index]

                                        displayer.character = to_display
                                        # Bug fix: the mention expression was corrupted
                                        # ("self.<EMAIL>") ; restored to self.player.id as in
                                        # the other messages of this method.
                                        await self.ctx.send(f"<@{self.player.id}> Here are some informations about {to_display.image.icon}**{to_display.info.name}**{to_display.type.icon} :")
                                        await displayer.display(combat_format = True)
                                        await asyncio.sleep(2)

                                        decision = False

                            else: # turn > 1
                                move = await choice.wait_for_choice(possible_action, all_character)

                                if(type(move) == str):
                                    if(move.lower() == "flee"):
                                        return("flee")

                                    if(move.isdigit()): # convert the str to int
                                        move = int(move)

                                        # basic choice
                                        if(move > 0 and move <= 3):
                                            if(move == 1): # sequence
                                                team_display = Team_displayer(self.client, self.ctx, self.player, team[0], team[1])
                                                targetable_team_a, targetable_team_b = await team_display.get_targetable("sequence")

                                                # allies
                                                if(len(targetable_team_a) > 0):
                                                    target_display += "\n__Target__ : \n🔵 - Your team :\n"

                                                    # retrieve all the targetable units
                                                    unit_index = 1
                                                    display_targetable, unit_index = await self.display_targetable(targetable_team_a, unit_index)
                                                    target_display += display_targetable

                                                # enemies
                                                if(len(targetable_team_b) > 0):
                                                    target_display += "\n🔴 - Enemy team :\n"

                                                    # retrieve all the targetable enemies
                                                    display_targetable, unit_index = await self.display_targetable(targetable_team_b, unit_index)
                                                    target_display += display_targetable

                                                # display the targets
                                                # Bug fix: corrupted mention expression restored
                                                # to self.player.id.
                                                await self.ctx.send(f"<@{self.player.id}> Please select a target among the following for `Sequence 👊` :\n{target_display}")

                                                targetable = targetable_team_a + targetable_team_b

                                                target = await choice.wait_for_target(targetable)

                                                move_choice["move"], move_choice["target"] = move, target
                                                move_list.append(move_choice)

                                                decision = True

                                            elif(move == 2):
                                                move_choice["move"] = 2
                                                move_list.append(move_choice)
                                                decision = True

                                            elif(move == 3):
                                                move_choice["move"] = 3
                                                move_list.append(move_choice)
                                                decision = True

                                        # ability choice
                                        # now check if the chosen ability is possible
                                        elif(move > 3 and move <= len(character.ability)+3):
                                            # -4 because we start counting at 4
                                            # 4(choice) == first ability
                                            ability = character.ability[move-4]

                                            # check if the ability needs a target
                                            need_target = ability.need_target

                                            # if the ability is not on cooldown
                                            if(ability.cooldown <= 0):
                                                # check if the character has enough ki
                                                if(character.ki.current >= ability.cost):
                                                    # check if it needs a target or not
                                                    if(need_target):
                                                        team_display = Team_displayer(self.client, self.ctx, self.player, team[0], team[1])
                                                        targetable_team_a, targetable_team_b = await team_display.get_targetable("ability", ability = ability)

                                                        # allies
                                                        if(len(targetable_team_a) > 0):
                                                            target_display += "\n__Target__ : \n🔵 - Your team :\n"

                                                            # retrieve all the targetable units
                                                            unit_index = 1
                                                            display_targetable, unit_index = await self.display_targetable(targetable_team_a, unit_index)
                                                            target_display += display_targetable

                                                        # enemies
                                                        if(len(targetable_team_b) > 0):
                                                            target_display += "\n🔴 - Enemy team :\n"

                                                            # retrieve all the targetable enemies
                                                            display_targetable, unit_index = await self.display_targetable(targetable_team_b, unit_index)
                                                            target_display += display_targetable

                                                        # send the message
                                                        await self.ctx.send(f"<@{self.player.id}> Please select a target among the following for `{ability.name}`{ability.icon} : \n{target_display}")

                                                        # get all the targetable units
                                                        targetable = targetable_team_a + targetable_team_b

                                                        # wait for target
                                                        target = await choice.wait_for_target(targetable)

                                                        move_choice["move"], move_choice["target"] = move, target
                                                        move_list.append(move_choice)

                                                        decision = True

                                                    else: # doesn't need a target
                                                        move_choice["move"] = move
                                                        move_list.append(move_choice)
                                                        decision = True

                                                else:
                                                    decision = False
                                                    await self.ctx.send(f"<@{self.player.id}> 🔥 ⚠ Not enough ki : {character.ki.current} / {ability.cost}")
                                                    await asyncio.sleep(1)

                                            else: # ability is on cooldown
                                                decision = False
                                                await self.ctx.send(f"<@{self.player.id}> ⏳ ⚠ Ability on cooldown : {ability.cooldown} turns.")
                                                await asyncio.sleep(1)

                                elif(type(move) == list):
                                    if(move[0].lower() == "check" and move[1].isdigit()): # manages the check option
                                        index = int(move[1]) - 1
                                        to_display = all_character[index]

                                        displayer.character = to_display
                                        await self.ctx.send(f"<@{self.player.id}> Here are some informations about {to_display.image.icon}**{to_display.info.name}**{to_display.type.icon} :")
                                        await displayer.display(combat_format = True)
                                        await asyncio.sleep(2)

                                        decision = False

                    else: # the character is a bot
                        # sort the bot abilities
                        if(len(character.ability) > 0): # if the character has an ability
                            # init
                            new_ability_list = []

                            for ability in character.ability:
                                await asyncio.sleep(0)

                                if(character.ability_sorted == False):
                                    # create a fake instance of the ability if not sorted
                                    ability = ability(
                                        self.client,
                                        self.ctx,
                                        None,
                                        None,
                                        None,
                                        None
                                    )

                                    new_ability_list.append(ability)

                                # reduce the cd by one
                                if(ability.cooldown > 0):
                                    ability.cooldown -= 1

                            if(character.ability_sorted == False):
                                # replace the current character ability list by the new one
                                character.ability = new_ability_list
                                character.ability_sorted = True

                        # generate a move for the npc
                        bot_move = await character.bot(self.client, self.ctx, self.player, bot_team, bot_enemy, self.turn)
                        move_list.append(bot_move)

                    # end main while
                    # end for character in team
                    order += 1

                # dead
                elif(character.health.current <= 0): # trigger on death effects
                    if(character.on_death_triggered == False):
                        if(len(character.on_death) > 0):
                            # now trigger all the effects
                            for on_death_ in character.on_death:
                                await asyncio.sleep(0)
                                await on_death_.apply()

                            character.on_death_triggered = True

                    # NOTE(review): dead characters contribute a None move; the
                    # original indentation here was ambiguous — confirm placement.
                    move_list.append(None)

        # end of method
        return(move_list)

    async def display_targetable(self, _list, unit_index):
        """
        `coroutine`

        Return the display of the targetable units in the list.

        - Parameter :

        `_list` : List of targetable units to display.

        `unit_index` : The index to count with.

        --

        Return : str, int
        """
        # init
        target_display = ""

        for unit in _list:
            await asyncio.sleep(0)

            # get the posture
            posture, posture_icon = await unit.posture.get_posture()

            health_percent = int((unit.health.current * 100) / unit.health.maximum)
            target_display += f"{unit_index}. {unit.image.icon}**{unit.info.name}**{unit.type.icon} - **{unit.health.current:,}**/**{unit.health.maximum:,}**:hearts: *({health_percent} %)* : {posture}{posture_icon}\n"

            # get the ally's bonus
            if(len(unit.bonus) > 0):
                target_display += f"__Bonus__ : "
                bonus_index = 0
                for bonus in unit.bonus:
                    await asyncio.sleep(0)

                    if(bonus_index == 0):
                        if(bonus.is_permanent):
                            target_display += f"{bonus.icon}[{bonus.stack}|*∞*]"

                        else: # non perma bonus
                            target_display += f"{bonus.icon}[{bonus.stack}|{bonus.duration}]"

                    else:
                        if(bonus.is_permanent):
                            target_display += f", {bonus.icon}[{bonus.stack}|*∞*]"

                        else:
                            target_display += f", {bonus.icon}[{bonus.stack}|{bonus.duration}]"

                    bonus_index += 1

                target_display += "\n"

            # get the ally's malus
            if(len(unit.malus) > 0):
                target_display += f"__Malus__ : "
                malus_index = 0
                for malus in unit.malus:
                    await asyncio.sleep(0)

                    if(malus_index == 0):
                        target_display += f"{malus.icon}[{malus.stack}|{malus.duration}]"

                    else:
                        target_display += f", {malus.icon}[{malus.stack}|{malus.duration}]"

                    malus_index += 1

                target_display += "\n\n"

            unit_index += 1

        return(target_display, unit_index)
4824537 | <gh_stars>0
#!/usr/bin/python
#
# Copyright 2018-2021 Polyaxon, Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
import pytest
from marshmallow import ValidationError
from polyaxon.exceptions import PolyaxonfileError
from polyaxon.k8s import k8s_schemas
from polyaxon.polyaxonfile import check_polyaxonfile
from polyaxon.polyaxonfile.specs import (
CompiledOperationSpecification,
OperationSpecification,
)
from polyaxon.polyflow import V1CompiledOperation, V1Hyperband
from polyaxon.polyflow.io import V1IO
from polyaxon.polyflow.matrix import V1GridSearch
from polyaxon.polyflow.matrix.params import V1HpChoice, V1HpLinSpace
from polyaxon.polyflow.params import V1Param
from tests.utils import BaseTestCase
@pytest.mark.polyaxonfile_mark
class TestPolyaxonfileWithTypes(BaseTestCase):
    def test_using_untyped_params_raises(self):
        """Passing params to a spec whose inputs are untyped must fail the check."""
        with self.assertRaises(PolyaxonfileError):
            check_polyaxonfile(
                polyaxonfile=os.path.abspath(
                    "tests/fixtures/typing/untyped_params.yml"
                ),
                is_cli=False,
            )
    def test_no_params_for_required_inputs_outputs_raises(self):
        """Missing required inputs raise eagerly; required outputs validate lazily."""
        # Get compiled_operation data
        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        # Inputs don't have delayed validation by default
        with self.assertRaises(ValidationError):
            CompiledOperationSpecification.apply_operation_contexts(run_config)

        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_outputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        # Outputs have delayed validation by default
        CompiledOperationSpecification.apply_operation_contexts(run_config)
    def test_validation_for_required_inputs_outputs_raises(self):
        """validate_params mirrors the eager/lazy split between inputs and outputs."""
        # Get compiled_operation data
        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        # Inputs don't have delayed validation by default
        with self.assertRaises(ValidationError):
            run_config.validate_params(is_template=False, check_runs=True)

        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_outputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        # Outputs have delayed validation by default
        run_config.validate_params(is_template=False, check_runs=True)
    def test_required_inputs_with_params(self):
        """Applied params fill required inputs; bool inputs toggle flag args; extra/unknown params raise."""
        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        # Contexts cannot be applied before the required params are provided.
        with self.assertRaises(ValidationError):
            CompiledOperationSpecification.apply_operation_contexts(run_config)
        assert run_config.inputs[0].value is None
        assert run_config.inputs[1].value is None
        run_config.apply_params(
            params={"loss": {"value": "bar"}, "flag": {"value": False}}
        )
        assert run_config.inputs[0].value == "bar"
        assert run_config.inputs[1].value is False
        run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
        run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
        assert run_config.version == 1.1
        assert run_config.tags == ["foo", "bar"]
        assert run_config.run.container.image == "my_image"
        assert run_config.run.container.command == ["/bin/sh", "-c"]
        # flag=False renders no flag (note the trailing space from the template).
        assert run_config.run.container.args == "video_prediction_train --loss=bar "

        run_config = V1CompiledOperation.read(
            [
                os.path.abspath("tests/fixtures/typing/required_inputs.yml"),
                {"kind": "compiled_operation"},
            ]
        )
        assert run_config.inputs[0].value is None
        assert run_config.inputs[1].value is None
        run_config.apply_params(
            params={"loss": {"value": "bar"}, "flag": {"value": True}}
        )
        assert run_config.inputs[0].value == "bar"
        assert run_config.inputs[1].value is True
        run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
        run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
        assert run_config.version == 1.1
        assert run_config.tags == ["foo", "bar"]
        assert run_config.run.container.image == "my_image"
        assert run_config.run.container.command == ["/bin/sh", "-c"]
        assert (
            run_config.run.container.args == "video_prediction_train --loss=bar --flag"
        )

        # Adding extra value raises
        with self.assertRaises(ValidationError):
            run_config.validate_params(
                params={
                    "loss": {"value": "bar"},
                    "flag": {"value": True},
                    "value": {"value": 1.1},
                }
            )
        with self.assertRaises(PolyaxonfileError):
            check_polyaxonfile(
                polyaxonfile=os.path.abspath(
                    "tests/fixtures/typing/required_inputs.yml"
                ),
                params={"loss": {"value": "bar"}, "value": {"value": 1.1}},
                is_cli=False,
            )

        # Adding non valid params raises
        with self.assertRaises(ValidationError):
            run_config.validate_params(params={"value": {"value": 1.1}})
    def test_required_inputs_with_arg_format(self):
        """Same flow as test_required_inputs_with_params, for the arg-format fixture."""
        run_config = V1CompiledOperation.read(
            [
                os.path.abspath(
                    "tests/fixtures/typing/required_inputs_with_arg_format.yml"
                ),
                {"kind": "compiled_operation"},
            ]
        )
        # Contexts cannot be applied before the required params are provided.
        with self.assertRaises(ValidationError):
            CompiledOperationSpecification.apply_operation_contexts(run_config)
        assert run_config.inputs[0].value is None
        assert run_config.inputs[1].value is None
        run_config.apply_params(
            params={"loss": {"value": "bar"}, "flag": {"value": False}}
        )
        assert run_config.inputs[0].value == "bar"
        assert run_config.inputs[1].value is False
        run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
        run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
        assert run_config.version == 1.1
        assert run_config.tags == ["foo", "bar"]
        assert run_config.run.container.image == "my_image"
        assert run_config.run.container.command == ["/bin/sh", "-c"]
        assert run_config.run.container.args == "video_prediction_train --loss=bar "

        run_config = V1CompiledOperation.read(
            [
                os.path.abspath(
                    "tests/fixtures/typing/required_inputs_with_arg_format.yml"
                ),
                {"kind": "compiled_operation"},
            ]
        )
        assert run_config.inputs[0].value is None
        assert run_config.inputs[1].value is None
        run_config.apply_params(
            params={"loss": {"value": "bar"}, "flag": {"value": True}}
        )
        assert run_config.inputs[0].value == "bar"
        assert run_config.inputs[1].value is True
        run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
        run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
        assert run_config.version == 1.1
        assert run_config.tags == ["foo", "bar"]
        assert run_config.run.container.image == "my_image"
        assert run_config.run.container.command == ["/bin/sh", "-c"]
        assert (
            run_config.run.container.args == "video_prediction_train --loss=bar --flag"
        )

        # Adding extra value raises
        with self.assertRaises(ValidationError):
            run_config.validate_params(
                params={
                    "loss": {"value": "bar"},
                    "flag": {"value": True},
                    "value": {"value": 1.1},
                }
            )
        # NOTE(review): this re-check uses required_inputs.yml, not the
        # _with_arg_format fixture — possibly a copy-paste leftover; confirm intent.
        with self.assertRaises(PolyaxonfileError):
            check_polyaxonfile(
                polyaxonfile=os.path.abspath(
                    "tests/fixtures/typing/required_inputs.yml"
                ),
                params={"loss": {"value": "bar"}, "value": {"value": 1.1}},
                is_cli=False,
            )

        # Adding non valid params raises
        with self.assertRaises(ValidationError):
            run_config.validate_params(params={"value": {"value": 1.1}})
    def test_matrix_file_passes_int_float_types(self):
        """Grid-search matrix params keep their int/float types after compilation."""
        plxfile = check_polyaxonfile(
            polyaxonfile=os.path.abspath(
                "tests/fixtures/typing/matrix_file_with_int_float_types.yml"
            ),
            is_cli=False,
            to_op=False,
        )
        # Get compiled_operation data
        run_config = OperationSpecification.compile_operation(plxfile)
        run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
        assert run_config.version == 1.1
        assert run_config.has_pipeline
        assert run_config.is_dag_run is False
        assert isinstance(run_config.matrix.params["param1"], V1HpChoice)
        assert isinstance(run_config.matrix.params["param2"], V1HpChoice)
        # int and float choice values must not be coerced to strings
        assert run_config.matrix.params["param1"].to_dict() == {
            "kind": "choice",
            "value": [1, 2],
        }
        assert run_config.matrix.params["param2"].to_dict() == {
            "kind": "choice",
            "value": [3.3, 4.4],
        }
        assert isinstance(run_config.matrix, V1GridSearch)
        assert run_config.matrix.concurrency == 2
        assert run_config.matrix.kind == V1GridSearch.IDENTIFIER
        assert run_config.matrix.early_stopping is None
    def test_matrix_job_file_passes_int_float_types(self):
        """Same int/float type preservation, for the job variant of the matrix fixture."""
        plxfile = check_polyaxonfile(
            polyaxonfile=os.path.abspath(
                "tests/fixtures/typing/matrix_job_file_with_int_float_types.yml"
            ),
            is_cli=False,
            to_op=False,
        )
        # Get compiled_operation data
        run_config = OperationSpecification.compile_operation(plxfile)
        run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
        assert run_config.version == 1.1
        assert isinstance(run_config.matrix.params["param1"], V1HpChoice)
        assert isinstance(run_config.matrix.params["param2"], V1HpChoice)
        assert run_config.matrix.params["param1"].to_dict() == {
            "kind": "choice",
            "value": [1, 2],
        }
        assert run_config.matrix.params["param2"].to_dict() == {
            "kind": "choice",
            "value": [3.3, 4.4],
        }
        assert isinstance(run_config.matrix, V1GridSearch)
        assert run_config.matrix.concurrency == 2
        assert run_config.matrix.kind == V1GridSearch.IDENTIFIER
        assert run_config.matrix.early_stopping is None
    def test_matrix_file_with_required_inputs_and_wrong_matrix_type_fails(self):
        """A matrix whose param types don't match the required inputs must fail the check."""
        with self.assertRaises(PolyaxonfileError):
            check_polyaxonfile(
                polyaxonfile=os.path.abspath(
                    "tests/fixtures/typing/matrix_job_required_inputs_file_wrong_matrix_type.yml"
                ),
                is_cli=False,
            )
def test_matrix_file_with_required_inputs_passes(self):
    """A hyperband matrix file providing all required inputs compiles cleanly."""
    plx_file = check_polyaxonfile(
        polyaxonfile=os.path.abspath(
            "tests/fixtures/typing/matrix_job_required_inputs_file.yml"
        ),
        is_cli=False,
    )
    run_config = OperationSpecification.compile_operation(plx_file)
    run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
    assert run_config.version == 1.1
    assert isinstance(run_config.matrix, V1Hyperband)
    # Search-space params keep their declared distribution types.
    assert isinstance(run_config.matrix.params["lr"], V1HpLinSpace)
    assert isinstance(run_config.matrix.params["loss"], V1HpChoice)
    assert run_config.matrix.params["lr"].to_dict() == {
        "kind": "linspace",
        "value": {"start": 0.01, "stop": 0.1, "num": 5},
    }
    assert run_config.matrix.params["loss"].to_dict() == {
        "kind": "choice",
        "value": ["MeanSquaredError", "AbsoluteDifference"],
    }
    assert run_config.matrix.concurrency == 2
    assert isinstance(run_config.matrix, V1Hyperband)
    assert run_config.matrix.kind == V1Hyperband.IDENTIFIER
    assert run_config.matrix.early_stopping is None
def test_run_simple_file_passes(self):
    """Validate and apply params on a simple run spec, then resolve its contexts."""
    run_config = V1CompiledOperation.read(
        [
            os.path.abspath("tests/fixtures/typing/run_cmd_simple_file.yml"),
            {"kind": "compiled_operation"},
        ]
    )
    assert run_config.inputs[0].value == "MeanSquaredError"
    assert run_config.inputs[1].value is None
    # validate_params() without args must not mutate the declared inputs.
    validated_params = run_config.validate_params()
    assert run_config.inputs[0].value == "MeanSquaredError"
    assert run_config.inputs[1].value is None
    assert {
        "loss": V1Param(value="MeanSquaredError"),
        "num_masks": V1Param(value=None),
    } == {p.name: p.param for p in validated_params}
    # Contexts cannot be applied while a required param is still None.
    with self.assertRaises(ValidationError):
        CompiledOperationSpecification.apply_operation_contexts(run_config)
    validated_params = run_config.validate_params(
        params={"num_masks": {"value": 100}}
    )
    assert {
        "loss": V1Param(value="MeanSquaredError"),
        "num_masks": V1Param(value=100),
    } == {p.name: p.param for p in validated_params}
    # Args still hold unresolved templates until runtime contexts are applied.
    assert run_config.run.container.args == [
        "video_prediction_train",
        "--num_masks={{num_masks}}",
        "--loss={{loss}}",
    ]
    with self.assertRaises(ValidationError):
        # Applying context before applying params
        CompiledOperationSpecification.apply_operation_contexts(run_config)
    run_config.apply_params(params={"num_masks": {"value": 100}})
    run_config = CompiledOperationSpecification.apply_operation_contexts(run_config)
    run_config = CompiledOperationSpecification.apply_runtime_contexts(run_config)
    assert run_config.version == 1.1
    assert run_config.tags == ["foo", "bar"]
    container = run_config.run.container
    assert isinstance(container, k8s_schemas.V1Container)
    assert container.image == "my_image"
    assert container.command == ["/bin/sh", "-c"]
    # After context application, templates are fully substituted.
    assert container.args == [
        "video_prediction_train",
        "--num_masks=100",
        "--loss=MeanSquaredError",
    ]
def test_run_with_refs(self):
    """Params referencing run/op outputs require a matching context to apply."""
    # Get compiled_operation data
    run_config = V1CompiledOperation.read(
        [
            os.path.abspath("tests/fixtures/typing/run_with_refs.yml"),
            {"kind": "compiled_operation"},
        ]
    )
    # A ref param points at another run's output instead of a literal value.
    params = {
        "num_masks": {"value": 2},
        "model_path": {
            "ref": "runs.64332180bfce46eba80a65caf73c5396",
            "value": "outputs.doo",
        },
    }
    validated_params = run_config.validate_params(params=params)
    param_specs_by_name = {p.name: p.param for p in validated_params}
    assert param_specs_by_name == {
        "num_masks": V1Param(value=2),
        "model_path": V1Param(
            ref="runs.64332180bfce46eba80a65caf73c5396", value="outputs.doo"
        ),
    }
    ref_param = param_specs_by_name["model_path"]
    assert ref_param.to_dict() == params["model_path"]
    # Applying without a context for the referenced output must fail.
    with self.assertRaises(ValidationError):
        run_config.apply_params(params=params)
    # Passing correct context
    run_config.apply_params(
        params=params,
        context={
            "runs.64332180bfce46eba80a65caf73c5396.outputs.doo": V1IO(
                name="model_path",
                value="model_path",
                is_optional=True,
                type="path",
            )
        },
    )
    # New params
    params = {
        "num_masks": {"value": 2},
        "model_path": {"ref": "ops.A", "value": "outputs.doo"},
    }
    validated_params = run_config.validate_params(params=params)
    param_specs_by_name = {p.name: p.param for p in validated_params}
    assert param_specs_by_name == {
        "num_masks": V1Param(value=2),
        "model_path": V1Param(ref="ops.A", value="outputs.doo"),
    }
    ref_param = param_specs_by_name["model_path"]
    assert ref_param.to_dict() == params["model_path"]
    # Same behavior for op-level refs: context required.
    with self.assertRaises(ValidationError):
        run_config.apply_params(params=params)
    run_config.apply_params(
        params=params,
        context={
            "ops.A.outputs.doo": V1IO(
                name="model_path",
                value="model_path",
                is_optional=True,
                type="path",
            )
        },
    )
| StarcoderdataPython |
1602039 | <gh_stars>1-10
"""
This file expands all mujoco include nodes (i.e., it flatten the file).
This can be a useful utility for debugging.
"""
import argparse
from lisdf.parsing.mjcf import MJCFVisitorFlatten
def main() -> None:
    """Flatten the given MJCF file by expanding all include nodes and print it."""
    # Parse arguments inside main (not at import time) so importing this
    # module has no side effects and cannot SystemExit on unrelated argv.
    parser = argparse.ArgumentParser(description=__doc__)
    parser.add_argument("file", help="path to the MJCF XML file to flatten")
    args = parser.parse_args()
    visitor = MJCFVisitorFlatten()
    node = visitor.load_file(args.file)
    print(node)


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1665506 | #!/usr/bin/env python
# -*- coding: utf-8 -*-
"""
SleekXMPP: The Sleek XMPP Library
Copyright (C) 2010 <NAME>
This file is part of SleekXMPP.
See the file LICENSE for copying permission.
"""
import sys
import logging
import getpass
from optparse import OptionParser
import sleekxmpp
import chat.utils
CHATROOM_SCRIPT = """\
Users register to use Kvoti on the Kvoti homepage at kvoti.technology.
Once they have signed up and verified their email account, a user can then set up their own Kvoti network http://kvoti.technology/[networkname]/.
Basic Information
Name: The given name of a Kvoti network
Theme: The visual theme of a Kvoti network
Type: The type of business sector, i.e. business, charity, social, voluntary etc.
Description: A brief description of the network
Size Cap: Determines the maximum number of users on the network.
All basic setup information can be changed at any time via the ‘Settings’ menu by the network Administrator.
Roles: Sets the number and name of the roles in a Kvoti network. ‘Administrator’ and ‘Member’ roles are set as defaults.
Example: A Sexual Health Service might specify the following roles:
Counsellor, GP, Nurse Manager, Sexual Health Adviser
Permissions: Sets the permissions for each role; determines how roles communicate with each other (IM, video, audio).
"""
# Python versions before 3.0 do not use UTF-8 encoding
# by default. To ensure that Unicode is handled properly
# throughout SleekXMPP, we will set the default encoding
# ourselves to UTF-8.
if sys.version_info < (3, 0):
    from sleekxmpp.util.misc_ops import setdefaultencoding
    setdefaultencoding('utf8')
else:
    # Python 3 has no raw_input; alias it so later code can call
    # raw_input() regardless of interpreter version.
    raw_input = input

# Need to query the domain here as doing it inside SendMsgBot doesnt work
DOMAIN = chat.utils.domain()
class SendMsgBot(sleekxmpp.ClientXMPP):
    """
    A basic SleekXMPP bot that will log in, join the main group chat room,
    send a scripted sequence of messages, and then log out.
    """

    def __init__(self, jid, password):
        # Keep our own JID around; handlers use it as the 'from' address.
        self.me = jid
        sleekxmpp.ClientXMPP.__init__(self, jid, password)
        # The session_start event will be triggered when
        # the bot establishes its connection with the server
        # and the XML streams are ready for use. We want to
        # listen for this event so that we we can initialize
        # our roster.
        self.add_event_handler("session_start", self.start, threaded=True)
        self.add_event_handler("presence", self.presence)
        #self.add_event_handler("groupchat_message", self.group_message)

    def start(self, event):
        """
        Process the session_start event.

        Typical actions for the session_start event are
        requesting the roster and broadcasting an initial
        presence stanza.

        Arguments:
            event -- An empty dictionary. The session_start
                     event does not provide any additional
                     data.
        """
        self.send_presence()
        self.get_roster()
        # The shared MUC room for this domain.
        self.room = "main@muc.%s" % DOMAIN
        self.plugin['xep_0045'].joinMUC(self.room,
                                        "mark",
                                        # If a room password is needed, use:
                                        # password=<PASSWORD>,
                                        pfrom=self.me,
                                        wait=True)
        # Announce ourselves to a couple of known users via direct chat.
        for recipient in ("sarah", "ross"):
            self.send_message(mto=jid(recipient),
                              mfrom=self.me,
                              mbody="hi, this is your friendly chat bot",
                              mtype='chat')
        # TODO what event should I wait on before sending the group message?
        # cant see how to wait on response to configureRoom
        from time import sleep
        sleep(2)
        # Post the scripted onboarding text line by line, skipping blanks.
        for msg in CHATROOM_SCRIPT.splitlines():
            msg = msg.strip()
            if msg:
                self.send_message(mto=self.room,
                                  mbody=msg,
                                  mtype='groupchat')

    def presence(self, pr):
        print "presence", pr
        # TODO only do this once (when joined room), not on each presence
        # NOTE(review): self.room is assigned only in start(); if a presence
        # stanza arrives first this raises AttributeError — confirm ordering.
        self.plugin['xep_0045'].configureRoom(self.room, ifrom=self.me)

    def group_message(self, msg):
        # Disconnect once we see our own scripted greeting echoed back.
        if "friendly" in msg["body"]:
            # Using wait=True ensures that the send queue will be
            # emptied before ending the session.
            self.disconnect(wait=True)
def jid(username):
    """Build a full JID for *username* on the configured chat domain."""
    return "{0}@{1}".format(username, DOMAIN)
def run():
    """Configure logging, build the bot, connect, and process until disconnect."""
    # Setup logging.
    logging.basicConfig(level=logging.DEBUG,
                        format='%(levelname)-8s %(message)s')
    # Setup the EchoBot and register plugins. Note that while plugins may
    # have interdependencies, the order in which you register them does
    # not matter.
    xmpp = SendMsgBot(jid("mark"), chat.utils.password("<PASSWORD>"))
    xmpp.register_plugin('xep_0030') # Service Discovery
    xmpp.register_plugin('xep_0199') # XMPP Ping
    xmpp.register_plugin('xep_0045') # Multi-User Chat (used for the group room)
    # If you are working with an OpenFire server, you may need
    # to adjust the SSL version used:
    # xmpp.ssl_version = ssl.PROTOCOL_SSLv3
    # If you want to verify the SSL certificates offered by a server:
    # xmpp.ca_certs = "path/to/ca/cert"
    # Connect to the XMPP server and start processing XMPP stanzas.
    if xmpp.connect((chat.utils.server(), 5222)):
        # If you do not have the dnspython library installed, you will need
        # to manually specify the name of the server if it does not match
        # the one in the JID. For example, to use Google Talk you would
        # need to use:
        #
        # if xmpp.connect(('talk.google.com', 5222)):
        #     ...
        xmpp.process(block=True)
        print("Done")
    else:
        print("Unable to connect.")
| StarcoderdataPython |
1746727 | #!/usr/bin/env python2
import unittest, os, shutil
from planet import config, splice, logger
from xml.dom import minidom
workdir = 'tests/work/apply'
configfile = 'tests/data/apply/config-%s.ini'
testfeed = 'tests/data/apply/feed.xml'
class ApplyTest(unittest.TestCase):
    """End-to-end tests for planet.splice.apply with various template configs."""

    def setUp(self):
        # Load the test feed once per test and (re)create a clean work dir.
        testfile = open(testfeed)
        self.feeddata = testfile.read()
        testfile.close()
        try:
            os.makedirs(workdir)
        except:
            # Work dir left over from a previous run: wipe it and recreate.
            self.tearDown()
            os.makedirs(workdir)

    def tearDown(self):
        shutil.rmtree(os.path.split(workdir)[0])

    def apply_asf(self):
        """Splice the feed and verify the ASF template output."""
        splice.apply(self.feeddata)
        # verify that selected files are there
        for file in ['index.html', 'default.css', 'images/foaf.png']:
            path = os.path.join(workdir, file)
            self.assertTrue(os.path.exists(path))
            self.assertTrue(os.stat(path).st_size > 0, file + ' has size 0')
        # verify that index.html is well formed, has content, and xml:lang
        html = open(os.path.join(workdir, 'index.html'))
        doc = minidom.parse(html)
        list = []  # NOTE(review): unused and shadows the builtin; kept as-is
        content = lang = 0
        for div in doc.getElementsByTagName('div'):
            if div.getAttribute('class') != 'content': continue
            content += 1
            if div.getAttribute('xml:lang') == 'en-us': lang += 1
        html.close()
        self.assertEqual(12, content)
        self.assertEqual(3, lang)

    def test_apply_asf(self):
        config.load(configfile % 'asf')
        self.apply_asf()

    def test_apply_classic_fancy(self):
        config.load(configfile % 'fancy')
        self.apply_fancy()

    def test_apply_genshi_fancy(self):
        config.load(configfile % 'genshi')
        self.apply_fancy()

    def test_apply_filter_html(self):
        config.load(configfile % 'html')
        self.apply_asf()
        # XHTML output uses self-closing tags; the HTML4 output must not.
        output = open(os.path.join(workdir, 'index.html')).read()
        self.assertTrue(output.find('/>')>=0)
        output = open(os.path.join(workdir, 'index.html4')).read()
        self.assertTrue(output.find('/>')<0)

    def test_apply_filter_mememe(self):
        config.load(configfile % 'mememe')
        self.apply_fancy()
        output = open(os.path.join(workdir, 'index.html')).read()
        self.assertTrue(output.find('<div class="sidebar"><h2>Memes <a href="memes.atom">')>=0)

    def apply_fancy(self):
        """Splice the feed with the 'fancy' templates and verify the output."""
        # drop slow templates unrelated to test at hand
        templates = config.parser.get('Planet','template_files').split()
        templates.remove('rss10.xml.tmpl')
        templates.remove('rss20.xml.tmpl')
        config.parser.set('Planet','template_files',' '.join(templates))
        splice.apply(self.feeddata)
        # verify that selected files are there
        for file in ['index.html', 'planet.css', 'images/jdub.png']:
            path = os.path.join(workdir, file)
            self.assertTrue(os.path.exists(path), path)
            self.assertTrue(os.stat(path).st_size > 0)
        # verify that index.html is well formed, has content, and xml:lang
        html = open(os.path.join(workdir, 'index.html')).read()
        self.assertTrue(html.find('<h1>test planet</h1>')>=0)
        self.assertTrue(html.find(
            '<h4><a href="http://example.com/2">Venus</a></h4>')>=0)

    def test_apply_filter(self):
        config.load(configfile % 'filter')
        splice.apply(self.feeddata)
        # verify that index.html is well formed, has content, and xml:lang
        html = open(os.path.join(workdir, 'index.html')).read()
        self.assertTrue(html.find(' href="http://example.com/default.css"')>=0)
# Strip tests whose prerequisites are missing from this environment.
import test_filter_genshi
for method in dir(test_filter_genshi.GenshiFilterTests):
    if method.startswith('test_'): break
else:
    # Genshi is unavailable (its test class exposes no test methods).
    delattr(ApplyTest,'test_apply_genshi_fancy')
try:
    import libxml2
except ImportError:
    # The mememe filter needs libxml2.
    delattr(ApplyTest,'test_apply_filter_mememe')
# Probe for the xsltproc binary (Windows via win32pipe, POSIX via commands).
try:
    import win32pipe
    (stdin,stdout) = win32pipe.popen4('xsltproc -V', 't')
    stdin.close()
    stdout.read()
    try:
        exitcode = stdout.close()
    except IOError:
        exitcode = -1
except:
    import commands
    (exitstatus,output) = commands.getstatusoutput('xsltproc -V')
    exitcode = ((exitstatus>>8) & 0xFF)
if exitcode:
    # Without xsltproc none of the template-based tests can run.
    logger.warn("xsltproc is not available => can't test XSLT templates")
    for method in dir(ApplyTest):
        if method.startswith('test_'): delattr(ApplyTest,method)
| StarcoderdataPython |
154963 | import unittest
import models.EndNode as n
class TestEndNode(unittest.TestCase):
    """Unit tests for models.EndNode equality and IP handling."""

    def setUp(self):
        # Two nodes sharing an IP (one with an explicit id) and one distinct node.
        self.a = n.EndNode('192.168.0.1', id = 1)
        self.b = n.EndNode('192.168.0.1')
        self.c = n.EndNode('192.168.0.3')

    def testEquality(self):
        """Nodes are equal when their IPs match, independent of id."""
        a, b, c = self.a, self.b, self.c
        # Every node compares equal to itself.
        for node in (a, b, c):
            self.assertTrue(node == node)
        # Same IP -> equal, even with different ids.
        self.assertTrue(a == b)
        # Different IP -> not equal.
        self.assertFalse(a == c)
        self.assertFalse(b == c)

    def testIp(self):
        """The ip attribute stores the address given at construction."""
        self.assertEqual(self.a.ip, "192.168.0.1")
        self.assertNotEqual(self.a.ip, "192.168.0.3")
        # Nodes built from the same address expose the same ip value.
        self.assertEqual(self.a.ip, self.b.ip)
        self.assertEqual(self.b.ip, "192.168.0.1")
        self.assertEqual(self.c.ip, "192.168.0.3")


if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
3217310 | <reponame>mqlight/qpid-proton
#
# Licensed to the Apache Software Foundation (ASF) under one
# or more contributor license agreements. See the NOTICE file
# distributed with this work for additional information
# regarding copyright ownership. The ASF licenses this file
# to you under the Apache License, Version 2.0 (the
# "License"); you may not use this file except in compliance
# with the License. You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing,
# software distributed under the License is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY
# KIND, either express or implied. See the License for the
# specific language governing permissions and limitations
# under the License
#
from org.apache.qpid.proton.messenger.impl import Address
# Thin Jython shims exposing the proton C-style pn_url_* API on top of the
# Java Messenger Address class.

def pn_url():
    """Create a new, empty URL."""
    return Address()

def pn_url_parse(urlstr):
    """Parse *urlstr* and return the resulting Address."""
    return Address(urlstr)

def pn_url_free(url): pass  # no-op: the JVM garbage-collects the object

def pn_url_clear(url):
    url.clear();

def pn_url_str(url): return url.toString()

# Getters; host returns None (not '') when unset, matching the C API.
def pn_url_get_scheme(url): return url.getScheme()
def pn_url_get_username(url): return url.getUser()
def pn_url_get_password(url): return url.getPass()
def pn_url_get_host(url): return url.getHost() or None
def pn_url_get_port(url): return url.getPort()
def pn_url_get_path(url): return url.getName()

# Setters mirroring the getters above.
def pn_url_set_scheme(url, value): url.setScheme(value)
def pn_url_set_username(url, value): url.setUser(value)
def pn_url_set_password(url, value): url.setPass(value)
def pn_url_set_host(url, value): url.setHost(value)
def pn_url_set_port(url, value): url.setPort(value)
def pn_url_set_path(url, value): url.setName(value)
| StarcoderdataPython |
3263759 | #!/usr/bin/python3
# Info:
# <NAME>.
from datetime import date
def age(birth_date):
    """Print life statistics for a person born on *birth_date* (datetime.date).

    Uses a fixed expected death date of 2080-12-14.
    """
    today = date.today()
    expected_death = date(2080, 12, 14)
    # NOTE: counts calendar years only; month/day are not taken into account.
    years_alive = today.year - birth_date.year
    days_alive = (today - birth_date).days
    days_left = (expected_death - today).days
    # timedelta has no .years attribute (the original code crashed here);
    # approximate remaining whole years from the day count instead.
    years_left = days_left // 365
    print(f"you were born {years_alive} years ago")
    print(f"you have been alive for {days_alive} days")
    print(f"you have approximately {days_left} days left to live")
    print(f"you have approximately {years_left} years left to live")
def parse_date(user_date):
    """Convert a date string of the form 'YYYY-MM-DD' into a datetime.date."""
    parts = [int(piece) for piece in user_date.split("-")]
    return date(parts[0], parts[1], parts[2])
def main():
    """Prompt for a birth date on stdin and print the derived life statistics."""
    age(parse_date(input("when were you born (YYYY-MM-DD)? ")))


if __name__ == "__main__":
    main()
| StarcoderdataPython |
64593 | """Support for Sonarr sensors."""
from datetime import timedelta
from functools import wraps
import logging
from typing import Any, Callable, Dict, List, Optional

from sonarr import Sonarr, SonarrConnectionError, SonarrError

from homeassistant.config_entries import ConfigEntry
from homeassistant.const import DATA_GIGABYTES
from homeassistant.helpers.entity import Entity
from homeassistant.helpers.typing import HomeAssistantType
import homeassistant.util.dt as dt_util

from . import SonarrEntity
from .const import CONF_UPCOMING_DAYS, CONF_WANTED_MAX_ITEMS, DATA_SONARR, DOMAIN
_LOGGER = logging.getLogger(__name__)
async def async_setup_entry(
    hass: HomeAssistantType,
    entry: ConfigEntry,
    async_add_entities: Callable[[List[Entity], bool], None],
) -> None:
    """Set up Sonarr sensors based on a config entry."""
    options = entry.options
    sonarr = hass.data[DOMAIN][entry.entry_id][DATA_SONARR]
    # One entity per sensor type; user-configurable limits come from the
    # config entry options (upcoming window in days, wanted page size).
    entities = [
        SonarrCommandsSensor(sonarr, entry.entry_id),
        SonarrDiskspaceSensor(sonarr, entry.entry_id),
        SonarrQueueSensor(sonarr, entry.entry_id),
        SonarrSeriesSensor(sonarr, entry.entry_id),
        SonarrUpcomingSensor(sonarr, entry.entry_id, days=options[CONF_UPCOMING_DAYS]),
        SonarrWantedSensor(
            sonarr, entry.entry_id, max_items=options[CONF_WANTED_MAX_ITEMS]
        ),
    ]
    # True -> request an immediate first update for every entity.
    async_add_entities(entities, True)
def sonarr_exception_handler(func):
    """Decorate Sonarr calls to handle Sonarr exceptions.

    A decorator that wraps the passed in coroutine, catches Sonarr errors,
    and keeps the entity's availability flag in sync:
    - success   -> last_update_success = True, result passed through
    - API error -> last_update_success = False, logged once per outage
    """

    @wraps(func)  # preserve the wrapped coroutine's name/docstring
    async def handler(self, *args, **kwargs):
        try:
            result = await func(self, *args, **kwargs)
        except SonarrConnectionError as error:
            # Only log on the transition to unavailable to avoid log spam.
            if self.available:
                _LOGGER.error("Error communicating with API: %s", error)
            self.last_update_success = False
        except SonarrError as error:
            if self.available:
                _LOGGER.error("Invalid response from API: %s", error)
            self.last_update_success = False
        else:
            self.last_update_success = True
            # Pass the wrapped coroutine's result through (was dropped before).
            return result

    return handler
class SonarrSensor(SonarrEntity):
    """Implementation of the Sonarr sensor base class.

    Holds the common unique-id, availability, and unit plumbing shared by
    all concrete Sonarr sensors below.
    """

    def __init__(
        self,
        *,
        sonarr: Sonarr,
        entry_id: str,
        enabled_default: bool = True,
        icon: str,
        key: str,
        name: str,
        unit_of_measurement: Optional[str] = None,
    ) -> None:
        """Initialize Sonarr sensor.

        Args:
            sonarr: Client for the Sonarr API.
            entry_id: Config entry id; also used as the device id.
            enabled_default: Whether the entity is enabled out of the box.
            icon: Material Design icon name.
            key: Per-sensor-type key, combined with entry_id into unique_id.
            name: Friendly entity name.
            unit_of_measurement: Unit for the sensor state, if any.
        """
        self._unit_of_measurement = unit_of_measurement
        self._key = key
        self._unique_id = f"{entry_id}_{key}"
        # Toggled by the sonarr_exception_handler decorator on each update.
        self.last_update_success = False
        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            device_id=entry_id,
            name=name,
            icon=icon,
            enabled_default=enabled_default,
        )

    @property
    def unique_id(self) -> str:
        """Return the unique ID for this sensor."""
        return self._unique_id

    @property
    def available(self) -> bool:
        """Return sensor availability (False after a failed update)."""
        return self.last_update_success

    @property
    def unit_of_measurement(self) -> str:
        """Return the unit this state is expressed in."""
        return self._unit_of_measurement
class SonarrCommandsSensor(SonarrSensor):
    """Defines a Sonarr Commands sensor (server commands running/queued)."""

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Commands sensor."""
        self._commands = []
        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:code-braces",
            key="commands",
            name=f"{sonarr.app.info.app_name} Commands",
            unit_of_measurement="Commands",
            enabled_default=False,  # opt-in: hidden until the user enables it
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._commands = await self.sonarr.commands()

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return one attribute per command: name -> execution state."""
        attrs = {}
        for command in self._commands:
            attrs[command.name] = command.state
        return attrs

    @property
    def state(self) -> int:
        """Return the number of commands known to Sonarr."""
        return len(self._commands)
class SonarrDiskspaceSensor(SonarrSensor):
    """Defines a Sonarr Disk Space sensor (free space across all mounts)."""

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Disk Space sensor."""
        self._disks = []
        self._total_free = 0
        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:harddisk",
            key="diskspace",
            name=f"{sonarr.app.info.app_name} Disk Space",
            unit_of_measurement=DATA_GIGABYTES,
            enabled_default=False,  # opt-in: hidden until the user enables it
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        app = await self.sonarr.update()
        self._disks = app.disks
        # Total free space across all mounts, in bytes.
        self._total_free = sum(disk.free for disk in self._disks)

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return per-disk strings, e.g. '10.00/100.00GB (10.00%)'.

        The percentage shown is the *free* fraction of the disk.
        """
        attrs = {}
        for disk in self._disks:
            free = disk.free / 1024 ** 3
            total = disk.total / 1024 ** 3
            # Guard against a reported total of 0 to avoid ZeroDivisionError.
            usage = free / total * 100 if total else 0
            attrs[
                disk.path
            ] = f"{free:.2f}/{total:.2f}{self._unit_of_measurement} ({usage:.2f}%)"
        return attrs

    @property
    def state(self) -> str:
        """Return total free space in GB, formatted to two decimals."""
        free = self._total_free / 1024 ** 3
        return f"{free:.2f}"
class SonarrQueueSensor(SonarrSensor):
    """Defines a Sonarr Queue sensor (episodes currently downloading)."""

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Queue sensor."""
        self._queue = []
        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:download",
            key="queue",
            name=f"{sonarr.app.info.app_name} Queue",
            unit_of_measurement="Episodes",
            enabled_default=False,  # opt-in: hidden until the user enables it
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._queue = await self.sonarr.queue()

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return one attribute per queued episode: title -> percent downloaded."""
        attrs = {}
        for item in self._queue:
            # Zero-sized items are treated as fully remaining (avoids div by 0).
            remaining = 1 if item.size == 0 else item.size_remaining / item.size
            remaining_pct = 100 * (1 - remaining)
            name = f"{item.episode.series.title} {item.episode.identifier}"
            attrs[name] = f"{remaining_pct:.2f}%"
        return attrs

    @property
    def state(self) -> int:
        """Return the number of episodes in the download queue."""
        return len(self._queue)
class SonarrSeriesSensor(SonarrSensor):
    """Defines a Sonarr Series sensor (shows in the library)."""

    def __init__(self, sonarr: Sonarr, entry_id: str) -> None:
        """Initialize Sonarr Series sensor."""
        self._items = []
        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:television",
            key="series",
            name=f"{sonarr.app.info.app_name} Shows",
            unit_of_measurement="Series",
            enabled_default=False,  # opt-in: hidden until the user enables it
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._items = await self.sonarr.series()

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return one attribute per show: title -> downloaded/total episodes."""
        attrs = {}
        for item in self._items:
            attrs[item.series.title] = f"{item.downloaded}/{item.episodes} Episodes"
        return attrs

    @property
    def state(self) -> int:
        """Return the number of series in the library."""
        return len(self._items)
class SonarrUpcomingSensor(SonarrSensor):
    """Defines a Sonarr Upcoming sensor (episodes airing within a window)."""

    def __init__(self, sonarr: Sonarr, entry_id: str, days: int = 1) -> None:
        """Initialize Sonarr Upcoming sensor.

        Args:
            days: Size of the look-ahead window, in days.
        """
        self._days = days
        self._upcoming = []
        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:television",
            key="upcoming",
            name=f"{sonarr.app.info.app_name} Upcoming",
            unit_of_measurement="Episodes",
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        # Window: start of the *local* day (converted to UTC) plus _days days.
        local = dt_util.start_of_local_day().replace(microsecond=0)
        start = dt_util.as_utc(local)
        end = start + timedelta(days=self._days)
        self._upcoming = await self.sonarr.calendar(
            start=start.isoformat(), end=end.isoformat()
        )

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return one attribute per upcoming episode: series title -> identifier."""
        attrs = {}
        for episode in self._upcoming:
            attrs[episode.series.title] = episode.identifier
        return attrs

    @property
    def state(self) -> int:
        """Return the number of episodes airing within the window."""
        return len(self._upcoming)
class SonarrWantedSensor(SonarrSensor):
    """Defines a Sonarr Wanted sensor (missing/wanted episodes)."""

    def __init__(self, sonarr: Sonarr, entry_id: str, max_items: int = 10) -> None:
        """Initialize Sonarr Wanted sensor.

        Args:
            max_items: Page size — at most this many episodes are fetched
                and exposed as attributes; the state is still the full total.
        """
        self._max_items = max_items
        self._results = None
        self._total: Optional[int] = None
        super().__init__(
            sonarr=sonarr,
            entry_id=entry_id,
            icon="mdi:television",
            key="wanted",
            name=f"{sonarr.app.info.app_name} Wanted",
            unit_of_measurement="Episodes",
            enabled_default=False,  # opt-in: hidden until the user enables it
        )

    @sonarr_exception_handler
    async def async_update(self) -> None:
        """Update entity."""
        self._results = await self.sonarr.wanted(page_size=self._max_items)
        self._total = self._results.total

    @property
    def device_state_attributes(self) -> Optional[Dict[str, Any]]:
        """Return one attribute per wanted episode: title -> air date."""
        attrs = {}
        if self._results is not None:
            for episode in self._results.episodes:
                name = f"{episode.series.title} {episode.identifier}"
                attrs[name] = episode.airdate
        return attrs

    @property
    def state(self) -> Optional[int]:
        """Return the total wanted-episode count (None before the first update)."""
        return self._total
| StarcoderdataPython |
3356838 | import toml
from lambda_cache import __version__
def test_version():
    """
    Test that the version in __init__.py matches the pyproject.toml file.
    """
    # Expected to run from one level below the project root, hence '../'.
    with open('../pyproject.toml') as toml_file:
        config = toml.load(toml_file)
        version = config.get('tool').get('poetry').get('version')
        assert __version__ == version
| StarcoderdataPython |
1772471 | <filename>output/python_checkkeyplace.py
from statistics import mean
import numpy as np
import matplotlib.pyplot as plt
import seaborn as sns
plt.style.use("ggplot")  # select the overall plot style
sns.set_palette('Set2')  # select the plot color palette
import warnings
warnings.filterwarnings('ignore')  # silence warning messages
# Module-level accumulators filled by CheckKeyPlace ("hakohige" = box plot):
hakohige_labels=[]   # one label per analysed file
hakohige_lawdata=[]  # raw (absolute) key indices per file ("law" ~ "raw")
hakohige_data=[]     # normalized (0..1) key positions per file
class CheckKeyPlace:
def __init__(self, f, key):
self.key = key
self.file = f+".txt"
self.check_key_place()
def check_key_place(self):
global hakohige_labels
global hakohige_lawdata
global hakohige_data
handle=open(self.file, 'r')
lines=handle.readlines()
scores=[]
lawscores=[]
for line in lines:
tokens=line.strip().split()
linelength=len(tokens)
for i,token in enumerate(tokens):
if token==self.key:
scores.append(i/(linelength-1) if linelength>=2 else 0)
lawscores.append(i)
break
score=mean(scores)
hakohige_labels.append(self.file[7:-4])
hakohige_lawdata.append(lawscores)
hakohige_data.append(scores)
stderr=np.std(scores, ddof=1) / np.sqrt(len(scores)) if len(scores)>0 else 0
print("{} {} score:{} stderr:{}".format(self.file,self.key,score,stderr))
# Score every corpus/keyword pair; each constructor prints its summary and
# appends to the hakohige_* accumulators above.
ckp=CheckKeyPlace("output_mr_good", "good")
ckp=CheckKeyPlace("output_mr_good2", "good")
ckp=CheckKeyPlace("output_mr_bad", "bad")
ckp=CheckKeyPlace("output_mr_bad2", "bad")
ckp=CheckKeyPlace("output_enmini_good", "good")
ckp=CheckKeyPlace("output_enmini_good2", "good")
ckp=CheckKeyPlace("output_enmini_bad", "bad")
ckp=CheckKeyPlace("output_enmini_bad2", "bad")
ckp=CheckKeyPlace("output_coco_man", "man")
ckp=CheckKeyPlace("output_coco_man2", "man")
ckp=CheckKeyPlace("output_coco_toilet", "toilet")
ckp=CheckKeyPlace("output_coco_toilet2", "toilet")
# Box plot of raw key indices.
plt.figure(figsize=(20,12))
plt.boxplot(hakohige_lawdata,labels=hakohige_labels)
plt.savefig("keyplace.eps")
# Box plot of normalized (0..1) key positions.
plt.figure(figsize=(20,12))
plt.boxplot(hakohige_data,labels=hakohige_labels)
plt.savefig("keyplace_nomarized.eps")
3320233 | <reponame>vhte/bagpipewriter<filename>tests/test_bagpipemanager.py
from bagpipemanager.bagpipemanager import BagpipeManager
def test_get_properties(mocker):
    """BagpipeManager can be constructed alongside a mocked sheet object."""
    # pytest-mock's fixture exposes Mock(), not Mocker(): the original call
    # raised AttributeError before the test body could do anything useful.
    sheet = mocker.Mock()
    manager = BagpipeManager()
    assert manager is not None
| StarcoderdataPython |
3371713 | <reponame>DrLarck/DiscordBallZ_
"""
Manager the levelling
--
Author : DrLarck
Last update : 15/02/20 (DrLarck)
"""
# dependancies
import asyncio
from random import randint
# util
from utility.database.database_manager import Database
from utility.cog.character.getter import Character_getter
# leveller
class Leveller:
"""
Manages the way the character and player level up.
- Parameter :
`client` : Represents a `discord.Client`. The client must handle a database connection pool.
`ctx` : Represents the `commands.Context`.
- Method :
:coro:`character_levelling(player, unique_character, xp)` : Check if the character levels up or not.
:coro:`player_levelling(player, xp)` : Same as `character_levelling()` but for players.
"""
# attribute
def __init__(self, client, ctx):
# basic
self.client = client
self.ctx = ctx
self.db = Database(self.client.db)
self.getter = Character_getter()
# method
async def character_levelling(self, player, unique_character, xp):
"""
`coroutine`
Make the character levelling up.
- Parameter :
`player` : The player who is affected.
`unique_character` : The character to level up
`xp` : The amount of xp gained by the character
--
Return : send a message in the channel if the character is levelling up, otherwise None
"""
# init
# level per rarity
max_level = {
"0" : 20,
"1" : 40,
"2" : 60,
"3" : 80,
"4" : 120,
"5" : 150
}
# character
character = await self.getter.get_from_unique(self.client, unique_character)
await character.init()
level = character.level
# get the xp infos
character_xp = int(await self.db.fetchval(
f"SELECT character_experience FROM character_unique WHERE character_unique_id = '{unique_character}';")
)
# check if the character's level is equal to its rarity or inferior
if(character.level < max_level[f"{character.rarity.value}"]):
character_xp += xp
await self.db.execute(
f"""
UPDATE character_unique SET character_experience = {character_xp}
WHERE character_unique_id = '{unique_character}';
"""
)
# sum the necessary xp + total exp to retrieve the nedded amount
next_level = int((100 + (50 * level)) + ((level - 1) * (100 + (50 * level - 1))))
# levelling it up
while character_xp > next_level:
await asyncio.sleep(0)
# avoid the character to reach a too high level for its rarity
if(level < max_level[f"{character.rarity.value}"]):
# increase the level value
level += 1
# just get the new exp value after levelling
next_level = int((100 + (50 * level)) + ((level - 1) * (100 + (50 * level - 1))))
else:
break
# if the level has changed
if(level > character.level):
# set the level
await self.db.execute(
f"""
UPDATE character_unique SET character_level = {level}
WHERE character_unique_id = '{unique_character}';
"""
)
return(f"Your character {character.image.icon}**{character.info.name}** {character.type.icon}{character.rarity.icon} has reached the level **{level:,}** :star: by earning *{xp:,}* as **XP** !")
else: # just xp gain
return(f"Your character {character.image.icon}**{character.info.name}** {character.type.icon}{character.rarity.icon} has gained *{xp:,}* as **XP** !")
return
async def team_add_xp(self, player, team, xp):
"""
`coroutine`
Add the `xp` amount to the characters that are contained in `team`, then display a message.
- Parameter :
`player` : Represents the `team` owner.
`team` : list - Represents the player's team. Contains characters' `unique id`.
`xp` : int - The amount of xp gained by the team.
--
Return : Send a message.
"""
# init
displaying = ""
for character in team:
await asyncio.sleep(0)
if(character != None):
xp = randint(int((0.9 * xp)), int((1.1 * xp))) # rng xp
future_display = await self.character_levelling(player, character, xp)
if(future_display != None):
displaying += future_display
displaying += "\n"
if(displaying != f"<@{player.id}> "):
await self.ctx.send(displaying)
else:
return | StarcoderdataPython |
3276129 | """
An active learning example using linear regression.
-- <EMAIL>
"""
# pylint: disable=invalid-name
# pylint: disable=no-name-in-module
# pylint: disable=abstract-method
import numpy as np
try:
from scipy.stats import multivariate_normal
except ImportError:
from numpy.random import multivariate_normal
# Local
from mps.exd.domains import EuclideanDomain
from mps.exd.goal_oriented_exd import GoalOrientedExperimentDesigner # args
from mps.exd.worker_manager import SyntheticWorkerManager
from mps.exd.experiment_caller import EuclideanFunctionCaller
from mps.prob.disc_prob_examples import BayesianLinearRBF
from mps.policies import mps
from mps.policies import random
from mps import load_options_for_policy
def get_problem_params(options=None, reporter=None):
    """ Returns problem parameters. Picks centers in [0, 1]^2.

    The bottom-left quadrant gets a dense n1 x n1 grid of RBF centers;
    each of the other three quadrants gets a sparser n2 x n2 grid.
    """
    n1 = 2  # number of centers in the bottom left quadrant = n1 ^ 2
    n2 = 1  # number of centers in each of the other 3 quadrants = n2 ^ 2

    def _quadrant_grid(num_pts):
        """Evenly spaced interior points of [0, 0.5]."""
        return np.linspace(0, 0.5, num_pts + 2)[1:-1]

    dense_axis = _quadrant_grid(n1)
    centers_botleft = np.array([[x, y] for y in dense_axis for x in dense_axis])
    sparse_axis = _quadrant_grid(n2)
    base_cell = np.array([[x, y] for y in sparse_axis for x in sparse_axis])
    # Shift the base cell into the remaining quadrants; concatenation order
    # (bottom-left, top-right, top-left, bottom-right) is kept from the original.
    centers = np.concatenate(
        (centers_botleft,
         base_cell + 0.5,
         base_cell + np.array([0, 0.5]),
         base_cell + np.array([0.5, 0])),
        axis=0)

    def _weight_fn(pt):
        """ A toy function that determines the weights for each center. """
        x, y = pt
        return np.sin(4 * np.pi * (x**2 + y))

    true_theta = np.array([_weight_fn(c) for c in centers])
    x_domain = EuclideanDomain([[0.0, 1.0], [0.0, 1.0]])
    # Prior mean and covariance for the Bayesian linear model.
    prior_info = (np.zeros(len(centers)), np.eye(len(centers)))
    rbf_var = 0.1
    eta2 = 0.001  # noise parameter handed to the model — semantics per BayesianLinearRBF
    model = BayesianLinearRBF(centers, rbf_var, eta2, x_domain, None, prior_info,
                              options=options, reporter=reporter)
    # Evaluating an experiment draws a noisy observation at x under the true theta.
    experiment_eval_func = lambda x: model.sample_y_giv_x_t(1, x, true_theta)[0]
    experiment_caller = EuclideanFunctionCaller(experiment_eval_func, x_domain,
                                                'linear_rbf')
    return true_theta, model, experiment_caller
def compute_least_squares_est(centers, cov, X, Y, regularize=True):
    """ Compute regularized least-squares estimate for theta given X, Y.

    Each column of the design matrix is a Gaussian RBF (one per center)
    evaluated at the rows of X.
    """
    rbfs = [multivariate_normal(mean=c, cov=cov) for c in centers]
    design = np.array([g.pdf(X) for g in rbfs]).T  # n x d, where d is num centers
    if design.ndim == 1:
        # A single query point yields a 1-D pdf vector; promote to a 1 x d row.
        design = design.reshape((1, len(design)))
    if not regularize:
        return np.linalg.lstsq(design, Y)[0]
    # Ridge penalty: fixed for a single observation, data-scaled otherwise.
    if len(Y) == 1:
        reg_lambda = 0.1
    else:
        reg_lambda = np.std(Y) / (10 * len(X))
    gram = design.T.dot(design)
    ridge = reg_lambda * np.eye(len(gram))
    return np.linalg.lstsq(gram + ridge, design.T.dot(Y))[0]
class LinearRBFProblem(GoalOrientedExperimentDesigner):
    """ Describes the problem for active learning: the reward is the negative
    squared relative error of a least-squares estimate of theta. """

    def __init__(self, experiment_caller, worker_manager, model, true_theta,
                 options=None, reporter='default', *args, **kwargs):
        """ Constructor: store the ground-truth theta and wire the reward
        callbacks into the base experiment designer. """
        self.true_theta = true_theta
        super(LinearRBFProblem, self).__init__(
            experiment_caller, worker_manager, model, self._reward,
            self._true_reward, options=options, reporter=reporter,
            *args, **kwargs)

    def _reward(self, theta, X, Y):
        """ The reward function: -||(theta - theta_hat) / (true_theta + eps)||^2. """
        if len(X) == 0:
            # No data yet; keep the original convention of returning +inf.
            return np.inf
        raw_coords = self.experiment_caller.get_raw_domain_coords(X)
        theta_hat = compute_least_squares_est(
            self.model.centers, self.model.var, raw_coords, Y)
        relative_err = (theta - theta_hat) / (self.true_theta + 0.001)
        return -np.linalg.norm(relative_err) ** 2

    def _true_reward(self, X, Y):
        """ The reward evaluated at the ground-truth theta. """
        return self._reward(self.true_theta, X, Y)
# The following classes inherit the problem and policy classes ==========================
class LinearRBFActiveLearnerMPS(LinearRBFProblem, mps.MPSExperimentDesigner):
    """ Active Learning on the LinearRBF Model with Posterior Sampling. """
    # Pure mix-in composition: problem definition + MPS policy; no overrides.
    pass
class LinearRBFActiveLearnerMO(LinearRBFProblem, mps.MyopicOracleExperimentDesigner):
    """ Active Learning on the LinearRBF Model with Posterior Sampling using the
    Oracle policy. """
    # Pure mix-in composition: problem definition + myopic-oracle policy.
    pass
class LinearRBFActiveLearnerRandom(LinearRBFProblem,
                                   random.EuclideanRandomExperimentDesigner):
    """ Random Designer on the LinearRBF problem. """
    # Baseline: uniformly random experiment selection over the domain.
    pass
def main():
    """ Main function: run the random, oracle and MPS designers in turn. """
    budget = 40
    true_theta, model, experiment_caller = get_problem_params()
    worker_manager = SyntheticWorkerManager(1)
    # (banner, policy name, designer class) — order kept from the original.
    designer_specs = (
        ('\nRandom designer:', 'rand', LinearRBFActiveLearnerRandom),
        ('\nOracle designer:', 'mo', LinearRBFActiveLearnerMO),
        ('\nMPS designer:', 'mps', LinearRBFActiveLearnerMPS),
    )
    for banner, policy_name, designer_cls in designer_specs:
        print(banner)
        # Reset the synthetic worker pool before each run so designers
        # start from identical conditions.
        worker_manager.reset()
        policy_options = load_options_for_policy(policy_name)
        designer = designer_cls(experiment_caller, worker_manager, model,
                                true_theta, options=policy_options)
        designer.run_experiments(budget)
if __name__ == '__main__':
main()
| StarcoderdataPython |
145013 |
from rest_framework import serializers
from authx.models import User
from rest_framework_jwt.utils import jwt_payload_handler as drf_jwt_payload_handler
class UserSerializer(serializers.ModelSerializer):
    """Serializes User instances for API responses.

    ``password`` is declared write-only so it can be accepted on input but is
    never rendered back to the client.
    """
    password = serializers.CharField(
        style={'input_type': 'password'},
        write_only=True  # password not showing for the time being
    )

    class Meta:
        model = User
        # BUG FIX: DRF's ModelSerializer asserts at instantiation when Meta
        # declares neither `fields` nor `exclude`. Expose all model fields;
        # narrow this to an explicit list if some fields must stay private.
        fields = '__all__'
def jwt_response_payload_handler(token, user=None, request=None):
    """
    Returns the response data for both the login and refresh views.
    Override to return a custom response such as including the
    serialized representation of the User.
    Deprecated: as some info can be within token xxx.yyy.zzz
    payload => yyy (base64 encoded)
    """
    # User data is intentionally omitted: any extra claims live inside the
    # token's payload segment already.
    response_body = {'token': token}
    # 'user': UserSerializer(user).data,  # kept for reference, not emitted
    return response_body
def jwt_payload_handler(user):
    """Build the JWT payload: the stock DRF-JWT claims plus profile extras."""
    payload = drf_jwt_payload_handler(user)
    # Historic note kept from the original implementation: the `email` and
    # `user_id` claims are slated for removal; the stock handler currently
    # supplies the user id, email, username and `exp` expiry claim.
    # NOTE(review): the exact stock claims come from rest_framework_jwt.utils —
    # confirm before relying on specific keys.
    payload.setdefault('selfie_path', user.selfie_path)
    payload.setdefault('nickname', user.nickname or user.username)
    # payload.setdefault('roles', [])  # TODO: role claims not implemented yet
    return payload
# demo/settings.py
from os import environ

# Secret key is sourced from the environment; None when the variable is unset.
# (Fused dataset residue around this fragment removed — it broke the syntax.)
SECRET_KEY = environ.get('connectiondata')
1667308 | <gh_stars>1-10
from nodes.serializers import NodeListSerializer
from rest_framework import serializers
from users.serializers import UserSerializer
from .models import Post
class PostCreateSerializer(serializers.ModelSerializer):
    """Serializer used when creating or updating posts.

    The author comes from the request user, so ``user`` is exposed as a
    read-only username rather than accepted from the client.
    """
    user = serializers.ReadOnlyField(source="user.username")

    class Meta:
        model = Post
        fields = [
            "id",
            "title",
            "body",
            "node",
            "views",
            "pinned",
            "highlighted",
            "deleted",
            "created_at",
            "edited_at",
            "modified_at",
            "user",
        ]
        # Server-managed fields: clients may not set ids, timestamps,
        # moderation flags or the view counter directly.
        read_only_fields = [
            "id",
            "created_at",
            "edited_at",
            "modified_at",
            "pinned",
            "highlighted",
            "deleted",
            "views",
        ]
class PostListSerializer(serializers.ModelSerializer):
    """Read-oriented serializer for listing posts.

    Unlike the create serializer, the related node and user are rendered as
    nested read-only objects.
    """
    node = NodeListSerializer(read_only=True)
    user = UserSerializer(read_only=True)

    class Meta:
        model = Post
        fields = [
            "id",
            "title",
            "body",
            "views",
            "user",
            "pinned",
            "highlighted",
            "deleted",
            "created_at",
            "edited_at",
            "modified_at",
            "node",
        ]
| StarcoderdataPython |
1787990 | <reponame>Elyavor/ITMO_ICT_WebDevelopment_2021-2022
from rest_framework import serializers
from .models import Subject
class SubjectSerializer(serializers.ModelSerializer):
    """Expose every field of the Subject model; no custom behavior."""
    class Meta:
        model = Subject
        fields = '__all__'
| StarcoderdataPython |
179418 | # -*- coding: utf-8 -*-
"""
class Horizon for accessing horizon
Created on Fri July 20 2017
"""
from __future__ import (absolute_import, division, print_function,
unicode_literals)
__author__ = "yuhao"
import pandas as pd
class Horizon(object):
    """
    Seismic horizon loaded from a tab-separated text file.

    NOTE(review): the original docstring said "excel file", but the data is
    actually read with ``pd.read_csv(..., sep='\\t')`` — confirm which format
    is canonical before changing either.

    Parameters
    ----------
    data_file : str
        Path to a tab-separated file with (at least) the columns
        ``inline``, ``crline`` and ``z``.
    """
    def __init__(self, data_file):
        self.hdf_file = None
        self.horizon_name = None
        self.data_frame = pd.read_csv(data_file, sep='\t')

    def __str__(self):
        return f"Horizon Object: {self.horizon_name}"

    def get_cdp(self, cdp):
        """
        Get value for a CDP point on the horizon.

        Parameters
        ----------
        cdp : tuple of int (inline, crossline)

        Returns
        -------
        The ``z`` value of the last matching row.
        """
        inline_no, crossline_no = cdp
        mask = (self.data_frame.inline == inline_no) & \
               (self.data_frame.crline == crossline_no)
        return self.data_frame[mask].z.values[-1]
| StarcoderdataPython |
3226858 | # -*- coding: utf-8 -*-
# cython: language_level=3
# Copyright (c) 2020 Nekokatt
# Copyright (c) 2021 davfsa
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
"""Events that fire if messages are sent/updated/deleted."""
from __future__ import annotations
__all__: typing.List[str] = [
"MessageEvent",
"MessageCreateEvent",
"MessageUpdateEvent",
"MessageDeleteEvent",
"GuildMessageCreateEvent",
"GuildMessageUpdateEvent",
"GuildMessageDeleteEvent",
"GuildBulkMessageDeleteEvent",
"DMMessageCreateEvent",
"DMMessageUpdateEvent",
"DMMessageDeleteEvent",
]
import abc
import typing
import attr
from hikari import channels
from hikari import intents
from hikari import snowflakes
from hikari import traits
from hikari import undefined
from hikari.events import base_events
from hikari.events import shard_events
from hikari.internal import attr_extensions
if typing.TYPE_CHECKING:
from hikari import embeds as embeds_
from hikari import guilds
from hikari import messages
from hikari import users
from hikari.api import shard as shard_
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageEvent(shard_events.ShardEvent, abc.ABC):
    """Any event that concerns manipulation of messages."""

    # No instance state of its own; concrete subclasses declare the fields.
    __slots__: typing.Sequence[str] = ()

    @property
    @abc.abstractmethod
    def channel_id(self) -> snowflakes.Snowflake:
        """ID of the channel that this event concerns.

        Returns
        -------
        hikari.snowflakes.Snowflake
            The ID of the channel that this event concerns.
        """

    @property
    @abc.abstractmethod
    def message_id(self) -> snowflakes.Snowflake:
        """ID of the message that this event concerns.

        Returns
        -------
        hikari.snowflakes.Snowflake
            The ID of the message that this event concerns.
        """
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageCreateEvent(MessageEvent, abc.ABC):
    """Event that is fired when a message is created."""

    __slots__: typing.Sequence[str] = ()

    @property
    def app(self) -> traits.RESTAware:
        # <<inherited docstring from Event>>.
        return self.message.app

    @property
    def author(self) -> users.User:
        """User that sent the message.

        Returns
        -------
        hikari.users.User
            The user that sent the message.
        """
        return self.message.author

    @property
    def author_id(self) -> snowflakes.Snowflake:
        """ID of the author of the message this event concerns.

        Returns
        -------
        hikari.snowflakes.Snowflake
            The ID of the author.
        """
        return self.author.id

    @property
    def channel_id(self) -> snowflakes.Snowflake:
        # <<inherited docstring from MessageEvent>>
        return self.message.channel_id

    @property
    def content(self) -> typing.Optional[str]:
        """Content of the message.

        Returns
        -------
        typing.Optional[builtins.str]
            The content of the message, if present. This may be `builtins.None`
            or an empty string (or any falsy value) if no content is present
            (e.g. if only an embed was sent).
        """
        return self.message.content

    @property
    def embeds(self) -> typing.Sequence[embeds_.Embed]:
        """Sequence of embeds in the message.

        Returns
        -------
        typing.Sequence[hikari.embeds.Embed]
            The embeds in the message.
        """
        return self.message.embeds

    @property
    def is_bot(self) -> bool:
        """Return `builtins.True` if the message is from a bot.

        Returns
        -------
        builtins.bool
            `builtins.True` if from a bot, or `builtins.False` otherwise.
        """
        return self.message.author.is_bot

    @property
    def is_human(self) -> bool:
        """Return `builtins.True` if the message was created by a human.

        Returns
        -------
        builtins.bool
            `builtins.True` if from a human user, or `builtins.False` otherwise.
        """
        # Not second-guessing some weird edge case will occur in the future with this,
        # so I am being safe rather than sorry.
        # Both conditions must hold: a non-bot author AND no webhook id.
        return not self.message.author.is_bot and self.message.webhook_id is None

    @property
    def is_webhook(self) -> bool:
        """Return `builtins.True` if the message was created by a webhook.

        Returns
        -------
        builtins.bool
            `builtins.True` if from a webhook, or `builtins.False` otherwise.
        """
        return self.message.webhook_id is not None

    @property
    @abc.abstractmethod
    def message(self) -> messages.Message:
        """Message that was sent in the event.

        Returns
        -------
        hikari.messages.Message
            The message object that was sent with this event.
        """

    @property
    def message_id(self) -> snowflakes.Snowflake:
        """ID of the message that this event concerns.

        Returns
        -------
        hikari.snowflakes.Snowflake
            The ID of the message that this event concerns.
        """
        return self.message.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageCreateEvent(MessageCreateEvent):
    """Event that is fired when a message is created within a guild.

    This contains the full message in the internal `message` attribute.
    """

    # NOTE: attr field order is part of the generated API; do not reorder.
    message: messages.Message = attr.field()
    # <<inherited docstring from MessageCreateEvent>>

    # SKIP_DEEP_COPY metadata — presumably copies share the shard handle;
    # see attr_extensions for the exact semantics.
    shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from ShardEvent>>

    @property
    def author(self) -> users.User:
        """User object of the user that sent the message.

        Returns
        -------
        hikari.users.User
            The user object of the user that sent the message.
        """
        return self.message.author

    @property
    def member(self) -> typing.Optional[guilds.Member]:
        """Member object of the user that sent the message.

        Returns
        -------
        typing.Optional[hikari.guilds.Member]
            The member object of the user that sent the message or
            `builtins.None` if sent by a webhook.
        """
        return self.message.member

    @property
    def guild_id(self) -> snowflakes.Snowflake:
        """ID of the guild that this event occurred in.

        Returns
        -------
        hikari.snowflakes.Snowflake
            The ID of the guild that this event occurred in.
        """
        guild_id = self.message.guild_id
        # Always present on guild events
        assert isinstance(guild_id, snowflakes.Snowflake), "no guild_id attribute set"
        return guild_id

    def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
        """Channel that the message was sent in, if known.

        Returns
        -------
        typing.Optional[hikari.channels.TextableGuildChannel]
            The channel that the message was sent in, if known and cached,
            otherwise, `builtins.None`.
        """
        # Cache access requires a cache-aware application object.
        if not isinstance(self.app, traits.CacheAware):
            return None
        channel = self.app.cache.get_guild_channel(self.channel_id)
        assert channel is None or isinstance(
            channel, channels.TextableGuildChannel
        ), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
        return channel

    def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
        """Get the cached guild that this event occurred in, if known.

        !!! note
            This will require the `GUILDS` intent to be specified on start-up
            in order to be known.

        Returns
        -------
        typing.Optional[hikari.guilds.GatewayGuild]
            The guild that this event occurred in, if cached. Otherwise,
            `builtins.None` instead.
        """
        if not isinstance(self.app, traits.CacheAware):
            return None
        return self.app.cache.get_guild(self.guild_id)

    def get_member(self) -> typing.Optional[guilds.Member]:
        """Get the member that sent this message from the cache if available.

        Returns
        -------
        typing.Optional[hikari.guilds.Member]
            Cached object of the member that sent the message if found.
        """
        if isinstance(self.app, traits.CacheAware):
            return self.app.cache.get_member(self.guild_id, self.message.author.id)
        return None
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageCreateEvent(MessageCreateEvent):
    """Event that is fired when a message is created within a DM.

    This contains the full message in the internal `message` attribute.
    """

    # NOTE: attr field order is part of the generated API; do not reorder.
    message: messages.Message = attr.field()
    # <<inherited docstring from MessageCreateEvent>>

    shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from ShardEvent>>
@base_events.requires_intents(intents.Intents.DM_MESSAGES, intents.Intents.GUILD_MESSAGES)
class MessageUpdateEvent(MessageEvent, abc.ABC):
    """Event that is fired when a message is updated.

    !!! note
        Less information will be available here than in the creation event
        due to Discord limitations.
    """

    __slots__: typing.Sequence[str] = ()

    @property
    def app(self) -> traits.RESTAware:
        # <<inherited docstring from Event>>.
        return self.message.app

    @property
    def author(self) -> undefined.UndefinedOr[users.User]:
        """User that sent the message.

        This will be `hikari.undefined.UNDEFINED` in some cases such as when Discord
        updates a message with an embed URL preview.
        """
        return self.message.author

    @property
    def author_id(self) -> undefined.UndefinedOr[snowflakes.Snowflake]:
        """ID of the author that triggered this event.

        This will be `hikari.undefined.UNDEFINED` in some cases such as when Discord
        updates a message with an embed URL preview.
        """
        # Propagate the UNDEFINED sentinel rather than dereferencing it.
        author = self.message.author
        return author.id if author is not undefined.UNDEFINED else undefined.UNDEFINED

    @property
    def channel_id(self) -> snowflakes.Snowflake:
        # <<inherited docstring from MessageEvent>>.
        return self.message.channel_id

    @property
    def content(self) -> undefined.UndefinedNoneOr[str]:
        """Content of the message.

        Returns
        -------
        hikari.undefined.UndefinedNoneOr[builtins.str]
            The content of the message, if present. This may be `builtins.None`
            or an empty string (or any falsy value) if no content is present
            (e.g. if only an embed was sent). If not part of the update, then
            this will be `hikari.undefined.UNDEFINED` instead.
        """
        return self.message.content

    @property
    def embeds(self) -> undefined.UndefinedOr[typing.Sequence[embeds_.Embed]]:
        """Sequence of embeds in the message.

        Returns
        -------
        hikari.undefined.UndefinedOr[typing.Sequence[hikari.embeds.Embed]]
            The embeds in the message. If the embeds were not changed in this
            event, then this may instead be `hikari.undefined.UNDEFINED`.
        """
        return self.message.embeds

    @property
    def is_bot(self) -> undefined.UndefinedOr[bool]:
        """Return `builtins.True` if the message is from a bot.

        Returns
        -------
        typing.Optional[builtins.bool]
            `builtins.True` if from a bot, or `builtins.False` otherwise.

            If the author is not known, due to the update event being caused
            by Discord adding an embed preview to accompany a URL, then this
            will return `hikari.undefined.UNDEFINED` instead.
        """
        if (author := self.message.author) is not undefined.UNDEFINED:
            return author.is_bot
        return undefined.UNDEFINED

    @property
    def is_human(self) -> undefined.UndefinedOr[bool]:
        """Return `builtins.True` if the message was created by a human.

        Returns
        -------
        typing.Optional[builtins.bool]
            `builtins.True` if from a human user, or `builtins.False` otherwise.

            If the author is not known, due to the update event being caused
            by Discord adding an embed preview to accompany a URL, then this
            may return `hikari.undefined.UNDEFINED` instead.
        """
        # Not second-guessing some weird edge case will occur in the future with this,
        # so I am being safe rather than sorry.
        # Order matters: a defined webhook_id answers the question outright;
        # only fall back to the author when webhook_id is UNDEFINED.
        if (webhook_id := self.message.webhook_id) is not undefined.UNDEFINED:
            return webhook_id is None
        if (author := self.message.author) is not undefined.UNDEFINED:
            return not author.is_bot
        return undefined.UNDEFINED

    @property
    def is_webhook(self) -> undefined.UndefinedOr[bool]:
        """Return `builtins.True` if the message was created by a webhook.

        Returns
        -------
        builtins.bool
            `builtins.True` if from a webhook, or `builtins.False` otherwise.
        """
        if (webhook_id := self.message.webhook_id) is not undefined.UNDEFINED:
            return webhook_id is not None
        return undefined.UNDEFINED

    @property
    @abc.abstractmethod
    def message(self) -> messages.PartialMessage:
        """Partial message that was sent in the event.

        Returns
        -------
        hikari.messages.PartialMessage
            The partial message object that was sent with this event.
        """

    @property
    def message_id(self) -> snowflakes.Snowflake:
        """ID of the message that this event concerns.

        Returns
        -------
        hikari.snowflakes.Snowflake
            The ID of the message that this event concerns.
        """
        return self.message.id
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageUpdateEvent(MessageUpdateEvent):
    """Event that is fired when a message is updated in a guild.

    !!! note
        Less information will be available here than in the creation event
        due to Discord limitations.
    """

    # NOTE: attr field order is part of the generated API; do not reorder.
    old_message: typing.Optional[messages.PartialMessage] = attr.field()
    """The old message object.

    This will be `builtins.None` if the message is missing from the cache.
    """

    message: messages.PartialMessage = attr.field()
    # <<inherited docstring from MessageUpdateEvent>>

    shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from ShardEvent>>

    @property
    def member(self) -> undefined.UndefinedNoneOr[guilds.Member]:
        """Member that sent the message if provided by the event.

        If the message is not in a guild, this will be `builtins.None`.

        This will also be `hikari.undefined.UNDEFINED` in some cases such as when Discord
        updates a message with an embed URL preview.
        """
        return self.message.member

    def get_member(self) -> typing.Optional[guilds.Member]:
        """Get the member that sent this message from the cache if available.

        Returns
        -------
        typing.Optional[hikari.guilds.Member]
            Cached object of the member that sent the message if found.
        """
        # The author may be UNDEFINED on sparse update payloads; only hit the
        # cache when we actually know who sent the message.
        if self.message.author is not undefined.UNDEFINED and isinstance(self.app, traits.CacheAware):
            return self.app.cache.get_member(self.guild_id, self.message.author.id)
        return None

    @property
    def guild_id(self) -> snowflakes.Snowflake:
        """ID of the guild that this event occurred in.

        Returns
        -------
        hikari.snowflakes.Snowflake
            The ID of the guild that this event occurred in.
        """
        guild_id = self.message.guild_id
        # Always present on guild events
        assert isinstance(guild_id, snowflakes.Snowflake), f"expected guild_id, got {guild_id}"
        return guild_id

    def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
        """Channel that the message was sent in, if known.

        Returns
        -------
        typing.Optional[hikari.channels.TextableGuildChannel]
            The channel that the message was sent in, if known and cached,
            otherwise, `builtins.None`.
        """
        if not isinstance(self.app, traits.CacheAware):
            return None
        channel = self.app.cache.get_guild_channel(self.channel_id)
        assert channel is None or isinstance(
            channel, channels.TextableGuildChannel
        ), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
        return channel

    def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
        """Get the cached guild that this event occurred in, if known.

        !!! note
            This will require the `GUILDS` intent to be specified on start-up
            in order to be known.

        Returns
        -------
        typing.Optional[hikari.guilds.GatewayGuild]
            The guild that this event occurred in, if cached. Otherwise,
            `builtins.None` instead.
        """
        if not isinstance(self.app, traits.CacheAware):
            return None
        return self.app.cache.get_guild(self.guild_id)
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageUpdateEvent(MessageUpdateEvent):
    """Event that is fired when a message is updated in a DM.

    !!! note
        Less information will be available here than in the creation event
        due to Discord limitations.
    """

    # NOTE: attr field order is part of the generated API; do not reorder.
    old_message: typing.Optional[messages.PartialMessage] = attr.field()
    """The old message object.

    This will be `builtins.None` if the message is missing from the cache.
    """

    message: messages.PartialMessage = attr.field()
    # <<inherited docstring from MessageUpdateEvent>>

    shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from ShardEvent>>
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES, intents.Intents.DM_MESSAGES)
class MessageDeleteEvent(MessageEvent, abc.ABC):
    """Special event that is triggered when a message gets deleted.

    !!! note
        Due to Discord limitations, most message information is unavailable
        during deletion events.
    """

    __slots__: typing.Sequence[str] = ()

    @property
    @abc.abstractmethod
    def message_id(self) -> snowflakes.Snowflake:
        """ID of the message that was deleted."""

    # Deletion payloads carry only IDs; a previously cached copy of the
    # message, when available, is exposed via `old_message`.
    @property
    @abc.abstractmethod
    def old_message(self) -> typing.Optional[messages.Message]:
        """Object of the message that was deleted.

        Will be `None` if the message was not found in the cache.
        """
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildMessageDeleteEvent(MessageDeleteEvent):
    """Event that is triggered if a message is deleted in a guild.

    !!! note
        Due to Discord limitations, most message information is unavailable
        during deletion events.
    """

    # NOTE: attr field order is part of the generated API; do not reorder.
    app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from Event>>

    channel_id: snowflakes.Snowflake = attr.field()
    # <<inherited docstring from MessageEvent>>

    guild_id: snowflakes.Snowflake = attr.field()
    """ID of the guild that this event occurred in."""

    message_id: snowflakes.Snowflake = attr.field()
    # <<inherited docstring from MessageDeleteEvent>>

    old_message: typing.Optional[messages.Message] = attr.field()
    # <<inherited docstring from MessageDeleteEvent>>

    shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from ShardEvent>>

    def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
        """Get the cached channel the message were sent in, if known.

        Returns
        -------
        typing.Optional[hikari.channels.TextableGuildChannel]
            The channel the messages were sent in, or `builtins.None` if not
            known/cached.
        """
        if not isinstance(self.app, traits.CacheAware):
            return None
        channel = self.app.cache.get_guild_channel(self.channel_id)
        assert channel is None or isinstance(
            channel, channels.TextableGuildChannel
        ), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
        return channel

    def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
        """Get the cached guild this event corresponds to, if known.

        !!! note
            You will need `hikari.intents.Intents.GUILDS` enabled to receive this
            information.

        Returns
        -------
        hikari.guilds.GatewayGuild
            The gateway guild that this event corresponds to, if known and
            cached.
        """
        if not isinstance(self.app, traits.CacheAware):
            return None
        return self.app.cache.get_guild(self.guild_id)
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.DM_MESSAGES)
class DMMessageDeleteEvent(MessageDeleteEvent):
    """Event that is triggered if a message is deleted in a DM.

    !!! note
        Due to Discord limitations, most message information is unavailable
        during deletion events.
    """

    # NOTE: attr field order is part of the generated API; do not reorder.
    app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from Event>>

    channel_id: snowflakes.Snowflake = attr.field()
    # <<inherited docstring from MessageEvent>>

    message_id: snowflakes.Snowflake = attr.field()
    # <<inherited docstring from MessageDeleteEvent>>

    old_message: typing.Optional[messages.Message] = attr.field()
    # <<inherited docstring from MessageDeleteEvent>>

    shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from ShardEvent>>
@attr_extensions.with_copy
@attr.define(kw_only=True, weakref_slot=False)
@base_events.requires_intents(intents.Intents.GUILD_MESSAGES)
class GuildBulkMessageDeleteEvent(shard_events.ShardEvent):
    """Event that is triggered when a bulk deletion is triggered in a guild.

    !!! note
        Due to Discord limitations, most message information is unavailable
        during deletion events.
    """

    # NOTE: attr field order is part of the generated API; do not reorder.
    app: traits.RESTAware = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from Event>>

    channel_id: snowflakes.Snowflake = attr.field()
    """ID of the channel that this event concerns."""

    guild_id: snowflakes.Snowflake = attr.field()
    """ID of the guild that this event occurred in."""

    message_ids: typing.AbstractSet[snowflakes.Snowflake] = attr.field()
    """Set of message IDs that were bulk deleted."""

    old_messages: typing.Mapping[snowflakes.Snowflake, messages.Message] = attr.field()
    """Mapping of a snowflake to the deleted message object.

    If the message was not found in the cache it will be missing from the mapping.
    """

    shard: shard_.GatewayShard = attr.field(metadata={attr_extensions.SKIP_DEEP_COPY: True})
    # <<inherited docstring from ShardEvent>>

    def get_channel(self) -> typing.Optional[channels.TextableGuildChannel]:
        """Get the cached channel the messages were sent in, if known.

        Returns
        -------
        typing.Optional[hikari.channels.TextableGuildChannel]
            The channel the messages were sent in, or `builtins.None` if not
            known/cached.
        """
        if not isinstance(self.app, traits.CacheAware):
            return None
        channel = self.app.cache.get_guild_channel(self.channel_id)
        assert channel is None or isinstance(
            channel, channels.TextableGuildChannel
        ), f"Cached channel ID is not a TextableGuildChannel, but a {type(channel).__name__}!"
        return channel

    def get_guild(self) -> typing.Optional[guilds.GatewayGuild]:
        """Get the cached guild this event corresponds to, if known.

        !!! note
            You will need `hikari.intents.Intents.GUILDS` enabled to receive this
            information.

        Returns
        -------
        hikari.guilds.GatewayGuild
            The gateway guild that this event corresponds to, if known and
            cached.
        """
        if not isinstance(self.app, traits.CacheAware):
            return None
        return self.app.cache.get_guild(self.guild_id)
| StarcoderdataPython |
#@ Python in interactive mode properly initializes sys.argv
# Fused dataset residue at both ends of this fragment removed — it broke the syntax.
# `testutil` is provided by the mysqlsh test harness at runtime.
testutil.call_mysqlsh(["--py", "-i", "-e", "import sys; print('sys.argv = {0}'.format(sys.argv))"])
import argparse
from pathlib import Path


def main(argv=None):
    """Split a corpus into one word per line, dropping overlong words.

    Parameters
    ----------
    argv : list of str, optional
        Command-line arguments; defaults to ``sys.argv[1:]``.
    """
    parser = argparse.ArgumentParser()
    parser.add_argument('--input', type=str, required=True,
                        help='input corpus to split into words')
    parser.add_argument('--output', type=str, required=True,
                        help='output corpus filename')
    parser.add_argument('--max_len', type=int, default=25,
                        help='max length of a word to save')
    args = parser.parse_args(argv)
    line_cnt = 0
    # BUG FIX: the original referenced args.input_corpus / args.output_corpus /
    # args.corpus, none of which exist for --input/--output, so the script
    # crashed with AttributeError. Both files are now also closed deterministically.
    with Path(args.output).open('w', encoding='utf8') as out:
        with Path(args.input).open('r', encoding='utf8') as f:
            for line in f:
                for word in line.strip().split():
                    # Skip words longer than the cap (words of exactly
                    # max_len are kept, as in the original comparison).
                    if len(word) <= args.max_len:
                        out.write(word + '\n')
                # BUG FIX: count input lines (as the final message claims),
                # not kept words.
                line_cnt += 1
    print(line_cnt, 'lines processed')


if __name__ == "__main__":
    main()
| StarcoderdataPython |
1604298 | import os
import unittest
from pathlib import Path
from src.qchainpy.client import Client
class TestClientMethods(unittest.TestCase):
    """Integration tests for the qchainpy ``Client`` transfer API.

    Requires the ``API_URL`` and ``TOKEN`` environment variables and the
    bundled ``keys/private.key`` file next to this test module.
    """

    def setUp(self):
        """Build a client from the bundled private key and API_URL env var."""
        private_key = os.path.join(Path(__file__).resolve().parent, 'keys/private.key')
        api_url = os.getenv('API_URL', default=None)
        self.client = Client(
            api_url=api_url,
            key_path=private_key,
            passphrase=None,
        ).get_client()

    def test_transfer_to_node_id(self):
        """A small transfer to a node id should report success."""
        token = os.getenv('TOKEN', default=None)
        response = self.client.transfer(token=token, amount=0.01, recipient='37542')
        self.assertEqual(response.get('success'), 'true')
# Allow running this test module directly: `python test_client.py`.
if __name__ == '__main__':
    unittest.main()
| StarcoderdataPython |
# Identifier namespaces supported by the clients, keyed by internal name.
# Each entry records the identifiers.org URI prefix and an example value.
AVAILABLE_IDS = {
    "ensembl_gene_id": {
        "uri": "http://identifiers.org/ensembl.gene/",
        "example": "ENSG00000139618",
    },
    "entrez_gene_id": {
        # NOTE(review): URI prefix says hgnc while the key is entrez — confirm upstream.
        "uri": "http://identifiers.org/hgnc/",
        "example": 1017,
    },
    "hgnc_gene_symbol": {
        "uri": "http://identifiers.org/hgnc.symbol/",
        "example": "CDK7",
    },
    "hgvs_id": {
        "uri": "http://identifiers.org/hgvs*/",
        "example": "chr6:123456G>A",
    },
    "dbsnp_id": {
        "uri": "http://identifiers.org/dbsnp/",
        "example": "rs123456",
    },
    "drugbank_id": {
        "uri": "http://identifiers.org/drugbank/",
        "example": "DB00002",
    },
    "pubchem_id": {
        "uri": "http://identifiers.org/pubchem.compound/",
        "example": 100101,
    },
    "pubmed_id": {
        "uri": "http://identifiers.org/pubmed/",
        "example": 16333295,
    },
    "uniprot_id": {
        "uri": "http://identifiers.org/uniprot/",
        "example": "P62158",
    },
    "wikipathway_id": {
        "uri": "http://identifiers.org/wikipathways/",
        "example": "WP100",
    },
    "clinicaltrial_id": {
        "uri": "http://identifiers.org/clinicaltrials/",
        "example": "NCT01314001",
    },
}

# Per-API configuration: which id types each BioThings service can annotate or
# query, the URL templates ('*' is the substitution point), and JSON-LD context.
AVAILABLE_API_SOURCES = {
    "mygene.info": {
        "annotate_ids": ["entrez_gene_id", "ensembl_gene_id"],
        "query_ids": ["uniprot_id", "ensembl_gene_id", "hgnc_gene_symbol", "wikipathway_id", "pubmed_id"],
        "annotate_syntax": "http://mygene.info/v3/gene/*?dotfield=true",
        "query_syntax": "http://mygene.info/v3/query?q=*",
        "description": "gene annotation service",
        "jsonld": {
            "context_file_path": "context/mygene_context.json",
        },
    },
    "myvariant.info": {
        "annotate_ids": ["hgvs_id"],
        "query_ids": ["entrez_gene_id", "hgnc_gene_symbol", "ensembl_gene_id", "dbsnp_id", "pubmed_id", "uniprot_id"],
        "annotate_syntax": "http://myvariant.info/v1/variant/*?dotfield=true",
        "query_syntax": "http://myvariant.info/v1/query?q=*",
        "jsonld": {
            "context_file_path": "context/myvariant_context.json",
        },
    },
    "mydrug.info": {
        "annotate_ids": ["drugbank_id"],
        "query_ids": ["dbsnp_id", "pubchem_id", "drugbank_id", "pubmed_id", "hgnc_gene_symbol", "uniprot_id", "clinicaltrial_id"],
        "annotate_syntax": "http://c.biothings.io/v1/drug/*?dotfield=true",
        "query_syntax": "http://c.biothings.io/v1/query?q=*",
        "jsonld": {
            "context_file_path": "context/mydrug_context.json",
        },
    },
}

# The set of API client names exposed by this package.
CLIENT_LIST = {"mygene.info", "myvariant.info", "mydrug.info"}
| StarcoderdataPython |
1759006 | <filename>utils/help_command.py
import asyncio
from math import ceil
import discord
from discord.ext import commands
class CommandHelpEntry:
    """Lightweight record describing one command for the help listing."""

    def __init__(self, name, usage, desc):
        """Store the command's name, usage string, and short description."""
        self.name, self.usage, self.desc = name, usage, desc
class CustomHelpCommand(commands.DefaultHelpCommand):
    """Help command that paginates Randomi's command list with reaction controls.

    In guild channels where the bot has Manage Messages and Add Reactions it
    sends a reaction-driven paginator; otherwise (including DMs) it DMs the
    full listing as static embeds.
    """

    def __init__(self):
        super().__init__()

    async def send_bot_help(self, mapping):
        """Send the bot-wide help listing, paginated when permissions allow."""
        commands_listing = self.context.bot.commands
        help_commands = await self.filter_commands(commands_listing, sort=True)
        inline = False
        # If the message was spawned from a DM, let's not go through the trouble of paginating
        if self.context.guild is not None:
            # Look the permissions up once instead of twice.
            perms = self.context.channel.permissions_for(self.context.guild.me)
            manage_messages = perms.manage_messages
            add_reactions = perms.add_reactions
        else:
            manage_messages = False
            add_reactions = False
        # BUGFIX: dropped leftover debug print() calls and the non-idiomatic
        # `is True` comparisons from the original.
        if manage_messages and add_reactions:
            commands_to_paginate = []
            for command in help_commands:
                command_help_entry = CommandHelpEntry(command.name, command.usage, str(command.short_doc))
                commands_to_paginate.append(command_help_entry)
            await self.paginate_help(commands_to_paginate)
        else:
            embed = discord.Embed(title='Randomi Commands')
            embed.colour = discord.Colour.blue()
            embed.description = '''
List of commands available for Randomi
For a more user-friendly list of help commands, visit:
https://trottinger.github.io/discord-randomify/commands
'''
            embed.set_author(name='Randomi Help Page')
            embed.set_thumbnail(url=self.context.bot.user.avatar_url)
            embed.set_footer(text='Thanks for using Randomi! Review and upvote at https://top.gg/bot/770197604155785216')
            for command in help_commands:
                if command.usage is not None:
                    embed.add_field(name=str(command.name) + ' ' + str(command.usage), value=str(command.short_doc),
                                    inline=inline)
                else:
                    embed.add_field(name=str(command.name), value=str(command.short_doc), inline=inline)
                # Discord embeds max out at 25 fields; flush every 20 to stay safe.
                if len(embed.fields) == 20:
                    await self.context.author.send(embed=embed)
                    embed.clear_fields()
            await self.context.author.send(embed=embed)

    # Credit Diggy on Stack Overflow: https://stackoverflow.com/a/61793587
    async def paginate_help(self, command_listing):
        """Send the command list as reaction-paginated embeds (10 per page).

        The back/forward reactions flip pages, the stop reaction deletes the
        message, and the message is deleted after 60 seconds of inactivity.
        """
        bot = self.context.bot
        content = []
        pages = ceil(len(command_listing) / 10)
        for i in range(0, pages):
            embed = discord.Embed(title='Randomi Commands')
            embed.colour = discord.Colour.purple()
            embed.set_thumbnail(url=self.context.bot.user.avatar_url)
            embed.set_footer(text='Review and upvote at https://top.gg/bot/770197604155785216')
            embed.description = '''
Thank you for using Randomi, the bot for all your random needs!
To review and upvote, visit: https://top.gg/bot/770197604155785216
For a more user-friendly list of help commands, visit:
https://trottinger.github.io/discord-randomify/commands
To set a custom prefix, try: !rt help prefix
This message will delete after 60 seconds of inactivity
Page {page}/{pages}
'''.format(page=str(i + 1), pages=str(pages))
            for j in range(0, 10):
                if (i * 10) + j == len(command_listing):
                    break
                curr_command = command_listing[(i * 10) + j]
                if curr_command.usage is not None:
                    embed.add_field(name=str(curr_command.name) + ' ' + str(curr_command.usage),
                                    value=str(curr_command.desc),
                                    inline=False)
                else:
                    embed.add_field(name=str(curr_command.name), value=str(curr_command.desc),
                                    inline=False)
            content.append(embed)
        # BUGFIX: guard against an empty listing, which previously raised
        # IndexError on content[0] below.
        if not content:
            return
        cur_page = 1
        message = await self.context.send(embed=content[cur_page - 1])
        # getting the message object for editing and reacting
        await message.add_reaction("◀️")
        await message.add_reaction("▶️")
        await message.add_reaction("🛑")

        def check(react, usr):
            # This makes sure nobody except the command sender can interact with the "menu"
            return usr == self.context.author and str(react.emoji) in ["◀️", "▶️", "🛑"]

        while True:
            try:
                # Wait for a reaction; times out after 60 seconds of inactivity.
                reaction, user = await bot.wait_for("reaction_add", timeout=60, check=check)
                # Spam prevention
                await asyncio.sleep(delay=1)
                if str(reaction.emoji) == "▶️" and cur_page != pages:
                    cur_page += 1
                    await message.edit(embed=content[cur_page - 1])
                    await message.remove_reaction(reaction, user)
                elif str(reaction.emoji) == "◀️" and cur_page > 1:
                    cur_page -= 1
                    await message.edit(embed=content[cur_page - 1])
                    await message.remove_reaction(reaction, user)
                elif str(reaction.emoji) == "🛑":
                    await message.delete()
                    break
                else:
                    # removes reactions if the user tries to go forward on the
                    # last page or backwards on the first page
                    await message.remove_reaction(reaction, user)
            except asyncio.TimeoutError:
                # ending the loop if user doesn't react after 60 seconds
                await message.delete()
                break
            except discord.Forbidden:
                print('Invalid perms')
                break
| StarcoderdataPython |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.