max_stars_repo_path
stringlengths
4
286
max_stars_repo_name
stringlengths
5
119
max_stars_count
int64
0
191k
id
stringlengths
1
7
content
stringlengths
6
1.03M
content_cleaned
stringlengths
6
1.03M
language
stringclasses
111 values
language_score
float64
0.03
1
comments
stringlengths
0
556k
edu_score
float64
0.32
5.03
edu_int_score
int64
0
5
python/ray/tune/examples/pbt_dcgan_mnist/pbt_dcgan_mnist.py
sunho/ray
2
6625251
<gh_stars>1-10 #!/usr/bin/env python from __future__ import absolute_import from __future__ import division from __future__ import print_function import ray from ray import tune from ray.tune.schedulers import PopulationBasedTraining from ray.tune.trial import ExportFormat import argparse import os from filelock import FileLock import random import torch import torch.nn as nn import torch.nn.parallel import torch.optim as optim import torch.utils.data import torchvision.datasets as dset import torchvision.transforms as transforms import torchvision.utils as vutils import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from torch.autograd import Variable from torch.nn import functional as F from scipy.stats import entropy # Training parameters dataroot = "/tmp/" workers = 2 batch_size = 64 image_size = 32 # Number of channels in the training images. For color images this is 3 nc = 1 # Size of z latent vector (i.e. size of generator input) nz = 100 # Size of feature maps in generator ngf = 32 # Size of feature maps in discriminator ndf = 32 # Beta1 hyperparam for Adam optimizers beta1 = 0.5 # iterations of actual training in each Trainable _train train_iterations_per_step = 5 def get_data_loader(): dataset = dset.MNIST( root=dataroot, download=True, transform=transforms.Compose([ transforms.Resize(image_size), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, )), ])) # Create the dataloader dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=True, num_workers=workers) return dataloader # __GANmodel_begin__ # custom weights initialization called on netG and netD def weights_init(m): classname = m.__class__.__name__ if classname.find("Conv") != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find("BatchNorm") != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) # Generator Code class Generator(nn.Module): def __init__(self): super(Generator, 
self).__init__() self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True), nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh()) def forward(self, input): return self.main(input) class Discriminator(nn.Module): def __init__(self): super(Discriminator, self).__init__() self.main = nn.Sequential( nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False), nn.Sigmoid()) def forward(self, input): return self.main(input) # __GANmodel_end__ # __INCEPTION_SCORE_begin__ class Net(nn.Module): """ LeNet for MNist classification, used for inception_score """ def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=1) def inception_score(imgs, batch_size=32, splits=1): N = len(imgs) dtype = torch.FloatTensor dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) cm = ray.get(mnist_model_ref) up = nn.Upsample(size=(28, 28), mode="bilinear").type(dtype) def get_pred(x): x = up(x) x = cm(x) return F.softmax(x).data.cpu().numpy() preds = np.zeros((N, 10)) for i, batch in 
enumerate(dataloader, 0): batch = batch.type(dtype) batchv = Variable(batch) batch_size_i = batch.size()[0] preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv) # Now compute the mean kl-div split_scores = [] for k in range(splits): part = preds[k * (N // splits):(k + 1) * (N // splits), :] py = np.mean(part, axis=0) scores = [] for i in range(part.shape[0]): pyx = part[i, :] scores.append(entropy(pyx, py)) split_scores.append(np.exp(np.mean(scores))) return np.mean(split_scores), np.std(split_scores) # __INCEPTION_SCORE_end__ def train(netD, netG, optimG, optimD, criterion, dataloader, iteration, device): real_label = 1 fake_label = 0 for i, data in enumerate(dataloader, 0): if i >= train_iterations_per_step: break netD.zero_grad() real_cpu = data[0].to(device) b_size = real_cpu.size(0) label = torch.full((b_size, ), real_label, device=device) output = netD(real_cpu).view(-1) errD_real = criterion(output, label) errD_real.backward() D_x = output.mean().item() noise = torch.randn(b_size, nz, 1, 1, device=device) fake = netG(noise) label.fill_(fake_label) output = netD(fake.detach()).view(-1) errD_fake = criterion(output, label) errD_fake.backward() D_G_z1 = output.mean().item() errD = errD_real + errD_fake optimD.step() netG.zero_grad() label.fill_(real_label) output = netD(fake).view(-1) errG = criterion(output, label) errG.backward() D_G_z2 = output.mean().item() optimG.step() is_score, is_std = inception_score(fake) # Output training stats if iteration % 10 == 0: print("[%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z))" ": %.4f / %.4f \tInception score: %.4f" % (iteration, len(dataloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2, is_score)) return errG.item(), errD.item(), is_score # __Trainable_begin__ class PytorchTrainable(tune.Trainable): def _setup(self, config): use_cuda = config.get("use_gpu") and torch.cuda.is_available() self.device = torch.device("cuda" if use_cuda else "cpu") self.netD = Discriminator().to(self.device) 
self.netD.apply(weights_init) self.netG = Generator().to(self.device) self.netG.apply(weights_init) self.criterion = nn.BCELoss() self.optimizerD = optim.Adam( self.netD.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999)) self.optimizerG = optim.Adam( self.netG.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999)) with FileLock(os.path.expanduser("~/.data.lock")): self.dataloader = get_data_loader() def _train(self): lossG, lossD, is_score = train( self.netD, self.netG, self.optimizerG, self.optimizerD, self.criterion, self.dataloader, self._iteration, self.device) return {"lossg": lossG, "lossd": lossD, "is_score": is_score} def _save(self, checkpoint_dir): path = os.path.join(checkpoint_dir, "checkpoint") torch.save({ "netDmodel": self.netD.state_dict(), "netGmodel": self.netG.state_dict(), "optimD": self.optimizerD.state_dict(), "optimG": self.optimizerG.state_dict(), }, path) return checkpoint_dir def _restore(self, checkpoint_dir): path = os.path.join(checkpoint_dir, "checkpoint") checkpoint = torch.load(path) self.netD.load_state_dict(checkpoint["netDmodel"]) self.netG.load_state_dict(checkpoint["netGmodel"]) self.optimizerD.load_state_dict(checkpoint["optimD"]) self.optimizerG.load_state_dict(checkpoint["optimG"]) def reset_config(self, new_config): if "netD_lr" in new_config: for param_group in self.optimizerD.param_groups: param_group["lr"] = new_config["netD_lr"] if "netG_lr" in new_config: for param_group in self.optimizerG.param_groups: param_group["lr"] = new_config["netG_lr"] self.config = new_config return True def _export_model(self, export_formats, export_dir): if export_formats == [ExportFormat.MODEL]: path = os.path.join(export_dir, "exported_models") torch.save({ "netDmodel": self.netD.state_dict(), "netGmodel": self.netG.state_dict() }, path) return {ExportFormat.MODEL: path} else: raise ValueError("unexpected formats: " + str(export_formats)) # __Trainable_end__ if __name__ == "__main__": parser = argparse.ArgumentParser() 
parser.add_argument( "--smoke-test", action="store_true", help="Finish quickly for testing") args, _ = parser.parse_known_args() ray.init() dataloader = get_data_loader() if not args.smoke_test: # Plot some training images real_batch = next(iter(dataloader)) plt.figure(figsize=(8, 8)) plt.axis("off") plt.title("Original Images") plt.imshow( np.transpose( vutils.make_grid( real_batch[0][:64], padding=2, normalize=True).cpu(), (1, 2, 0))) plt.show() # load the pretrained mnist classification model for inception_score mnist_cnn = Net() model_path = os.path.join( os.path.dirname(ray.__file__), "tune/examples/pbt_dcgan_mnist/mnist_cnn.pt") mnist_cnn.load_state_dict(torch.load(model_path)) mnist_cnn.eval() mnist_model_ref = ray.put(mnist_cnn) # __tune_begin__ scheduler = PopulationBasedTraining( time_attr="training_iteration", metric="is_score", mode="max", perturbation_interval=5, hyperparam_mutations={ # distribution for resampling "netG_lr": lambda: np.random.uniform(1e-2, 1e-5), "netD_lr": lambda: np.random.uniform(1e-2, 1e-5), }) tune_iter = 5 if args.smoke_test else 300 analysis = tune.run( PytorchTrainable, name="pbt_dcgan_mnist", scheduler=scheduler, reuse_actors=True, verbose=1, checkpoint_at_end=True, stop={ "training_iteration": tune_iter, }, num_samples=8, export_formats=[ExportFormat.MODEL], config={ "netG_lr": tune.sample_from( lambda spec: random.choice([0.0001, 0.0002, 0.0005])), "netD_lr": tune.sample_from( lambda spec: random.choice([0.0001, 0.0002, 0.0005])) }) # __tune_end__ # demo of the trained Generators if not args.smoke_test: logdirs = analysis.dataframe()["logdir"].tolist() img_list = [] fixed_noise = torch.randn(64, nz, 1, 1) for d in logdirs: netG_path = os.path.join(d, "exported_models") loadedG = Generator() loadedG.load_state_dict(torch.load(netG_path)["netGmodel"]) with torch.no_grad(): fake = loadedG(fixed_noise).detach().cpu() img_list.append(vutils.make_grid(fake, padding=2, normalize=True)) fig = plt.figure(figsize=(8, 8)) 
plt.axis("off") ims = [[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)] for i in img_list] ani = animation.ArtistAnimation( fig, ims, interval=1000, repeat_delay=1000, blit=True) ani.save("./generated.gif", writer="imagemagick", dpi=72) plt.show()
#!/usr/bin/env python from __future__ import absolute_import from __future__ import division from __future__ import print_function import ray from ray import tune from ray.tune.schedulers import PopulationBasedTraining from ray.tune.trial import ExportFormat import argparse import os from filelock import FileLock import random import torch import torch.nn as nn import torch.nn.parallel import torch.optim as optim import torch.utils.data import torchvision.datasets as dset import torchvision.transforms as transforms import torchvision.utils as vutils import numpy as np import matplotlib.pyplot as plt import matplotlib.animation as animation from torch.autograd import Variable from torch.nn import functional as F from scipy.stats import entropy # Training parameters dataroot = "/tmp/" workers = 2 batch_size = 64 image_size = 32 # Number of channels in the training images. For color images this is 3 nc = 1 # Size of z latent vector (i.e. size of generator input) nz = 100 # Size of feature maps in generator ngf = 32 # Size of feature maps in discriminator ndf = 32 # Beta1 hyperparam for Adam optimizers beta1 = 0.5 # iterations of actual training in each Trainable _train train_iterations_per_step = 5 def get_data_loader(): dataset = dset.MNIST( root=dataroot, download=True, transform=transforms.Compose([ transforms.Resize(image_size), transforms.ToTensor(), transforms.Normalize((0.5, ), (0.5, )), ])) # Create the dataloader dataloader = torch.utils.data.DataLoader( dataset, batch_size=batch_size, shuffle=True, num_workers=workers) return dataloader # __GANmodel_begin__ # custom weights initialization called on netG and netD def weights_init(m): classname = m.__class__.__name__ if classname.find("Conv") != -1: nn.init.normal_(m.weight.data, 0.0, 0.02) elif classname.find("BatchNorm") != -1: nn.init.normal_(m.weight.data, 1.0, 0.02) nn.init.constant_(m.bias.data, 0) # Generator Code class Generator(nn.Module): def __init__(self): super(Generator, self).__init__() 
self.main = nn.Sequential( # input is Z, going into a convolution nn.ConvTranspose2d(nz, ngf * 4, 4, 1, 0, bias=False), nn.BatchNorm2d(ngf * 4), nn.ReLU(True), nn.ConvTranspose2d(ngf * 4, ngf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf * 2), nn.ReLU(True), nn.ConvTranspose2d(ngf * 2, ngf, 4, 2, 1, bias=False), nn.BatchNorm2d(ngf), nn.ReLU(True), nn.ConvTranspose2d(ngf, nc, 4, 2, 1, bias=False), nn.Tanh()) def forward(self, input): return self.main(input) class Discriminator(nn.Module): def __init__(self): super(Discriminator, self).__init__() self.main = nn.Sequential( nn.Conv2d(nc, ndf, 4, 2, 1, bias=False), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf, ndf * 2, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 2), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf * 2, ndf * 4, 4, 2, 1, bias=False), nn.BatchNorm2d(ndf * 4), nn.LeakyReLU(0.2, inplace=True), nn.Conv2d(ndf * 4, 1, 4, 1, 0, bias=False), nn.Sigmoid()) def forward(self, input): return self.main(input) # __GANmodel_end__ # __INCEPTION_SCORE_begin__ class Net(nn.Module): """ LeNet for MNist classification, used for inception_score """ def __init__(self): super(Net, self).__init__() self.conv1 = nn.Conv2d(1, 10, kernel_size=5) self.conv2 = nn.Conv2d(10, 20, kernel_size=5) self.conv2_drop = nn.Dropout2d() self.fc1 = nn.Linear(320, 50) self.fc2 = nn.Linear(50, 10) def forward(self, x): x = F.relu(F.max_pool2d(self.conv1(x), 2)) x = F.relu(F.max_pool2d(self.conv2_drop(self.conv2(x)), 2)) x = x.view(-1, 320) x = F.relu(self.fc1(x)) x = F.dropout(x, training=self.training) x = self.fc2(x) return F.log_softmax(x, dim=1) def inception_score(imgs, batch_size=32, splits=1): N = len(imgs) dtype = torch.FloatTensor dataloader = torch.utils.data.DataLoader(imgs, batch_size=batch_size) cm = ray.get(mnist_model_ref) up = nn.Upsample(size=(28, 28), mode="bilinear").type(dtype) def get_pred(x): x = up(x) x = cm(x) return F.softmax(x).data.cpu().numpy() preds = np.zeros((N, 10)) for i, batch in enumerate(dataloader, 0): batch = 
batch.type(dtype) batchv = Variable(batch) batch_size_i = batch.size()[0] preds[i * batch_size:i * batch_size + batch_size_i] = get_pred(batchv) # Now compute the mean kl-div split_scores = [] for k in range(splits): part = preds[k * (N // splits):(k + 1) * (N // splits), :] py = np.mean(part, axis=0) scores = [] for i in range(part.shape[0]): pyx = part[i, :] scores.append(entropy(pyx, py)) split_scores.append(np.exp(np.mean(scores))) return np.mean(split_scores), np.std(split_scores) # __INCEPTION_SCORE_end__ def train(netD, netG, optimG, optimD, criterion, dataloader, iteration, device): real_label = 1 fake_label = 0 for i, data in enumerate(dataloader, 0): if i >= train_iterations_per_step: break netD.zero_grad() real_cpu = data[0].to(device) b_size = real_cpu.size(0) label = torch.full((b_size, ), real_label, device=device) output = netD(real_cpu).view(-1) errD_real = criterion(output, label) errD_real.backward() D_x = output.mean().item() noise = torch.randn(b_size, nz, 1, 1, device=device) fake = netG(noise) label.fill_(fake_label) output = netD(fake.detach()).view(-1) errD_fake = criterion(output, label) errD_fake.backward() D_G_z1 = output.mean().item() errD = errD_real + errD_fake optimD.step() netG.zero_grad() label.fill_(real_label) output = netD(fake).view(-1) errG = criterion(output, label) errG.backward() D_G_z2 = output.mean().item() optimG.step() is_score, is_std = inception_score(fake) # Output training stats if iteration % 10 == 0: print("[%d/%d]\tLoss_D: %.4f\tLoss_G: %.4f\tD(x): %.4f\tD(G(z))" ": %.4f / %.4f \tInception score: %.4f" % (iteration, len(dataloader), errD.item(), errG.item(), D_x, D_G_z1, D_G_z2, is_score)) return errG.item(), errD.item(), is_score # __Trainable_begin__ class PytorchTrainable(tune.Trainable): def _setup(self, config): use_cuda = config.get("use_gpu") and torch.cuda.is_available() self.device = torch.device("cuda" if use_cuda else "cpu") self.netD = Discriminator().to(self.device) self.netD.apply(weights_init) 
self.netG = Generator().to(self.device) self.netG.apply(weights_init) self.criterion = nn.BCELoss() self.optimizerD = optim.Adam( self.netD.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999)) self.optimizerG = optim.Adam( self.netG.parameters(), lr=config.get("lr", 0.01), betas=(beta1, 0.999)) with FileLock(os.path.expanduser("~/.data.lock")): self.dataloader = get_data_loader() def _train(self): lossG, lossD, is_score = train( self.netD, self.netG, self.optimizerG, self.optimizerD, self.criterion, self.dataloader, self._iteration, self.device) return {"lossg": lossG, "lossd": lossD, "is_score": is_score} def _save(self, checkpoint_dir): path = os.path.join(checkpoint_dir, "checkpoint") torch.save({ "netDmodel": self.netD.state_dict(), "netGmodel": self.netG.state_dict(), "optimD": self.optimizerD.state_dict(), "optimG": self.optimizerG.state_dict(), }, path) return checkpoint_dir def _restore(self, checkpoint_dir): path = os.path.join(checkpoint_dir, "checkpoint") checkpoint = torch.load(path) self.netD.load_state_dict(checkpoint["netDmodel"]) self.netG.load_state_dict(checkpoint["netGmodel"]) self.optimizerD.load_state_dict(checkpoint["optimD"]) self.optimizerG.load_state_dict(checkpoint["optimG"]) def reset_config(self, new_config): if "netD_lr" in new_config: for param_group in self.optimizerD.param_groups: param_group["lr"] = new_config["netD_lr"] if "netG_lr" in new_config: for param_group in self.optimizerG.param_groups: param_group["lr"] = new_config["netG_lr"] self.config = new_config return True def _export_model(self, export_formats, export_dir): if export_formats == [ExportFormat.MODEL]: path = os.path.join(export_dir, "exported_models") torch.save({ "netDmodel": self.netD.state_dict(), "netGmodel": self.netG.state_dict() }, path) return {ExportFormat.MODEL: path} else: raise ValueError("unexpected formats: " + str(export_formats)) # __Trainable_end__ if __name__ == "__main__": parser = argparse.ArgumentParser() parser.add_argument( 
"--smoke-test", action="store_true", help="Finish quickly for testing") args, _ = parser.parse_known_args() ray.init() dataloader = get_data_loader() if not args.smoke_test: # Plot some training images real_batch = next(iter(dataloader)) plt.figure(figsize=(8, 8)) plt.axis("off") plt.title("Original Images") plt.imshow( np.transpose( vutils.make_grid( real_batch[0][:64], padding=2, normalize=True).cpu(), (1, 2, 0))) plt.show() # load the pretrained mnist classification model for inception_score mnist_cnn = Net() model_path = os.path.join( os.path.dirname(ray.__file__), "tune/examples/pbt_dcgan_mnist/mnist_cnn.pt") mnist_cnn.load_state_dict(torch.load(model_path)) mnist_cnn.eval() mnist_model_ref = ray.put(mnist_cnn) # __tune_begin__ scheduler = PopulationBasedTraining( time_attr="training_iteration", metric="is_score", mode="max", perturbation_interval=5, hyperparam_mutations={ # distribution for resampling "netG_lr": lambda: np.random.uniform(1e-2, 1e-5), "netD_lr": lambda: np.random.uniform(1e-2, 1e-5), }) tune_iter = 5 if args.smoke_test else 300 analysis = tune.run( PytorchTrainable, name="pbt_dcgan_mnist", scheduler=scheduler, reuse_actors=True, verbose=1, checkpoint_at_end=True, stop={ "training_iteration": tune_iter, }, num_samples=8, export_formats=[ExportFormat.MODEL], config={ "netG_lr": tune.sample_from( lambda spec: random.choice([0.0001, 0.0002, 0.0005])), "netD_lr": tune.sample_from( lambda spec: random.choice([0.0001, 0.0002, 0.0005])) }) # __tune_end__ # demo of the trained Generators if not args.smoke_test: logdirs = analysis.dataframe()["logdir"].tolist() img_list = [] fixed_noise = torch.randn(64, nz, 1, 1) for d in logdirs: netG_path = os.path.join(d, "exported_models") loadedG = Generator() loadedG.load_state_dict(torch.load(netG_path)["netGmodel"]) with torch.no_grad(): fake = loadedG(fixed_noise).detach().cpu() img_list.append(vutils.make_grid(fake, padding=2, normalize=True)) fig = plt.figure(figsize=(8, 8)) plt.axis("off") ims = 
[[plt.imshow(np.transpose(i, (1, 2, 0)), animated=True)] for i in img_list] ani = animation.ArtistAnimation( fig, ims, interval=1000, repeat_delay=1000, blit=True) ani.save("./generated.gif", writer="imagemagick", dpi=72) plt.show()
en
0.734335
#!/usr/bin/env python # Training parameters # Number of channels in the training images. For color images this is 3 # Size of z latent vector (i.e. size of generator input) # Size of feature maps in generator # Size of feature maps in discriminator # Beta1 hyperparam for Adam optimizers # iterations of actual training in each Trainable _train # Create the dataloader # __GANmodel_begin__ # custom weights initialization called on netG and netD # Generator Code # input is Z, going into a convolution # __GANmodel_end__ # __INCEPTION_SCORE_begin__ LeNet for MNist classification, used for inception_score # Now compute the mean kl-div # __INCEPTION_SCORE_end__ # Output training stats # __Trainable_begin__ # __Trainable_end__ # Plot some training images # load the pretrained mnist classification model for inception_score # __tune_begin__ # distribution for resampling # __tune_end__ # demo of the trained Generators
2.116016
2
app/matching_algorithm/test_ford_fulkerson.py
KaviMD/ppe-exchange
0
6625252
from ford_fulkerson import FlowNetwork participants = { "1": "Jim", "2": "Mike", "3": "Kathy" } sku = { "a": "type a", "b": "type b", "c": "type c" } want = { "1": [ { "sku": "b", "count": 125 }, { "sku": "c", "count": 125 } ], "2": [ { "sku": "a", "count": 125 }, { "sku": "c", "count": 125 } ], "3": [ { "sku": "a", "count": 125 }, { "sku": "b", "count": 125 } ] } has = { "1": [ { "sku": "a", "count": 250 } ], "2": [ { "sku": "b", "count": 250 } ], "3": [ { "sku": "c", "count": 250 } ] } fn = FlowNetwork() fn.addVertex('s', True, False) fn.addVertex('t', False, True) new_vertices = [] for p in participants: for s in sku: for w in want[p]: if w["sku"] == s: #print(participants[p], "wants", sku[s]) v_name = p+"-"+s+"-w" new_vertices.append([v_name, w["count"]]) fn.addVertex(v_name) fn.addEdge('s', v_name, w["count"]) for p in participants: for s in sku: for h in has[p]: if h["sku"] == s: #print(participants[p], "has", sku[s]) v_name = p+"-"+s+"-h" fn.addVertex(v_name) fn.addEdge(v_name, 't', h["count"]) for nv in new_vertices: nv_sku = nv[0].split("-")[1] if nv_sku == s: fn.addEdge(nv[0], v_name, h["count"]) print(fn.calculateMaxFlow()) # Display all connections for e in fn.getEdges(): if e.capacity > 0: print('{} -> {} {}/{}'.format(e.start, e.end, e.flow, e.capacity)) # Display transactions for e in fn.getEdges(): if e.capacity > 0: if e.start != "s" and e.end != "t": start = e.start.split("-") end = e.end.split("-") print(participants[end[0]], "gives", e.flow, start[1], "to", participants[start[0]]) ''' fn = FlowNetwork() fn.addVertex('s', True, False) fn.addVertex('t', False, True) for v in ['a', 'b','c','d']: fn.addVertex(v) fn.addEdge('s', 'a', 4) fn.addEdge('a', 'b', 4) fn.addEdge('b', 't', 2) fn.addEdge('s', 'c', 3) fn.addEdge('c', 'd', 6) fn.addEdge('d', 't', 6) print(fn.calculateMaxFlow()) for e in fn.getEdges(): if e.flow > 0: print('{} -> {} {}/{}'.format(e.start, e.end, e.flow, e.capacity)) '''
from ford_fulkerson import FlowNetwork participants = { "1": "Jim", "2": "Mike", "3": "Kathy" } sku = { "a": "type a", "b": "type b", "c": "type c" } want = { "1": [ { "sku": "b", "count": 125 }, { "sku": "c", "count": 125 } ], "2": [ { "sku": "a", "count": 125 }, { "sku": "c", "count": 125 } ], "3": [ { "sku": "a", "count": 125 }, { "sku": "b", "count": 125 } ] } has = { "1": [ { "sku": "a", "count": 250 } ], "2": [ { "sku": "b", "count": 250 } ], "3": [ { "sku": "c", "count": 250 } ] } fn = FlowNetwork() fn.addVertex('s', True, False) fn.addVertex('t', False, True) new_vertices = [] for p in participants: for s in sku: for w in want[p]: if w["sku"] == s: #print(participants[p], "wants", sku[s]) v_name = p+"-"+s+"-w" new_vertices.append([v_name, w["count"]]) fn.addVertex(v_name) fn.addEdge('s', v_name, w["count"]) for p in participants: for s in sku: for h in has[p]: if h["sku"] == s: #print(participants[p], "has", sku[s]) v_name = p+"-"+s+"-h" fn.addVertex(v_name) fn.addEdge(v_name, 't', h["count"]) for nv in new_vertices: nv_sku = nv[0].split("-")[1] if nv_sku == s: fn.addEdge(nv[0], v_name, h["count"]) print(fn.calculateMaxFlow()) # Display all connections for e in fn.getEdges(): if e.capacity > 0: print('{} -> {} {}/{}'.format(e.start, e.end, e.flow, e.capacity)) # Display transactions for e in fn.getEdges(): if e.capacity > 0: if e.start != "s" and e.end != "t": start = e.start.split("-") end = e.end.split("-") print(participants[end[0]], "gives", e.flow, start[1], "to", participants[start[0]]) ''' fn = FlowNetwork() fn.addVertex('s', True, False) fn.addVertex('t', False, True) for v in ['a', 'b','c','d']: fn.addVertex(v) fn.addEdge('s', 'a', 4) fn.addEdge('a', 'b', 4) fn.addEdge('b', 't', 2) fn.addEdge('s', 'c', 3) fn.addEdge('c', 'd', 6) fn.addEdge('d', 't', 6) print(fn.calculateMaxFlow()) for e in fn.getEdges(): if e.flow > 0: print('{} -> {} {}/{}'.format(e.start, e.end, e.flow, e.capacity)) '''
en
0.206274
#print(participants[p], "wants", sku[s]) #print(participants[p], "has", sku[s]) # Display all connections # Display transactions fn = FlowNetwork() fn.addVertex('s', True, False) fn.addVertex('t', False, True) for v in ['a', 'b','c','d']: fn.addVertex(v) fn.addEdge('s', 'a', 4) fn.addEdge('a', 'b', 4) fn.addEdge('b', 't', 2) fn.addEdge('s', 'c', 3) fn.addEdge('c', 'd', 6) fn.addEdge('d', 't', 6) print(fn.calculateMaxFlow()) for e in fn.getEdges(): if e.flow > 0: print('{} -> {} {}/{}'.format(e.start, e.end, e.flow, e.capacity))
2.62542
3
factual/query/write.py
gvelez17/factual-python-driver
1
6625253
from base import Base class Write(Base): def __init__(self, api, table, factual_id, params): self.table = table self.factual_id = factual_id Base.__init__(self, api, self._path(), params) def write(self): return self.api.post(self) def factual_id(self, factual_id): self.factual_id = factual_id self.path = self._path() def user(self, user): return self._copy({'user': user}) def comment(self, comment): return self._copy({'comment': comment}) def reference(self, reference): return self._copy({'reference': reference}) def _path(self): pass def _copy(self, params): pass
from base import Base class Write(Base): def __init__(self, api, table, factual_id, params): self.table = table self.factual_id = factual_id Base.__init__(self, api, self._path(), params) def write(self): return self.api.post(self) def factual_id(self, factual_id): self.factual_id = factual_id self.path = self._path() def user(self, user): return self._copy({'user': user}) def comment(self, comment): return self._copy({'comment': comment}) def reference(self, reference): return self._copy({'reference': reference}) def _path(self): pass def _copy(self, params): pass
none
1
2.793994
3
code/fireflyFunction.py
laurencee9/Symposium_optimization
0
6625254
<reponame>laurencee9/Symposium_optimization<gh_stars>0 """ Find minimum of a function (in 2D) """ import numpy as np import random as rdm import matplotlib.pyplot as plt import matplotlib.animation as animation plt.rcParams['text.usetex']=True plt.rcParams['text.latex.preamble']=[r'\usepackage{amsmath}'] plt.rc('font',**{'family':'serif','serif':['Computer Modern']}) plt.rcParams['text.latex.unicode']=True plt.rcParams['axes.linewidth'] = 2 plt.rc('xtick', labelsize=20) plt.rc('ytick', labelsize=20) class Firefly(): position = np.array([0,0]) intensity = 1.0 def fly(fireflies, func,beta0=0.5,gamma=1.0,alpha=0.01,delta=0.95): for i in range(len(fireflies)): moved = False for j in range(len(fireflies)): if fireflies[j].intensity<fireflies[i].intensity: r = np.sqrt((fireflies[i].position[0]-fireflies[j].position[0])**2.0+(fireflies[i].position[1]-fireflies[j].position[1])**2.0) fireflies[i].position[0] += beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[j].position[0]-fireflies[i].position[0]) + alpha * delta**t*(rdm.random()*2-1) fireflies[i].position[1] += beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[j].position[1]-fireflies[i].position[1]) + alpha * delta**t*(rdm.random()*2-1) moved = True fireflies[i].intensity = np.abs(func(fireflies[i].position)) if moved == False: fireflies[i].position += alpha * delta**t*(rdm.random()*2-1) fireflies[i].intensity = np.abs(func(fireflies[i].position)) #update intensity return fireflies def flyGreedy(fireflies, func, tmax=100,beta0=0.5,gamma=1.0,alpha=0.01,delta=0.95): for t in range(tmax): for i in range(len(fireflies)): moved = False minj = 0 minintensity = fireflies[i].intensity for j in range(len(fireflies)): if fireflies[j].intensity<fireflies[i].intensity: if minintensity > fireflies[j].intensity: minintensity = fireflies[j].intensity minj = j moved = True if moved: r = np.sqrt((fireflies[i].position[0]-fireflies[minj].position[0])**2.0+(fireflies[i].position[1]-fireflies[minj].position[1])**2.0) fireflies[i].position[0] += 
beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[minj].position[0]-fireflies[i].position[0]) + alpha * delta**t*(rdm.random()*2-1) fireflies[i].position[1] += beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[minj].position[1]-fireflies[i].position[1]) + alpha * delta**t*(rdm.random()*2-1) moved = True fireflies[i].intensity = np.abs(func(fireflies[i].position)) if moved == False: fireflies[i].position += alpha * delta**t*(rdm.random()*2-1) fireflies[i].intensity = np.abs(func(fireflies[i].position)) #update intensity return fireflies def initialiseFirefly(N, xmin, xmax, ymin, ymax, func): fireflies = [] for i in range(N): a = Firefly() a.position = np.array([rdm.random()*(xmax-xmin)+xmin,rdm.random()*(ymax-ymin)+ymin]) a.intensity = np.abs(func(a.position)) fireflies.append(a) return fireflies def showFunction(func,xmin,xmax,ymin,ymax,n=50): R = np.zeros((n,n)) hx = (xmax-xmin)/n hy = (ymax-ymin)/n for i in range(n): for j in range(n): R[i,j] = func((xmin+hx*i,ymin+hy*j)) l = plt.matshow(R,cmap="RdGy", origin='lower',extent=[xmin,xmax,ymin,ymax]) cbar = plt.colorbar(l) plt.show() def showFunctionFirefly(fireflies,func,xmin,xmax,ymin,ymax,n=50,name="a"): R = np.zeros((n,n)) hx = (xmax-xmin)/n hy = (ymax-ymin)/n plt.figure(figsize=(3,3)) for i in range(n): for j in range(n): R[i,j] = np.abs(func((xmin+hx*i,ymin+hy*j))) l = plt.matshow(R,cmap="Blues", origin='lower',extent=[xmin,xmax,ymin,ymax]) # cbar = plt.colorbar(l) X = [fireflies[k].position[0] for k in range(len(fireflies))] Y = [fireflies[k].position[1] for k in range(len(fireflies))] plt.plot(X,Y,"o",markersize=9,markeredgecolor="none",markerfacecolor = "black") print([fireflies[k].intensity for k in range(len(fireflies))]) plt.xlim([xmin,xmax]) plt.ylim([ymin,ymax]) plt.savefig(name) plt.clf() # plt.show() def fourMaximum(x,y): if x>=0 and y>=0: return (np.abs(x-2.5)+np.abs(y-2.5))**0.7 if x>=0 and y<0: return (np.abs(x-2.5)+np.abs(y+2.5))**0.7 if x<0 and y>=0: return (np.abs(x+2.5)+np.abs(y-2.5))**0.7 if x<0 and 
y<0: return (np.abs(x+2.5)+np.abs(y+2.5))**0.7 def update_line(temps,data,line): # X = data[temps] line.set_data([f[0] for f in data[temps]],[f[1] for f in data[temps]]) return line, def doExtrapolation(data,N): newData = [] for t in range(len(data)*N-N): u = t%N v = t//N L = [] for j in range(len(data[v])): newX = data[v][j][0] + u*(data[v+1][j][0]-data[v][j][0])/N newY = data[v][j][1] + u*(data[v+1][j][1]-data[v][j][1])/N L.append((newX,newY)) # print(t) newData.append([kl for kl in L]) return newData # func = lambda x,y : (np.abs(x-2.5)+np.abs(y-2.5))**0.7 func = lambda x : fourMaximum(x[0],x[1]) # func = lambda x : (np.sin((x[0]**2.0+x[1]**2.0)/2.0)/((x[0]/5)**2.0+(x[1]/5.0)**2.0))*-1 # func = lambda x : (np.abs(x[0])+np.abs(x[1]))**0.7 xmin, xmax, ymin, ymax = -5.0,5.0,-5.0,5.0 # showFunction(func,xmin,xmax,ymin,ymax,n=50) N = 100 fireflies = initialiseFirefly(N, xmin, xmax, ymin, ymax, func) showFunctionFirefly(fireflies,func,xmin,xmax,ymin,ymax,n=50,name="fireflyout1.pdf") tmax = 50 data = [] data.append([(f.position[0],f.position[1]) for f in fireflies]) for t in range(tmax): fireflies = fly(fireflies, func, beta0=0.8,gamma=5.0,alpha=0.2,delta=0.95) data.append([(f.position[0],f.position[1]) for f in fireflies]) # print(data[0]) showFunctionFirefly(fireflies,func,xmin,xmax,ymin,ymax,n=50,name="fireflyout2.pdf") # extrapo = 8 # data = doExtrapolation(data,N=extrapo) # fig1 = plt.figure() # ax = plt.gca() # l, = ax.plot([f[0] for f in data[0]],[f[1] for f in data[0]], 'ok',markersize=6) # n=25 # R = np.zeros((n,n)) # hx = (xmax-xmin)/n # hy = (ymax-ymin)/n # for i in range(n): # for j in range(n): # R[i,j] = np.abs(func((xmin+hx*i,ymin+hy*j))) # ax.matshow(R,cmap="Blues", origin='lower',extent=[xmin,xmax,ymin,ymax]) line_ani = animation.FuncAnimation(fig1, update_line, tmax*extrapo, interval=100,fargs=(data, l), blit=True) line_ani.save('lines.mp4')
""" Find minimum of a function (in 2D) """ import numpy as np import random as rdm import matplotlib.pyplot as plt import matplotlib.animation as animation plt.rcParams['text.usetex']=True plt.rcParams['text.latex.preamble']=[r'\usepackage{amsmath}'] plt.rc('font',**{'family':'serif','serif':['Computer Modern']}) plt.rcParams['text.latex.unicode']=True plt.rcParams['axes.linewidth'] = 2 plt.rc('xtick', labelsize=20) plt.rc('ytick', labelsize=20) class Firefly(): position = np.array([0,0]) intensity = 1.0 def fly(fireflies, func,beta0=0.5,gamma=1.0,alpha=0.01,delta=0.95): for i in range(len(fireflies)): moved = False for j in range(len(fireflies)): if fireflies[j].intensity<fireflies[i].intensity: r = np.sqrt((fireflies[i].position[0]-fireflies[j].position[0])**2.0+(fireflies[i].position[1]-fireflies[j].position[1])**2.0) fireflies[i].position[0] += beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[j].position[0]-fireflies[i].position[0]) + alpha * delta**t*(rdm.random()*2-1) fireflies[i].position[1] += beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[j].position[1]-fireflies[i].position[1]) + alpha * delta**t*(rdm.random()*2-1) moved = True fireflies[i].intensity = np.abs(func(fireflies[i].position)) if moved == False: fireflies[i].position += alpha * delta**t*(rdm.random()*2-1) fireflies[i].intensity = np.abs(func(fireflies[i].position)) #update intensity return fireflies def flyGreedy(fireflies, func, tmax=100,beta0=0.5,gamma=1.0,alpha=0.01,delta=0.95): for t in range(tmax): for i in range(len(fireflies)): moved = False minj = 0 minintensity = fireflies[i].intensity for j in range(len(fireflies)): if fireflies[j].intensity<fireflies[i].intensity: if minintensity > fireflies[j].intensity: minintensity = fireflies[j].intensity minj = j moved = True if moved: r = np.sqrt((fireflies[i].position[0]-fireflies[minj].position[0])**2.0+(fireflies[i].position[1]-fireflies[minj].position[1])**2.0) fireflies[i].position[0] += 
beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[minj].position[0]-fireflies[i].position[0]) + alpha * delta**t*(rdm.random()*2-1) fireflies[i].position[1] += beta0*np.exp(-1.0*gamma*(r)**2.0)*(fireflies[minj].position[1]-fireflies[i].position[1]) + alpha * delta**t*(rdm.random()*2-1) moved = True fireflies[i].intensity = np.abs(func(fireflies[i].position)) if moved == False: fireflies[i].position += alpha * delta**t*(rdm.random()*2-1) fireflies[i].intensity = np.abs(func(fireflies[i].position)) #update intensity return fireflies def initialiseFirefly(N, xmin, xmax, ymin, ymax, func): fireflies = [] for i in range(N): a = Firefly() a.position = np.array([rdm.random()*(xmax-xmin)+xmin,rdm.random()*(ymax-ymin)+ymin]) a.intensity = np.abs(func(a.position)) fireflies.append(a) return fireflies def showFunction(func,xmin,xmax,ymin,ymax,n=50): R = np.zeros((n,n)) hx = (xmax-xmin)/n hy = (ymax-ymin)/n for i in range(n): for j in range(n): R[i,j] = func((xmin+hx*i,ymin+hy*j)) l = plt.matshow(R,cmap="RdGy", origin='lower',extent=[xmin,xmax,ymin,ymax]) cbar = plt.colorbar(l) plt.show() def showFunctionFirefly(fireflies,func,xmin,xmax,ymin,ymax,n=50,name="a"): R = np.zeros((n,n)) hx = (xmax-xmin)/n hy = (ymax-ymin)/n plt.figure(figsize=(3,3)) for i in range(n): for j in range(n): R[i,j] = np.abs(func((xmin+hx*i,ymin+hy*j))) l = plt.matshow(R,cmap="Blues", origin='lower',extent=[xmin,xmax,ymin,ymax]) # cbar = plt.colorbar(l) X = [fireflies[k].position[0] for k in range(len(fireflies))] Y = [fireflies[k].position[1] for k in range(len(fireflies))] plt.plot(X,Y,"o",markersize=9,markeredgecolor="none",markerfacecolor = "black") print([fireflies[k].intensity for k in range(len(fireflies))]) plt.xlim([xmin,xmax]) plt.ylim([ymin,ymax]) plt.savefig(name) plt.clf() # plt.show() def fourMaximum(x,y): if x>=0 and y>=0: return (np.abs(x-2.5)+np.abs(y-2.5))**0.7 if x>=0 and y<0: return (np.abs(x-2.5)+np.abs(y+2.5))**0.7 if x<0 and y>=0: return (np.abs(x+2.5)+np.abs(y-2.5))**0.7 if x<0 and 
y<0: return (np.abs(x+2.5)+np.abs(y+2.5))**0.7 def update_line(temps,data,line): # X = data[temps] line.set_data([f[0] for f in data[temps]],[f[1] for f in data[temps]]) return line, def doExtrapolation(data,N): newData = [] for t in range(len(data)*N-N): u = t%N v = t//N L = [] for j in range(len(data[v])): newX = data[v][j][0] + u*(data[v+1][j][0]-data[v][j][0])/N newY = data[v][j][1] + u*(data[v+1][j][1]-data[v][j][1])/N L.append((newX,newY)) # print(t) newData.append([kl for kl in L]) return newData # func = lambda x,y : (np.abs(x-2.5)+np.abs(y-2.5))**0.7 func = lambda x : fourMaximum(x[0],x[1]) # func = lambda x : (np.sin((x[0]**2.0+x[1]**2.0)/2.0)/((x[0]/5)**2.0+(x[1]/5.0)**2.0))*-1 # func = lambda x : (np.abs(x[0])+np.abs(x[1]))**0.7 xmin, xmax, ymin, ymax = -5.0,5.0,-5.0,5.0 # showFunction(func,xmin,xmax,ymin,ymax,n=50) N = 100 fireflies = initialiseFirefly(N, xmin, xmax, ymin, ymax, func) showFunctionFirefly(fireflies,func,xmin,xmax,ymin,ymax,n=50,name="fireflyout1.pdf") tmax = 50 data = [] data.append([(f.position[0],f.position[1]) for f in fireflies]) for t in range(tmax): fireflies = fly(fireflies, func, beta0=0.8,gamma=5.0,alpha=0.2,delta=0.95) data.append([(f.position[0],f.position[1]) for f in fireflies]) # print(data[0]) showFunctionFirefly(fireflies,func,xmin,xmax,ymin,ymax,n=50,name="fireflyout2.pdf") # extrapo = 8 # data = doExtrapolation(data,N=extrapo) # fig1 = plt.figure() # ax = plt.gca() # l, = ax.plot([f[0] for f in data[0]],[f[1] for f in data[0]], 'ok',markersize=6) # n=25 # R = np.zeros((n,n)) # hx = (xmax-xmin)/n # hy = (ymax-ymin)/n # for i in range(n): # for j in range(n): # R[i,j] = np.abs(func((xmin+hx*i,ymin+hy*j))) # ax.matshow(R,cmap="Blues", origin='lower',extent=[xmin,xmax,ymin,ymax]) line_ani = animation.FuncAnimation(fig1, update_line, tmax*extrapo, interval=100,fargs=(data, l), blit=True) line_ani.save('lines.mp4')
en
0.228385
Find minimum of a function (in 2D) #update intensity #update intensity # cbar = plt.colorbar(l) # plt.show() # X = data[temps] # print(t) # func = lambda x,y : (np.abs(x-2.5)+np.abs(y-2.5))**0.7 # func = lambda x : (np.sin((x[0]**2.0+x[1]**2.0)/2.0)/((x[0]/5)**2.0+(x[1]/5.0)**2.0))*-1 # func = lambda x : (np.abs(x[0])+np.abs(x[1]))**0.7 # showFunction(func,xmin,xmax,ymin,ymax,n=50) # print(data[0]) # extrapo = 8 # data = doExtrapolation(data,N=extrapo) # fig1 = plt.figure() # ax = plt.gca() # l, = ax.plot([f[0] for f in data[0]],[f[1] for f in data[0]], 'ok',markersize=6) # n=25 # R = np.zeros((n,n)) # hx = (xmax-xmin)/n # hy = (ymax-ymin)/n # for i in range(n): # for j in range(n): # R[i,j] = np.abs(func((xmin+hx*i,ymin+hy*j))) # ax.matshow(R,cmap="Blues", origin='lower',extent=[xmin,xmax,ymin,ymax])
3.278247
3
techman_robot_get_status/tm_get_status/image_pub.py
Fry-Bot/tmr_ros2
14
6625255
<reponame>Fry-Bot/tmr_ros2 import sys import socket import rclpy import queue import signal from rclpy.node import Node from sensor_msgs.msg import Image from flask import Flask, request, jsonify import numpy as np import cv2 from waitress import serve from datetime import datetime from cv_bridge import CvBridge, CvBridgeError import threading class ImagePub(Node): def __init__(self,nodeName,isTest,path): super().__init__(nodeName) self.publisher = self.create_publisher(Image, 'techman_image', 10) self.con = threading.Condition() self.imageQ = queue.Queue() self.leaveThread = False if(isTest): self.t = threading.Thread(target = self.pub_data_thread, args=(False,)) timer_period = 1.0 self.img = cv2.imread(path) self.tmr = self.create_timer(timer_period, self.publish_test_image) else: self.t = threading.Thread(target = self.pub_data_thread, args=(True,)) self.t.start() def set_image_and_notify_send(self, img): self.con.acquire() self.imageQ.put(img) self.con.notify() self.con.release() def signal_handler(self,signal, frame): self.close_thread() sys.exit(0) def publish_test_image(self): self.img = cv2.flip(self.img, 1) self.set_image_and_notify_send(self.img) def image_publisher(self,image): bridge = CvBridge() msg = bridge.cv2_to_imgmsg(image) self.get_logger().info('Publishing something !, queue size is ' + str(self.imageQ.qsize())) self.publisher.publish(msg) def close_thread(self): self.leaveThread = True self.con.acquire() self.con.notify() self.con.release() def pub_data_thread(self, isRequestData): self.con.acquire() while(True): self.con.wait() while(not self.imageQ.empty()): if(isRequestData): file2np = np.fromstring(self.imageQ.get(), np.uint8) img = cv2.imdecode(file2np, cv2.IMREAD_UNCHANGED) self.image_publisher(img) else: self.image_publisher(self.imageQ.get()) if(self.leaveThread): break self.con.release() def fake_result(self,m_method): # clsssification if m_method == 'CLS': # inference img here result = { "message": "success", "result": "NG", "score": 
0.987 } # detection elif m_method == 'DET': # inference img here result = { "message":"success", "annotations": [ { "box_cx": 150, "box_cy": 150, "box_w": 100, "box_h": 100, "label": "apple", "score": 0.964, "rotate": -45 }, { "box_cx": 550, "box_cy": 550, "box_w": 100, "box_h": 100, "label": "car", "score": 1.000, "rotation": 0 }, { "box_cx": 350, "box_cy": 350, "box_w": 150, "box_h": 150, "label": "mobilephone", "score": 0.886, "rotation": 135 } ], "result": None } # no method else: result = { "message": "no method", "result": None } return result def get_none(self): print('\n[{0}] [{1}] -> Get()'.format(request.environ['REMOTE_ADDR'], datetime.now())) # user defined method result = { "result": "api", "message": "running", } return jsonify(result) def get(self,m_method): print('\n[{0}] [{1}] -> Get({2})'.format(request.environ['REMOTE_ADDR'], datetime.now(), m_method)) # user defined method if m_method == 'status': result = { "result": "status", "message": "im ok" } else: result = { "result": "fail", "message": "wrong request" } return jsonify(result) def post(self,m_method): print('\n[{0}] [{1}] -> Post({2})'.format(request.environ['REMOTE_ADDR'], datetime.now(), m_method)) # get key/value model_id = request.args.get('model_id') print('model_id: {}'.format(model_id)) # check key/value if model_id is None: print("model_id is not set") result={ "message": "fail", "result": "model_id required" } return jsonify(result) # convert image data #file2np = np.fromstring(request.files['file'].read(), np.uint8) #img = cv2.imdecode(file2np, cv2.IMREAD_UNCHANGED) #cv2.imwrite('test.png',img) self.set_image_and_notify_send(request.files['file'].read()) result = self.fake_result(m_method) return jsonify(result) def set_route(app,node): app.route('/api/<string:m_method>', methods=['POST'])(node.post) app.route('/api/<string:m_method>', methods=['GET'])(node.get) app.route('/api', methods=['GET'])(node.get_none) def main(): rclpy.init(args=None) isTest = False app = 
Flask(__name__) if(isTest): try: print(sys.argv[1:]) except : print("arg is not correct!") return node = ImagePub('image_pub',isTest,sys.argv[1]) else: node = ImagePub('image_pub',isTest,None) set_route(app,node) print("Listening on an ip port:6189 combination") serve(app, port=6189) signal.signal(signal.SIGINT, node.signal_handler) rclpy.spin(node) node.destroy_node() rclpy.shutdown() if __name__ == '__main__': main()
import sys import socket import rclpy import queue import signal from rclpy.node import Node from sensor_msgs.msg import Image from flask import Flask, request, jsonify import numpy as np import cv2 from waitress import serve from datetime import datetime from cv_bridge import CvBridge, CvBridgeError import threading class ImagePub(Node): def __init__(self,nodeName,isTest,path): super().__init__(nodeName) self.publisher = self.create_publisher(Image, 'techman_image', 10) self.con = threading.Condition() self.imageQ = queue.Queue() self.leaveThread = False if(isTest): self.t = threading.Thread(target = self.pub_data_thread, args=(False,)) timer_period = 1.0 self.img = cv2.imread(path) self.tmr = self.create_timer(timer_period, self.publish_test_image) else: self.t = threading.Thread(target = self.pub_data_thread, args=(True,)) self.t.start() def set_image_and_notify_send(self, img): self.con.acquire() self.imageQ.put(img) self.con.notify() self.con.release() def signal_handler(self,signal, frame): self.close_thread() sys.exit(0) def publish_test_image(self): self.img = cv2.flip(self.img, 1) self.set_image_and_notify_send(self.img) def image_publisher(self,image): bridge = CvBridge() msg = bridge.cv2_to_imgmsg(image) self.get_logger().info('Publishing something !, queue size is ' + str(self.imageQ.qsize())) self.publisher.publish(msg) def close_thread(self): self.leaveThread = True self.con.acquire() self.con.notify() self.con.release() def pub_data_thread(self, isRequestData): self.con.acquire() while(True): self.con.wait() while(not self.imageQ.empty()): if(isRequestData): file2np = np.fromstring(self.imageQ.get(), np.uint8) img = cv2.imdecode(file2np, cv2.IMREAD_UNCHANGED) self.image_publisher(img) else: self.image_publisher(self.imageQ.get()) if(self.leaveThread): break self.con.release() def fake_result(self,m_method): # clsssification if m_method == 'CLS': # inference img here result = { "message": "success", "result": "NG", "score": 0.987 } # detection elif 
m_method == 'DET': # inference img here result = { "message":"success", "annotations": [ { "box_cx": 150, "box_cy": 150, "box_w": 100, "box_h": 100, "label": "apple", "score": 0.964, "rotate": -45 }, { "box_cx": 550, "box_cy": 550, "box_w": 100, "box_h": 100, "label": "car", "score": 1.000, "rotation": 0 }, { "box_cx": 350, "box_cy": 350, "box_w": 150, "box_h": 150, "label": "mobilephone", "score": 0.886, "rotation": 135 } ], "result": None } # no method else: result = { "message": "no method", "result": None } return result def get_none(self): print('\n[{0}] [{1}] -> Get()'.format(request.environ['REMOTE_ADDR'], datetime.now())) # user defined method result = { "result": "api", "message": "running", } return jsonify(result) def get(self,m_method): print('\n[{0}] [{1}] -> Get({2})'.format(request.environ['REMOTE_ADDR'], datetime.now(), m_method)) # user defined method if m_method == 'status': result = { "result": "status", "message": "im ok" } else: result = { "result": "fail", "message": "wrong request" } return jsonify(result) def post(self,m_method): print('\n[{0}] [{1}] -> Post({2})'.format(request.environ['REMOTE_ADDR'], datetime.now(), m_method)) # get key/value model_id = request.args.get('model_id') print('model_id: {}'.format(model_id)) # check key/value if model_id is None: print("model_id is not set") result={ "message": "fail", "result": "model_id required" } return jsonify(result) # convert image data #file2np = np.fromstring(request.files['file'].read(), np.uint8) #img = cv2.imdecode(file2np, cv2.IMREAD_UNCHANGED) #cv2.imwrite('test.png',img) self.set_image_and_notify_send(request.files['file'].read()) result = self.fake_result(m_method) return jsonify(result) def set_route(app,node): app.route('/api/<string:m_method>', methods=['POST'])(node.post) app.route('/api/<string:m_method>', methods=['GET'])(node.get) app.route('/api', methods=['GET'])(node.get_none) def main(): rclpy.init(args=None) isTest = False app = Flask(__name__) if(isTest): try: 
print(sys.argv[1:]) except : print("arg is not correct!") return node = ImagePub('image_pub',isTest,sys.argv[1]) else: node = ImagePub('image_pub',isTest,None) set_route(app,node) print("Listening on an ip port:6189 combination") serve(app, port=6189) signal.signal(signal.SIGINT, node.signal_handler) rclpy.spin(node) node.destroy_node() rclpy.shutdown() if __name__ == '__main__': main()
en
0.339383
# clsssification # inference img here # detection # inference img here # no method # user defined method # user defined method # get key/value # check key/value # convert image data #file2np = np.fromstring(request.files['file'].read(), np.uint8) #img = cv2.imdecode(file2np, cv2.IMREAD_UNCHANGED) #cv2.imwrite('test.png',img)
2.332017
2
ptp/components/transforms/__init__.py
aasseman/pytorchpipe
232
6625256
<filename>ptp/components/transforms/__init__.py from .concatenate_tensor import ConcatenateTensor from .list_to_tensor import ListToTensor from .reduce_tensor import ReduceTensor from .reshape_tensor import ReshapeTensor __all__ = [ 'ConcatenateTensor', 'ListToTensor', 'ReduceTensor', 'ReshapeTensor', ]
<filename>ptp/components/transforms/__init__.py from .concatenate_tensor import ConcatenateTensor from .list_to_tensor import ListToTensor from .reduce_tensor import ReduceTensor from .reshape_tensor import ReshapeTensor __all__ = [ 'ConcatenateTensor', 'ListToTensor', 'ReduceTensor', 'ReshapeTensor', ]
none
1
1.244741
1
django_scripts/dj.py
rsalmaso/django-scripts
0
6625257
<reponame>rsalmaso/django-scripts #!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2007-2015, <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
from __future__ import absolute_import, division, print_function, unicode_literals import sys import os from os.path import dirname from stua.commands import BaseCommand from stua.os import system class Command(BaseCommand): def find_manage_py(self, path=None): path = path or os.getcwd() while path != '/': manage = os.path.join(path, 'manage.py') if os.path.exists(manage): return path path = dirname(path) return False def build_cmd(self, command, args): cmd = [] command = command.split('/')[-1] try: cmd.append({"dj2": "python2", "dj3": "python3"}[command]) except: pass cmd.append("./manage.py") if len(args) > 0: if args[0] == 'test': # test are always verbose if not tell otherwise verbosity = [True for arg in args if arg.startswith("--verbosity")] if not verbosity: args.append('--verbosity=2') cmd.extend(args) return cmd def handle(self, command, args): path = self.find_manage_py() status = 0 if path: os.chdir(path) cmd = self.build_cmd(command, args) status = system(*cmd) return status def main(): dj = Command() status = dj.run(sys.argv) sys.exit(status)
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2007-2015, <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. 
from __future__ import absolute_import, division, print_function, unicode_literals import sys import os from os.path import dirname from stua.commands import BaseCommand from stua.os import system class Command(BaseCommand): def find_manage_py(self, path=None): path = path or os.getcwd() while path != '/': manage = os.path.join(path, 'manage.py') if os.path.exists(manage): return path path = dirname(path) return False def build_cmd(self, command, args): cmd = [] command = command.split('/')[-1] try: cmd.append({"dj2": "python2", "dj3": "python3"}[command]) except: pass cmd.append("./manage.py") if len(args) > 0: if args[0] == 'test': # test are always verbose if not tell otherwise verbosity = [True for arg in args if arg.startswith("--verbosity")] if not verbosity: args.append('--verbosity=2') cmd.extend(args) return cmd def handle(self, command, args): path = self.find_manage_py() status = 0 if path: os.chdir(path) cmd = self.build_cmd(command, args) status = system(*cmd) return status def main(): dj = Command() status = dj.run(sys.argv) sys.exit(status)
en
0.747323
#!/usr/bin/env python # -*- coding: utf-8 -*- # Copyright (C) 2007-2015, <NAME> <<EMAIL>> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in # all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN # THE SOFTWARE. # test are always verbose if not tell otherwise
2.266728
2
circuitPython/examples/image-slide-viewer/ui_astc.py
BRTSG-FOSS/pico-bteve
1
6625258
<filename>circuitPython/examples/image-slide-viewer/ui_astc.py import time from brteve.brt_eve_bt817_8 import BrtEve class ui_astc: def __init__(self, eve: BrtEve) -> None: self.eve=eve self.x = 0 self.y = 0 self.w = 0 self.h = 0 self.index = 0 self.image_num = 0 self.images=[] self.names=[] self.sizes=[] self.name="" def set_images(self, images, names, size): self.images=images self.names=names self.sizes=size print(self.sizes) print(self.sizes[self.index]) print(self.sizes[self.index][0]) self.image_num=len(images) self.draw_1_image(images[self.index], self.names[self.index], self.sizes[self.index]) def swipe_left(self): self.swipe_image('left') self.index+=1 if self.index >= self.image_num: self.index=0 self.draw_1_image(self.images[self.index], self.names[self.index], self.sizes[self.index]) def swipe_right(self): self.swipe_image('right') self.index -=1 if self.index < 0: self.index=self.image_num-1 self.draw_1_image(self.images[self.index], self.names[self.index], self.sizes[self.index]) def swipe_image(self, direc): eve=self.eve offset = 0 x = self.x y = self.y w = self.w h = self.h name = self.name if direc=='right': direc=-1 else: direc=1 while offset < (x + w): eve.ClearColorRGB(255, 255, 255) eve.Clear() eve.ColorRGB(255, 255, 255) eve.VertexFormat(4) eve.Begin(eve.BITMAPS) eve.Vertex2f(x-offset*direc, y) tx = x - offset*direc + w/2 - len(name) * 5 ty = y + h + 10 eve.ColorRGB(0, 0, 0) eve.cmd_text(tx, ty, 30, 0, name) eve.swap() offset += 10 + offset/5 time.sleep(0.01) eve.ClearColorRGB(255, 255, 255) eve.Clear() eve.ColorRGB(255, 255, 255) eve.swap() def draw_1_image(self,image, name, size): eve=self.eve print(size) print(size[0]) w=(int)(size[0]) h=(int)(size[1]) addr=0 wp=addr CHUNK = 32 with open(image, 'rb') as file: buff = file.read(CHUNK) while buff != b"": eve.wr(wp, buff) wp += CHUNK buff = file.read(CHUNK) eve.ClearColorRGB(255, 255, 255) eve.Clear() eve.ColorRGB(255, 255, 255) eve.VertexFormat(4) eve.cmd_setbitmap(addr, eve.ASTC_8x8, w, h) # 
eve.cmd_setbitmap(addr, eve.ASTC_4x4, w, h) x=eve.lcd_width/2-w/2 y=eve.lcd_height/2-h/2 eve.Begin(eve.BITMAPS) eve.Vertex2f(x, y) tx = x + w/2 - len(name) * 5 ty = y+ h + 10 eve.ColorRGB(0, 0, 0) eve.cmd_text(tx, ty, 30, 0, name) eve.swap() eve.flush() self.x = x self.y = y self.w = w self.h = h self.name = name
<filename>circuitPython/examples/image-slide-viewer/ui_astc.py import time from brteve.brt_eve_bt817_8 import BrtEve class ui_astc: def __init__(self, eve: BrtEve) -> None: self.eve=eve self.x = 0 self.y = 0 self.w = 0 self.h = 0 self.index = 0 self.image_num = 0 self.images=[] self.names=[] self.sizes=[] self.name="" def set_images(self, images, names, size): self.images=images self.names=names self.sizes=size print(self.sizes) print(self.sizes[self.index]) print(self.sizes[self.index][0]) self.image_num=len(images) self.draw_1_image(images[self.index], self.names[self.index], self.sizes[self.index]) def swipe_left(self): self.swipe_image('left') self.index+=1 if self.index >= self.image_num: self.index=0 self.draw_1_image(self.images[self.index], self.names[self.index], self.sizes[self.index]) def swipe_right(self): self.swipe_image('right') self.index -=1 if self.index < 0: self.index=self.image_num-1 self.draw_1_image(self.images[self.index], self.names[self.index], self.sizes[self.index]) def swipe_image(self, direc): eve=self.eve offset = 0 x = self.x y = self.y w = self.w h = self.h name = self.name if direc=='right': direc=-1 else: direc=1 while offset < (x + w): eve.ClearColorRGB(255, 255, 255) eve.Clear() eve.ColorRGB(255, 255, 255) eve.VertexFormat(4) eve.Begin(eve.BITMAPS) eve.Vertex2f(x-offset*direc, y) tx = x - offset*direc + w/2 - len(name) * 5 ty = y + h + 10 eve.ColorRGB(0, 0, 0) eve.cmd_text(tx, ty, 30, 0, name) eve.swap() offset += 10 + offset/5 time.sleep(0.01) eve.ClearColorRGB(255, 255, 255) eve.Clear() eve.ColorRGB(255, 255, 255) eve.swap() def draw_1_image(self,image, name, size): eve=self.eve print(size) print(size[0]) w=(int)(size[0]) h=(int)(size[1]) addr=0 wp=addr CHUNK = 32 with open(image, 'rb') as file: buff = file.read(CHUNK) while buff != b"": eve.wr(wp, buff) wp += CHUNK buff = file.read(CHUNK) eve.ClearColorRGB(255, 255, 255) eve.Clear() eve.ColorRGB(255, 255, 255) eve.VertexFormat(4) eve.cmd_setbitmap(addr, eve.ASTC_8x8, w, h) # 
eve.cmd_setbitmap(addr, eve.ASTC_4x4, w, h) x=eve.lcd_width/2-w/2 y=eve.lcd_height/2-h/2 eve.Begin(eve.BITMAPS) eve.Vertex2f(x, y) tx = x + w/2 - len(name) * 5 ty = y+ h + 10 eve.ColorRGB(0, 0, 0) eve.cmd_text(tx, ty, 30, 0, name) eve.swap() eve.flush() self.x = x self.y = y self.w = w self.h = h self.name = name
en
0.335104
# eve.cmd_setbitmap(addr, eve.ASTC_4x4, w, h)
2.707298
3
src/lxml/tests/test_builder.py
wenovus/lxml
1,794
6625259
<gh_stars>1000+ # -*- coding: utf-8 -*- """ Tests that ElementMaker works properly. """ from __future__ import absolute_import import unittest from lxml import etree from lxml.builder import E from .common_imports import HelperTestCase, _bytes class BuilderTestCase(HelperTestCase): etree = etree def test_build_from_xpath_result(self): class StringSubclass(str): pass wrapped = E.b(StringSubclass('Hello')) self.assertEqual(_bytes('<b>Hello</b>'), etree.tostring(wrapped)) def test_unknown_type_raises(self): class UnknownType(object): pass self.assertRaises(TypeError, E.b, UnknownType()) def test_cdata(self): wrapped = E.b(etree.CDATA('Hello')) self.assertEqual(_bytes('<b><![CDATA[Hello]]></b>'), etree.tostring(wrapped)) def test_cdata_solo(self): self.assertRaises(ValueError, E.b, 'Hello', etree.CDATA('World')) def test_suite(): suite = unittest.TestSuite() suite.addTests([unittest.makeSuite(BuilderTestCase)]) return suite if __name__ == '__main__': print('to test use test.py %s' % __file__)
# -*- coding: utf-8 -*- """ Tests that ElementMaker works properly. """ from __future__ import absolute_import import unittest from lxml import etree from lxml.builder import E from .common_imports import HelperTestCase, _bytes class BuilderTestCase(HelperTestCase): etree = etree def test_build_from_xpath_result(self): class StringSubclass(str): pass wrapped = E.b(StringSubclass('Hello')) self.assertEqual(_bytes('<b>Hello</b>'), etree.tostring(wrapped)) def test_unknown_type_raises(self): class UnknownType(object): pass self.assertRaises(TypeError, E.b, UnknownType()) def test_cdata(self): wrapped = E.b(etree.CDATA('Hello')) self.assertEqual(_bytes('<b><![CDATA[Hello]]></b>'), etree.tostring(wrapped)) def test_cdata_solo(self): self.assertRaises(ValueError, E.b, 'Hello', etree.CDATA('World')) def test_suite(): suite = unittest.TestSuite() suite.addTests([unittest.makeSuite(BuilderTestCase)]) return suite if __name__ == '__main__': print('to test use test.py %s' % __file__)
en
0.860268
# -*- coding: utf-8 -*- Tests that ElementMaker works properly.
2.618012
3
allure/structure.py
sergeychipiga/allure-python
0
6625260
<reponame>sergeychipiga/allure-python<gh_stars>0 ''' This holds allure report xml structures Created on Oct 23, 2013 @author: pupssman ''' from allure.rules import xmlfied, Attribute, Element, WrappedMany, Nested, Many from allure.constants import ALLURE_NAMESPACE, COMMON_NAMESPACE Attach = xmlfied('attachment', source=Attribute(), title=Attribute(), type=Attribute()) Failure = xmlfied('failure', message=Element(), trace=Element('stack-trace')) TestCase = xmlfied('test-case', name=Element(), title=Element().if_(lambda x: x), description=Element().if_(lambda x: x), failure=Nested().if_(lambda x: x), steps=WrappedMany(Nested()), attachments=WrappedMany(Nested()), labels=WrappedMany(Nested()), status=Attribute(), start=Attribute(), stop=Attribute()) TestSuite = xmlfied('test-suite', namespace=ALLURE_NAMESPACE, name=Element(), title=Element().if_(lambda x: x), description=Element().if_(lambda x: x), tests=WrappedMany(Nested(), name='test-cases'), labels=WrappedMany(Nested()), start=Attribute(), stop=Attribute()) TestStep = xmlfied('step', name=Element(), title=Element().if_(lambda x: x), attachments=WrappedMany(Nested()), steps=WrappedMany(Nested()), start=Attribute(), stop=Attribute(), status=Attribute()) TestLabel = xmlfied('label', name=Attribute(), value=Attribute()) EnvParameter = xmlfied('parameter', name=Element(), key=Element(), value=Element()) Environment = xmlfied('environment', namespace=COMMON_NAMESPACE, id=Element(), name=Element(), parameters=Many(Nested()))
''' This holds allure report xml structures Created on Oct 23, 2013 @author: pupssman ''' from allure.rules import xmlfied, Attribute, Element, WrappedMany, Nested, Many from allure.constants import ALLURE_NAMESPACE, COMMON_NAMESPACE Attach = xmlfied('attachment', source=Attribute(), title=Attribute(), type=Attribute()) Failure = xmlfied('failure', message=Element(), trace=Element('stack-trace')) TestCase = xmlfied('test-case', name=Element(), title=Element().if_(lambda x: x), description=Element().if_(lambda x: x), failure=Nested().if_(lambda x: x), steps=WrappedMany(Nested()), attachments=WrappedMany(Nested()), labels=WrappedMany(Nested()), status=Attribute(), start=Attribute(), stop=Attribute()) TestSuite = xmlfied('test-suite', namespace=ALLURE_NAMESPACE, name=Element(), title=Element().if_(lambda x: x), description=Element().if_(lambda x: x), tests=WrappedMany(Nested(), name='test-cases'), labels=WrappedMany(Nested()), start=Attribute(), stop=Attribute()) TestStep = xmlfied('step', name=Element(), title=Element().if_(lambda x: x), attachments=WrappedMany(Nested()), steps=WrappedMany(Nested()), start=Attribute(), stop=Attribute(), status=Attribute()) TestLabel = xmlfied('label', name=Attribute(), value=Attribute()) EnvParameter = xmlfied('parameter', name=Element(), key=Element(), value=Element()) Environment = xmlfied('environment', namespace=COMMON_NAMESPACE, id=Element(), name=Element(), parameters=Many(Nested()))
en
0.895656
This holds allure report xml structures Created on Oct 23, 2013 @author: pupssman
1.825868
2
src/kestrel/codegen/commands.py
imolloy/kestrel-lang
0
6625261
################################################################ # Module Summary # # - Code generation for each command in kestrel.lark # - The execution function names match commands in kestrel.lark # - Each command takes 2 arguments # ( statement, session ) # - statement is the current statement to process, # which is a dict from the parser # - session is the current session (context) # - Every command returns a tuple (VarStruct, Display) # - VarStruct is a new object associated with the output var # - VarStruct associated with stmt["output"] # - None for some commands, e.g., DISP, SAVE, STAT # - Display is the data to display on the user interface # - a string # - a list of (str,str|list(str)) tuples # - a table that can be imported to pandas dataframe ################################################################ import os import pathlib import functools import logging import time import itertools from collections import OrderedDict from kestrel.utils import remove_empty_dicts, dedup_ordered_dicts from kestrel.exceptions import * from kestrel.semantics import get_entity_table, get_entity_type, get_entity_len from kestrel.symboltable import new_var from kestrel.syntax.parser import get_all_input_var_names from kestrel.codegen.data import load_data, load_data_file, dump_data_to_file from kestrel.codegen.display import DisplayDataframe, DisplayDict from kestrel.codegen.pattern import build_pattern, or_patterns from kestrel.codegen.relations import ( generic_relations, compile_generic_relation_to_pattern, compile_specific_relation_to_pattern, compile_identical_entity_search_pattern, compile_x_ibm_event_search_flow_in_pattern, compile_x_ibm_event_search_flow_out_pattern, are_entities_associated_with_x_ibm_event, ) _logger = logging.getLogger(__name__) ################################################################ # Private Decorators ################################################################ def _default_output(func): # by default, create a table/view in 
the backend # using the output var name # in this case, the store backend can return no VarStruct @functools.wraps(func) def wrapper(stmt, session): ret = func(stmt, session) if not ret: var_struct = new_var( session.store, stmt["output"], [], stmt, session.symtable ) return var_struct, None else: return ret return wrapper def _guard_empty_input(func): @functools.wraps(func) def wrapper(stmt, session): input_len_dict = { v: get_entity_len(v, session.symtable) for v in get_all_input_var_names(stmt) } for v, size in input_len_dict.items(): if size == 0: raise EmptyInputVariable(v) else: return func(stmt, session) return wrapper def _debug_logger(func): @functools.wraps(func) def wrapper(stmt, session): _logger.debug(f"Executing '{func.__name__}' with statement: {stmt}") return func(stmt, session) return wrapper ################################################################ # Code Generation for Commands ################################################################ @_debug_logger @_default_output def merge(stmt, session): entity_types = list( set( [get_entity_type(var_name, session.symtable) for var_name in stmt["inputs"]] ) ) if len(entity_types) > 1: raise NonUniformEntityType(entity_types) entity_tables = [ get_entity_table(var_name, session.symtable) for var_name in stmt["inputs"] ] session.store.merge(stmt["output"], entity_tables) output = new_var(session.store, stmt["output"], [], stmt, session.symtable) return output, None @_debug_logger @_default_output def new(stmt, session): stmt["type"] = load_data(session.store, stmt["output"], stmt["data"], stmt["type"]) @_debug_logger @_default_output def load(stmt, session): stmt["type"] = load_data_file( session.store, stmt["output"], stmt["path"], stmt["type"] ) @_debug_logger @_guard_empty_input def save(stmt, session): dump_data_to_file( session.store, get_entity_table(stmt["input"], session.symtable), stmt["path"] ) return None, None @_debug_logger def info(stmt, session): header = 
session.store.columns(get_entity_table(stmt["input"], session.symtable)) direct_attrs, associ_attrs, custom_attrs, references = [], [], [], [] for field in header: if field.startswith("x_"): custom_attrs.append(field) elif ( field.endswith("_ref") or field.endswith("_refs") or field.endswith("_reference") or field.endswith("_references") ): # not useful in existing version, so do not display references.append(field) elif "_ref." in field or "_ref_" in field: associ_attrs.append(field) else: direct_attrs.append(field) disp = OrderedDict() disp["Entity Type"] = session.symtable[stmt["input"]].type disp["Number of Entities"] = str(len(session.symtable[stmt["input"]])) disp["Number of Records"] = str(session.symtable[stmt["input"]].records_count) disp["Entity Attributes"] = ", ".join(direct_attrs) disp["Indirect Attributes"] = [ ", ".join(g) for _, g in itertools.groupby(associ_attrs, lambda x: x.rsplit(".", 1)[0]) ] disp["Customized Attributes"] = ", ".join(custom_attrs) disp["Birth Command"] = session.symtable[stmt["input"]].birth_statement["command"] disp["Associated Datasource"] = session.symtable[stmt["input"]].data_source disp["Dependent Variables"] = ", ".join( session.symtable[stmt["input"]].dependent_variables ) return None, DisplayDict(disp) @_debug_logger def disp(stmt, session): if len(session.symtable[stmt["input"]]) > 0: content = session.store.lookup( get_entity_table(stmt["input"], session.symtable), stmt["attrs"], stmt["limit"], ) else: content = [] return None, DisplayDataframe(dedup_ordered_dicts(remove_empty_dicts(content))) @_debug_logger @_default_output def get(stmt, session): local_var_name = stmt["output"] + "_local" return_var_name = stmt["output"] return_type = stmt["type"] start_offset = session.config["stixquery"]["timerange_start_offset"] end_offset = session.config["stixquery"]["timerange_stop_offset"] pattern = build_pattern( stmt["patternbody"], stmt["timerange"], start_offset, end_offset, session.symtable, session.store, ) if 
"variablesource" in stmt: session.store.filter( stmt["output"], stmt["type"], get_entity_table(stmt["variablesource"], session.symtable), pattern, ) output = new_var(session.store, return_var_name, [], stmt, session.symtable) _logger.debug(f"get from variable source \"{stmt['variablesource']}\"") elif "datasource" in stmt: # rs: RetStruct rs = session.data_source_manager.query( stmt["datasource"], pattern, session.session_id ) query_id = rs.load_to_store(session.store) session.store.extract(local_var_name, return_type, query_id, pattern) _output = new_var(session.store, local_var_name, [], stmt, session.symtable) output = _output if session.config["prefetch"]["get"] and len(_output): prefetch_ret_var_name = return_var_name + "_prefetch" pattern_pf = _prefetch( return_type, prefetch_ret_var_name, local_var_name, stmt["timerange"], start_offset, end_offset, {local_var_name: _output}, session.store, session.session_id, session.data_source_manager, ) if pattern_pf: # this is a fix when the unique identifier in # `kestrel.codegen.relations.stix_2_0_identical_mapping` is # missing, especially for process. # TODO: this or_pattern() code can be removed when we have # better logic of unique identifier of entities. 
full_pat = or_patterns([pattern, pattern_pf]) session.store.extract(return_var_name, return_type, None, full_pat) output = new_var( session.store, return_var_name, [], stmt, session.symtable ) else: raise KestrelInternalError(f"unknown type of source in {str(stmt)}") return output, None @_debug_logger @_default_output @_guard_empty_input def find(stmt, session): return_type = stmt["type"] input_type = session.symtable[stmt["input"]].type input_var_name = stmt["input"] return_var_name = stmt["output"] local_var_name = stmt["output"] + "_local" local_var_event_name = stmt["output"] + "_asso_event" relation = stmt["relation"] is_reversed = stmt["reversed"] time_range = stmt["timerange"] event_type = "x-oca-event" start_offset = session.config["stixquery"]["timerange_start_offset"] end_offset = session.config["stixquery"]["timerange_stop_offset"] if return_type not in session.store.tables(): # return empty variable output = new_var(session.store, None, [], stmt, session.symtable) else: _symtable = {input_var_name: session.symtable[input_var_name]} event_pattern = None # First, get information from local store if relation in generic_relations: raw_pattern_body = compile_generic_relation_to_pattern( return_type, input_type, input_var_name ) if ( event_type in session.store.tables() and are_entities_associated_with_x_ibm_event([input_type, return_type]) and input_type != return_type ): try: event_in_pattern_body = compile_x_ibm_event_search_flow_in_pattern( input_type, input_var_name ) event_in_pattern = build_pattern( event_in_pattern_body, time_range, start_offset, end_offset, _symtable, session.store, ) session.store.extract( local_var_event_name, event_type, None, event_in_pattern ) _symtable[local_var_event_name] = new_var( session.store, local_var_event_name, [], stmt, session.symtable ) event_out_pattern_body = ( compile_x_ibm_event_search_flow_out_pattern( return_type, local_var_event_name ) ) event_pattern = build_pattern( event_out_pattern_body, time_range, 
start_offset, end_offset, _symtable, session.store, ) except InvalidAttribute: _logger.warning( "attributes not in DB when building event pattern for x-oca-event" ) else: raw_pattern_body = compile_specific_relation_to_pattern( return_type, relation, input_type, is_reversed, input_var_name ) try: local_pattern = build_pattern( raw_pattern_body, time_range, start_offset, end_offset, _symtable, session.store, ) except InvalidAttribute: local_pattern = None local_pattern = or_patterns([local_pattern, event_pattern]) if not local_pattern: _logger.info(f'no relation "{relation}" on this dataset') # by default, `session.store.extract` will generate new entity_table named `local_var_name` session.store.extract(local_var_name, return_type, None, local_pattern) _output = new_var(session.store, local_var_name, [], stmt, session.symtable) # default output without remote query output = _output # Second, prefetch all records of the entities and associated entities if session.config["prefetch"]["find"] and len(_output) and _output.data_source: if _prefetch( return_type, return_var_name, local_var_name, time_range, start_offset, end_offset, {local_var_name: _output}, session.store, session.session_id, session.data_source_manager, ): output = new_var( session.store, return_var_name, [], stmt, session.symtable ) return output, None @_debug_logger @_default_output @_guard_empty_input def join(stmt, session): session.store.join( stmt["output"], get_entity_table(stmt["input"], session.symtable), stmt["path"], get_entity_table(stmt["input_2"], session.symtable), stmt["path_2"], ) @_debug_logger @_default_output @_guard_empty_input def group(stmt, session): session.store.assign( stmt["output"], get_entity_table(stmt["input"], session.symtable), op="group", by=stmt["path"], ) @_debug_logger @_default_output @_guard_empty_input def sort(stmt, session): session.store.assign( stmt["output"], get_entity_table(stmt["input"], session.symtable), op="sort", by=stmt["path"], 
ascending=stmt["ascending"], ) @_debug_logger @_default_output @_guard_empty_input def apply(stmt, session): arg_vars = [session.symtable[v_name] for v_name in stmt["inputs"]] display = session.analytics_manager.execute( stmt["workflow"], arg_vars, session.session_id, stmt["parameter"] ) return None, display ################################################################ # Helper Functions ################################################################ def _prefetch( return_type, return_var_name, input_var_name, time_range, start_offset, end_offset, symtable, store, session_id, ds_manager, ): """prefetch identical entities and associated entities. Put the input entities in the center of an observation and query the remote data source of associated with input variable, so we get back: 1. all records about the input entities. 2. associated entities such as parent/child processes of processes, processes of network-traffic, etc. The function does not have explicit return, but a side effect: a view in the store named after `return_var_name`. Args: input_var_name (str): input variable name. return_var_name (str): return variable name. return_type (str): return entity type. time_range ((str, str)): start and end time in ISOTIMESTAMP. start_offset (int): start time offset by seconds. end_offset (int): end time offset by seconds. symtable ({str:VarStruct}): should has ``input_var_name``. store (firepit.SqlStorage): store. session_id (str): session ID. Returns: [str]: pattern if the prefetch is performed. 
""" # only need to return bool in the future pattern_body = compile_identical_entity_search_pattern(return_type, input_var_name) if pattern_body: # this may fail if the attribute in `stix_2_0_identical_mapping` does not exists # this is important since STIX does not have any mandatory attributes for process/file remote_pattern = build_pattern( pattern_body, time_range, start_offset, end_offset, symtable, store ) if remote_pattern: data_source = symtable[input_var_name].data_source resp = ds_manager.query(data_source, remote_pattern, session_id) query_id = resp.load_to_store(store) # build the return_var_name view in store store.extract(return_var_name, return_type, None, remote_pattern) return remote_pattern return None
################################################################ # Module Summary # # - Code generation for each command in kestrel.lark # - The execution function names match commands in kestrel.lark # - Each command takes 2 arguments # ( statement, session ) # - statement is the current statement to process, # which is a dict from the parser # - session is the current session (context) # - Every command returns a tuple (VarStruct, Display) # - VarStruct is a new object associated with the output var # - VarStruct associated with stmt["output"] # - None for some commands, e.g., DISP, SAVE, STAT # - Display is the data to display on the user interface # - a string # - a list of (str,str|list(str)) tuples # - a table that can be imported to pandas dataframe ################################################################ import os import pathlib import functools import logging import time import itertools from collections import OrderedDict from kestrel.utils import remove_empty_dicts, dedup_ordered_dicts from kestrel.exceptions import * from kestrel.semantics import get_entity_table, get_entity_type, get_entity_len from kestrel.symboltable import new_var from kestrel.syntax.parser import get_all_input_var_names from kestrel.codegen.data import load_data, load_data_file, dump_data_to_file from kestrel.codegen.display import DisplayDataframe, DisplayDict from kestrel.codegen.pattern import build_pattern, or_patterns from kestrel.codegen.relations import ( generic_relations, compile_generic_relation_to_pattern, compile_specific_relation_to_pattern, compile_identical_entity_search_pattern, compile_x_ibm_event_search_flow_in_pattern, compile_x_ibm_event_search_flow_out_pattern, are_entities_associated_with_x_ibm_event, ) _logger = logging.getLogger(__name__) ################################################################ # Private Decorators ################################################################ def _default_output(func): # by default, create a table/view in 
the backend # using the output var name # in this case, the store backend can return no VarStruct @functools.wraps(func) def wrapper(stmt, session): ret = func(stmt, session) if not ret: var_struct = new_var( session.store, stmt["output"], [], stmt, session.symtable ) return var_struct, None else: return ret return wrapper def _guard_empty_input(func): @functools.wraps(func) def wrapper(stmt, session): input_len_dict = { v: get_entity_len(v, session.symtable) for v in get_all_input_var_names(stmt) } for v, size in input_len_dict.items(): if size == 0: raise EmptyInputVariable(v) else: return func(stmt, session) return wrapper def _debug_logger(func): @functools.wraps(func) def wrapper(stmt, session): _logger.debug(f"Executing '{func.__name__}' with statement: {stmt}") return func(stmt, session) return wrapper ################################################################ # Code Generation for Commands ################################################################ @_debug_logger @_default_output def merge(stmt, session): entity_types = list( set( [get_entity_type(var_name, session.symtable) for var_name in stmt["inputs"]] ) ) if len(entity_types) > 1: raise NonUniformEntityType(entity_types) entity_tables = [ get_entity_table(var_name, session.symtable) for var_name in stmt["inputs"] ] session.store.merge(stmt["output"], entity_tables) output = new_var(session.store, stmt["output"], [], stmt, session.symtable) return output, None @_debug_logger @_default_output def new(stmt, session): stmt["type"] = load_data(session.store, stmt["output"], stmt["data"], stmt["type"]) @_debug_logger @_default_output def load(stmt, session): stmt["type"] = load_data_file( session.store, stmt["output"], stmt["path"], stmt["type"] ) @_debug_logger @_guard_empty_input def save(stmt, session): dump_data_to_file( session.store, get_entity_table(stmt["input"], session.symtable), stmt["path"] ) return None, None @_debug_logger def info(stmt, session): header = 
session.store.columns(get_entity_table(stmt["input"], session.symtable)) direct_attrs, associ_attrs, custom_attrs, references = [], [], [], [] for field in header: if field.startswith("x_"): custom_attrs.append(field) elif ( field.endswith("_ref") or field.endswith("_refs") or field.endswith("_reference") or field.endswith("_references") ): # not useful in existing version, so do not display references.append(field) elif "_ref." in field or "_ref_" in field: associ_attrs.append(field) else: direct_attrs.append(field) disp = OrderedDict() disp["Entity Type"] = session.symtable[stmt["input"]].type disp["Number of Entities"] = str(len(session.symtable[stmt["input"]])) disp["Number of Records"] = str(session.symtable[stmt["input"]].records_count) disp["Entity Attributes"] = ", ".join(direct_attrs) disp["Indirect Attributes"] = [ ", ".join(g) for _, g in itertools.groupby(associ_attrs, lambda x: x.rsplit(".", 1)[0]) ] disp["Customized Attributes"] = ", ".join(custom_attrs) disp["Birth Command"] = session.symtable[stmt["input"]].birth_statement["command"] disp["Associated Datasource"] = session.symtable[stmt["input"]].data_source disp["Dependent Variables"] = ", ".join( session.symtable[stmt["input"]].dependent_variables ) return None, DisplayDict(disp) @_debug_logger def disp(stmt, session): if len(session.symtable[stmt["input"]]) > 0: content = session.store.lookup( get_entity_table(stmt["input"], session.symtable), stmt["attrs"], stmt["limit"], ) else: content = [] return None, DisplayDataframe(dedup_ordered_dicts(remove_empty_dicts(content))) @_debug_logger @_default_output def get(stmt, session): local_var_name = stmt["output"] + "_local" return_var_name = stmt["output"] return_type = stmt["type"] start_offset = session.config["stixquery"]["timerange_start_offset"] end_offset = session.config["stixquery"]["timerange_stop_offset"] pattern = build_pattern( stmt["patternbody"], stmt["timerange"], start_offset, end_offset, session.symtable, session.store, ) if 
"variablesource" in stmt: session.store.filter( stmt["output"], stmt["type"], get_entity_table(stmt["variablesource"], session.symtable), pattern, ) output = new_var(session.store, return_var_name, [], stmt, session.symtable) _logger.debug(f"get from variable source \"{stmt['variablesource']}\"") elif "datasource" in stmt: # rs: RetStruct rs = session.data_source_manager.query( stmt["datasource"], pattern, session.session_id ) query_id = rs.load_to_store(session.store) session.store.extract(local_var_name, return_type, query_id, pattern) _output = new_var(session.store, local_var_name, [], stmt, session.symtable) output = _output if session.config["prefetch"]["get"] and len(_output): prefetch_ret_var_name = return_var_name + "_prefetch" pattern_pf = _prefetch( return_type, prefetch_ret_var_name, local_var_name, stmt["timerange"], start_offset, end_offset, {local_var_name: _output}, session.store, session.session_id, session.data_source_manager, ) if pattern_pf: # this is a fix when the unique identifier in # `kestrel.codegen.relations.stix_2_0_identical_mapping` is # missing, especially for process. # TODO: this or_pattern() code can be removed when we have # better logic of unique identifier of entities. 
full_pat = or_patterns([pattern, pattern_pf]) session.store.extract(return_var_name, return_type, None, full_pat) output = new_var( session.store, return_var_name, [], stmt, session.symtable ) else: raise KestrelInternalError(f"unknown type of source in {str(stmt)}") return output, None @_debug_logger @_default_output @_guard_empty_input def find(stmt, session): return_type = stmt["type"] input_type = session.symtable[stmt["input"]].type input_var_name = stmt["input"] return_var_name = stmt["output"] local_var_name = stmt["output"] + "_local" local_var_event_name = stmt["output"] + "_asso_event" relation = stmt["relation"] is_reversed = stmt["reversed"] time_range = stmt["timerange"] event_type = "x-oca-event" start_offset = session.config["stixquery"]["timerange_start_offset"] end_offset = session.config["stixquery"]["timerange_stop_offset"] if return_type not in session.store.tables(): # return empty variable output = new_var(session.store, None, [], stmt, session.symtable) else: _symtable = {input_var_name: session.symtable[input_var_name]} event_pattern = None # First, get information from local store if relation in generic_relations: raw_pattern_body = compile_generic_relation_to_pattern( return_type, input_type, input_var_name ) if ( event_type in session.store.tables() and are_entities_associated_with_x_ibm_event([input_type, return_type]) and input_type != return_type ): try: event_in_pattern_body = compile_x_ibm_event_search_flow_in_pattern( input_type, input_var_name ) event_in_pattern = build_pattern( event_in_pattern_body, time_range, start_offset, end_offset, _symtable, session.store, ) session.store.extract( local_var_event_name, event_type, None, event_in_pattern ) _symtable[local_var_event_name] = new_var( session.store, local_var_event_name, [], stmt, session.symtable ) event_out_pattern_body = ( compile_x_ibm_event_search_flow_out_pattern( return_type, local_var_event_name ) ) event_pattern = build_pattern( event_out_pattern_body, time_range, 
start_offset, end_offset, _symtable, session.store, ) except InvalidAttribute: _logger.warning( "attributes not in DB when building event pattern for x-oca-event" ) else: raw_pattern_body = compile_specific_relation_to_pattern( return_type, relation, input_type, is_reversed, input_var_name ) try: local_pattern = build_pattern( raw_pattern_body, time_range, start_offset, end_offset, _symtable, session.store, ) except InvalidAttribute: local_pattern = None local_pattern = or_patterns([local_pattern, event_pattern]) if not local_pattern: _logger.info(f'no relation "{relation}" on this dataset') # by default, `session.store.extract` will generate new entity_table named `local_var_name` session.store.extract(local_var_name, return_type, None, local_pattern) _output = new_var(session.store, local_var_name, [], stmt, session.symtable) # default output without remote query output = _output # Second, prefetch all records of the entities and associated entities if session.config["prefetch"]["find"] and len(_output) and _output.data_source: if _prefetch( return_type, return_var_name, local_var_name, time_range, start_offset, end_offset, {local_var_name: _output}, session.store, session.session_id, session.data_source_manager, ): output = new_var( session.store, return_var_name, [], stmt, session.symtable ) return output, None @_debug_logger @_default_output @_guard_empty_input def join(stmt, session): session.store.join( stmt["output"], get_entity_table(stmt["input"], session.symtable), stmt["path"], get_entity_table(stmt["input_2"], session.symtable), stmt["path_2"], ) @_debug_logger @_default_output @_guard_empty_input def group(stmt, session): session.store.assign( stmt["output"], get_entity_table(stmt["input"], session.symtable), op="group", by=stmt["path"], ) @_debug_logger @_default_output @_guard_empty_input def sort(stmt, session): session.store.assign( stmt["output"], get_entity_table(stmt["input"], session.symtable), op="sort", by=stmt["path"], 
ascending=stmt["ascending"], ) @_debug_logger @_default_output @_guard_empty_input def apply(stmt, session): arg_vars = [session.symtable[v_name] for v_name in stmt["inputs"]] display = session.analytics_manager.execute( stmt["workflow"], arg_vars, session.session_id, stmt["parameter"] ) return None, display ################################################################ # Helper Functions ################################################################ def _prefetch( return_type, return_var_name, input_var_name, time_range, start_offset, end_offset, symtable, store, session_id, ds_manager, ): """prefetch identical entities and associated entities. Put the input entities in the center of an observation and query the remote data source of associated with input variable, so we get back: 1. all records about the input entities. 2. associated entities such as parent/child processes of processes, processes of network-traffic, etc. The function does not have explicit return, but a side effect: a view in the store named after `return_var_name`. Args: input_var_name (str): input variable name. return_var_name (str): return variable name. return_type (str): return entity type. time_range ((str, str)): start and end time in ISOTIMESTAMP. start_offset (int): start time offset by seconds. end_offset (int): end time offset by seconds. symtable ({str:VarStruct}): should has ``input_var_name``. store (firepit.SqlStorage): store. session_id (str): session ID. Returns: [str]: pattern if the prefetch is performed. 
""" # only need to return bool in the future pattern_body = compile_identical_entity_search_pattern(return_type, input_var_name) if pattern_body: # this may fail if the attribute in `stix_2_0_identical_mapping` does not exists # this is important since STIX does not have any mandatory attributes for process/file remote_pattern = build_pattern( pattern_body, time_range, start_offset, end_offset, symtable, store ) if remote_pattern: data_source = symtable[input_var_name].data_source resp = ds_manager.query(data_source, remote_pattern, session_id) query_id = resp.load_to_store(store) # build the return_var_name view in store store.extract(return_var_name, return_type, None, remote_pattern) return remote_pattern return None
en
0.554832
################################################################ # Module Summary # # - Code generation for each command in kestrel.lark # - The execution function names match commands in kestrel.lark # - Each command takes 2 arguments # ( statement, session ) # - statement is the current statement to process, # which is a dict from the parser # - session is the current session (context) # - Every command returns a tuple (VarStruct, Display) # - VarStruct is a new object associated with the output var # - VarStruct associated with stmt["output"] # - None for some commands, e.g., DISP, SAVE, STAT # - Display is the data to display on the user interface # - a string # - a list of (str,str|list(str)) tuples # - a table that can be imported to pandas dataframe ################################################################ ################################################################ # Private Decorators ################################################################ # by default, create a table/view in the backend # using the output var name # in this case, the store backend can return no VarStruct ################################################################ # Code Generation for Commands ################################################################ # not useful in existing version, so do not display # rs: RetStruct # this is a fix when the unique identifier in # `kestrel.codegen.relations.stix_2_0_identical_mapping` is # missing, especially for process. # TODO: this or_pattern() code can be removed when we have # better logic of unique identifier of entities. 
# return empty variable # First, get information from local store # by default, `session.store.extract` will generate new entity_table named `local_var_name` # default output without remote query # Second, prefetch all records of the entities and associated entities ################################################################ # Helper Functions ################################################################ prefetch identical entities and associated entities. Put the input entities in the center of an observation and query the remote data source of associated with input variable, so we get back: 1. all records about the input entities. 2. associated entities such as parent/child processes of processes, processes of network-traffic, etc. The function does not have explicit return, but a side effect: a view in the store named after `return_var_name`. Args: input_var_name (str): input variable name. return_var_name (str): return variable name. return_type (str): return entity type. time_range ((str, str)): start and end time in ISOTIMESTAMP. start_offset (int): start time offset by seconds. end_offset (int): end time offset by seconds. symtable ({str:VarStruct}): should has ``input_var_name``. store (firepit.SqlStorage): store. session_id (str): session ID. Returns: [str]: pattern if the prefetch is performed. # only need to return bool in the future # this may fail if the attribute in `stix_2_0_identical_mapping` does not exists # this is important since STIX does not have any mandatory attributes for process/file # build the return_var_name view in store
2.21754
2
tests/s3/test_s3_copier.py
BigNerd/justmltools
0
6625262
from unittest import TestCase from unittest.mock import MagicMock, patch from justmltools.s3.aws_credentials import AwsCredentials class TestS3Copier(TestCase): @patch('boto3.resource', autospec=True) @patch('justmltools.s3.s3_bucket_object_finder.S3BucketObjectFinder', autospec=True) def test_copy_s3_objects( self, s3_bucket_object_finder_mock: MagicMock, boto3_resource_mock: MagicMock ): from_prefix = "my/from/prefix" expected_from_keys = [f"{from_prefix}/x", f"{from_prefix}/y"] s3_bucket_object_finder_mock.return_value.get_matching_s3_keys.return_value = expected_from_keys from justmltools.s3.s3_copier import S3Copier sut = S3Copier( credentials=AwsCredentials( aws_secret_access_key_id="test_id", aws_secret_access_key="test_key", region_name="test_region" ) ) to_prefix = "my/to/prefix" expected_to_keys = [key.replace(from_prefix, to_prefix) for key in expected_from_keys] actual_from_keys = [] actual_to_keys = [] for from_key, to_key in sut.copy_s3_objects( bucket="my_bucket", from_prefix="my/from/prefix", to_prefix="my/to/prefix"): actual_from_keys.append(from_key) actual_to_keys.append(to_key) self.assertEqual(expected_from_keys, actual_from_keys) self.assertEqual(expected_to_keys, actual_to_keys)
from unittest import TestCase from unittest.mock import MagicMock, patch from justmltools.s3.aws_credentials import AwsCredentials class TestS3Copier(TestCase): @patch('boto3.resource', autospec=True) @patch('justmltools.s3.s3_bucket_object_finder.S3BucketObjectFinder', autospec=True) def test_copy_s3_objects( self, s3_bucket_object_finder_mock: MagicMock, boto3_resource_mock: MagicMock ): from_prefix = "my/from/prefix" expected_from_keys = [f"{from_prefix}/x", f"{from_prefix}/y"] s3_bucket_object_finder_mock.return_value.get_matching_s3_keys.return_value = expected_from_keys from justmltools.s3.s3_copier import S3Copier sut = S3Copier( credentials=AwsCredentials( aws_secret_access_key_id="test_id", aws_secret_access_key="test_key", region_name="test_region" ) ) to_prefix = "my/to/prefix" expected_to_keys = [key.replace(from_prefix, to_prefix) for key in expected_from_keys] actual_from_keys = [] actual_to_keys = [] for from_key, to_key in sut.copy_s3_objects( bucket="my_bucket", from_prefix="my/from/prefix", to_prefix="my/to/prefix"): actual_from_keys.append(from_key) actual_to_keys.append(to_key) self.assertEqual(expected_from_keys, actual_from_keys) self.assertEqual(expected_to_keys, actual_to_keys)
none
1
2.346538
2
tests/scanner/test_scanner.py
amasiukevich/InterpreterNew
0
6625263
from src.data_source.string_source import StringSource from src.data_source.file_source import FileSource from src.exceptions.scanning_exception import ScanningException from src.scanner.scanner import Scanner from src.utils.token_type import TokenType from src.utils.token import Token from src.utils.position import Position from src.exceptions.scanner_exception import ScannerException import io import os import unittest class TestScanner(unittest.TestCase): def test_invalid_source(self): with self.assertRaises(ScannerException) as cm: Scanner(source="string_source") the_exception = cm.exception self.assertEqual( the_exception.message, "The given source is not an instance of BaseSource" ) def test_ignoring_whitespaces(self): file_stream = io.open( os.path.abspath("../../lang_codes/testing/test_arithmetic") ) file_source = FileSource(file_stream) scanner = Scanner(source=file_source) chars = [] while scanner.source.character != -1: scanner.ignore_whitespaces() chars.append(file_source.get_curr_char()) file_source.read_char() file_stream.close() real_chars = [c for c in "a+bc-d"] self.assertListEqual(chars, real_chars, "Doesn't ignore whitespaces") def test_eof_creation(self): string_stream_eof = io.StringIO("") string_source = StringSource(string_stream_eof) scanner = Scanner(source=string_source) scanner.next_token() string_stream_eof.close() self.assertEqual(scanner.token.token_type, TokenType.EOF, "Token must be of type EOF") def test_single_param_creation(self): string_source = StringSource( io.StringIO("+ - / {{ } )( * % ][,.;") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.PLUS, Position(line=1, column=1), value="+"), Token(TokenType.MINUS, Position(line=1, column=6), value="-"), Token(TokenType.DIVIDE, 
Position(line=1, column=8), value="/"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=14), value="{"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=15), value=""), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=17), value="}"), Token(TokenType.CLOSING_PARENTHESIS, Position(line=1, column=19), value=""), Token(TokenType.OPEN_PARENTHESIS, Position(line=1, column=20), value="("), Token(TokenType.MULTIPLY, Position(line=1, column=22), value="*"), Token(TokenType.MODULO, Position(line=1, column=24), value="%"), Token(TokenType.CLOSING_BRACKET, Position(line=1, column=26), value="]"), Token(TokenType.OPEN_BRACKET, Position(line=1, column=27), value="["), Token(TokenType.COMMA, Position(line=1, column=28), value=","), Token(TokenType.ACCESS, Position(line=1, column=29), value="."), Token(TokenType.SEMICOLON, Position(line=1, column=30), value=';') ], "Something went wrong while detecting tokens" ) def test_double_param_creation(self): string_source = StringSource( io.StringIO(">= <= != == && ||") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.GREATER_EQUAL, Position(line=1, column=1), value=">="), Token(TokenType.LESS_EQUAL, Position(line=1, column=4), value="<="), Token(TokenType.NOT_EQUAL, Position(line=1, column=7), value="!="), Token(TokenType.EQUAL, Position(line=1, column=11), value="=="), Token(TokenType.AND, Position(line=1, column=14), value="&&"), Token(TokenType.OR, Position(line=1, column=17), value="||") ], "Something went wrong while detecting double-character tokens" ) def test_leading_zero(self): string_source = StringSource( io.StringIO("010") ) scanner = Scanner(source=string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() 
the_exception = cm.exception self.assertEqual( the_exception.message, f"Scanning exception at position {scanner.token_position}:\n" f"Non-zero number can't contain anything after zero" ) def test_max_number(self): string_source = StringSource( io.StringIO("1000000000000000000000000000000000;") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() the_exception = cm.exception self.assertEqual( the_exception.message, f"Scanning exception at position {scanner.token_position}:\n" f"Max allowed number value is {Token.MAX_NUMBER}" ) def test_number(self): string_source = StringSource( io.StringIO("[10, 12, 13, 0]") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.OPEN_BRACKET, Position(line=1, column=1), value="["), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=2), value=10), Token(TokenType.COMMA, Position(line=1, column=4), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=6), value=12), Token(TokenType.COMMA, Position(line=1, column=8), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=10), value=13), Token(TokenType.COMMA, Position(line=1, column=12), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=14), value=0), Token(TokenType.CLOSING_BRACKET, Position(line=1, column=15), value="]"), ], "Something went wrong during the number tokenization" ) def test_fraction_number(self): string_source = StringSource( io.StringIO("[7.806, 5.25]") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() 
self.assertListEqual( tokens, [ Token(TokenType.OPEN_BRACKET, Position(line=1, column=1), value="["), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=2), value=7.806), Token(TokenType.COMMA, Position(line=1, column=7), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=9), value=5.25), Token(TokenType.CLOSING_BRACKET, Position(line=1, column=13), value="]"), ], "Something went wrong during fraction number tokenization" ) def test_string_literal(self): string_source = StringSource( io.StringIO('= "To be or not to be.";') ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.ASSIGN, Position(line=1, column=1), value="="), Token(TokenType.STRING_LITERAL, Position(line=1, column=3), value="To be or not to be"), Token(TokenType.SEMICOLON, Position(line=1, column=24), value=";") ], "Something went wrong during the string literal tokenization" ) def test_not_closed_exception(self): # test if raised exception for missing " string_source = StringSource( io.StringIO('"To be or not to be.;') ) scanner = Scanner(source=string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() exception_message = cm.exception.message self.assertEqual( exception_message, f"Scanning exception at position {scanner.token_position}:\n" f"Missing closing \"" ) def test_identifier(self): string_source = StringSource( io.StringIO("is_prime = true;") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.IDENTIFIER, Position(line=1, column=1), 
value="is_prime"), Token(TokenType.ASSIGN, Position(line=1, column=10), value="="), Token(TokenType.BOOL_LITERAL, Position(line=1, column=12), value="true"), Token(TokenType.SEMICOLON, Position(line=1, column=16), value=";") ], "Something went wrong during identifier and boolean tokenization" ) def test_id_length(self): string_source = StringSource( io.StringIO("is_prime_second_third_fourth_louis_armstrongs_anything_else_that_comes_to_your_mind_when_creating_a_too_long_identifier_name = true;") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() exception_message = cm.exception.message self.assertEqual( exception_message, f"Scanning exception at position {scanner.token_position}:\n" f"Exceeded length of the identifier" ) def test_id_valid(self): string_source = StringSource( io.StringIO("$/") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() self.assertEqual( cm.exception.message, f"Scanning exception at position {scanner.token_position}:\n" f"Invalid identifier" ) def test_comment(self): file_source = FileSource( io.open("../../lang_codes/real_codes/comment.txt") ) scanner = Scanner(file_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token file_source.close() self.assertListEqual( tokens, [ Token(TokenType.COMMENT, Position(line=1, column=1), value="# this is the comment"), Token(TokenType.COMMENT, Position(line=2, column=1), value="# 234134 th1s 1$ also # a comment"), Token(TokenType.COMMENT, Position(line=3, column=1), value="# and :sd...a91.4/3 is comment either") ], "Something went wrong while detecting comment tokens" ) def test_keyword1(self): string_source = StringSource( io.StringIO("if (true) {} else if {} else {}") ) scanner = Scanner(string_source) scanner.next_token() current_token = scanner.token 
tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.string_stream.close() self.assertListEqual( tokens, [ Token(TokenType.IF, Position(line=1, column=1), value="if"), Token(TokenType.OPEN_PARENTHESIS, Position(line=1, column=4), value="("), Token(TokenType.BOOL_LITERAL, Position(line=1, column=5), value="true"), Token(TokenType.CLOSING_PARENTHESIS, Position(line=1, column=9), value=")"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=11), value="{"), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=12), value="}"), Token(TokenType.ELSE, Position(line=1, column=14), value="else"), Token(TokenType.IF, Position(line=1, column=19), value="if"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=22), value="{"), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=23), value="}"), Token(TokenType.ELSE, Position(line=1, column=25), value="else"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=30), value="{"), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=31), value="}"), ] ) def test_keywords2(self): string_source = StringSource( io.StringIO("while foreach return define this reflect by_ref class reflect recursive") ) scanner = Scanner(string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.string_stream.close() self.assertListEqual( tokens, [ Token(TokenType.WHILE, Position(line=1, column=1), value="while"), Token(TokenType.FOREACH, Position(line=1, column=7), value="foreach"), Token(TokenType.RETURN, Position(line=1, column=15), value="return"), Token(TokenType.DEFINE, Position(line=1, column=22), value="define"), Token(TokenType.THIS, Position(line=1, column=29), value="this"), Token(TokenType.REFLECT, Position(line=1, column=34), 
value="reflect"), Token(TokenType.BY_REF, Position(line=1, column=42), value="by_ref"), Token(TokenType.CLASS, Position(line=1, column=49), value="class"), Token(TokenType.REFLECT, Position(line=1, column=55), value="reflect"), Token(TokenType.RECURSIVE, Position(line=1, column=63), value="recursive") ] ) def test_unknown_symbol(self): string_source = StringSource( io.StringIO("?self;") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() the_exception = cm.exception self.assertEqual( the_exception.message, f"Scanning exception at position {scanner.token_position}:\nUnknown symbol" ) def test_custom_file(self): file_source = FileSource( io.open("../../lang_codes/real_codes/recursion.txt") ) scanner = Scanner(file_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token file_source.close() breakpoint()
from src.data_source.string_source import StringSource from src.data_source.file_source import FileSource from src.exceptions.scanning_exception import ScanningException from src.scanner.scanner import Scanner from src.utils.token_type import TokenType from src.utils.token import Token from src.utils.position import Position from src.exceptions.scanner_exception import ScannerException import io import os import unittest class TestScanner(unittest.TestCase): def test_invalid_source(self): with self.assertRaises(ScannerException) as cm: Scanner(source="string_source") the_exception = cm.exception self.assertEqual( the_exception.message, "The given source is not an instance of BaseSource" ) def test_ignoring_whitespaces(self): file_stream = io.open( os.path.abspath("../../lang_codes/testing/test_arithmetic") ) file_source = FileSource(file_stream) scanner = Scanner(source=file_source) chars = [] while scanner.source.character != -1: scanner.ignore_whitespaces() chars.append(file_source.get_curr_char()) file_source.read_char() file_stream.close() real_chars = [c for c in "a+bc-d"] self.assertListEqual(chars, real_chars, "Doesn't ignore whitespaces") def test_eof_creation(self): string_stream_eof = io.StringIO("") string_source = StringSource(string_stream_eof) scanner = Scanner(source=string_source) scanner.next_token() string_stream_eof.close() self.assertEqual(scanner.token.token_type, TokenType.EOF, "Token must be of type EOF") def test_single_param_creation(self): string_source = StringSource( io.StringIO("+ - / {{ } )( * % ][,.;") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.PLUS, Position(line=1, column=1), value="+"), Token(TokenType.MINUS, Position(line=1, column=6), value="-"), Token(TokenType.DIVIDE, 
Position(line=1, column=8), value="/"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=14), value="{"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=15), value=""), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=17), value="}"), Token(TokenType.CLOSING_PARENTHESIS, Position(line=1, column=19), value=""), Token(TokenType.OPEN_PARENTHESIS, Position(line=1, column=20), value="("), Token(TokenType.MULTIPLY, Position(line=1, column=22), value="*"), Token(TokenType.MODULO, Position(line=1, column=24), value="%"), Token(TokenType.CLOSING_BRACKET, Position(line=1, column=26), value="]"), Token(TokenType.OPEN_BRACKET, Position(line=1, column=27), value="["), Token(TokenType.COMMA, Position(line=1, column=28), value=","), Token(TokenType.ACCESS, Position(line=1, column=29), value="."), Token(TokenType.SEMICOLON, Position(line=1, column=30), value=';') ], "Something went wrong while detecting tokens" ) def test_double_param_creation(self): string_source = StringSource( io.StringIO(">= <= != == && ||") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.GREATER_EQUAL, Position(line=1, column=1), value=">="), Token(TokenType.LESS_EQUAL, Position(line=1, column=4), value="<="), Token(TokenType.NOT_EQUAL, Position(line=1, column=7), value="!="), Token(TokenType.EQUAL, Position(line=1, column=11), value="=="), Token(TokenType.AND, Position(line=1, column=14), value="&&"), Token(TokenType.OR, Position(line=1, column=17), value="||") ], "Something went wrong while detecting double-character tokens" ) def test_leading_zero(self): string_source = StringSource( io.StringIO("010") ) scanner = Scanner(source=string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() 
the_exception = cm.exception self.assertEqual( the_exception.message, f"Scanning exception at position {scanner.token_position}:\n" f"Non-zero number can't contain anything after zero" ) def test_max_number(self): string_source = StringSource( io.StringIO("1000000000000000000000000000000000;") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() the_exception = cm.exception self.assertEqual( the_exception.message, f"Scanning exception at position {scanner.token_position}:\n" f"Max allowed number value is {Token.MAX_NUMBER}" ) def test_number(self): string_source = StringSource( io.StringIO("[10, 12, 13, 0]") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.OPEN_BRACKET, Position(line=1, column=1), value="["), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=2), value=10), Token(TokenType.COMMA, Position(line=1, column=4), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=6), value=12), Token(TokenType.COMMA, Position(line=1, column=8), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=10), value=13), Token(TokenType.COMMA, Position(line=1, column=12), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=14), value=0), Token(TokenType.CLOSING_BRACKET, Position(line=1, column=15), value="]"), ], "Something went wrong during the number tokenization" ) def test_fraction_number(self): string_source = StringSource( io.StringIO("[7.806, 5.25]") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() 
self.assertListEqual( tokens, [ Token(TokenType.OPEN_BRACKET, Position(line=1, column=1), value="["), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=2), value=7.806), Token(TokenType.COMMA, Position(line=1, column=7), value=","), Token(TokenType.NUMERIC_LITERAL, Position(line=1, column=9), value=5.25), Token(TokenType.CLOSING_BRACKET, Position(line=1, column=13), value="]"), ], "Something went wrong during fraction number tokenization" ) def test_string_literal(self): string_source = StringSource( io.StringIO('= "To be or not to be.";') ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.ASSIGN, Position(line=1, column=1), value="="), Token(TokenType.STRING_LITERAL, Position(line=1, column=3), value="To be or not to be"), Token(TokenType.SEMICOLON, Position(line=1, column=24), value=";") ], "Something went wrong during the string literal tokenization" ) def test_not_closed_exception(self): # test if raised exception for missing " string_source = StringSource( io.StringIO('"To be or not to be.;') ) scanner = Scanner(source=string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() exception_message = cm.exception.message self.assertEqual( exception_message, f"Scanning exception at position {scanner.token_position}:\n" f"Missing closing \"" ) def test_identifier(self): string_source = StringSource( io.StringIO("is_prime = true;") ) scanner = Scanner(source=string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.close() self.assertListEqual( tokens, [ Token(TokenType.IDENTIFIER, Position(line=1, column=1), 
value="is_prime"), Token(TokenType.ASSIGN, Position(line=1, column=10), value="="), Token(TokenType.BOOL_LITERAL, Position(line=1, column=12), value="true"), Token(TokenType.SEMICOLON, Position(line=1, column=16), value=";") ], "Something went wrong during identifier and boolean tokenization" ) def test_id_length(self): string_source = StringSource( io.StringIO("is_prime_second_third_fourth_louis_armstrongs_anything_else_that_comes_to_your_mind_when_creating_a_too_long_identifier_name = true;") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() exception_message = cm.exception.message self.assertEqual( exception_message, f"Scanning exception at position {scanner.token_position}:\n" f"Exceeded length of the identifier" ) def test_id_valid(self): string_source = StringSource( io.StringIO("$/") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() self.assertEqual( cm.exception.message, f"Scanning exception at position {scanner.token_position}:\n" f"Invalid identifier" ) def test_comment(self): file_source = FileSource( io.open("../../lang_codes/real_codes/comment.txt") ) scanner = Scanner(file_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token file_source.close() self.assertListEqual( tokens, [ Token(TokenType.COMMENT, Position(line=1, column=1), value="# this is the comment"), Token(TokenType.COMMENT, Position(line=2, column=1), value="# 234134 th1s 1$ also # a comment"), Token(TokenType.COMMENT, Position(line=3, column=1), value="# and :sd...a91.4/3 is comment either") ], "Something went wrong while detecting comment tokens" ) def test_keyword1(self): string_source = StringSource( io.StringIO("if (true) {} else if {} else {}") ) scanner = Scanner(string_source) scanner.next_token() current_token = scanner.token 
tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.string_stream.close() self.assertListEqual( tokens, [ Token(TokenType.IF, Position(line=1, column=1), value="if"), Token(TokenType.OPEN_PARENTHESIS, Position(line=1, column=4), value="("), Token(TokenType.BOOL_LITERAL, Position(line=1, column=5), value="true"), Token(TokenType.CLOSING_PARENTHESIS, Position(line=1, column=9), value=")"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=11), value="{"), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=12), value="}"), Token(TokenType.ELSE, Position(line=1, column=14), value="else"), Token(TokenType.IF, Position(line=1, column=19), value="if"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=22), value="{"), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=23), value="}"), Token(TokenType.ELSE, Position(line=1, column=25), value="else"), Token(TokenType.OPEN_CURLY_BRACKET, Position(line=1, column=30), value="{"), Token(TokenType.CLOSING_CURLY_BRACKET, Position(line=1, column=31), value="}"), ] ) def test_keywords2(self): string_source = StringSource( io.StringIO("while foreach return define this reflect by_ref class reflect recursive") ) scanner = Scanner(string_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token string_source.string_stream.close() self.assertListEqual( tokens, [ Token(TokenType.WHILE, Position(line=1, column=1), value="while"), Token(TokenType.FOREACH, Position(line=1, column=7), value="foreach"), Token(TokenType.RETURN, Position(line=1, column=15), value="return"), Token(TokenType.DEFINE, Position(line=1, column=22), value="define"), Token(TokenType.THIS, Position(line=1, column=29), value="this"), Token(TokenType.REFLECT, Position(line=1, column=34), 
value="reflect"), Token(TokenType.BY_REF, Position(line=1, column=42), value="by_ref"), Token(TokenType.CLASS, Position(line=1, column=49), value="class"), Token(TokenType.REFLECT, Position(line=1, column=55), value="reflect"), Token(TokenType.RECURSIVE, Position(line=1, column=63), value="recursive") ] ) def test_unknown_symbol(self): string_source = StringSource( io.StringIO("?self;") ) scanner = Scanner(string_source) with self.assertRaises(ScanningException) as cm: scanner.next_token() the_exception = cm.exception self.assertEqual( the_exception.message, f"Scanning exception at position {scanner.token_position}:\nUnknown symbol" ) def test_custom_file(self): file_source = FileSource( io.open("../../lang_codes/real_codes/recursion.txt") ) scanner = Scanner(file_source) scanner.next_token() current_token = scanner.token tokens = [] while current_token.token_type != TokenType.EOF: tokens.append(current_token) scanner.next_token() current_token = scanner.token file_source.close() breakpoint()
en
0.687748
# test if raised exception for missing " # a comment"),
2.557368
3
utils/drawseg.py
JiangXiaobai00/FCNonKitti-Cityscapes
2
6625264
<reponame>JiangXiaobai00/FCNonKitti-Cityscapes from torch.utils.data import Dataset import numpy as np import torch from torchvision import transforms import random from skimage import io, transform import os from PIL import Image IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',] class_name = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', ) class_color = ((128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156), (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0), (107, 142, 35), \ (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), \ (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32), ) label_map = np.array(class_color) class_n = 19 mean = [0.2902, 0.2976, 0.3042] std = [0.1271, 0.1330, 0.1431] flip_rate = 0 shrink_rate = 1 def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def dataloader(filepath): image = [img for img in os.listdir(filepath) if img.find('_10') > -1] left_test = [filepath+img for img in image] return left_test def gettransform(img): h, w, c = img.shape h = int(h // 32 * shrink_rate) * 32 w = int(w // 32 * shrink_rate) * 32 # use interpolation for quality img=transform.resize(img, (h, w), order=1, mode='constant', preserve_range=True).astype('uint8') if np.random.random() < flip_rate: img= np.fliplr(img) # cause error if remove '.copy()' (prevent memory sharing) img= transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])(img) return img def draw_img(rgb,segmentation, n_class): # mask mask = np.zeros_like(rgb, dtype=np.float32)#(384,1248,3) for clsid in range(n_class): mask += np.dot((segmentation == clsid)[..., np.newaxis], [label_map[clsid]]) rgb = np.clip(np.round(mask * 1), 0, 255.0).astype(np.uint8) return rgb def 
direct_render(label,n_class): renders = [] if not isinstance(label, torch.Tensor): label = torch.from_numpy(label) _,h, w = label.shape temp_label = np.zeros((1,h, w, 3), dtype='uint8')# B H W C np for i, segmentation in enumerate(label): render = draw_img(temp_label[i], segmentation, n_class) renders.append(render) renders = np.array(renders) return renders def visualize(label): if not isinstance(label, torch.Tensor): label = torch.from_numpy(label) h, w = label.shape temp_label = np.zeros((h, w, 3), dtype='uint8') for i in range(h): # how to write more elegantly for j in range(w): temp_label[i, j] = class_color[int(label[i, j])] return transforms.ToTensor()(temp_label) def denormalize(self, image): image = np.transpose(image, (1, 2, 0)) image[:, :, 0] = image[:, :, 0] * self.std[0] + self.mean[0] image[:, :, 1] = image[:, :, 1] * self.std[1] + self.mean[1] image[:, :, 2] = image[:, :, 2] * self.std[2] + self.mean[2] return np.transpose(image, (2, 0, 1)) #process __imagenet_stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]} #__imagenet_stats = {'mean': [0.5, 0.5, 0.5], # 'std': [0.5, 0.5, 0.5]} #__imagenet_stats ={'mean': [0.2902, 0.2976, 0.3042], # 'std': [0.1271, 0.1330, 0.1431]} __imagenet_pca = { 'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]), 'eigvec': torch.Tensor([ [-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203], ]) } def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats): t_list = [ transforms.ToTensor(), transforms.Normalize(**normalize), ] #if scale_size != input_size: #t_list = [transforms.Scale((960,540))] + t_list return transforms.Compose(t_list) def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats): t_list = [ transforms.RandomCrop(input_size), transforms.ToTensor(), transforms.Normalize(**normalize), ] if scale_size != input_size: t_list = [transforms.Scale(scale_size)] + t_list transforms.Compose(t_list) def pad_random_crop(input_size, 
scale_size=None, normalize=__imagenet_stats): padding = int((scale_size - input_size) / 2) return transforms.Compose([ transforms.RandomCrop(input_size, padding=padding), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(**normalize), ]) def inception_preproccess(input_size, normalize=__imagenet_stats): return transforms.Compose([ transforms.RandomSizedCrop(input_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(**normalize) ]) def inception_color_preproccess(input_size, normalize=__imagenet_stats): return transforms.Compose([ #transforms.RandomSizedCrop(input_size), #transforms.RandomHorizontalFlip(), transforms.ToTensor(), ColorJitter( brightness=0.4, contrast=0.4, saturation=0.4, ), Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']), transforms.Normalize(**normalize) ]) def get_transform(name='imagenet', input_size=None, scale_size=None, normalize=None, augment=True): normalize = __imagenet_stats input_size = 256 if augment: return inception_color_preproccess(input_size, normalize=normalize) else: return scale_crop(input_size=input_size, scale_size=scale_size, normalize=normalize) class Lighting(object): """Lighting noise(AlexNet - style PCA - based noise)""" def __init__(self, alphastd, eigval, eigvec): self.alphastd = alphastd self.eigval = eigval self.eigvec = eigvec def __call__(self, img): if self.alphastd == 0: return img alpha = img.new().resize_(3).normal_(0, self.alphastd) rgb = self.eigvec.type_as(img).clone()\ .mul(alpha.view(1, 3).expand(3, 3))\ .mul(self.eigval.view(1, 3).expand(3, 3))\ .sum(1).squeeze() return img.add(rgb.view(3, 1, 1).expand_as(img)) class Grayscale(object): def __call__(self, img): gs = img.clone() gs[0].mul_(0.299).add_(0.587, gs[1]).add_(0.114, gs[2]) gs[1].copy_(gs[0]) gs[2].copy_(gs[0]) return gs class Saturation(object): def __init__(self, var): self.var = var def __call__(self, img): gs = Grayscale()(img) alpha = random.uniform(0, self.var) 
return img.lerp(gs, alpha) class Brightness(object): def __init__(self, var): self.var = var def __call__(self, img): gs = img.new().resize_as_(img).zero_() alpha = random.uniform(0, self.var) return img.lerp(gs, alpha) class Contrast(object): def __init__(self, var): self.var = var def __call__(self, img): gs = Grayscale()(img) gs.fill_(gs.mean()) alpha = random.uniform(0, self.var) return img.lerp(gs, alpha) class RandomOrder(object): """ Composes several transforms together in random order. """ def __init__(self, transforms): self.transforms = transforms def __call__(self, img): if self.transforms is None: return img order = torch.randperm(len(self.transforms)) for i in order: img = self.transforms[i](img) return img class ColorJitter(RandomOrder): def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4): self.transforms = [] if brightness != 0: self.transforms.append(Brightness(brightness)) if contrast != 0: self.transforms.append(Contrast(contrast)) if saturation != 0: self.transforms.append(Saturation(saturation)) def findmax(numpy, n_class): N, _, H, W = numpy.shape dnumpy = numpy.transpose(0, 2, 3, 1).reshape(-1, n_class).argmax(axis=1).reshape(N, H, W) return dnumpy def toRGB(img, dtype=np.uint8): dnumpy = (img.transpose(0, 2, 3, 1) * 255).astype(dtype) # 1,384,1248,3 dnumpy = np.round(dnumpy) dnumpy = np.clip(dnumpy, 0, 255) return dnumpy def draw_img1(rgb,segmentation, n_class, opacity): #rgb[segmentation > 0] *= 1 - opacity # mask mask = np.zeros_like(rgb, dtype=np.float32)#(384,1248,3) for clsid in range(n_class): mask += np.dot((segmentation == clsid)[..., np.newaxis], [label_map[clsid]]) # paste #rgb = np.clip(np.round(rgb + mask * opacity), 0, 255.0).astype(np.uint8) #rgb = np.clip(np.round(mask * opacity), 0, 255.0).astype(np.uint8) rgb = np.clip(np.round(mask * 1), 0, 255.0).astype(np.uint8) return rgb def direct_render1(img, predict_map,n_class=21,opacity=0.5): renders = [] rgb = toRGB(img, dtype=np.float32)# 1,384,1248,3 for i, 
segmentation in enumerate(predict_map): render = draw_img1(rgb[i], segmentation, n_class=n_class, opacity=opacity) renders.append(render) renders = np.array(renders) return renders
from torch.utils.data import Dataset import numpy as np import torch from torchvision import transforms import random from skimage import io, transform import os from PIL import Image IMG_EXTENSIONS = [ '.jpg', '.JPG', '.jpeg', '.JPEG', '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',] class_name = ('road', 'sidewalk', 'building', 'wall', 'fence', 'pole', 'traffic light', 'traffic sign', 'vegetation', 'terrain', 'sky', 'person', 'rider', 'car', 'truck', 'bus', 'train', 'motorcycle', 'bicycle', ) class_color = ((128, 64, 128), (244, 35, 232), (70, 70, 70), (102, 102, 156), (190, 153, 153), (153, 153, 153), (250, 170, 30), (220, 220, 0), (107, 142, 35), \ (152, 251, 152), (70, 130, 180), (220, 20, 60), (255, 0, 0), (0, 0, 142), \ (0, 0, 70), (0, 60, 100), (0, 80, 100), (0, 0, 230), (119, 11, 32), ) label_map = np.array(class_color) class_n = 19 mean = [0.2902, 0.2976, 0.3042] std = [0.1271, 0.1330, 0.1431] flip_rate = 0 shrink_rate = 1 def is_image_file(filename): return any(filename.endswith(extension) for extension in IMG_EXTENSIONS) def dataloader(filepath): image = [img for img in os.listdir(filepath) if img.find('_10') > -1] left_test = [filepath+img for img in image] return left_test def gettransform(img): h, w, c = img.shape h = int(h // 32 * shrink_rate) * 32 w = int(w // 32 * shrink_rate) * 32 # use interpolation for quality img=transform.resize(img, (h, w), order=1, mode='constant', preserve_range=True).astype('uint8') if np.random.random() < flip_rate: img= np.fliplr(img) # cause error if remove '.copy()' (prevent memory sharing) img= transforms.Compose([transforms.ToTensor(), transforms.Normalize(mean, std)])(img) return img def draw_img(rgb,segmentation, n_class): # mask mask = np.zeros_like(rgb, dtype=np.float32)#(384,1248,3) for clsid in range(n_class): mask += np.dot((segmentation == clsid)[..., np.newaxis], [label_map[clsid]]) rgb = np.clip(np.round(mask * 1), 0, 255.0).astype(np.uint8) return rgb def direct_render(label,n_class): renders = [] if not 
isinstance(label, torch.Tensor): label = torch.from_numpy(label) _,h, w = label.shape temp_label = np.zeros((1,h, w, 3), dtype='uint8')# B H W C np for i, segmentation in enumerate(label): render = draw_img(temp_label[i], segmentation, n_class) renders.append(render) renders = np.array(renders) return renders def visualize(label): if not isinstance(label, torch.Tensor): label = torch.from_numpy(label) h, w = label.shape temp_label = np.zeros((h, w, 3), dtype='uint8') for i in range(h): # how to write more elegantly for j in range(w): temp_label[i, j] = class_color[int(label[i, j])] return transforms.ToTensor()(temp_label) def denormalize(self, image): image = np.transpose(image, (1, 2, 0)) image[:, :, 0] = image[:, :, 0] * self.std[0] + self.mean[0] image[:, :, 1] = image[:, :, 1] * self.std[1] + self.mean[1] image[:, :, 2] = image[:, :, 2] * self.std[2] + self.mean[2] return np.transpose(image, (2, 0, 1)) #process __imagenet_stats = {'mean': [0.485, 0.456, 0.406], 'std': [0.229, 0.224, 0.225]} #__imagenet_stats = {'mean': [0.5, 0.5, 0.5], # 'std': [0.5, 0.5, 0.5]} #__imagenet_stats ={'mean': [0.2902, 0.2976, 0.3042], # 'std': [0.1271, 0.1330, 0.1431]} __imagenet_pca = { 'eigval': torch.Tensor([0.2175, 0.0188, 0.0045]), 'eigvec': torch.Tensor([ [-0.5675, 0.7192, 0.4009], [-0.5808, -0.0045, -0.8140], [-0.5836, -0.6948, 0.4203], ]) } def scale_crop(input_size, scale_size=None, normalize=__imagenet_stats): t_list = [ transforms.ToTensor(), transforms.Normalize(**normalize), ] #if scale_size != input_size: #t_list = [transforms.Scale((960,540))] + t_list return transforms.Compose(t_list) def scale_random_crop(input_size, scale_size=None, normalize=__imagenet_stats): t_list = [ transforms.RandomCrop(input_size), transforms.ToTensor(), transforms.Normalize(**normalize), ] if scale_size != input_size: t_list = [transforms.Scale(scale_size)] + t_list transforms.Compose(t_list) def pad_random_crop(input_size, scale_size=None, normalize=__imagenet_stats): padding = 
int((scale_size - input_size) / 2) return transforms.Compose([ transforms.RandomCrop(input_size, padding=padding), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(**normalize), ]) def inception_preproccess(input_size, normalize=__imagenet_stats): return transforms.Compose([ transforms.RandomSizedCrop(input_size), transforms.RandomHorizontalFlip(), transforms.ToTensor(), transforms.Normalize(**normalize) ]) def inception_color_preproccess(input_size, normalize=__imagenet_stats): return transforms.Compose([ #transforms.RandomSizedCrop(input_size), #transforms.RandomHorizontalFlip(), transforms.ToTensor(), ColorJitter( brightness=0.4, contrast=0.4, saturation=0.4, ), Lighting(0.1, __imagenet_pca['eigval'], __imagenet_pca['eigvec']), transforms.Normalize(**normalize) ]) def get_transform(name='imagenet', input_size=None, scale_size=None, normalize=None, augment=True): normalize = __imagenet_stats input_size = 256 if augment: return inception_color_preproccess(input_size, normalize=normalize) else: return scale_crop(input_size=input_size, scale_size=scale_size, normalize=normalize) class Lighting(object): """Lighting noise(AlexNet - style PCA - based noise)""" def __init__(self, alphastd, eigval, eigvec): self.alphastd = alphastd self.eigval = eigval self.eigvec = eigvec def __call__(self, img): if self.alphastd == 0: return img alpha = img.new().resize_(3).normal_(0, self.alphastd) rgb = self.eigvec.type_as(img).clone()\ .mul(alpha.view(1, 3).expand(3, 3))\ .mul(self.eigval.view(1, 3).expand(3, 3))\ .sum(1).squeeze() return img.add(rgb.view(3, 1, 1).expand_as(img)) class Grayscale(object): def __call__(self, img): gs = img.clone() gs[0].mul_(0.299).add_(0.587, gs[1]).add_(0.114, gs[2]) gs[1].copy_(gs[0]) gs[2].copy_(gs[0]) return gs class Saturation(object): def __init__(self, var): self.var = var def __call__(self, img): gs = Grayscale()(img) alpha = random.uniform(0, self.var) return img.lerp(gs, alpha) class Brightness(object): def 
__init__(self, var): self.var = var def __call__(self, img): gs = img.new().resize_as_(img).zero_() alpha = random.uniform(0, self.var) return img.lerp(gs, alpha) class Contrast(object): def __init__(self, var): self.var = var def __call__(self, img): gs = Grayscale()(img) gs.fill_(gs.mean()) alpha = random.uniform(0, self.var) return img.lerp(gs, alpha) class RandomOrder(object): """ Composes several transforms together in random order. """ def __init__(self, transforms): self.transforms = transforms def __call__(self, img): if self.transforms is None: return img order = torch.randperm(len(self.transforms)) for i in order: img = self.transforms[i](img) return img class ColorJitter(RandomOrder): def __init__(self, brightness=0.4, contrast=0.4, saturation=0.4): self.transforms = [] if brightness != 0: self.transforms.append(Brightness(brightness)) if contrast != 0: self.transforms.append(Contrast(contrast)) if saturation != 0: self.transforms.append(Saturation(saturation)) def findmax(numpy, n_class): N, _, H, W = numpy.shape dnumpy = numpy.transpose(0, 2, 3, 1).reshape(-1, n_class).argmax(axis=1).reshape(N, H, W) return dnumpy def toRGB(img, dtype=np.uint8): dnumpy = (img.transpose(0, 2, 3, 1) * 255).astype(dtype) # 1,384,1248,3 dnumpy = np.round(dnumpy) dnumpy = np.clip(dnumpy, 0, 255) return dnumpy def draw_img1(rgb,segmentation, n_class, opacity): #rgb[segmentation > 0] *= 1 - opacity # mask mask = np.zeros_like(rgb, dtype=np.float32)#(384,1248,3) for clsid in range(n_class): mask += np.dot((segmentation == clsid)[..., np.newaxis], [label_map[clsid]]) # paste #rgb = np.clip(np.round(rgb + mask * opacity), 0, 255.0).astype(np.uint8) #rgb = np.clip(np.round(mask * opacity), 0, 255.0).astype(np.uint8) rgb = np.clip(np.round(mask * 1), 0, 255.0).astype(np.uint8) return rgb def direct_render1(img, predict_map,n_class=21,opacity=0.5): renders = [] rgb = toRGB(img, dtype=np.float32)# 1,384,1248,3 for i, segmentation in enumerate(predict_map): render = draw_img1(rgb[i], 
segmentation, n_class=n_class, opacity=opacity) renders.append(render) renders = np.array(renders) return renders
en
0.425143
# use interpolation for quality # cause error if remove '.copy()' (prevent memory sharing) # mask #(384,1248,3) # B H W C np # how to write more elegantly #process #__imagenet_stats = {'mean': [0.5, 0.5, 0.5], # 'std': [0.5, 0.5, 0.5]} #__imagenet_stats ={'mean': [0.2902, 0.2976, 0.3042], # 'std': [0.1271, 0.1330, 0.1431]} #if scale_size != input_size: #t_list = [transforms.Scale((960,540))] + t_list #transforms.RandomSizedCrop(input_size), #transforms.RandomHorizontalFlip(), Lighting noise(AlexNet - style PCA - based noise) Composes several transforms together in random order. # 1,384,1248,3 #rgb[segmentation > 0] *= 1 - opacity # mask #(384,1248,3) # paste #rgb = np.clip(np.round(rgb + mask * opacity), 0, 255.0).astype(np.uint8) #rgb = np.clip(np.round(mask * opacity), 0, 255.0).astype(np.uint8) # 1,384,1248,3
2.353125
2
tests/test_version.py
drvinceknight/HierarchicalPromotion
0
6625265
import hierarchy as hrcy def test_version_is_str(): assert type(hrcy.__version__) is str
import hierarchy as hrcy def test_version_is_str(): assert type(hrcy.__version__) is str
none
1
2.018582
2
django-rgd-imagery/rgd_imagery/models/processed.py
ResonantGeoData/ResonantGeoData
40
6625266
<filename>django-rgd-imagery/rgd_imagery/models/processed.py from django.contrib.gis.db import models from django.utils.translation import gettext_lazy as _ from django_extensions.db.models import TimeStampedModel from rgd.models import ChecksumFile from rgd.models.mixins import PermissionPathMixin, Status, TaskEventMixin from rgd_imagery.tasks import jobs from .base import Image class ProcessedImageGroup(TimeStampedModel): class ProcessTypes(models.TextChoices): ARBITRARY = 'arbitrary', _('Arbitrarily processed externally') COG = 'cog', _('Converted to Cloud Optimized GeoTIFF') REGION = 'region', _('Extract subregion') RESAMPLE = 'resample', _('Resample by factor') MOSAIC = 'mosaic', _('Mosaic multiple images') process_type = models.CharField( max_length=20, default=ProcessTypes.ARBITRARY, choices=ProcessTypes.choices ) parameters = models.JSONField(null=True, blank=True) # TODO: permissions_paths def _post_save(self, *args, **kwargs): source_images = ProcessedImage.objects.filter(group=self) for processed_image in source_images: if processed_image.status not in [Status.QUEUED, Status.RUNNING]: processed_image.save() class ProcessedImage(TimeStampedModel, TaskEventMixin, PermissionPathMixin): """Base class for processed images.""" task_funcs = (jobs.task_run_processed_image,) source_images = models.ManyToManyField(Image) processed_image = models.ForeignKey( Image, on_delete=models.SET_NULL, null=True, blank=True, related_name='+' ) ancillary_files = models.ManyToManyField(ChecksumFile, blank=True, related_name='+') def _pre_delete(self, *args, **kwargs): if self.processed_image: self.processed_image.file.delete() # TODO: clean up ancillary_files - this throws an error when done through the admin interface # self.ancillary_files.all().delete() group = models.ForeignKey(ProcessedImageGroup, on_delete=models.CASCADE) permissions_paths = [('source_images', Image)]
<filename>django-rgd-imagery/rgd_imagery/models/processed.py from django.contrib.gis.db import models from django.utils.translation import gettext_lazy as _ from django_extensions.db.models import TimeStampedModel from rgd.models import ChecksumFile from rgd.models.mixins import PermissionPathMixin, Status, TaskEventMixin from rgd_imagery.tasks import jobs from .base import Image class ProcessedImageGroup(TimeStampedModel): class ProcessTypes(models.TextChoices): ARBITRARY = 'arbitrary', _('Arbitrarily processed externally') COG = 'cog', _('Converted to Cloud Optimized GeoTIFF') REGION = 'region', _('Extract subregion') RESAMPLE = 'resample', _('Resample by factor') MOSAIC = 'mosaic', _('Mosaic multiple images') process_type = models.CharField( max_length=20, default=ProcessTypes.ARBITRARY, choices=ProcessTypes.choices ) parameters = models.JSONField(null=True, blank=True) # TODO: permissions_paths def _post_save(self, *args, **kwargs): source_images = ProcessedImage.objects.filter(group=self) for processed_image in source_images: if processed_image.status not in [Status.QUEUED, Status.RUNNING]: processed_image.save() class ProcessedImage(TimeStampedModel, TaskEventMixin, PermissionPathMixin): """Base class for processed images.""" task_funcs = (jobs.task_run_processed_image,) source_images = models.ManyToManyField(Image) processed_image = models.ForeignKey( Image, on_delete=models.SET_NULL, null=True, blank=True, related_name='+' ) ancillary_files = models.ManyToManyField(ChecksumFile, blank=True, related_name='+') def _pre_delete(self, *args, **kwargs): if self.processed_image: self.processed_image.file.delete() # TODO: clean up ancillary_files - this throws an error when done through the admin interface # self.ancillary_files.all().delete() group = models.ForeignKey(ProcessedImageGroup, on_delete=models.CASCADE) permissions_paths = [('source_images', Image)]
en
0.599685
# TODO: permissions_paths Base class for processed images. # TODO: clean up ancillary_files - this throws an error when done through the admin interface # self.ancillary_files.all().delete()
2.022628
2
plugins/rapid7_insightvm/komand_rapid7_insightvm/actions/get_scan/schema.py
lukaszlaszuk/insightconnect-plugins
46
6625267
# GENERATED BY KOMAND SDK - DO NOT EDIT import komand import json class Component: DESCRIPTION = "Get the status of a scan" class Input: SCAN_ID = "scan_id" class Output: ASSETS = "assets" DURATION = "duration" ENDTIME = "endTime" ENGINENAME = "engineName" ID = "id" LINKS = "links" SCANNAME = "scanName" SCANTYPE = "scanType" STARTTIME = "startTime" STATUS = "status" VULNERABILITIES = "vulnerabilities" class GetScanInput(komand.Input): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "scan_id": { "type": "string", "title": "Scan ID", "description": "ID of the scan to obtain", "order": 1 } }, "required": [ "scan_id" ] } """) def __init__(self): super(self.__class__, self).__init__(self.schema) class GetScanOutput(komand.Output): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "assets": { "type": "integer", "title": "Assets", "description": "Number of assets within the scan", "order": 3 }, "duration": { "type": "string", "title": "Duration", "description": "Duration of the scan in ISO8601 format", "order": 7 }, "endTime": { "type": "string", "title": "End Time", "description": "End time of the scan in ISO8601 format", "order": 9 }, "engineName": { "type": "string", "title": "Engine Name", "description": "Name of the engine used for the scan", "order": 8 }, "id": { "type": "integer", "title": "ID", "description": "ID of the scan", "order": 10 }, "links": { "type": "array", "title": "Links", "description": "Hypermedia links to corresponding or related resources", "items": { "$ref": "#/definitions/link" }, "order": 4 }, "scanName": { "type": "string", "title": "Scan Name", "description": "User-driven scan name for the scan", "order": 11 }, "scanType": { "type": "string", "title": "Scan Type", "description": "Scan type (manual, automated, scheduled)", "order": 2 }, "startTime": { "type": "string", "title": "Start Time", "description": "Start time of the scan in ISO8601 format", "order": 6 }, "status": { 
"type": "string", "title": "Status", "description": "Scan status (aborted, unknown, running, finished, stopped, error, paused, dispatched or integrating)", "order": 1 }, "vulnerabilities": { "$ref": "#/definitions/vulnerabilities_count", "title": "Vulnerabilities", "description": "Counts of vulnerabilities found within the scan", "order": 5 } }, "definitions": { "link": { "type": "object", "title": "link", "properties": { "href": { "type": "string", "title": "URL", "description": "A hypertext reference, which is either a URI (see RFC 3986) or URI template (see RFC 6570)", "order": 1 }, "rel": { "type": "string", "title": "Rel", "description": "Link relation type following RFC 5988", "order": 2 } } }, "vulnerabilities_count": { "type": "object", "title": "vulnerabilities_count", "properties": { "critical": { "type": "integer", "title": "Critical", "description": "Number of critical vulnerabilities", "order": 1 }, "moderate": { "type": "integer", "title": "Moderate", "description": "Number of moderate vulnerabilities", "order": 2 }, "severe": { "type": "integer", "title": "Severe", "description": "Number of severe vulnerabilities", "order": 3 }, "total": { "type": "integer", "title": "Total number of vulnerabilities", "description": "Total", "order": 4 } } } } } """) def __init__(self): super(self.__class__, self).__init__(self.schema)
# GENERATED BY KOMAND SDK - DO NOT EDIT import komand import json class Component: DESCRIPTION = "Get the status of a scan" class Input: SCAN_ID = "scan_id" class Output: ASSETS = "assets" DURATION = "duration" ENDTIME = "endTime" ENGINENAME = "engineName" ID = "id" LINKS = "links" SCANNAME = "scanName" SCANTYPE = "scanType" STARTTIME = "startTime" STATUS = "status" VULNERABILITIES = "vulnerabilities" class GetScanInput(komand.Input): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "scan_id": { "type": "string", "title": "Scan ID", "description": "ID of the scan to obtain", "order": 1 } }, "required": [ "scan_id" ] } """) def __init__(self): super(self.__class__, self).__init__(self.schema) class GetScanOutput(komand.Output): schema = json.loads(""" { "type": "object", "title": "Variables", "properties": { "assets": { "type": "integer", "title": "Assets", "description": "Number of assets within the scan", "order": 3 }, "duration": { "type": "string", "title": "Duration", "description": "Duration of the scan in ISO8601 format", "order": 7 }, "endTime": { "type": "string", "title": "End Time", "description": "End time of the scan in ISO8601 format", "order": 9 }, "engineName": { "type": "string", "title": "Engine Name", "description": "Name of the engine used for the scan", "order": 8 }, "id": { "type": "integer", "title": "ID", "description": "ID of the scan", "order": 10 }, "links": { "type": "array", "title": "Links", "description": "Hypermedia links to corresponding or related resources", "items": { "$ref": "#/definitions/link" }, "order": 4 }, "scanName": { "type": "string", "title": "Scan Name", "description": "User-driven scan name for the scan", "order": 11 }, "scanType": { "type": "string", "title": "Scan Type", "description": "Scan type (manual, automated, scheduled)", "order": 2 }, "startTime": { "type": "string", "title": "Start Time", "description": "Start time of the scan in ISO8601 format", "order": 6 }, "status": { 
"type": "string", "title": "Status", "description": "Scan status (aborted, unknown, running, finished, stopped, error, paused, dispatched or integrating)", "order": 1 }, "vulnerabilities": { "$ref": "#/definitions/vulnerabilities_count", "title": "Vulnerabilities", "description": "Counts of vulnerabilities found within the scan", "order": 5 } }, "definitions": { "link": { "type": "object", "title": "link", "properties": { "href": { "type": "string", "title": "URL", "description": "A hypertext reference, which is either a URI (see RFC 3986) or URI template (see RFC 6570)", "order": 1 }, "rel": { "type": "string", "title": "Rel", "description": "Link relation type following RFC 5988", "order": 2 } } }, "vulnerabilities_count": { "type": "object", "title": "vulnerabilities_count", "properties": { "critical": { "type": "integer", "title": "Critical", "description": "Number of critical vulnerabilities", "order": 1 }, "moderate": { "type": "integer", "title": "Moderate", "description": "Number of moderate vulnerabilities", "order": 2 }, "severe": { "type": "integer", "title": "Severe", "description": "Number of severe vulnerabilities", "order": 3 }, "total": { "type": "integer", "title": "Total number of vulnerabilities", "description": "Total", "order": 4 } } } } } """) def __init__(self): super(self.__class__, self).__init__(self.schema)
en
0.355766
# GENERATED BY KOMAND SDK - DO NOT EDIT { "type": "object", "title": "Variables", "properties": { "scan_id": { "type": "string", "title": "Scan ID", "description": "ID of the scan to obtain", "order": 1 } }, "required": [ "scan_id" ] } { "type": "object", "title": "Variables", "properties": { "assets": { "type": "integer", "title": "Assets", "description": "Number of assets within the scan", "order": 3 }, "duration": { "type": "string", "title": "Duration", "description": "Duration of the scan in ISO8601 format", "order": 7 }, "endTime": { "type": "string", "title": "End Time", "description": "End time of the scan in ISO8601 format", "order": 9 }, "engineName": { "type": "string", "title": "Engine Name", "description": "Name of the engine used for the scan", "order": 8 }, "id": { "type": "integer", "title": "ID", "description": "ID of the scan", "order": 10 }, "links": { "type": "array", "title": "Links", "description": "Hypermedia links to corresponding or related resources", "items": { "$ref": "#/definitions/link" }, "order": 4 }, "scanName": { "type": "string", "title": "Scan Name", "description": "User-driven scan name for the scan", "order": 11 }, "scanType": { "type": "string", "title": "Scan Type", "description": "Scan type (manual, automated, scheduled)", "order": 2 }, "startTime": { "type": "string", "title": "Start Time", "description": "Start time of the scan in ISO8601 format", "order": 6 }, "status": { "type": "string", "title": "Status", "description": "Scan status (aborted, unknown, running, finished, stopped, error, paused, dispatched or integrating)", "order": 1 }, "vulnerabilities": { "$ref": "#/definitions/vulnerabilities_count", "title": "Vulnerabilities", "description": "Counts of vulnerabilities found within the scan", "order": 5 } }, "definitions": { "link": { "type": "object", "title": "link", "properties": { "href": { "type": "string", "title": "URL", "description": "A hypertext reference, which is either a URI (see RFC 3986) or URI 
template (see RFC 6570)", "order": 1 }, "rel": { "type": "string", "title": "Rel", "description": "Link relation type following RFC 5988", "order": 2 } } }, "vulnerabilities_count": { "type": "object", "title": "vulnerabilities_count", "properties": { "critical": { "type": "integer", "title": "Critical", "description": "Number of critical vulnerabilities", "order": 1 }, "moderate": { "type": "integer", "title": "Moderate", "description": "Number of moderate vulnerabilities", "order": 2 }, "severe": { "type": "integer", "title": "Severe", "description": "Number of severe vulnerabilities", "order": 3 }, "total": { "type": "integer", "title": "Total number of vulnerabilities", "description": "Total", "order": 4 } } } } }
2.332413
2
src/pretix/helpers/models.py
MaxRink/pretix
0
6625268
<reponame>MaxRink/pretix from django.db import models class Thumbnail(models.Model): source = models.CharField(max_length=255) size = models.CharField(max_length=255) thumb = models.FileField(upload_to='pub/thumbs/', max_length=255) class Meta: unique_together = (('source', 'size'),)
from django.db import models class Thumbnail(models.Model): source = models.CharField(max_length=255) size = models.CharField(max_length=255) thumb = models.FileField(upload_to='pub/thumbs/', max_length=255) class Meta: unique_together = (('source', 'size'),)
none
1
2.180432
2
utils/db_manage.py
Jaylen0829/tornado-RESTfulAPI
0
6625269
<filename>utils/db_manage.py # 设置模块路径,否则 apps 无法导入 import os, sys base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(base_path) sys.path.append(os.path.join(base_path,'apps')) from peewee import MySQLDatabase from apps.users.models import User, Group, Auth, AuthPermission # from users.models import User from base.settings import sync_db from playhouse.migrate import * def run_create(): '''生成表''' sync_db.connect() # sync_db.create_tables([UserProfile]) # 注意:如果检查没问题后数据库仍然报 (1215, 'Cannot add foreign key constraint') 那么需要使用下面的方式创建表,具体报错原因未知,可能create_tables内执行顺序不一致 # UserProfile.create_table() Group.create_table() Auth.create_table() AuthPermission.create_table() User.create_table() sync_db.close() def run_update(): '''修改表''' sync_db.connect() migrator = MySQLMigrator(sync_db) # 由于peewee没办法像Django ORM那样迁移数据,因此如果在表创建好了之后还要对表字段做操作,就必须依靠peewee的migrate来操作了 # 具体文档:http://docs.peewee-orm.com/en/latest/peewee/playhouse.html?highlight=migrate#example-usage # 下面的示例是用来修改表字段的名称,将多个表的add_time字段改为create_time字段 with sync_db.atomic(): migrate( migrator.rename_column('userprofile', 'add_time', 'create_time'), migrator.rename_column('verifyemailcode', 'add_time', 'create_time'), migrator.rename_column('category', 'add_time', 'create_time'), migrator.rename_column('post', 'add_time', 'create_time'), ) sync_db.close() if __name__ == '__main__': run_create()
<filename>utils/db_manage.py # 设置模块路径,否则 apps 无法导入 import os, sys base_path = os.path.dirname(os.path.dirname(os.path.abspath(__file__))) sys.path.append(base_path) sys.path.append(os.path.join(base_path,'apps')) from peewee import MySQLDatabase from apps.users.models import User, Group, Auth, AuthPermission # from users.models import User from base.settings import sync_db from playhouse.migrate import * def run_create(): '''生成表''' sync_db.connect() # sync_db.create_tables([UserProfile]) # 注意:如果检查没问题后数据库仍然报 (1215, 'Cannot add foreign key constraint') 那么需要使用下面的方式创建表,具体报错原因未知,可能create_tables内执行顺序不一致 # UserProfile.create_table() Group.create_table() Auth.create_table() AuthPermission.create_table() User.create_table() sync_db.close() def run_update(): '''修改表''' sync_db.connect() migrator = MySQLMigrator(sync_db) # 由于peewee没办法像Django ORM那样迁移数据,因此如果在表创建好了之后还要对表字段做操作,就必须依靠peewee的migrate来操作了 # 具体文档:http://docs.peewee-orm.com/en/latest/peewee/playhouse.html?highlight=migrate#example-usage # 下面的示例是用来修改表字段的名称,将多个表的add_time字段改为create_time字段 with sync_db.atomic(): migrate( migrator.rename_column('userprofile', 'add_time', 'create_time'), migrator.rename_column('verifyemailcode', 'add_time', 'create_time'), migrator.rename_column('category', 'add_time', 'create_time'), migrator.rename_column('post', 'add_time', 'create_time'), ) sync_db.close() if __name__ == '__main__': run_create()
zh
0.721383
# 设置模块路径,否则 apps 无法导入 # from users.models import User 生成表 # sync_db.create_tables([UserProfile]) # 注意:如果检查没问题后数据库仍然报 (1215, 'Cannot add foreign key constraint') 那么需要使用下面的方式创建表,具体报错原因未知,可能create_tables内执行顺序不一致 # UserProfile.create_table() 修改表 # 由于peewee没办法像Django ORM那样迁移数据,因此如果在表创建好了之后还要对表字段做操作,就必须依靠peewee的migrate来操作了 # 具体文档:http://docs.peewee-orm.com/en/latest/peewee/playhouse.html?highlight=migrate#example-usage # 下面的示例是用来修改表字段的名称,将多个表的add_time字段改为create_time字段
2.333197
2
var/spack/repos/builtin/packages/intltool/package.py
kkauder/spack
2,360
6625270
<gh_stars>1000+ # Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Intltool(AutotoolsPackage): """intltool is a set of tools to centralize translation of many different file formats using GNU gettext-compatible PO files.""" homepage = 'https://freedesktop.org/wiki/Software/intltool/' url = 'https://launchpad.net/intltool/trunk/0.51.0/+download/intltool-0.51.0.tar.gz' list_url = 'https://launchpad.net/intltool/+download' version('0.51.0', sha256='67c74d94196b153b774ab9f89b2fa6c6ba79352407037c8c14d5aeb334e959cd') # requires XML::Parser perl module depends_on('perl-xml-parser', type=('build', 'run')) depends_on('perl@5.8.1:', type=('build', 'run')) # patch for "Unescaped left brace in regex is illegal here in regex" # warnings witn perl 5.22 and errors with perl 5.26 and newer patch('https://launchpadlibrarian.net/216052398/intltool-perl-5.22.patch', sha256='ca9d6562f29f06c64150f50369a24402b7aa01a3a0dc73dce55106f3224330a1', level=0) def check(self): # `make check` passes but causes `make install` to fail pass def _make_executable(self, name): return Executable(join_path(self.prefix.bin, name)) def setup_dependent_package(self, module, dependent_spec): # intltool is very likely to be a build dependency, # so we add the tools it provides to the dependent module executables = [ 'intltool-extract', 'intltoolize', 'intltool-merge', 'intltool-prepare', 'intltool-update' ] for name in executables: setattr(module, name, self._make_executable(name))
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) from spack import * class Intltool(AutotoolsPackage): """intltool is a set of tools to centralize translation of many different file formats using GNU gettext-compatible PO files.""" homepage = 'https://freedesktop.org/wiki/Software/intltool/' url = 'https://launchpad.net/intltool/trunk/0.51.0/+download/intltool-0.51.0.tar.gz' list_url = 'https://launchpad.net/intltool/+download' version('0.51.0', sha256='67c74d94196b153b774ab9f89b2fa6c6ba79352407037c8c14d5aeb334e959cd') # requires XML::Parser perl module depends_on('perl-xml-parser', type=('build', 'run')) depends_on('perl@5.8.1:', type=('build', 'run')) # patch for "Unescaped left brace in regex is illegal here in regex" # warnings witn perl 5.22 and errors with perl 5.26 and newer patch('https://launchpadlibrarian.net/216052398/intltool-perl-5.22.patch', sha256='ca9d6562f29f06c64150f50369a24402b7aa01a3a0dc73dce55106f3224330a1', level=0) def check(self): # `make check` passes but causes `make install` to fail pass def _make_executable(self, name): return Executable(join_path(self.prefix.bin, name)) def setup_dependent_package(self, module, dependent_spec): # intltool is very likely to be a build dependency, # so we add the tools it provides to the dependent module executables = [ 'intltool-extract', 'intltoolize', 'intltool-merge', 'intltool-prepare', 'intltool-update' ] for name in executables: setattr(module, name, self._make_executable(name))
en
0.763735
# Copyright 2013-2021 Lawrence Livermore National Security, LLC and other # Spack Project Developers. See the top-level COPYRIGHT file for details. # # SPDX-License-Identifier: (Apache-2.0 OR MIT) intltool is a set of tools to centralize translation of many different file formats using GNU gettext-compatible PO files. # requires XML::Parser perl module # patch for "Unescaped left brace in regex is illegal here in regex" # warnings witn perl 5.22 and errors with perl 5.26 and newer # `make check` passes but causes `make install` to fail # intltool is very likely to be a build dependency, # so we add the tools it provides to the dependent module
1.838875
2
src/wechaty_plugin_contrib/contrib/chat_history_plugin/data.py
wj-Mcat/python-wechaty-plugin-contrib
11
6625271
<reponame>wj-Mcat/python-wechaty-plugin-contrib<gh_stars>10-100 """ handle the chat history data """ from __future__ import annotations
""" handle the chat history data """ from __future__ import annotations
en
0.373569
handle the chat history data
1.058036
1
_broken/test_delete_annotation_all.py
SU-ECE-17-7/ibeis
0
6625272
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- # TODO: ADD COPYRIGHT TAG from __future__ import absolute_import, division, print_function import multiprocessing import utool print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_DELETE_ANNOTATION_ALL]') def TEST_DELETE_ANNOTATION_ALL(ibs, back): aid_list = ibs.get_valid_aids() thumbpath_list = ibs.get_annot_chip_thumbpath(aid_list) ibs.delete_annots(aid_list) aid_list = ibs.get_valid_aids() cid_list = ibs.get_valid_cids() fid_list = ibs.get_valid_fids() assert len(aid_list) == 0, "Didn't delete all ANNOTATIONs" assert len(cid_list) == 0, "Didn't delete all chips" assert len(fid_list) == 0, "Didn't delete all features" for thumbpath in thumbpath_list: assert not utool.checkpath(thumbpath), "Thumbnail still exists" return locals() if __name__ == '__main__': multiprocessing.freeze_support() # For windows import ibeis main_locals = ibeis.main(defaultdb='testdb_empty', gui=False, allow_newdir=True, delete_ibsdir=True) ibs = main_locals['ibs'] # IBEIS Control back = main_locals['back'] # IBEIS GUI backend test_locals = utool.run_test(TEST_DELETE_ANNOTATION_ALL, ibs, back) exec(utool.execstr_dict(test_locals, 'test_locals')) exec(utool.ipython_execstr())
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- # TODO: ADD COPYRIGHT TAG from __future__ import absolute_import, division, print_function import multiprocessing import utool print, print_, printDBG, rrr, profile = utool.inject(__name__, '[TEST_DELETE_ANNOTATION_ALL]') def TEST_DELETE_ANNOTATION_ALL(ibs, back): aid_list = ibs.get_valid_aids() thumbpath_list = ibs.get_annot_chip_thumbpath(aid_list) ibs.delete_annots(aid_list) aid_list = ibs.get_valid_aids() cid_list = ibs.get_valid_cids() fid_list = ibs.get_valid_fids() assert len(aid_list) == 0, "Didn't delete all ANNOTATIONs" assert len(cid_list) == 0, "Didn't delete all chips" assert len(fid_list) == 0, "Didn't delete all features" for thumbpath in thumbpath_list: assert not utool.checkpath(thumbpath), "Thumbnail still exists" return locals() if __name__ == '__main__': multiprocessing.freeze_support() # For windows import ibeis main_locals = ibeis.main(defaultdb='testdb_empty', gui=False, allow_newdir=True, delete_ibsdir=True) ibs = main_locals['ibs'] # IBEIS Control back = main_locals['back'] # IBEIS GUI backend test_locals = utool.run_test(TEST_DELETE_ANNOTATION_ALL, ibs, back) exec(utool.execstr_dict(test_locals, 'test_locals')) exec(utool.ipython_execstr())
en
0.316397
#!/usr/bin/env python2.7 # -*- coding: utf-8 -*- # TODO: ADD COPYRIGHT TAG # For windows # IBEIS Control # IBEIS GUI backend
1.969762
2
Natwest_Interview/distinct_integer.py
meghakashyap/Natwest_Questions
0
6625273
def nonDivisibleSubset(S,k): count = [0] * k for i in S: remainder = i % k count[remainder] +=1 ans = min( count[0] , 1) if k % 2 == 0: ans += min(count[k//2] ,1 ) for i in range( 1 , k//2 + 1): if i != k - i: ans += max(count[i] , count[k-i]) return ans n,k = map(int,input("1.Enter the lenght of list 2.Enter the divisor: ").split()) S = list(map(int,input("Enter the elements of list : ").split(" ",n))) print(nonDivisibleSubset(S,k))
def nonDivisibleSubset(S,k): count = [0] * k for i in S: remainder = i % k count[remainder] +=1 ans = min( count[0] , 1) if k % 2 == 0: ans += min(count[k//2] ,1 ) for i in range( 1 , k//2 + 1): if i != k - i: ans += max(count[i] , count[k-i]) return ans n,k = map(int,input("1.Enter the lenght of list 2.Enter the divisor: ").split()) S = list(map(int,input("Enter the elements of list : ").split(" ",n))) print(nonDivisibleSubset(S,k))
none
1
3.564579
4
functions.py
glauciocorreia/projeto-3estagio-python
0
6625274
#=====================================================================================================================================================================================# #FUNÇÕES ''' Sistemas para Internet 1º período 2017.2 Projeto Algoritmos e Programação Professor: Lamark Aluno: <NAME> E-mail: <EMAIL> Matricula: 1720020743 Avaliação Terceiro Estágio ''' #=====================================================================================================================================================================================# ''' O projeto do terceiro estágio tem como objetivo pôr em prática o conteúdo aprendido durante todo o desenvolvimento da disciplina Algoritmos e Programação. A linguagem utilizada para criar os itens solicitados deverá ser Python, a partir da versão 3. Este trabalho será composto por 5 partes, que devem ser feitas de forma sequencial e complementar. ''' #=====================================================================================================================================================================================# #Seção de importação de libs/módulos em "fuctions.py", sendo "smtplib" já nativa no Python e "reportlab" instalada pelo "Pip" #Importando lib/módulo "Reportlab" (item "canvas", especificamente), focada na criação e manipulação de arquivos PDF dentro do Python from reportlab.pdfgen import canvas #Importando lib/módulo "smtplib" e alguns itens focadas para ajustes e envio de e-mails dentro do Python import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.mime.base import MIMEBase from email import encoders #=====================================================================================================================================================================================# ''' 1) [5 Pontos] Fazer uma análise no arquivo convidados.txt, verificando seu conteúdo, como as informações estão separadas e organizadas. 
Em seguida, é necessário coletar todos os dados e organizar em um dicionário contendo apenas as seguintes informações nome como chave e telefone como valor. Deverá ser feita uma função que receba umaString (Str) com os dados do arquivo e retorne o dicionário. ''' #=====================================================================================================================================================================================# #Definindo a função que irá ler o arquivo "convidados.txt" (string) e irá retornar um dicionário, onde o nome de cada indivíduo será a 'chave' e o telefone o 'valor'. def func_convidados(txt): #Definição da função convidados = {} #Criação do dicionário em branco txt_linhas = txt.readlines() #Leitura do arquivo .txt (string), o transformando em uma lista for linx in txt_linhas[1:]: #Laço para ler linha por linha, desconsiderando a primeira linha da lista e considerando todo o resto lista = linx.split() #Criação da variável "lista" onde irá remover o espaço (" ") na linha lida, registrando esses novos dados for liny in lista: #Laço para verificar a linha na nova variável "lista" nome,telefone = liny.split('-') #Remoção do hífen/traço ("-") no indíce da lista que está sendo lido convidados[nome] = telefone #Organização das chaves e dos valores no dicionário "convidados" return convidados #Retorna o dicionário "convidados", com os nomes do convidados como chave e os telefones dos convidados como valor #=====================================================================================================================================================================================# ''' 2) [2 Pontos] O dicionário resultante do item anterior deverá ser usado para gerar um pdf [com o formato de uma lista de nomes com seus respectivos telefones] ''' #=====================================================================================================================================================================================# 
#Definindo a função que irá pegar o parametro "lista" e transformar e organizar em um arquivo PDF def func_pdf(lista): #Definição da função doc = canvas.Canvas("Contatos.pdf") #Criando um arquivo PDF com nome "Contatos.pdf", o atribuindo à "doc" doc.setLineWidth(.2) #Estipulando a espessura das linhas no arquivo doc.setFont('Helvetica-Bold', 14) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) doc.drawString(170,700,'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') #Desenhando/escrevendo um simples componente para o visual da página doc.setFont('Helvetica-Bold', 16) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 16) doc.drawString(220,660,'----Lista de Contatos----') #Desenhando/escrevendo um texto-título para a lista de contatos na página doc.setFont('Helvetica-Bold', 12) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica; Tamanho: 12) doc.drawString(250,595,'Nome e Telefone') #Desenhando/escrevendo um texto-subtítulo para a lista de contatos na página y = 580 #Definindo que a variável "y" (eixo) terá o valor "580" ("posição/pontuação" indo ao canto inferior da página) doc.setFont('Helvetica', 12) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) for nome,telefone in lista.items(): #Laço de repetição que irá passar por cada item de "lista" y -= 15 #A primeita etapa do laço é diminuir em 15 pontos/posição o eixo Y ("de cima para baixo") doc.drawString(250,y, '{} : {}'.format(nome,telefone)) #Irá desenhar/escrever na página a chave e o valor lidos (Nome e Telefone) na posição x = 250 e y estipulada acima doc.setFont('Helvetica-Bold', 14) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) doc.drawString(170,y-40,'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') #Desenhando/escrevendo um simples componente para o visual da página doc.save() #Salvando todas as modificações do arquivo 
PDF em "doc" return doc #Retorna "doc" com o arquivo pdf (Contatos.pdf) com todas as alterações feitas #=====================================================================================================================================================================================# ''' 3) [1 Ponto] O arquivo pdf gerado deverá ser enviado por e-mail para o seguinte: Endereço: <EMAIL> Assunto: Contatos – Equipe [nome_da_equipe] Conteúdo: Segue os contatos em anexo. Anexar documento ''' #=====================================================================================================================================================================================# def func_email(): #Definição da função fromaddr = "<EMAIL>" #Estipulando o remetente do e-mail toaddr = "<EMAIL>" #Estipulando o destinatário do e-mail msg = MIMEMultipart() #Construindo o "header" do e-mail msg['From'] = fromaddr #Selecionando o remetente msg['To'] = toaddr #Selecionando o destinatário msg['Subject'] = "Contatos - Equipe Gláucio Correia Dutra" #Estipulando o "Assunto" do e-mail body = "Seguem os contatos em anexo!" 
#Conteúdo do "Corpo" do e-mail msg.attach(MIMEText(body, 'plain')) #Inserindo o "body" como texto filename = "Contatos.pdf" #Estipulando o nome do arquivo binário em PDF a ser anexado attachment = open(filename, "rb") #Abrindo (lendo documento binário ("rb")) o arquivo em PDF e vinculando à variável "attachment" part = MIMEBase('application', 'octet-stream') #Processo necessário para codificar o arquivo PDF para anexo - parte 1 part.set_payload((attachment).read()) #Processo necessário para codificar o arquivo PDF para anexo - parte 2 encoders.encode_base64(part) #Processo necessário para codificar o arquivo PDF para anexo - parte 3 part.add_header('Content-Disposition', "attachment; filename= %s" % filename) #Processo necessário para codificar o arquivo PDF para anexo - parte 4 msg.attach(part) #Inserindo o anexo ao e-mail server = smtplib.SMTP('smtp.gmail.com', 587) #Configurando SMTP e porta do GMAIL server.starttls() #Configurando componentes de segurança server.login(fromaddr, "testes789") #Realizando login, selecionando o e-mail e inserindo a senha de acesso text = msg.as_string() #Transformando o conteúdo do e-mail em string server.sendmail(fromaddr, toaddr, text) #Enviando e-mail selecionando o e-mail do remetente, do destinatário e o conteúdo do e-mail server.quit() #Saindo do servidor do e-mail #=====================================================================================================================================================================================#
#=====================================================================================================================================================================================# #FUNÇÕES ''' Sistemas para Internet 1º período 2017.2 Projeto Algoritmos e Programação Professor: Lamark Aluno: <NAME> E-mail: <EMAIL> Matricula: 1720020743 Avaliação Terceiro Estágio ''' #=====================================================================================================================================================================================# ''' O projeto do terceiro estágio tem como objetivo pôr em prática o conteúdo aprendido durante todo o desenvolvimento da disciplina Algoritmos e Programação. A linguagem utilizada para criar os itens solicitados deverá ser Python, a partir da versão 3. Este trabalho será composto por 5 partes, que devem ser feitas de forma sequencial e complementar. ''' #=====================================================================================================================================================================================# #Seção de importação de libs/módulos em "fuctions.py", sendo "smtplib" já nativa no Python e "reportlab" instalada pelo "Pip" #Importando lib/módulo "Reportlab" (item "canvas", especificamente), focada na criação e manipulação de arquivos PDF dentro do Python from reportlab.pdfgen import canvas #Importando lib/módulo "smtplib" e alguns itens focadas para ajustes e envio de e-mails dentro do Python import smtplib from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from email.mime.base import MIMEBase from email import encoders #=====================================================================================================================================================================================# ''' 1) [5 Pontos] Fazer uma análise no arquivo convidados.txt, verificando seu conteúdo, como as informações estão separadas e organizadas. 
Em seguida, é necessário coletar todos os dados e organizar em um dicionário contendo apenas as seguintes informações nome como chave e telefone como valor. Deverá ser feita uma função que receba umaString (Str) com os dados do arquivo e retorne o dicionário. ''' #=====================================================================================================================================================================================# #Definindo a função que irá ler o arquivo "convidados.txt" (string) e irá retornar um dicionário, onde o nome de cada indivíduo será a 'chave' e o telefone o 'valor'. def func_convidados(txt): #Definição da função convidados = {} #Criação do dicionário em branco txt_linhas = txt.readlines() #Leitura do arquivo .txt (string), o transformando em uma lista for linx in txt_linhas[1:]: #Laço para ler linha por linha, desconsiderando a primeira linha da lista e considerando todo o resto lista = linx.split() #Criação da variável "lista" onde irá remover o espaço (" ") na linha lida, registrando esses novos dados for liny in lista: #Laço para verificar a linha na nova variável "lista" nome,telefone = liny.split('-') #Remoção do hífen/traço ("-") no indíce da lista que está sendo lido convidados[nome] = telefone #Organização das chaves e dos valores no dicionário "convidados" return convidados #Retorna o dicionário "convidados", com os nomes do convidados como chave e os telefones dos convidados como valor #=====================================================================================================================================================================================# ''' 2) [2 Pontos] O dicionário resultante do item anterior deverá ser usado para gerar um pdf [com o formato de uma lista de nomes com seus respectivos telefones] ''' #=====================================================================================================================================================================================# 
#Definindo a função que irá pegar o parametro "lista" e transformar e organizar em um arquivo PDF def func_pdf(lista): #Definição da função doc = canvas.Canvas("Contatos.pdf") #Criando um arquivo PDF com nome "Contatos.pdf", o atribuindo à "doc" doc.setLineWidth(.2) #Estipulando a espessura das linhas no arquivo doc.setFont('Helvetica-Bold', 14) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) doc.drawString(170,700,'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') #Desenhando/escrevendo um simples componente para o visual da página doc.setFont('Helvetica-Bold', 16) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 16) doc.drawString(220,660,'----Lista de Contatos----') #Desenhando/escrevendo um texto-título para a lista de contatos na página doc.setFont('Helvetica-Bold', 12) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica; Tamanho: 12) doc.drawString(250,595,'Nome e Telefone') #Desenhando/escrevendo um texto-subtítulo para a lista de contatos na página y = 580 #Definindo que a variável "y" (eixo) terá o valor "580" ("posição/pontuação" indo ao canto inferior da página) doc.setFont('Helvetica', 12) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) for nome,telefone in lista.items(): #Laço de repetição que irá passar por cada item de "lista" y -= 15 #A primeita etapa do laço é diminuir em 15 pontos/posição o eixo Y ("de cima para baixo") doc.drawString(250,y, '{} : {}'.format(nome,telefone)) #Irá desenhar/escrever na página a chave e o valor lidos (Nome e Telefone) na posição x = 250 e y estipulada acima doc.setFont('Helvetica-Bold', 14) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) doc.drawString(170,y-40,'xxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxxx') #Desenhando/escrevendo um simples componente para o visual da página doc.save() #Salvando todas as modificações do arquivo 
PDF em "doc" return doc #Retorna "doc" com o arquivo pdf (Contatos.pdf) com todas as alterações feitas #=====================================================================================================================================================================================# ''' 3) [1 Ponto] O arquivo pdf gerado deverá ser enviado por e-mail para o seguinte: Endereço: <EMAIL> Assunto: Contatos – Equipe [nome_da_equipe] Conteúdo: Segue os contatos em anexo. Anexar documento ''' #=====================================================================================================================================================================================# def func_email(): #Definição da função fromaddr = "<EMAIL>" #Estipulando o remetente do e-mail toaddr = "<EMAIL>" #Estipulando o destinatário do e-mail msg = MIMEMultipart() #Construindo o "header" do e-mail msg['From'] = fromaddr #Selecionando o remetente msg['To'] = toaddr #Selecionando o destinatário msg['Subject'] = "Contatos - Equipe Gláucio Correia Dutra" #Estipulando o "Assunto" do e-mail body = "Seguem os contatos em anexo!" 
#Conteúdo do "Corpo" do e-mail msg.attach(MIMEText(body, 'plain')) #Inserindo o "body" como texto filename = "Contatos.pdf" #Estipulando o nome do arquivo binário em PDF a ser anexado attachment = open(filename, "rb") #Abrindo (lendo documento binário ("rb")) o arquivo em PDF e vinculando à variável "attachment" part = MIMEBase('application', 'octet-stream') #Processo necessário para codificar o arquivo PDF para anexo - parte 1 part.set_payload((attachment).read()) #Processo necessário para codificar o arquivo PDF para anexo - parte 2 encoders.encode_base64(part) #Processo necessário para codificar o arquivo PDF para anexo - parte 3 part.add_header('Content-Disposition', "attachment; filename= %s" % filename) #Processo necessário para codificar o arquivo PDF para anexo - parte 4 msg.attach(part) #Inserindo o anexo ao e-mail server = smtplib.SMTP('smtp.gmail.com', 587) #Configurando SMTP e porta do GMAIL server.starttls() #Configurando componentes de segurança server.login(fromaddr, "testes789") #Realizando login, selecionando o e-mail e inserindo a senha de acesso text = msg.as_string() #Transformando o conteúdo do e-mail em string server.sendmail(fromaddr, toaddr, text) #Enviando e-mail selecionando o e-mail do remetente, do destinatário e o conteúdo do e-mail server.quit() #Saindo do servidor do e-mail #=====================================================================================================================================================================================#
pt
0.901646
#=====================================================================================================================================================================================# #FUNÇÕES Sistemas para Internet 1º período 2017.2 Projeto Algoritmos e Programação Professor: Lamark Aluno: <NAME> E-mail: <EMAIL> Matricula: 1720020743 Avaliação Terceiro Estágio #=====================================================================================================================================================================================# O projeto do terceiro estágio tem como objetivo pôr em prática o conteúdo aprendido durante todo o desenvolvimento da disciplina Algoritmos e Programação. A linguagem utilizada para criar os itens solicitados deverá ser Python, a partir da versão 3. Este trabalho será composto por 5 partes, que devem ser feitas de forma sequencial e complementar. #=====================================================================================================================================================================================# #Seção de importação de libs/módulos em "fuctions.py", sendo "smtplib" já nativa no Python e "reportlab" instalada pelo "Pip" #Importando lib/módulo "Reportlab" (item "canvas", especificamente), focada na criação e manipulação de arquivos PDF dentro do Python #Importando lib/módulo "smtplib" e alguns itens focadas para ajustes e envio de e-mails dentro do Python #=====================================================================================================================================================================================# 1) [5 Pontos] Fazer uma análise no arquivo convidados.txt, verificando seu conteúdo, como as informações estão separadas e organizadas. Em seguida, é necessário coletar todos os dados e organizar em um dicionário contendo apenas as seguintes informações nome como chave e telefone como valor. 
Deverá ser feita uma função que receba umaString (Str) com os dados do arquivo e retorne o dicionário. #=====================================================================================================================================================================================# #Definindo a função que irá ler o arquivo "convidados.txt" (string) e irá retornar um dicionário, onde o nome de cada indivíduo será a 'chave' e o telefone o 'valor'. #Definição da função #Criação do dicionário em branco #Leitura do arquivo .txt (string), o transformando em uma lista #Laço para ler linha por linha, desconsiderando a primeira linha da lista e considerando todo o resto #Criação da variável "lista" onde irá remover o espaço (" ") na linha lida, registrando esses novos dados #Laço para verificar a linha na nova variável "lista" #Remoção do hífen/traço ("-") no indíce da lista que está sendo lido #Organização das chaves e dos valores no dicionário "convidados" #Retorna o dicionário "convidados", com os nomes do convidados como chave e os telefones dos convidados como valor #=====================================================================================================================================================================================# 2) [2 Pontos] O dicionário resultante do item anterior deverá ser usado para gerar um pdf [com o formato de uma lista de nomes com seus respectivos telefones] #=====================================================================================================================================================================================# #Definindo a função que irá pegar o parametro "lista" e transformar e organizar em um arquivo PDF #Definição da função #Criando um arquivo PDF com nome "Contatos.pdf", o atribuindo à "doc" #Estipulando a espessura das linhas no arquivo #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) #Desenhando/escrevendo um simples componente para o 
visual da página #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 16) #Desenhando/escrevendo um texto-título para a lista de contatos na página #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica; Tamanho: 12) #Desenhando/escrevendo um texto-subtítulo para a lista de contatos na página #Definindo que a variável "y" (eixo) terá o valor "580" ("posição/pontuação" indo ao canto inferior da página) #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) #Laço de repetição que irá passar por cada item de "lista" #A primeita etapa do laço é diminuir em 15 pontos/posição o eixo Y ("de cima para baixo") #Irá desenhar/escrever na página a chave e o valor lidos (Nome e Telefone) na posição x = 250 e y estipulada acima #Definindo o tipo e tamanho da fonte a partir desse momento (Tipo: Helvetica-Bold; Tamanho: 14) #Desenhando/escrevendo um simples componente para o visual da página #Salvando todas as modificações do arquivo PDF em "doc" #Retorna "doc" com o arquivo pdf (Contatos.pdf) com todas as alterações feitas #=====================================================================================================================================================================================# 3) [1 Ponto] O arquivo pdf gerado deverá ser enviado por e-mail para o seguinte: Endereço: <EMAIL> Assunto: Contatos – Equipe [nome_da_equipe] Conteúdo: Segue os contatos em anexo. 
Anexar documento #=====================================================================================================================================================================================# #Definição da função #Estipulando o remetente do e-mail #Estipulando o destinatário do e-mail #Construindo o "header" do e-mail #Selecionando o remetente #Selecionando o destinatário #Estipulando o "Assunto" do e-mail #Conteúdo do "Corpo" do e-mail #Inserindo o "body" como texto #Estipulando o nome do arquivo binário em PDF a ser anexado #Abrindo (lendo documento binário ("rb")) o arquivo em PDF e vinculando à variável "attachment" #Processo necessário para codificar o arquivo PDF para anexo - parte 1 #Processo necessário para codificar o arquivo PDF para anexo - parte 2 #Processo necessário para codificar o arquivo PDF para anexo - parte 3 #Processo necessário para codificar o arquivo PDF para anexo - parte 4 #Inserindo o anexo ao e-mail #Configurando SMTP e porta do GMAIL #Configurando componentes de segurança #Realizando login, selecionando o e-mail e inserindo a senha de acesso #Transformando o conteúdo do e-mail em string #Enviando e-mail selecionando o e-mail do remetente, do destinatário e o conteúdo do e-mail #Saindo do servidor do e-mail #=====================================================================================================================================================================================#
3.489933
3
apps/api/urls.py
MySmile/sfchat
4
6625275
<gh_stars>1-10 from django.conf.urls import * urlpatterns = [ #url(r'', include('apps.api.v2.urls', namespace='default')), url(r'^v1/', include('apps.api.v1.urls', namespace='v1')), ]
from django.conf.urls import * urlpatterns = [ #url(r'', include('apps.api.v2.urls', namespace='default')), url(r'^v1/', include('apps.api.v1.urls', namespace='v1')), ]
tr
0.114962
#url(r'', include('apps.api.v2.urls', namespace='default')),
1.447825
1
charmcraft/commands/init.py
fakela/charmcraft
0
6625276
# Copyright 2020 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # For further info, check https://github.com/canonical/charmcraft import logging import os import pwd import re from datetime import date from pathlib import Path import yaml from jinja2 import Environment, PackageLoader, StrictUndefined from charmcraft.cmdbase import BaseCommand, CommandError from .utils import make_executable logger = logging.getLogger(__name__) _overview = """ Initialize a directory to be a charm project. It will leave a basic structure in the specified directory (the current one by default), including: - a README.md as basic documentation for the project (with some TODOs for you to complete), - a metadata.yaml for the basic Juju-related config (that you also need to customize) - a src/charm.py with a basic structure for your charm - simple config.yaml and actions.yaml files, in case you need them in the charm - a requirement.txt and a requirement-dev.txt files that will hold the Python dependencies (for production and development correspondingly), extend as you need - some example tests in the corresponding directory, and a script to run them. """ class InitCommand(BaseCommand): """Initialize a directory to be a charm project.""" name = "init" help_msg = "Initialize a directory to be a charm project." 
overview = _overview common = True def fill_parser(self, parser): parser.add_argument( "--project-dir", type=Path, default=Path("."), metavar="DIR", dest="path", help="The directory to initialize. Must be empty, or not exist; defaults to '.'.") parser.add_argument( "--name", help="The name of the project; defaults to the directory name.") parser.add_argument( "--author", help="The author of the project;" " defaults to the current user's name as present in the GECOS field.") parser.add_argument( "--series", default="kubernetes", help="The comma-separated list of series this charm will support;" " defaults to 'kubernetes'.") def run(self, args): args.path = args.path.resolve() if args.path.exists(): if not args.path.is_dir(): raise CommandError("{} is not a directory".format(args.path)) if next(args.path.iterdir(), False): raise CommandError("{} is not empty".format(args.path)) logger.debug("Using existing project directory '%s'", args.path) else: logger.debug("Creating project directory '%s'", args.path) args.path.mkdir() if args.author is None: gecos = pwd.getpwuid(os.getuid()).pw_gecos.split(',', 1)[0] if not gecos: raise CommandError("Author not given, and nothing in GECOS field") logger.debug("Setting author to %r from GECOS field", gecos) args.author = gecos if not args.name: args.name = args.path.name logger.debug("Set project name to '%s'", args.name) if not re.match(r"[a-z][a-z0-9-]*[a-z0-9]$", args.name): raise CommandError("{} is not a valid charm name".format(args.name)) context = { "name": args.name, "author": args.author, "year": date.today().year, "class_name": "".join(re.split(r"\W+", args.name.title())) + "Charm", "series": yaml.dump(args.series.split(","), default_flow_style=True), } env = Environment( loader=PackageLoader('charmcraft', 'templates/init'), autoescape=False, # no need to escape things here :-) keep_trailing_newline=True, # they're not text files if they don't end in newline! 
optimized=False, # optimization doesn't make sense for one-offs undefined=StrictUndefined) # fail on undefined _todo_rx = re.compile("TODO: (.*)") todos = [] executables = ["run_tests", "src/charm.py"] for template_name in env.list_templates(): if not template_name.endswith(".j2"): continue template = env.get_template(template_name) template_name = template_name[:-3] logger.debug("Rendering %s", template_name) path = args.path / template_name path.parent.mkdir(parents=True, exist_ok=True) with path.open("wt", encoding="utf8") as fh: out = template.render(context) fh.write(out) for todo in _todo_rx.findall(out): todos.append((template_name, todo)) if template_name in executables: make_executable(fh) logger.debug(" made executable") logger.info("All done.") if todos: logger.info("There are some notes about things we think you should do.") logger.info("These are marked with ‘TODO:’, as is customary. Namely:") w = max(len(i[0]) for i in todos) for fn, todo in todos: logger.info("%*s: %s", w + 2, fn, todo)
# Copyright 2020 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # For further info, check https://github.com/canonical/charmcraft import logging import os import pwd import re from datetime import date from pathlib import Path import yaml from jinja2 import Environment, PackageLoader, StrictUndefined from charmcraft.cmdbase import BaseCommand, CommandError from .utils import make_executable logger = logging.getLogger(__name__) _overview = """ Initialize a directory to be a charm project. It will leave a basic structure in the specified directory (the current one by default), including: - a README.md as basic documentation for the project (with some TODOs for you to complete), - a metadata.yaml for the basic Juju-related config (that you also need to customize) - a src/charm.py with a basic structure for your charm - simple config.yaml and actions.yaml files, in case you need them in the charm - a requirement.txt and a requirement-dev.txt files that will hold the Python dependencies (for production and development correspondingly), extend as you need - some example tests in the corresponding directory, and a script to run them. """ class InitCommand(BaseCommand): """Initialize a directory to be a charm project.""" name = "init" help_msg = "Initialize a directory to be a charm project." 
overview = _overview common = True def fill_parser(self, parser): parser.add_argument( "--project-dir", type=Path, default=Path("."), metavar="DIR", dest="path", help="The directory to initialize. Must be empty, or not exist; defaults to '.'.") parser.add_argument( "--name", help="The name of the project; defaults to the directory name.") parser.add_argument( "--author", help="The author of the project;" " defaults to the current user's name as present in the GECOS field.") parser.add_argument( "--series", default="kubernetes", help="The comma-separated list of series this charm will support;" " defaults to 'kubernetes'.") def run(self, args): args.path = args.path.resolve() if args.path.exists(): if not args.path.is_dir(): raise CommandError("{} is not a directory".format(args.path)) if next(args.path.iterdir(), False): raise CommandError("{} is not empty".format(args.path)) logger.debug("Using existing project directory '%s'", args.path) else: logger.debug("Creating project directory '%s'", args.path) args.path.mkdir() if args.author is None: gecos = pwd.getpwuid(os.getuid()).pw_gecos.split(',', 1)[0] if not gecos: raise CommandError("Author not given, and nothing in GECOS field") logger.debug("Setting author to %r from GECOS field", gecos) args.author = gecos if not args.name: args.name = args.path.name logger.debug("Set project name to '%s'", args.name) if not re.match(r"[a-z][a-z0-9-]*[a-z0-9]$", args.name): raise CommandError("{} is not a valid charm name".format(args.name)) context = { "name": args.name, "author": args.author, "year": date.today().year, "class_name": "".join(re.split(r"\W+", args.name.title())) + "Charm", "series": yaml.dump(args.series.split(","), default_flow_style=True), } env = Environment( loader=PackageLoader('charmcraft', 'templates/init'), autoescape=False, # no need to escape things here :-) keep_trailing_newline=True, # they're not text files if they don't end in newline! 
optimized=False, # optimization doesn't make sense for one-offs undefined=StrictUndefined) # fail on undefined _todo_rx = re.compile("TODO: (.*)") todos = [] executables = ["run_tests", "src/charm.py"] for template_name in env.list_templates(): if not template_name.endswith(".j2"): continue template = env.get_template(template_name) template_name = template_name[:-3] logger.debug("Rendering %s", template_name) path = args.path / template_name path.parent.mkdir(parents=True, exist_ok=True) with path.open("wt", encoding="utf8") as fh: out = template.render(context) fh.write(out) for todo in _todo_rx.findall(out): todos.append((template_name, todo)) if template_name in executables: make_executable(fh) logger.debug(" made executable") logger.info("All done.") if todos: logger.info("There are some notes about things we think you should do.") logger.info("These are marked with ‘TODO:’, as is customary. Namely:") w = max(len(i[0]) for i in todos) for fn, todo in todos: logger.info("%*s: %s", w + 2, fn, todo)
en
0.881597
# Copyright 2020 Canonical Ltd. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # For further info, check https://github.com/canonical/charmcraft Initialize a directory to be a charm project. It will leave a basic structure in the specified directory (the current one by default), including: - a README.md as basic documentation for the project (with some TODOs for you to complete), - a metadata.yaml for the basic Juju-related config (that you also need to customize) - a src/charm.py with a basic structure for your charm - simple config.yaml and actions.yaml files, in case you need them in the charm - a requirement.txt and a requirement-dev.txt files that will hold the Python dependencies (for production and development correspondingly), extend as you need - some example tests in the corresponding directory, and a script to run them. Initialize a directory to be a charm project. # no need to escape things here :-) # they're not text files if they don't end in newline! # optimization doesn't make sense for one-offs # fail on undefined
2.13824
2
isi_mip/climatemodels/management/commands/import_numbers.py
thiasB/isimip
4
6625277
import re import urllib.request import logging from datetime import date from django.core.management.base import BaseCommand from isi_mip.pages.models import HomePage logger = logging.getLogger(__name__) class Command(BaseCommand): help = 'Imports ISIMIP numbers from a textfile link to display on homepage' def get_number(self, url): try: data = urllib.request.urlopen(url) lines = data.read().splitlines() line = lines[len(lines) - 2].decode('utf-8') number = re.search(':(.*)', line).group(1) return number.strip() except Exception as e: print("An error happend while importing from: %s" % url) print(e) return None def handle(self, *args, **options): home_pages = HomePage.objects.live() for home_page in home_pages: number1 = None number2 = None if home_page.number1_link: number1 = self.get_number(home_page.number1_link) if number1: print('%s: imported number %s' % (home_page.number1_link, number1)) home_page.number1_imported_number = number1 if home_page.number2_link: number2 = self.get_number(home_page.number2_link) if number2: print('%s: imported number %s' % (home_page.number2_link, number2)) home_page.number2_imported_number = number2 if number1 or number2: home_page.save() else: print('nothing imported')
import re import urllib.request import logging from datetime import date from django.core.management.base import BaseCommand from isi_mip.pages.models import HomePage logger = logging.getLogger(__name__) class Command(BaseCommand): help = 'Imports ISIMIP numbers from a textfile link to display on homepage' def get_number(self, url): try: data = urllib.request.urlopen(url) lines = data.read().splitlines() line = lines[len(lines) - 2].decode('utf-8') number = re.search(':(.*)', line).group(1) return number.strip() except Exception as e: print("An error happend while importing from: %s" % url) print(e) return None def handle(self, *args, **options): home_pages = HomePage.objects.live() for home_page in home_pages: number1 = None number2 = None if home_page.number1_link: number1 = self.get_number(home_page.number1_link) if number1: print('%s: imported number %s' % (home_page.number1_link, number1)) home_page.number1_imported_number = number1 if home_page.number2_link: number2 = self.get_number(home_page.number2_link) if number2: print('%s: imported number %s' % (home_page.number2_link, number2)) home_page.number2_imported_number = number2 if number1 or number2: home_page.save() else: print('nothing imported')
none
1
2.357985
2
authenticator.py
Fedalto/alfred-authenticator
0
6625278
from pyotp import TOTP from workflow.notify import notify def list_tokens(keychain, wf): for service, secret_key in keychain.iteritems(): token = TOTP(secret_key).now() _add_workflow_item(wf, service, token) wf.send_feedback() def add_new_service(keychain, wf, service, secret_key): wf.logger.debug("secret_key = %s", secret_key) try: TOTP(secret_key).now() except TypeError as e: error_msg = u"Error adding %s: %s" % (service, e.message) wf.logger.error(error_msg) notify(u"Error adding %s" % service, e.message) raise if service in keychain: error_msg = u"Duplicate service name: %s" % service wf.logger.error(error_msg) notify("Error adding %s" % service, error_msg) raise ValueError(error_msg) keychain[service] = secret_key keychain.save() wf.logger.info(u"Added %s", service) def _add_workflow_item(wf, service, token): wf.add_item( title=service, subtitle=token, valid=True, arg=token, copytext=token, largetext=token, )
from pyotp import TOTP from workflow.notify import notify def list_tokens(keychain, wf): for service, secret_key in keychain.iteritems(): token = TOTP(secret_key).now() _add_workflow_item(wf, service, token) wf.send_feedback() def add_new_service(keychain, wf, service, secret_key): wf.logger.debug("secret_key = %s", secret_key) try: TOTP(secret_key).now() except TypeError as e: error_msg = u"Error adding %s: %s" % (service, e.message) wf.logger.error(error_msg) notify(u"Error adding %s" % service, e.message) raise if service in keychain: error_msg = u"Duplicate service name: %s" % service wf.logger.error(error_msg) notify("Error adding %s" % service, error_msg) raise ValueError(error_msg) keychain[service] = secret_key keychain.save() wf.logger.info(u"Added %s", service) def _add_workflow_item(wf, service, token): wf.add_item( title=service, subtitle=token, valid=True, arg=token, copytext=token, largetext=token, )
none
1
2.268305
2
sahara/tests/scenario_unit/test_runner.py
redhat-openstack/sahara
0
6625279
<reponame>redhat-openstack/sahara # Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import mock import testtools from sahara.tests.scenario import runner class RunnerUnitTest(testtools.TestCase): def _isDictContainSubset(self, sub_dictionary, dictionary): for key in sub_dictionary: if sub_dictionary[key] != dictionary[key]: return False return True def test_set_defaults(self): config_without_cred_net = { "clusters": [ { "plugin_name": "vanilla", "plugin_version": "2.6.0", "image": "sahara-vanilla-2.6.0-ubuntu-14.04" }], } expected_default_credential = { "credentials": { "os_username": "admin", "os_auth_url": "http://localhost:5000/v2.0", "sahara_url": None, "os_password": "<PASSWORD>", "os_tenant": "admin" } } expected_default_network = { "network": { "type": "neutron", "private_network": "private", "public_network": "public", "auto_assignment_floating_ip": False }, } expected_default_cluster = { "clusters": [ { "image": "sahara-vanilla-2.6.0-ubuntu-14.04", "edp_jobs_flow": None, "class_name": "vanilla2_6_0", "plugin_name": "vanilla", "scenario": ['run_jobs', 'scale', 'run_jobs'], "plugin_version": "2.6.0", "retain_resources": False }], } runner.set_defaults(config_without_cred_net) self.assertTrue(self._isDictContainSubset( expected_default_credential, config_without_cred_net)) self.assertTrue(self._isDictContainSubset( expected_default_network, config_without_cred_net)) self.assertTrue(self._isDictContainSubset( 
expected_default_cluster, config_without_cred_net)) config = { "credentials": { "os_username": "changed_admin", "os_auth_url": "http://127.0.0.1:5000/v2.0", "sahara_url": "http://127.0.0.1", "os_password": "<PASSWORD>", "os_tenant": "changed_admin" }, "network": { "type": "neutron", "private_network": "changed_private", "public_network": "changed_public", "auto_assignment_floating_ip": True, }, "clusters": [ { "plugin_name": "vanilla", "plugin_version": "2.6.0", "image": "sahara-vanilla-2.6.0-ubuntu-14.04", "edp_jobs_flow": "test_flow", "retain_resources": True }], "edp_jobs_flow": { "test_flow": [ { "type": "Pig", "input_datasource": { "type": "swift", "source": "etc/edp-examples/edp-pig/top-todoers/" "data/input" }, "output_datasource": { "type": "hdfs", "destination": "/user/hadoop/edp-output" }, "main_lib": { "type": "swift", "source": "etc/edp-examples/edp-pig/top-todoers/" "example.pig" } }, { "type": "Java", "additional_libs": [ { "type": "database", "source": "sahara/tests/integration/tests/" "resources/" }], "configs": "edp.java.main_class: org.apache.hadoop." 
"examples.QuasiMonteCarlo", "args": [10, 10] }, ], }, } expected_credential = { "credentials": { "os_username": "changed_admin", "os_auth_url": "http://127.0.0.1:5000/v2.0", "sahara_url": "http://127.0.0.1", "os_password": "<PASSWORD>", "os_tenant": "changed_admin" }, } expected_network = { "network": { "type": "neutron", "private_network": "changed_private", "public_network": "changed_public", "auto_assignment_floating_ip": True, }, } expected_cluster = { "clusters": [ { "plugin_name": "vanilla", "plugin_version": "2.6.0", "image": "sahara-vanilla-2.6.0-ubuntu-14.04", "retain_resources": True, 'edp_jobs_flow': [ { 'main_lib': { 'source': 'etc/edp-examples/edp-pig/' 'top-todoers/example.pig', 'type': 'swift' }, 'type': 'Pig', 'input_datasource': { 'source': 'etc/edp-examples/edp-pig/' 'top-todoers/data/input', 'type': 'swift' }, 'output_datasource': { 'type': 'hdfs', 'destination': '/user/hadoop/edp-output' } }, { 'args': [10, 10], 'configs': 'edp.java.main_class: org.apache.' 'hadoop.examples.QuasiMonteCarlo', 'type': 'Java', 'additional_libs': [ { 'source': 'sahara/tests/integration/' 'tests/resources/', 'type': 'database' }] } ], "scenario": ['run_jobs', 'scale', 'run_jobs'], "class_name": "vanilla2_6_0" }], } runner.set_defaults(config) self.assertTrue(self._isDictContainSubset( expected_credential, config)) self.assertTrue(self._isDictContainSubset( expected_network, config)) self.assertTrue(self._isDictContainSubset( expected_cluster, config)) @mock.patch('sys.exit', return_value=None) @mock.patch('os.system', return_value=None) def test_runner_main(self, mock_os, mock_sys): sys.argv = ['sahara/tests/scenario/runner.py', 'sahara/tests/scenario_unit/vanilla2_6_0.yaml'] runner.main()
# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License. import sys import mock import testtools from sahara.tests.scenario import runner class RunnerUnitTest(testtools.TestCase): def _isDictContainSubset(self, sub_dictionary, dictionary): for key in sub_dictionary: if sub_dictionary[key] != dictionary[key]: return False return True def test_set_defaults(self): config_without_cred_net = { "clusters": [ { "plugin_name": "vanilla", "plugin_version": "2.6.0", "image": "sahara-vanilla-2.6.0-ubuntu-14.04" }], } expected_default_credential = { "credentials": { "os_username": "admin", "os_auth_url": "http://localhost:5000/v2.0", "sahara_url": None, "os_password": "<PASSWORD>", "os_tenant": "admin" } } expected_default_network = { "network": { "type": "neutron", "private_network": "private", "public_network": "public", "auto_assignment_floating_ip": False }, } expected_default_cluster = { "clusters": [ { "image": "sahara-vanilla-2.6.0-ubuntu-14.04", "edp_jobs_flow": None, "class_name": "vanilla2_6_0", "plugin_name": "vanilla", "scenario": ['run_jobs', 'scale', 'run_jobs'], "plugin_version": "2.6.0", "retain_resources": False }], } runner.set_defaults(config_without_cred_net) self.assertTrue(self._isDictContainSubset( expected_default_credential, config_without_cred_net)) self.assertTrue(self._isDictContainSubset( expected_default_network, config_without_cred_net)) self.assertTrue(self._isDictContainSubset( expected_default_cluster, config_without_cred_net)) 
config = { "credentials": { "os_username": "changed_admin", "os_auth_url": "http://127.0.0.1:5000/v2.0", "sahara_url": "http://127.0.0.1", "os_password": "<PASSWORD>", "os_tenant": "changed_admin" }, "network": { "type": "neutron", "private_network": "changed_private", "public_network": "changed_public", "auto_assignment_floating_ip": True, }, "clusters": [ { "plugin_name": "vanilla", "plugin_version": "2.6.0", "image": "sahara-vanilla-2.6.0-ubuntu-14.04", "edp_jobs_flow": "test_flow", "retain_resources": True }], "edp_jobs_flow": { "test_flow": [ { "type": "Pig", "input_datasource": { "type": "swift", "source": "etc/edp-examples/edp-pig/top-todoers/" "data/input" }, "output_datasource": { "type": "hdfs", "destination": "/user/hadoop/edp-output" }, "main_lib": { "type": "swift", "source": "etc/edp-examples/edp-pig/top-todoers/" "example.pig" } }, { "type": "Java", "additional_libs": [ { "type": "database", "source": "sahara/tests/integration/tests/" "resources/" }], "configs": "edp.java.main_class: org.apache.hadoop." 
"examples.QuasiMonteCarlo", "args": [10, 10] }, ], }, } expected_credential = { "credentials": { "os_username": "changed_admin", "os_auth_url": "http://127.0.0.1:5000/v2.0", "sahara_url": "http://127.0.0.1", "os_password": "<PASSWORD>", "os_tenant": "changed_admin" }, } expected_network = { "network": { "type": "neutron", "private_network": "changed_private", "public_network": "changed_public", "auto_assignment_floating_ip": True, }, } expected_cluster = { "clusters": [ { "plugin_name": "vanilla", "plugin_version": "2.6.0", "image": "sahara-vanilla-2.6.0-ubuntu-14.04", "retain_resources": True, 'edp_jobs_flow': [ { 'main_lib': { 'source': 'etc/edp-examples/edp-pig/' 'top-todoers/example.pig', 'type': 'swift' }, 'type': 'Pig', 'input_datasource': { 'source': 'etc/edp-examples/edp-pig/' 'top-todoers/data/input', 'type': 'swift' }, 'output_datasource': { 'type': 'hdfs', 'destination': '/user/hadoop/edp-output' } }, { 'args': [10, 10], 'configs': 'edp.java.main_class: org.apache.' 'hadoop.examples.QuasiMonteCarlo', 'type': 'Java', 'additional_libs': [ { 'source': 'sahara/tests/integration/' 'tests/resources/', 'type': 'database' }] } ], "scenario": ['run_jobs', 'scale', 'run_jobs'], "class_name": "vanilla2_6_0" }], } runner.set_defaults(config) self.assertTrue(self._isDictContainSubset( expected_credential, config)) self.assertTrue(self._isDictContainSubset( expected_network, config)) self.assertTrue(self._isDictContainSubset( expected_cluster, config)) @mock.patch('sys.exit', return_value=None) @mock.patch('os.system', return_value=None) def test_runner_main(self, mock_os, mock_sys): sys.argv = ['sahara/tests/scenario/runner.py', 'sahara/tests/scenario_unit/vanilla2_6_0.yaml'] runner.main()
en
0.851104
# Copyright (c) 2015 Mirantis Inc. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or # implied. # See the License for the specific language governing permissions and # limitations under the License.
1.537484
2
demo/demoproject/tests.py
Maplecroft/django-chartjs
0
6625280
"""Unit tests for chartjs api.""" import json from django.test import TestCase from django.core.urlresolvers import reverse from demoproject._compat import decode class LineChartJSTestCase(TestCase): def test_line_chartjs(self): resp = self.client.get(reverse('line_chart')) self.assertContains(resp, 'Chart.min.js') def test_list_chartjs_json(self): resp = self.client.get(reverse('line_chart_json')) try: data = json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) self.assertIn('datasets', data) self.assertNotIn('series', data) class ColorTestCase(TestCase): def test_colorview(self): resp = self.client.get(reverse('colors')) self.assertContains(resp, '100px') class HighChartJSTestCase(TestCase): def test_column_chartjs_json(self): resp = self.client.get(reverse('column_highchart_json')) try: data = json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) self.assertIn('title', data) self.assertIn('text', data['title']) self.assertEqual(data['title']['text'], 'Column Highchart test') self.assertIn('credits', data) credits = data['credits'] self.assertEqual(credits['enabled'], False) def test_list_chartjs_json(self): resp = self.client.get(reverse('line_highchart_json')) try: data = json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) self.assertIn('series', data) self.assertNotIn('datasets', data) self.assertIn('credits', data) credits = data['credits'] self.assertEqual(credits['enabled'], True) self.assertEqual(credits['href'], 'http://example.com') self.assertEqual(credits['text'], 'Novapost Team') def test_pie_chartjs_json(self): resp = self.client.get(reverse('pie_highchart_json')) try: json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) def test_donut_chartjs_json(self): resp = self.client.get(reverse('donut_highchart_json')) try: 
json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content)
"""Unit tests for chartjs api.""" import json from django.test import TestCase from django.core.urlresolvers import reverse from demoproject._compat import decode class LineChartJSTestCase(TestCase): def test_line_chartjs(self): resp = self.client.get(reverse('line_chart')) self.assertContains(resp, 'Chart.min.js') def test_list_chartjs_json(self): resp = self.client.get(reverse('line_chart_json')) try: data = json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) self.assertIn('datasets', data) self.assertNotIn('series', data) class ColorTestCase(TestCase): def test_colorview(self): resp = self.client.get(reverse('colors')) self.assertContains(resp, '100px') class HighChartJSTestCase(TestCase): def test_column_chartjs_json(self): resp = self.client.get(reverse('column_highchart_json')) try: data = json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) self.assertIn('title', data) self.assertIn('text', data['title']) self.assertEqual(data['title']['text'], 'Column Highchart test') self.assertIn('credits', data) credits = data['credits'] self.assertEqual(credits['enabled'], False) def test_list_chartjs_json(self): resp = self.client.get(reverse('line_highchart_json')) try: data = json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) self.assertIn('series', data) self.assertNotIn('datasets', data) self.assertIn('credits', data) credits = data['credits'] self.assertEqual(credits['enabled'], True) self.assertEqual(credits['href'], 'http://example.com') self.assertEqual(credits['text'], 'Novapost Team') def test_pie_chartjs_json(self): resp = self.client.get(reverse('pie_highchart_json')) try: json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content) def test_donut_chartjs_json(self): resp = self.client.get(reverse('donut_highchart_json')) try: 
json.loads(decode(resp.content)) except ValueError: self.fail("%r is not valid json" % self.resp.content)
en
0.551535
Unit tests for chartjs api.
2.75864
3
pytorch/AutoEncoderCIFAR10.py
quickgrid/CodeLab
0
6625281
<reponame>quickgrid/CodeLab """ References: https://www.kaggle.com/ljlbarbosa/convolution-autoencoder-pytorch """ import gc import torch from torch import nn from torchvision import datasets, transforms from torch.utils.data import DataLoader import matplotlib.pyplot as plt import numpy as np torch.manual_seed(17) np.random.seed(0) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class Encoder(nn.Module): def __init__(self, latent_channel_dim): super(Encoder, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.conv2 = nn.Conv2d(in_channels=16, out_channels=latent_channel_dim, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.pool1 = nn.MaxPool2d(2, stride=2) self.act1 = nn.ReLU() def forward(self, x): x = self.conv1(x) x = self.act1(x) x = self.pool1(x) x = self.conv2(x) x = self.act1(x) x = self.pool1(x) return x class Decoder(nn.Module): def __init__(self, latent_channel_dim): super(Decoder, self).__init__() self.t_conv1 = nn.ConvTranspose2d(in_channels=latent_channel_dim, out_channels=16, kernel_size=(2, 2), stride=(2, 2)) self.t_conv2 = nn.ConvTranspose2d(in_channels=16, out_channels=3, kernel_size=(2, 2), stride=(2, 2)) self.act1 = nn.ReLU() self.act2 = nn.Sigmoid() def forward(self, x): x = self.t_conv1(x) x = self.act1(x) x = self.t_conv2(x) x = self.act2(x) return x class AutoEncoder(nn.Module): def __init__(self, latent_channel_dim): super(AutoEncoder, self).__init__() self.encoder = Encoder(latent_channel_dim=latent_channel_dim).to(device) self.decoder = Decoder(latent_channel_dim=latent_channel_dim).to(device) def forward(self, x): x = self.encoder(x) x = self.decoder(x) return x def test_autoencoder(input_data_shape, latent_channel_dim): random_data = torch.randn(input_data_shape).to(device) autoencoder = AutoEncoder(latent_channel_dim=latent_channel_dim).to(device) result = autoencoder(random_data) print(result.shape) del random_data del autoencoder 
assert result.shape == input_data_shape, F"Output of decoder must match original image dimensions." def test_decoder_output_channels(input_data_shape, latent_channel_dim): random_data = torch.randn(input_data_shape).to(device) encoder1 = Encoder(latent_channel_dim=latent_channel_dim).to(device) result = encoder1(random_data) print(result.shape) random_data = torch.randn(result.shape).to(device) decoder1 = Decoder(latent_channel_dim= latent_channel_dim).to(device) result = decoder1(random_data) print(result.shape) del random_data del encoder1 del decoder1 assert result.shape == input_data_shape, F"Output of decoder must match original image dimensions." def test_encoder_output_channels(input_data_shape, latent_channel_dim): random_data = torch.randn(input_data_shape).to(device) encoder1 = Encoder(latent_channel_dim=latent_channel_dim).to(device) result = encoder1(random_data) print(result.shape) del random_data del encoder1 assert result.shape[1] == latent_channel_dim, F"Channel dimension should be {latent_channel_dim}." 
def print_model_details(model): print(model) def test_models(): latent_channel_dim = 4 test_encoder_output_channels(input_data_shape=(1, 3, 28, 28), latent_channel_dim=latent_channel_dim) test_encoder_output_channels(input_data_shape=(8, 3, 200, 200), latent_channel_dim=latent_channel_dim) test_decoder_output_channels(input_data_shape=(1, 3, 28, 28), latent_channel_dim=latent_channel_dim) test_decoder_output_channels(input_data_shape=(8, 3, 200, 200), latent_channel_dim=latent_channel_dim) test_autoencoder(input_data_shape=(1, 3, 28, 28), latent_channel_dim=latent_channel_dim) test_autoencoder(input_data_shape=(8, 3, 200, 200), latent_channel_dim=latent_channel_dim) print_model_details(AutoEncoder(latent_channel_dim)) gc.collect() torch.cuda.empty_cache() print(gc.get_count()) print(gc.get_stats()) def main(): latent_channel_dim = 4 n_epochs = 100 num_workers = 0 batch_size = 32 transform = transforms.ToTensor() train_data = datasets.CIFAR10(root='data', train=True, transform=transform, download=True) test_data = datasets.CIFAR10(root='data', train=False, transform=transform, download=True) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) def imshow(img): img = img / 2 + 0.5 # unnormalize plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() fig = plt.figure(figsize=(25, 4)) for idx in np.arange(20): ax = fig.add_subplot(2, 20 / 2, idx + 1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title(classes[labels[idx]]) plt.show() model = AutoEncoder(latent_channel_dim).to(device) criterion = nn.BCELoss().to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.001) for epoch in 
range(n_epochs): train_loss = 0.0 chosen_output = None chosen_image = None for data in train_loader: images, _ = data images = images.to(device) # Zero out previous grads, forward pass, calculate loss, backpropagate, optimize optimizer.zero_grad() outputs = model(images) chosen_output = outputs[0] chosen_image = images[0] loss = criterion(outputs, images) loss.backward() optimizer.step() train_loss += loss.item() * images.size(0) train_loss = train_loss / len(train_loader) print('Epoch: {} \tTraining Loss: {:.6f}'.format( epoch, train_loss )) # Training is blocked on non notebook until window closed if epoch % 10 == 0: fig, ax = plt.subplots(1, 2) ax[0].imshow(np.transpose(chosen_output.detach().cpu().numpy(), (1, 2, 0))) ax[1].imshow(np.transpose(chosen_image.detach().cpu().numpy(), (1, 2, 0))) plt.show() if __name__ == '__main__': main()
""" References: https://www.kaggle.com/ljlbarbosa/convolution-autoencoder-pytorch """ import gc import torch from torch import nn from torchvision import datasets, transforms from torch.utils.data import DataLoader import matplotlib.pyplot as plt import numpy as np torch.manual_seed(17) np.random.seed(0) device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') class Encoder(nn.Module): def __init__(self, latent_channel_dim): super(Encoder, self).__init__() self.conv1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.conv2 = nn.Conv2d(in_channels=16, out_channels=latent_channel_dim, kernel_size=(3, 3), stride=(1, 1), padding=(1, 1)) self.pool1 = nn.MaxPool2d(2, stride=2) self.act1 = nn.ReLU() def forward(self, x): x = self.conv1(x) x = self.act1(x) x = self.pool1(x) x = self.conv2(x) x = self.act1(x) x = self.pool1(x) return x class Decoder(nn.Module): def __init__(self, latent_channel_dim): super(Decoder, self).__init__() self.t_conv1 = nn.ConvTranspose2d(in_channels=latent_channel_dim, out_channels=16, kernel_size=(2, 2), stride=(2, 2)) self.t_conv2 = nn.ConvTranspose2d(in_channels=16, out_channels=3, kernel_size=(2, 2), stride=(2, 2)) self.act1 = nn.ReLU() self.act2 = nn.Sigmoid() def forward(self, x): x = self.t_conv1(x) x = self.act1(x) x = self.t_conv2(x) x = self.act2(x) return x class AutoEncoder(nn.Module): def __init__(self, latent_channel_dim): super(AutoEncoder, self).__init__() self.encoder = Encoder(latent_channel_dim=latent_channel_dim).to(device) self.decoder = Decoder(latent_channel_dim=latent_channel_dim).to(device) def forward(self, x): x = self.encoder(x) x = self.decoder(x) return x def test_autoencoder(input_data_shape, latent_channel_dim): random_data = torch.randn(input_data_shape).to(device) autoencoder = AutoEncoder(latent_channel_dim=latent_channel_dim).to(device) result = autoencoder(random_data) print(result.shape) del random_data del autoencoder assert result.shape == 
input_data_shape, F"Output of decoder must match original image dimensions." def test_decoder_output_channels(input_data_shape, latent_channel_dim): random_data = torch.randn(input_data_shape).to(device) encoder1 = Encoder(latent_channel_dim=latent_channel_dim).to(device) result = encoder1(random_data) print(result.shape) random_data = torch.randn(result.shape).to(device) decoder1 = Decoder(latent_channel_dim= latent_channel_dim).to(device) result = decoder1(random_data) print(result.shape) del random_data del encoder1 del decoder1 assert result.shape == input_data_shape, F"Output of decoder must match original image dimensions." def test_encoder_output_channels(input_data_shape, latent_channel_dim): random_data = torch.randn(input_data_shape).to(device) encoder1 = Encoder(latent_channel_dim=latent_channel_dim).to(device) result = encoder1(random_data) print(result.shape) del random_data del encoder1 assert result.shape[1] == latent_channel_dim, F"Channel dimension should be {latent_channel_dim}." 
def print_model_details(model): print(model) def test_models(): latent_channel_dim = 4 test_encoder_output_channels(input_data_shape=(1, 3, 28, 28), latent_channel_dim=latent_channel_dim) test_encoder_output_channels(input_data_shape=(8, 3, 200, 200), latent_channel_dim=latent_channel_dim) test_decoder_output_channels(input_data_shape=(1, 3, 28, 28), latent_channel_dim=latent_channel_dim) test_decoder_output_channels(input_data_shape=(8, 3, 200, 200), latent_channel_dim=latent_channel_dim) test_autoencoder(input_data_shape=(1, 3, 28, 28), latent_channel_dim=latent_channel_dim) test_autoencoder(input_data_shape=(8, 3, 200, 200), latent_channel_dim=latent_channel_dim) print_model_details(AutoEncoder(latent_channel_dim)) gc.collect() torch.cuda.empty_cache() print(gc.get_count()) print(gc.get_stats()) def main(): latent_channel_dim = 4 n_epochs = 100 num_workers = 0 batch_size = 32 transform = transforms.ToTensor() train_data = datasets.CIFAR10(root='data', train=True, transform=transform, download=True) test_data = datasets.CIFAR10(root='data', train=False, transform=transform, download=True) train_loader = torch.utils.data.DataLoader(train_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) test_loader = torch.utils.data.DataLoader(test_data, batch_size=batch_size, shuffle=True, num_workers=num_workers) def imshow(img): img = img / 2 + 0.5 # unnormalize plt.imshow(np.transpose(img, (1, 2, 0))) # convert from Tensor image classes = ['airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck'] dataiter = iter(train_loader) images, labels = dataiter.next() images = images.numpy() fig = plt.figure(figsize=(25, 4)) for idx in np.arange(20): ax = fig.add_subplot(2, 20 / 2, idx + 1, xticks=[], yticks=[]) imshow(images[idx]) ax.set_title(classes[labels[idx]]) plt.show() model = AutoEncoder(latent_channel_dim).to(device) criterion = nn.BCELoss().to(device) optimizer = torch.optim.Adam(model.parameters(), lr=0.001) for epoch in 
range(n_epochs): train_loss = 0.0 chosen_output = None chosen_image = None for data in train_loader: images, _ = data images = images.to(device) # Zero out previous grads, forward pass, calculate loss, backpropagate, optimize optimizer.zero_grad() outputs = model(images) chosen_output = outputs[0] chosen_image = images[0] loss = criterion(outputs, images) loss.backward() optimizer.step() train_loss += loss.item() * images.size(0) train_loss = train_loss / len(train_loader) print('Epoch: {} \tTraining Loss: {:.6f}'.format( epoch, train_loss )) # Training is blocked on non notebook until window closed if epoch % 10 == 0: fig, ax = plt.subplots(1, 2) ax[0].imshow(np.transpose(chosen_output.detach().cpu().numpy(), (1, 2, 0))) ax[1].imshow(np.transpose(chosen_image.detach().cpu().numpy(), (1, 2, 0))) plt.show() if __name__ == '__main__': main()
en
0.806971
References: https://www.kaggle.com/ljlbarbosa/convolution-autoencoder-pytorch # unnormalize # convert from Tensor image # Zero out previous grads, forward pass, calculate loss, backpropagate, optimize # Training is blocked on non notebook until window closed
2.740201
3
neutron/services/segments/db.py
brandonlogan/neutron
0
6625282
# Copyright 2016 Hewlett Packard Enterprise Development, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers from oslo_utils import uuidutils from sqlalchemy.orm import exc from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import _deprecate from neutron.db import api as db_api from neutron.db import common_db_mixin from neutron.db.models import segment as segment_model from neutron.db import segments_db as db from neutron.extensions import segment as extension from neutron import manager from neutron.services.segments import exceptions _deprecate._moved_global('SegmentHostMapping', new_module=segment_model) class SegmentDbMixin(common_db_mixin.CommonDbMixin): """Mixin class to add segment.""" def _make_segment_dict(self, segment_db, fields=None): res = {'id': segment_db['id'], 'network_id': segment_db['network_id'], 'name': segment_db['name'], 'description': segment_db['description'], db.PHYSICAL_NETWORK: segment_db[db.PHYSICAL_NETWORK], db.NETWORK_TYPE: segment_db[db.NETWORK_TYPE], db.SEGMENTATION_ID: segment_db[db.SEGMENTATION_ID], 'hosts': [mapping.host for mapping in segment_db.segment_host_mapping], 'segment_index': segment_db['segment_index']} return self._fields(res, fields) def 
_get_segment(self, context, segment_id): try: return self._get_by_id( context, segment_model.NetworkSegment, segment_id) except exc.NoResultFound: raise exceptions.SegmentNotFound(segment_id=segment_id) @log_helpers.log_method_call def create_segment(self, context, segment): """Create a segment.""" segment = segment['segment'] segment_id = segment.get('id') or uuidutils.generate_uuid() try: new_segment = self._create_segment_db(context, segment_id, segment) except db_exc.DBReferenceError: raise n_exc.NetworkNotFound(net_id=segment['network_id']) registry.notify(resources.SEGMENT, events.AFTER_CREATE, self, context=context, segment=new_segment) return self._make_segment_dict(new_segment) def _create_segment_db(self, context, segment_id, segment): with context.session.begin(subtransactions=True): network_id = segment['network_id'] physical_network = segment[extension.PHYSICAL_NETWORK] if physical_network == constants.ATTR_NOT_SPECIFIED: physical_network = None network_type = segment[extension.NETWORK_TYPE] segmentation_id = segment[extension.SEGMENTATION_ID] if segmentation_id == constants.ATTR_NOT_SPECIFIED: segmentation_id = None name = segment['name'] if name == constants.ATTR_NOT_SPECIFIED: name = None description = segment['description'] if description == constants.ATTR_NOT_SPECIFIED: description = None args = {'id': segment_id, 'network_id': network_id, 'name': name, 'description': description, db.PHYSICAL_NETWORK: physical_network, db.NETWORK_TYPE: network_type, db.SEGMENTATION_ID: segmentation_id} # Calculate the index of segment segment_index = 0 segments = self.get_segments( context, filters={'network_id': [network_id]}, fields=['segment_index'], sorts=[('segment_index', True)]) if segments: # NOTE(xiaohhui): The new index is the last index + 1, this # may cause discontinuous segment_index. But segment_index # can functionally work as the order index for segments. 
segment_index = (segments[-1].get('segment_index') + 1) args['segment_index'] = segment_index new_segment = segment_model.NetworkSegment(**args) context.session.add(new_segment) # Do some preliminary operations before committing the segment to # db registry.notify(resources.SEGMENT, events.PRECOMMIT_CREATE, self, context=context, segment=new_segment) return new_segment @log_helpers.log_method_call def update_segment(self, context, uuid, segment): """Update an existing segment.""" segment = segment['segment'] with context.session.begin(subtransactions=True): curr_segment = self._get_segment(context, uuid) curr_segment.update(segment) return self._make_segment_dict(curr_segment) @log_helpers.log_method_call def get_segment(self, context, uuid, fields=None): segment_db = self._get_segment(context, uuid) return self._make_segment_dict(segment_db, fields) @log_helpers.log_method_call def get_segments(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'segment', limit, marker) make_segment_dict = functools.partial(self._make_segment_dict) return self._get_collection(context, segment_model.NetworkSegment, make_segment_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) @log_helpers.log_method_call def get_segments_count(self, context, filters=None): return self._get_collection_count(context, segment_model.NetworkSegment, filters=filters) @log_helpers.log_method_call def get_segments_by_hosts(self, context, hosts): if not hosts: return [] query = context.session.query( segment_model.SegmentHostMapping).filter( segment_model.SegmentHostMapping.host.in_(hosts)) return list({mapping.segment_id for mapping in query}) @log_helpers.log_method_call def delete_segment(self, context, uuid): """Delete an existing segment.""" segment = self.get_segment(context, uuid) # Do some preliminary operations before deleting the segment 
registry.notify(resources.SEGMENT, events.BEFORE_DELETE, self.delete_segment, context=context, segment=segment) # Delete segment in DB with context.session.begin(subtransactions=True): query = self._model_query(context, segment_model.NetworkSegment) query = query.filter(segment_model.NetworkSegment.id == uuid) if 0 == query.delete(): raise exceptions.SegmentNotFound(segment_id=uuid) # Do some preliminary operations before deleting segment in db registry.notify(resources.SEGMENT, events.PRECOMMIT_DELETE, self.delete_segment, context=context, segment=segment) registry.notify(resources.SEGMENT, events.AFTER_DELETE, self.delete_segment, context=context, segment=segment) def update_segment_host_mapping(context, host, current_segment_ids): with context.session.begin(subtransactions=True): segments_host_query = context.session.query( segment_model.SegmentHostMapping).filter_by(host=host) previous_segment_ids = { seg_host['segment_id'] for seg_host in segments_host_query} for segment_id in current_segment_ids - previous_segment_ids: context.session.add(segment_model.SegmentHostMapping( segment_id=segment_id, host=host)) stale_segment_ids = previous_segment_ids - current_segment_ids if stale_segment_ids: segments_host_query.filter( segment_model.SegmentHostMapping.segment_id.in_( stale_segment_ids)).delete(synchronize_session=False) def get_hosts_mapped_with_segments(context): """Get hosts that are mapped with segments. L2 providers can use this method to get an overview of SegmentHostMapping, and then delete the stale SegmentHostMapping. 
""" query = context.session.query(segment_model.SegmentHostMapping.host) return {row.host for row in query} def _get_phys_nets(agent): configurations_dict = agent.get('configurations', {}) mappings = configurations_dict.get('bridge_mappings', {}) mappings.update(configurations_dict.get('interface_mappings', {})) mappings.update(configurations_dict.get('device_mappings', {})) return mappings.keys() reported_hosts = set() # NOTE: Module level variable of segments plugin. It should be removed once # segments becomes a default plugin. segments_plugin = None def get_segments_with_phys_nets(context, phys_nets): """Get segments from physical networks. L2 providers usually have information of hostname and physical networks. They could use this method to get related segments and then update SegmentHostMapping. """ if not phys_nets: return [] with context.session.begin(subtransactions=True): segments = context.session.query(segment_model.NetworkSegment).filter( segment_model.NetworkSegment.physical_network.in_(phys_nets)) return segments def map_segment_to_hosts(context, segment_id, hosts): """Map segment to a collection of hosts.""" with db_api.autonested_transaction(context.session): for host in hosts: context.session.add( segment_model.SegmentHostMapping(segment_id=segment_id, host=host)) def _update_segment_host_mapping_for_agent(resource, event, trigger, context, host, plugin, agent): check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None) if not check_segment_for_agent: return phys_nets = _get_phys_nets(agent) if not phys_nets: return start_flag = agent.get('start_flag', None) if host in reported_hosts and not start_flag: return reported_hosts.add(host) segments = get_segments_with_phys_nets(context, phys_nets) current_segment_ids = { segment['id'] for segment in segments if check_segment_for_agent(segment, agent)} update_segment_host_mapping(context, host, current_segment_ids) def _add_segment_host_mapping_for_segment(resource, event, trigger, 
context, segment): if not context.session.is_active: # The session might be in partial rollback state, due to errors in # peer callback. In that case, there is no need to add the mapping. # Just return here. return if not segment.physical_network: return cp = manager.NeutronManager.get_plugin() check_segment_for_agent = getattr(cp, 'check_segment_for_agent', None) if not hasattr(cp, 'get_agents') or not check_segment_for_agent: # not an agent-supporting plugin registry.unsubscribe(_add_segment_host_mapping_for_segment, resources.SEGMENT, events.PRECOMMIT_CREATE) return hosts = {agent['host'] for agent in cp.get_agents(context) if check_segment_for_agent(segment, agent)} map_segment_to_hosts(context, segment.id, hosts) def _delete_segments_for_network(resource, event, trigger, context, network_id): admin_ctx = context.elevated() global segments_plugin if not segments_plugin: segments_plugin = manager.NeutronManager.load_class_for_provider( 'neutron.service_plugins', 'segments')() segments = segments_plugin.get_segments( admin_ctx, filters={'network_id': [network_id]}) for segment in segments: segments_plugin.delete_segment(admin_ctx, segment['id']) def subscribe(): registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, events.AFTER_CREATE) registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, events.AFTER_UPDATE) registry.subscribe(_add_segment_host_mapping_for_segment, resources.SEGMENT, events.PRECOMMIT_CREATE) registry.subscribe(_delete_segments_for_network, resources.NETWORK, events.PRECOMMIT_DELETE) subscribe() _deprecate._MovedGlobals()
# Copyright 2016 Hewlett Packard Enterprise Development, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. import functools from neutron_lib import constants from neutron_lib import exceptions as n_exc from oslo_db import exception as db_exc from oslo_log import helpers as log_helpers from oslo_utils import uuidutils from sqlalchemy.orm import exc from neutron.callbacks import events from neutron.callbacks import registry from neutron.callbacks import resources from neutron.common import _deprecate from neutron.db import api as db_api from neutron.db import common_db_mixin from neutron.db.models import segment as segment_model from neutron.db import segments_db as db from neutron.extensions import segment as extension from neutron import manager from neutron.services.segments import exceptions _deprecate._moved_global('SegmentHostMapping', new_module=segment_model) class SegmentDbMixin(common_db_mixin.CommonDbMixin): """Mixin class to add segment.""" def _make_segment_dict(self, segment_db, fields=None): res = {'id': segment_db['id'], 'network_id': segment_db['network_id'], 'name': segment_db['name'], 'description': segment_db['description'], db.PHYSICAL_NETWORK: segment_db[db.PHYSICAL_NETWORK], db.NETWORK_TYPE: segment_db[db.NETWORK_TYPE], db.SEGMENTATION_ID: segment_db[db.SEGMENTATION_ID], 'hosts': [mapping.host for mapping in segment_db.segment_host_mapping], 'segment_index': segment_db['segment_index']} return self._fields(res, fields) def 
_get_segment(self, context, segment_id): try: return self._get_by_id( context, segment_model.NetworkSegment, segment_id) except exc.NoResultFound: raise exceptions.SegmentNotFound(segment_id=segment_id) @log_helpers.log_method_call def create_segment(self, context, segment): """Create a segment.""" segment = segment['segment'] segment_id = segment.get('id') or uuidutils.generate_uuid() try: new_segment = self._create_segment_db(context, segment_id, segment) except db_exc.DBReferenceError: raise n_exc.NetworkNotFound(net_id=segment['network_id']) registry.notify(resources.SEGMENT, events.AFTER_CREATE, self, context=context, segment=new_segment) return self._make_segment_dict(new_segment) def _create_segment_db(self, context, segment_id, segment): with context.session.begin(subtransactions=True): network_id = segment['network_id'] physical_network = segment[extension.PHYSICAL_NETWORK] if physical_network == constants.ATTR_NOT_SPECIFIED: physical_network = None network_type = segment[extension.NETWORK_TYPE] segmentation_id = segment[extension.SEGMENTATION_ID] if segmentation_id == constants.ATTR_NOT_SPECIFIED: segmentation_id = None name = segment['name'] if name == constants.ATTR_NOT_SPECIFIED: name = None description = segment['description'] if description == constants.ATTR_NOT_SPECIFIED: description = None args = {'id': segment_id, 'network_id': network_id, 'name': name, 'description': description, db.PHYSICAL_NETWORK: physical_network, db.NETWORK_TYPE: network_type, db.SEGMENTATION_ID: segmentation_id} # Calculate the index of segment segment_index = 0 segments = self.get_segments( context, filters={'network_id': [network_id]}, fields=['segment_index'], sorts=[('segment_index', True)]) if segments: # NOTE(xiaohhui): The new index is the last index + 1, this # may cause discontinuous segment_index. But segment_index # can functionally work as the order index for segments. 
segment_index = (segments[-1].get('segment_index') + 1) args['segment_index'] = segment_index new_segment = segment_model.NetworkSegment(**args) context.session.add(new_segment) # Do some preliminary operations before committing the segment to # db registry.notify(resources.SEGMENT, events.PRECOMMIT_CREATE, self, context=context, segment=new_segment) return new_segment @log_helpers.log_method_call def update_segment(self, context, uuid, segment): """Update an existing segment.""" segment = segment['segment'] with context.session.begin(subtransactions=True): curr_segment = self._get_segment(context, uuid) curr_segment.update(segment) return self._make_segment_dict(curr_segment) @log_helpers.log_method_call def get_segment(self, context, uuid, fields=None): segment_db = self._get_segment(context, uuid) return self._make_segment_dict(segment_db, fields) @log_helpers.log_method_call def get_segments(self, context, filters=None, fields=None, sorts=None, limit=None, marker=None, page_reverse=False): marker_obj = self._get_marker_obj(context, 'segment', limit, marker) make_segment_dict = functools.partial(self._make_segment_dict) return self._get_collection(context, segment_model.NetworkSegment, make_segment_dict, filters=filters, fields=fields, sorts=sorts, limit=limit, marker_obj=marker_obj, page_reverse=page_reverse) @log_helpers.log_method_call def get_segments_count(self, context, filters=None): return self._get_collection_count(context, segment_model.NetworkSegment, filters=filters) @log_helpers.log_method_call def get_segments_by_hosts(self, context, hosts): if not hosts: return [] query = context.session.query( segment_model.SegmentHostMapping).filter( segment_model.SegmentHostMapping.host.in_(hosts)) return list({mapping.segment_id for mapping in query}) @log_helpers.log_method_call def delete_segment(self, context, uuid): """Delete an existing segment.""" segment = self.get_segment(context, uuid) # Do some preliminary operations before deleting the segment 
registry.notify(resources.SEGMENT, events.BEFORE_DELETE, self.delete_segment, context=context, segment=segment) # Delete segment in DB with context.session.begin(subtransactions=True): query = self._model_query(context, segment_model.NetworkSegment) query = query.filter(segment_model.NetworkSegment.id == uuid) if 0 == query.delete(): raise exceptions.SegmentNotFound(segment_id=uuid) # Do some preliminary operations before deleting segment in db registry.notify(resources.SEGMENT, events.PRECOMMIT_DELETE, self.delete_segment, context=context, segment=segment) registry.notify(resources.SEGMENT, events.AFTER_DELETE, self.delete_segment, context=context, segment=segment) def update_segment_host_mapping(context, host, current_segment_ids): with context.session.begin(subtransactions=True): segments_host_query = context.session.query( segment_model.SegmentHostMapping).filter_by(host=host) previous_segment_ids = { seg_host['segment_id'] for seg_host in segments_host_query} for segment_id in current_segment_ids - previous_segment_ids: context.session.add(segment_model.SegmentHostMapping( segment_id=segment_id, host=host)) stale_segment_ids = previous_segment_ids - current_segment_ids if stale_segment_ids: segments_host_query.filter( segment_model.SegmentHostMapping.segment_id.in_( stale_segment_ids)).delete(synchronize_session=False) def get_hosts_mapped_with_segments(context): """Get hosts that are mapped with segments. L2 providers can use this method to get an overview of SegmentHostMapping, and then delete the stale SegmentHostMapping. 
""" query = context.session.query(segment_model.SegmentHostMapping.host) return {row.host for row in query} def _get_phys_nets(agent): configurations_dict = agent.get('configurations', {}) mappings = configurations_dict.get('bridge_mappings', {}) mappings.update(configurations_dict.get('interface_mappings', {})) mappings.update(configurations_dict.get('device_mappings', {})) return mappings.keys() reported_hosts = set() # NOTE: Module level variable of segments plugin. It should be removed once # segments becomes a default plugin. segments_plugin = None def get_segments_with_phys_nets(context, phys_nets): """Get segments from physical networks. L2 providers usually have information of hostname and physical networks. They could use this method to get related segments and then update SegmentHostMapping. """ if not phys_nets: return [] with context.session.begin(subtransactions=True): segments = context.session.query(segment_model.NetworkSegment).filter( segment_model.NetworkSegment.physical_network.in_(phys_nets)) return segments def map_segment_to_hosts(context, segment_id, hosts): """Map segment to a collection of hosts.""" with db_api.autonested_transaction(context.session): for host in hosts: context.session.add( segment_model.SegmentHostMapping(segment_id=segment_id, host=host)) def _update_segment_host_mapping_for_agent(resource, event, trigger, context, host, plugin, agent): check_segment_for_agent = getattr(plugin, 'check_segment_for_agent', None) if not check_segment_for_agent: return phys_nets = _get_phys_nets(agent) if not phys_nets: return start_flag = agent.get('start_flag', None) if host in reported_hosts and not start_flag: return reported_hosts.add(host) segments = get_segments_with_phys_nets(context, phys_nets) current_segment_ids = { segment['id'] for segment in segments if check_segment_for_agent(segment, agent)} update_segment_host_mapping(context, host, current_segment_ids) def _add_segment_host_mapping_for_segment(resource, event, trigger, 
context, segment): if not context.session.is_active: # The session might be in partial rollback state, due to errors in # peer callback. In that case, there is no need to add the mapping. # Just return here. return if not segment.physical_network: return cp = manager.NeutronManager.get_plugin() check_segment_for_agent = getattr(cp, 'check_segment_for_agent', None) if not hasattr(cp, 'get_agents') or not check_segment_for_agent: # not an agent-supporting plugin registry.unsubscribe(_add_segment_host_mapping_for_segment, resources.SEGMENT, events.PRECOMMIT_CREATE) return hosts = {agent['host'] for agent in cp.get_agents(context) if check_segment_for_agent(segment, agent)} map_segment_to_hosts(context, segment.id, hosts) def _delete_segments_for_network(resource, event, trigger, context, network_id): admin_ctx = context.elevated() global segments_plugin if not segments_plugin: segments_plugin = manager.NeutronManager.load_class_for_provider( 'neutron.service_plugins', 'segments')() segments = segments_plugin.get_segments( admin_ctx, filters={'network_id': [network_id]}) for segment in segments: segments_plugin.delete_segment(admin_ctx, segment['id']) def subscribe(): registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, events.AFTER_CREATE) registry.subscribe(_update_segment_host_mapping_for_agent, resources.AGENT, events.AFTER_UPDATE) registry.subscribe(_add_segment_host_mapping_for_segment, resources.SEGMENT, events.PRECOMMIT_CREATE) registry.subscribe(_delete_segments_for_network, resources.NETWORK, events.PRECOMMIT_DELETE) subscribe() _deprecate._MovedGlobals()
en
0.880022
# Copyright 2016 Hewlett Packard Enterprise Development, LP # # All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); you may # not use this file except in compliance with the License. You may obtain # a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, WITHOUT # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. Mixin class to add segment. Create a segment. # Calculate the index of segment # NOTE(xiaohhui): The new index is the last index + 1, this # may cause discontinuous segment_index. But segment_index # can functionally work as the order index for segments. # Do some preliminary operations before committing the segment to # db Update an existing segment. Delete an existing segment. # Do some preliminary operations before deleting the segment # Delete segment in DB # Do some preliminary operations before deleting segment in db Get hosts that are mapped with segments. L2 providers can use this method to get an overview of SegmentHostMapping, and then delete the stale SegmentHostMapping. # NOTE: Module level variable of segments plugin. It should be removed once # segments becomes a default plugin. Get segments from physical networks. L2 providers usually have information of hostname and physical networks. They could use this method to get related segments and then update SegmentHostMapping. Map segment to a collection of hosts. # The session might be in partial rollback state, due to errors in # peer callback. In that case, there is no need to add the mapping. # Just return here. # not an agent-supporting plugin
1.533337
2
poezio/xhtml.py
mathiasertl/poezio
0
6625283
<reponame>mathiasertl/poezio # Copyright 2010-2011 <NAME> <<EMAIL>> # # This file is part of Poezio. # # Poezio is free software: you can redistribute it and/or modify # it under the terms of the zlib license. See the COPYING file. """ Various methods to convert shell colors to poezio colors, xhtml code to shell colors, poezio colors to xhtml code """ import base64 import curses import hashlib import re from os import path from slixmpp.xmlstream import ET from urllib.parse import unquote from io import BytesIO from xml import sax from xml.sax import saxutils digits = '0123456789' # never trust the modules XHTML_NS = 'http://www.w3.org/1999/xhtml' # HTML named colors colors = { 'aliceblue': 231, 'antiquewhite': 231, 'aqua': 51, 'aquamarine': 122, 'azure': 231, 'beige': 231, 'bisque': 230, 'black': 232, 'blanchedalmond': 230, 'blue': 21, 'blueviolet': 135, 'brown': 124, 'burlywood': 223, 'cadetblue': 109, 'chartreuse': 118, 'chocolate': 172, 'coral': 209, 'cornflowerblue': 111, 'cornsilk': 231, 'crimson': 197, 'cyan': 51, 'darkblue': 19, 'darkcyan': 37, 'darkgoldenrod': 178, 'darkgray': 247, 'darkgreen': 28, 'darkgrey': 247, 'darkkhaki': 186, 'darkmagenta': 127, 'darkolivegreen': 65, 'darkorange': 214, 'darkorchid': 134, 'darkred': 124, 'darksalmon': 216, 'darkseagreen': 151, 'darkslateblue': 61, 'darkslategray': 59, 'darkslategrey': 59, 'darkturquoise': 44, 'darkviolet': 128, 'deeppink': 199, 'deepskyblue': 45, 'dimgray': 241, 'dimgrey': 241, 'dodgerblue': 39, 'firebrick': 160, 'floralwhite': 231, 'forestgreen': 34, 'fuchsia': 201, 'gainsboro': 252, 'ghostwhite': 231, 'gold': 226, 'goldenrod': 214, 'gray': 244, 'green': 34, 'greenyellow': 191, 'grey': 244, 'honeydew': 231, 'hotpink': 212, 'indianred': 174, 'indigo': 55, 'ivory': 231, 'khaki': 229, 'lavender': 231, 'lavenderblush': 231, 'lawngreen': 118, 'lemonchiffon': 230, 'lightblue': 195, 'lightcoral': 217, 'lightcyan': 231, 'lightgoldenrodyellow': 230, 'lightgray': 251, 'lightgreen': 157, 'lightgrey': 251, 
'lightpink': 224, 'lightsalmon': 216, 'lightseagreen': 43, 'lightskyblue': 153, 'lightslategray': 109, 'lightslategrey': 109, 'lightsteelblue': 189, 'lightyellow': 231, 'lime': 46, 'limegreen': 77, 'linen': 231, 'magenta': 201, 'maroon': 124, 'mediumaquamarine': 115, 'mediumblue': 20, 'mediumorchid': 170, 'mediumpurple': 141, 'mediumseagreen': 78, 'mediumslateblue': 105, 'mediumspringgreen': 49, 'mediumturquoise': 80, 'mediumvioletred': 163, 'midnightblue': 18, 'mintcream': 231, 'mistyrose': 231, 'moccasin': 230, 'navajowhite': 230, 'navy': 19, 'oldlace': 231, 'olive': 142, 'olivedrab': 106, 'orange': 214, 'orangered': 202, 'orchid': 213, 'palegoldenrod': 229, 'palegreen': 157, 'paleturquoise': 195, 'palevioletred': 211, 'papayawhip': 231, 'peachpuff': 230, 'peru': 179, 'pink': 224, 'plum': 219, 'powderblue': 195, 'purple': 127, 'red': 196, 'rosybrown': 181, 'royalblue': 69, 'saddlebrown': 130, 'salmon': 216, 'sandybrown': 216, 'seagreen': 72, 'seashell': 231, 'sienna': 131, 'silver': 250, 'skyblue': 153, 'slateblue': 104, 'slategray': 109, 'slategrey': 109, 'snow': 231, 'springgreen': 48, 'steelblue': 74, 'tan': 187, 'teal': 37, 'thistle': 225, 'tomato': 209, 'turquoise': 86, 'violet': 219, 'wheat': 230, 'white': 255, 'whitesmoke': 255, 'yellow': 226, 'yellowgreen': 149 } whitespace_re = re.compile(r'\s+') xhtml_attr_re = re.compile(r'\x19-?\d[^}]*}|\x19[buaio]') xhtml_data_re = re.compile(r'data:image/([a-z]+);base64,(.+)') poezio_color_double = re.compile(r'(?:\x19\d+}|\x19\d)+(\x19\d|\x19\d+})') poezio_format_trim = re.compile(r'(\x19\d+}|\x19\d|\x19[buaio]|\x19o)+\x19o') xhtml_simple_attr_re = re.compile(r'\x19\d') def get_body_from_message_stanza(message, use_xhtml=False, tmp_dir=None, extract_images=False): """ Returns a string with xhtml markups converted to poezio colors if there's an xhtml_im element, or the body (without any color) otherwise """ if not use_xhtml: return message['body'] xhtml = message.xml.find('{http://jabber.org/protocol/xhtml-im}html') 
if not xhtml: return message['body'] xhtml_body = xhtml.find('{http://www.w3.org/1999/xhtml}body') if not xhtml_body: return message['body'] content = xhtml_to_poezio_colors(xhtml_body, tmp_dir=tmp_dir, extract_images=extract_images) content = content if content else message['body'] return content or " " def ncurses_color_to_html(color): """ Takes an int between 0 and 256 and returns a string of the form #XXXXXX representing an html color. """ if color <= 15: try: (r, g, b) = curses.color_content(color) except: # fallback in faulty terminals (e.g. xterm) (r, g, b) = curses.color_content(color%8) r = r / 1000 * 6 - 0.01 g = g / 1000 * 6 - 0.01 b = b / 1000 * 6 - 0.01 elif color <= 231: color = color - 16 r = color % 6 color = color / 6 g = color % 6 color = color / 6 b = color % 6 else: color -= 232 r = g = b = color / 24 * 6 return '#%02X%02X%02X' % (int(r*256/6), int(g*256/6), int(b*256/6)) def parse_css(css): def get_color(value): if value[0] == '#': value = value[1:] length = len(value) if length != 3 and length != 6: return -1 value = int(value, 16) if length == 6: r = int(value >> 16) g = int((value >> 8) & 0xff) b = int(value & 0xff) if r == g == b: return 232 + int(r/10.6251) div = 42.51 else: r = int(value >> 8) g = int((value >> 4) & 0xf) b = int(value & 0xf) if r == g == b: return 232 + int(1.54*r) div = 2.51 return 6*6*int(r/div) + 6*int(g/div) + int(b/div) + 16 if value in colors: return colors[value] return -1 shell = '' rules = css.split(';') for rule in rules: if ':' not in rule: continue key, value = rule.split(':', 1) key = key.strip() value = value.strip() if key == 'background-color': pass#shell += '\x191' elif key == 'color': color = get_color(value) if color != -1: shell += '\x19%d}' % color elif key == 'font-style': shell += '\x19i' elif key == 'font-weight': shell += '\x19b' elif key == 'margin-left': shell += ' ' elif key == 'text-align': pass elif key == 'text-decoration': if value == 'underline': shell += '\x19u' elif value == 'blink': 
shell += '\x19a' return shell def trim(string): return re.sub(whitespace_re, ' ', string) class XHTMLHandler(sax.ContentHandler): def __init__(self, force_ns=False, tmp_dir=None, extract_images=False): self.builder = [] self.formatting = [] self.attrs = [] self.list_state = [] self.is_pre = False self.a_start = 0 # do not care about xhtml-in namespace self.force_ns = force_ns self.tmp_dir = tmp_dir self.extract_images = extract_images @property def result(self): sanitized = re.sub(poezio_color_double, r'\1', ''.join(self.builder).strip()) return re.sub(poezio_format_trim, '\x19o', sanitized) def append_formatting(self, formatting): self.formatting.append(formatting) self.builder.append(formatting) def pop_formatting(self): self.formatting.pop() self.builder.append('\x19o' + ''.join(self.formatting)) def characters(self, characters): self.builder.append(characters if self.is_pre else trim(characters)) def startElementNS(self, name, _, attrs): if name[0] != XHTML_NS and not self.force_ns: return builder = self.builder attrs = {name: value for ((ns, name), value) in attrs.items() if ns is None} self.attrs.append(attrs) if 'style' in attrs: style = parse_css(attrs['style']) self.append_formatting(style) name = name[1] if name == 'a': self.append_formatting('\x19u') self.a_start = len(self.builder) elif name == 'blockquote': builder.append('“') elif name == 'br': builder.append('\n') elif name == 'cite': self.append_formatting('\x19u') elif name == 'em': self.append_formatting('\x19i') elif name == 'img': if re.match(xhtml_data_re, attrs['src']) and self.extract_images: type_, data = [i for i in re.split(xhtml_data_re, attrs['src']) if i] bin_data = base64.b64decode(unquote(data)) filename = hashlib.sha1(bin_data).hexdigest() + '.' 
+ type_ filepath = path.join(self.tmp_dir, filename) if not path.exists(filepath): try: with open(filepath, 'wb') as fd: fd.write(bin_data) builder.append('[file stored as %s]' % filename) except Exception as e: builder.append('[Error while saving image: %s]' % e) else: builder.append('[file stored as %s]' % filename) else: builder.append(trim(attrs['src'])) if 'alt' in attrs: builder.append(' (%s)' % trim(attrs['alt'])) elif name == 'ul': self.list_state.append('ul') elif name == 'ol': self.list_state.append(1) elif name == 'li': try: state = self.list_state[-1] except IndexError: state = 'ul' if state == 'ul': builder.append('\n• ') else: builder.append('\n%d) ' % state) state += 1 self.list_state[-1] = state elif name == 'p': builder.append('\n') elif name == 'pre': builder.append('\n') self.is_pre = True elif name == 'strong': self.append_formatting('\x19b') def endElementNS(self, name, _): if name[0] != XHTML_NS and not self.force_ns: return builder = self.builder attrs = self.attrs.pop() name = name[1] if name == 'a': self.pop_formatting() # do not display the link twice text_elements = [x for x in self.builder[self.a_start:] if not x.startswith('\x19')] link_text = ''.join(text_elements).strip() if 'href' in attrs and attrs['href'] != link_text: builder.append(' (%s)' % trim(attrs['href'])) elif name == 'blockquote': builder.append('”') elif name in ('cite', 'em', 'strong'): self.pop_formatting() elif name in ('ol', 'p', 'ul'): builder.append('\n') elif name == 'pre': builder.append('\n') self.is_pre = False if 'style' in attrs: self.pop_formatting() if 'title' in attrs: builder.append(' [' + attrs['title'] + ']') def xhtml_to_poezio_colors(xml, force=False, tmp_dir=None, extract_images=None): if isinstance(xml, str): xml = xml.encode('utf8') elif not isinstance(xml, bytes): xml = ET.tostring(xml) handler = XHTMLHandler(force_ns=force, tmp_dir=tmp_dir, extract_images=extract_images) parser = sax.make_parser() parser.setFeature(sax.handler.feature_namespaces, 
True) parser.setContentHandler(handler) parser.parse(BytesIO(xml)) return handler.result def clean_text(s): """ Remove all xhtml-im attributes (\x19etc) from the string with the complete color format, i.e \x19xxx} """ s = re.sub(xhtml_attr_re, "", s) return s def clean_text_simple(string): """ Remove all \x19 from the string formatted with simple colors: \x198 """ pos = string.find('\x19') while pos != -1: string = string[:pos] + string[pos+2:] pos = string.find('\x19') return string def convert_simple_to_full_colors(text): """ takes a \x19n formatted string and returns a \x19n} formatted one. """ # TODO, have a single list of this. This is some sort of # dusplicate from windows.format_chars mapping = str.maketrans({'\x0E': '\x19b', '\x0F': '\x19o', '\x10': '\x19u', '\x11': '\x191', '\x12': '\x192', '\x13': '\x193', '\x14': '\x194', '\x15': '\x195', '\x16': '\x196', '\x17': '\x197', '\x18': '\x198', '\x19': '\x199'}) text = text.translate(mapping) def add_curly_bracket(match): return match.group(0) + '}' return re.sub(xhtml_simple_attr_re, add_curly_bracket, text) number_to_color_names = { 1: 'red', 2: 'green', 3: 'yellow', 4: 'blue', 5: 'violet', 6: 'turquoise', 7: 'white' } def format_inline_css(_dict): return ''.join(('%s: %s;' % (key, value) for key, value in _dict.items())) def poezio_colors_to_html(string): """ Convert poezio colors to html (e.g. \x191}: <span style='color: red'>) """ # Maintain a list of the current css attributes used # And check if a tag is open (by design, we only open # spans tag, and they cannot be nested. 
current_attrs = {} tag_open = False next_attr_char = string.find('\x19') build = ["<body xmlns='http://www.w3.org/1999/xhtml'><p>"] def check_property(key, value): nonlocal tag_open if current_attrs.get(key, None) == value: return current_attrs[key] = value if tag_open: tag_open = False build.append('</span>') while next_attr_char != -1: attr_char = string[next_attr_char+1].lower() if next_attr_char != 0 and string[:next_attr_char]: if current_attrs and not tag_open: build.append('<span style="%s">' % format_inline_css(current_attrs)) tag_open = True build.append(saxutils.escape(string[:next_attr_char])) if attr_char == 'o': if tag_open: build.append('</span>') tag_open = False current_attrs = {} elif attr_char == 'b': check_property('font-weight', 'bold') elif attr_char == 'u': check_property('text-decoration', 'underline') if attr_char in digits: number_str = string[next_attr_char+1:string.find('}', next_attr_char)] number = int(number_str) if number in number_to_color_names: check_property('color', number_to_color_names.get(number, 'black')) else: check_property('color', ncurses_color_to_html(number)) string = string[next_attr_char+len(number_str)+2:] else: string = string[next_attr_char+2:] next_attr_char = string.find('\x19') if current_attrs and not tag_open and string: build.append('<span style="%s">' % format_inline_css(current_attrs)) tag_open = True build.append(saxutils.escape(string)) if tag_open: build.append('</span>') build.append("</p></body>") text = ''.join(build) return text.replace('\n', '<br />')
# Copyright 2010-2011 <NAME> <<EMAIL>>
#
# This file is part of Poezio.
#
# Poezio is free software: you can redistribute it and/or modify
# it under the terms of the zlib license. See the COPYING file.

"""
Various methods to convert shell colors to poezio colors,
xhtml code to shell colors, poezio colors to xhtml code
"""

import base64
import curses
import hashlib
import re

from os import path
from slixmpp.xmlstream import ET
from urllib.parse import unquote
from io import BytesIO
from xml import sax
from xml.sax import saxutils

digits = '0123456789'  # never trust the modules

XHTML_NS = 'http://www.w3.org/1999/xhtml'

# HTML named colors mapped to the closest xterm-256 palette index.
colors = {
    'aliceblue': 231, 'antiquewhite': 231, 'aqua': 51, 'aquamarine': 122,
    'azure': 231, 'beige': 231, 'bisque': 230, 'black': 232,
    'blanchedalmond': 230, 'blue': 21, 'blueviolet': 135, 'brown': 124,
    'burlywood': 223, 'cadetblue': 109, 'chartreuse': 118, 'chocolate': 172,
    'coral': 209, 'cornflowerblue': 111, 'cornsilk': 231, 'crimson': 197,
    'cyan': 51, 'darkblue': 19, 'darkcyan': 37, 'darkgoldenrod': 178,
    'darkgray': 247, 'darkgreen': 28, 'darkgrey': 247, 'darkkhaki': 186,
    'darkmagenta': 127, 'darkolivegreen': 65, 'darkorange': 214,
    'darkorchid': 134, 'darkred': 124, 'darksalmon': 216,
    'darkseagreen': 151, 'darkslateblue': 61, 'darkslategray': 59,
    'darkslategrey': 59, 'darkturquoise': 44, 'darkviolet': 128,
    'deeppink': 199, 'deepskyblue': 45, 'dimgray': 241, 'dimgrey': 241,
    'dodgerblue': 39, 'firebrick': 160, 'floralwhite': 231,
    'forestgreen': 34, 'fuchsia': 201, 'gainsboro': 252, 'ghostwhite': 231,
    'gold': 226, 'goldenrod': 214, 'gray': 244, 'green': 34,
    'greenyellow': 191, 'grey': 244, 'honeydew': 231, 'hotpink': 212,
    'indianred': 174, 'indigo': 55, 'ivory': 231, 'khaki': 229,
    'lavender': 231, 'lavenderblush': 231, 'lawngreen': 118,
    'lemonchiffon': 230, 'lightblue': 195, 'lightcoral': 217,
    'lightcyan': 231, 'lightgoldenrodyellow': 230, 'lightgray': 251,
    'lightgreen': 157, 'lightgrey': 251, 'lightpink': 224,
    'lightsalmon': 216, 'lightseagreen': 43, 'lightskyblue': 153,
    'lightslategray': 109, 'lightslategrey': 109, 'lightsteelblue': 189,
    'lightyellow': 231, 'lime': 46, 'limegreen': 77, 'linen': 231,
    'magenta': 201, 'maroon': 124, 'mediumaquamarine': 115,
    'mediumblue': 20, 'mediumorchid': 170, 'mediumpurple': 141,
    'mediumseagreen': 78, 'mediumslateblue': 105,
    'mediumspringgreen': 49, 'mediumturquoise': 80,
    'mediumvioletred': 163, 'midnightblue': 18, 'mintcream': 231,
    'mistyrose': 231, 'moccasin': 230, 'navajowhite': 230, 'navy': 19,
    'oldlace': 231, 'olive': 142, 'olivedrab': 106, 'orange': 214,
    'orangered': 202, 'orchid': 213, 'palegoldenrod': 229,
    'palegreen': 157, 'paleturquoise': 195, 'palevioletred': 211,
    'papayawhip': 231, 'peachpuff': 230, 'peru': 179, 'pink': 224,
    'plum': 219, 'powderblue': 195, 'purple': 127, 'red': 196,
    'rosybrown': 181, 'royalblue': 69, 'saddlebrown': 130, 'salmon': 216,
    'sandybrown': 216, 'seagreen': 72, 'seashell': 231, 'sienna': 131,
    'silver': 250, 'skyblue': 153, 'slateblue': 104, 'slategray': 109,
    'slategrey': 109, 'snow': 231, 'springgreen': 48, 'steelblue': 74,
    'tan': 187, 'teal': 37, 'thistle': 225, 'tomato': 209,
    'turquoise': 86, 'violet': 219, 'wheat': 230, 'white': 255,
    'whitesmoke': 255, 'yellow': 226, 'yellowgreen': 149
}

whitespace_re = re.compile(r'\s+')
xhtml_attr_re = re.compile(r'\x19-?\d[^}]*}|\x19[buaio]')
xhtml_data_re = re.compile(r'data:image/([a-z]+);base64,(.+)')
poezio_color_double = re.compile(r'(?:\x19\d+}|\x19\d)+(\x19\d|\x19\d+})')
poezio_format_trim = re.compile(r'(\x19\d+}|\x19\d|\x19[buaio]|\x19o)+\x19o')
xhtml_simple_attr_re = re.compile(r'\x19\d')


def get_body_from_message_stanza(message, use_xhtml=False,
                                 tmp_dir=None, extract_images=False):
    """
    Returns a string with xhtml markups converted to
    poezio colors if there's an xhtml_im element, or
    the body (without any color) otherwise
    """
    if not use_xhtml:
        return message['body']
    xhtml = message.xml.find('{http://jabber.org/protocol/xhtml-im}html')
    # NOTE: compare against None — an ElementTree Element with no
    # children is falsy, so `if not xhtml` would misfire here.
    if xhtml is None:
        return message['body']
    xhtml_body = xhtml.find('{http://www.w3.org/1999/xhtml}body')
    if xhtml_body is None:
        return message['body']
    content = xhtml_to_poezio_colors(xhtml_body, tmp_dir=tmp_dir,
                                     extract_images=extract_images)
    content = content if content else message['body']
    return content or " "


def ncurses_color_to_html(color):
    """
    Takes an int between 0 and 256 and returns a string of the form
    #XXXXXX representing an html color.
    """
    if color <= 15:
        # Basic ANSI colors: ask the terminal for its actual RGB values
        # (curses reports components on a 0..1000 scale).
        try:
            (r, g, b) = curses.color_content(color)
        except curses.error:
            # fallback in faulty terminals (e.g. xterm)
            (r, g, b) = curses.color_content(color % 8)
        r = r / 1000 * 6 - 0.01
        g = g / 1000 * 6 - 0.01
        b = b / 1000 * 6 - 0.01
    elif color <= 231:
        # 6x6x6 color cube (indices 16..231).  Floor division is
        # required: with Python 3 true division the intermediate
        # `color` becomes a float and the g/b components are wrong.
        color = color - 16
        r = color % 6
        color = color // 6
        g = color % 6
        color = color // 6
        b = color % 6
    else:
        # Grayscale ramp (indices 232..255); float math is intentional
        # here so the 24 gray levels spread over the 0..255 range.
        color -= 232
        r = g = b = color / 24 * 6
    return '#%02X%02X%02X' % (int(r * 256 / 6),
                              int(g * 256 / 6),
                              int(b * 256 / 6))


def parse_css(css):
    """Convert an inline CSS declaration block into poezio format chars."""

    def get_color(value):
        # Returns the xterm-256 index for a '#RGB'/'#RRGGBB' or named
        # color, or -1 when the value cannot be parsed.
        if value[0] == '#':
            value = value[1:]
            length = len(value)
            if length != 3 and length != 6:
                return -1
            value = int(value, 16)
            if length == 6:
                r = int(value >> 16)
                g = int((value >> 8) & 0xff)
                b = int(value & 0xff)
                if r == g == b:
                    # pure gray: map onto the grayscale ramp
                    return 232 + int(r / 10.6251)
                div = 42.51
            else:
                r = int(value >> 8)
                g = int((value >> 4) & 0xf)
                b = int(value & 0xf)
                if r == g == b:
                    return 232 + int(1.54 * r)
                div = 2.51
            return 6 * 6 * int(r / div) + 6 * int(g / div) + int(b / div) + 16
        if value in colors:
            return colors[value]
        return -1

    shell = ''
    rules = css.split(';')
    for rule in rules:
        if ':' not in rule:
            continue
        key, value = rule.split(':', 1)
        key = key.strip()
        value = value.strip()
        if key == 'background-color':
            pass  # shell += '\x191'
        elif key == 'color':
            color = get_color(value)
            if color != -1:
                shell += '\x19%d}' % color
        elif key == 'font-style':
            shell += '\x19i'
        elif key == 'font-weight':
            shell += '\x19b'
        elif key == 'margin-left':
            shell += ' '
        elif key == 'text-align':
            pass
        elif key == 'text-decoration':
            if value == 'underline':
                shell += '\x19u'
            elif value == 'blink':
                shell += '\x19a'
    return shell


def trim(string):
    """Collapse every whitespace run into a single space."""
    return re.sub(whitespace_re, ' ', string)


class XHTMLHandler(sax.ContentHandler):
    """SAX handler turning an XHTML-IM body into poezio-colored text."""

    def __init__(self, force_ns=False, tmp_dir=None, extract_images=False):
        self.builder = []      # output fragments, joined in `result`
        self.formatting = []   # stack of currently-open format chars
        self.attrs = []        # stack of attribute dicts, one per open tag
        self.list_state = []   # 'ul' or the next <ol> item number
        self.is_pre = False    # inside <pre>: keep whitespace verbatim
        self.a_start = 0       # builder index where the current <a> began

        # do not care about xhtml-in namespace
        self.force_ns = force_ns

        self.tmp_dir = tmp_dir
        self.extract_images = extract_images

    @property
    def result(self):
        sanitized = re.sub(poezio_color_double, r'\1',
                           ''.join(self.builder).strip())
        return re.sub(poezio_format_trim, '\x19o', sanitized)

    def append_formatting(self, formatting):
        self.formatting.append(formatting)
        self.builder.append(formatting)

    def pop_formatting(self):
        # reset everything, then re-apply the still-open formats
        self.formatting.pop()
        self.builder.append('\x19o' + ''.join(self.formatting))

    def characters(self, characters):
        self.builder.append(characters if self.is_pre else trim(characters))

    def startElementNS(self, name, _, attrs):
        if name[0] != XHTML_NS and not self.force_ns:
            return
        builder = self.builder
        attrs = {name: value for ((ns, name), value) in attrs.items()
                 if ns is None}
        self.attrs.append(attrs)
        if 'style' in attrs:
            style = parse_css(attrs['style'])
            self.append_formatting(style)
        name = name[1]
        if name == 'a':
            self.append_formatting('\x19u')
            self.a_start = len(self.builder)
        elif name == 'blockquote':
            builder.append('“')
        elif name == 'br':
            builder.append('\n')
        elif name == 'cite':
            self.append_formatting('\x19u')
        elif name == 'em':
            self.append_formatting('\x19i')
        elif name == 'img':
            if re.match(xhtml_data_re, attrs['src']) and self.extract_images:
                # Embedded data: URI — decode it and store it on disk.
                type_, data = [i for i in
                               re.split(xhtml_data_re, attrs['src']) if i]
                bin_data = base64.b64decode(unquote(data))
                filename = hashlib.sha1(bin_data).hexdigest() + '.' + type_
                filepath = path.join(self.tmp_dir, filename)
                if not path.exists(filepath):
                    try:
                        with open(filepath, 'wb') as fd:
                            fd.write(bin_data)
                        builder.append('[file stored as %s]' % filename)
                    except Exception as e:
                        builder.append('[Error while saving image: %s]' % e)
                else:
                    builder.append('[file stored as %s]' % filename)
            else:
                builder.append(trim(attrs['src']))
            if 'alt' in attrs:
                builder.append(' (%s)' % trim(attrs['alt']))
        elif name == 'ul':
            self.list_state.append('ul')
        elif name == 'ol':
            self.list_state.append(1)
        elif name == 'li':
            try:
                state = self.list_state[-1]
            except IndexError:
                state = 'ul'
            if state == 'ul':
                builder.append('\n• ')
            else:
                builder.append('\n%d) ' % state)
                state += 1
                self.list_state[-1] = state
        elif name == 'p':
            builder.append('\n')
        elif name == 'pre':
            builder.append('\n')
            self.is_pre = True
        elif name == 'strong':
            self.append_formatting('\x19b')

    def endElementNS(self, name, _):
        if name[0] != XHTML_NS and not self.force_ns:
            return
        builder = self.builder
        attrs = self.attrs.pop()
        name = name[1]
        if name == 'a':
            self.pop_formatting()
            # do not display the link twice
            text_elements = [x for x in self.builder[self.a_start:]
                             if not x.startswith('\x19')]
            link_text = ''.join(text_elements).strip()
            if 'href' in attrs and attrs['href'] != link_text:
                builder.append(' (%s)' % trim(attrs['href']))
        elif name == 'blockquote':
            builder.append('”')
        elif name in ('cite', 'em', 'strong'):
            self.pop_formatting()
        elif name in ('ol', 'p', 'ul'):
            builder.append('\n')
        elif name == 'pre':
            builder.append('\n')
            self.is_pre = False
        if 'style' in attrs:
            self.pop_formatting()
        if 'title' in attrs:
            builder.append(' [' + attrs['title'] + ']')


def xhtml_to_poezio_colors(xml, force=False, tmp_dir=None,
                           extract_images=None):
    """Parse an XHTML string/bytes/Element and return poezio-colored text."""
    if isinstance(xml, str):
        xml = xml.encode('utf8')
    elif not isinstance(xml, bytes):
        xml = ET.tostring(xml)
    handler = XHTMLHandler(force_ns=force, tmp_dir=tmp_dir,
                           extract_images=extract_images)
    parser = sax.make_parser()
    parser.setFeature(sax.handler.feature_namespaces, True)
    parser.setContentHandler(handler)
    parser.parse(BytesIO(xml))
    return handler.result


def clean_text(s):
    """
    Remove all xhtml-im attributes (\x19etc) from the string with the
    complete color format, i.e \x19xxx}
    """
    s = re.sub(xhtml_attr_re, "", s)
    return s


def clean_text_simple(string):
    """
    Remove all \x19 from the string formatted with simple colors:
    \x198
    """
    pos = string.find('\x19')
    while pos != -1:
        string = string[:pos] + string[pos + 2:]
        pos = string.find('\x19')
    return string


def convert_simple_to_full_colors(text):
    """
    takes a \x19n formatted string and returns
    a \x19n} formatted one.
    """
    # TODO: have a single list of this; it is partly a duplicate of
    # windows.format_chars
    mapping = str.maketrans({'\x0E': '\x19b', '\x0F': '\x19o',
                             '\x10': '\x19u', '\x11': '\x191',
                             '\x12': '\x192', '\x13': '\x193',
                             '\x14': '\x194', '\x15': '\x195',
                             '\x16': '\x196', '\x17': '\x197',
                             '\x18': '\x198', '\x19': '\x199'})
    text = text.translate(mapping)

    def add_curly_bracket(match):
        return match.group(0) + '}'
    return re.sub(xhtml_simple_attr_re, add_curly_bracket, text)


number_to_color_names = {
    1: 'red',
    2: 'green',
    3: 'yellow',
    4: 'blue',
    5: 'violet',
    6: 'turquoise',
    7: 'white'
}


def format_inline_css(_dict):
    """Serialize a {property: value} dict into an inline CSS string."""
    return ''.join(('%s: %s;' % (key, value) for key, value in _dict.items()))


def poezio_colors_to_html(string):
    """
    Convert poezio colors to html
    (e.g. \x191}: <span style='color: red'>)
    """
    # Maintain a list of the current css attributes used
    # And check if a tag is open (by design, we only open
    # spans tag, and they cannot be nested.
    current_attrs = {}
    tag_open = False
    next_attr_char = string.find('\x19')
    build = ["<body xmlns='http://www.w3.org/1999/xhtml'><p>"]

    def check_property(key, value):
        # Record a new css property; close the open span (if any) so the
        # next text chunk is emitted with the updated attribute set.
        nonlocal tag_open
        if current_attrs.get(key, None) == value:
            return
        current_attrs[key] = value
        if tag_open:
            tag_open = False
            build.append('</span>')

    while next_attr_char != -1:
        attr_char = string[next_attr_char + 1].lower()

        # flush the text preceding this format char
        if next_attr_char != 0 and string[:next_attr_char]:
            if current_attrs and not tag_open:
                build.append('<span style="%s">' %
                             format_inline_css(current_attrs))
                tag_open = True
            build.append(saxutils.escape(string[:next_attr_char]))

        if attr_char == 'o':
            # reset all attributes
            if tag_open:
                build.append('</span>')
                tag_open = False
            current_attrs = {}
        elif attr_char == 'b':
            check_property('font-weight', 'bold')
        elif attr_char == 'u':
            check_property('text-decoration', 'underline')

        if attr_char in digits:
            number_str = string[next_attr_char + 1:
                                string.find('}', next_attr_char)]
            number = int(number_str)
            if number in number_to_color_names:
                check_property('color',
                               number_to_color_names.get(number, 'black'))
            else:
                check_property('color', ncurses_color_to_html(number))
            string = string[next_attr_char + len(number_str) + 2:]
        else:
            string = string[next_attr_char + 2:]
        next_attr_char = string.find('\x19')

    # trailing text after the last format char
    if current_attrs and not tag_open and string:
        build.append('<span style="%s">' % format_inline_css(current_attrs))
        tag_open = True
    build.append(saxutils.escape(string))

    if tag_open:
        build.append('</span>')
    build.append("</p></body>")
    text = ''.join(build)
    return text.replace('\n', '<br />')
en
0.659182
# Copyright 2010-2011 <NAME> <<EMAIL>> # # This file is part of Poezio. # # Poezio is free software: you can redistribute it and/or modify # it under the terms of the zlib license. See the COPYING file. Various methods to convert shell colors to poezio colors, xhtml code to shell colors, poezio colors to xhtml code # never trust the modules # HTML named colors Returns a string with xhtml markups converted to poezio colors if there's an xhtml_im element, or the body (without any color) otherwise Takes an int between 0 and 256 and returns a string of the form #XXXXXX representing an html color. # fallback in faulty terminals (e.g. xterm) #shell += '\x191' # do not care about xhtml-in namespace # do not display the link twice Remove all xhtml-im attributes (\x19etc) from the string with the complete color format, i.e \x19xxx} Remove all \x19 from the string formatted with simple colors: \x198 takes a \x19n formatted string and returns a \x19n} formatted one. # TODO, have a single list of this. This is some sort of # dusplicate from windows.format_chars Convert poezio colors to html (e.g. \x191}: <span style='color: red'>) # Maintain a list of the current css attributes used # And check if a tag is open (by design, we only open # spans tag, and they cannot be nested.
2.012077
2
grano/model/property.py
ANCIR/grano
30
6625284
<reponame>ANCIR/grano from datetime import datetime from sqlalchemy.orm import aliased from grano.core import db from grano.model.common import IntBase from grano.model.attribute import Attribute VALUE_COLUMNS = { 'value_string': basestring, 'value_datetime': datetime, 'value_integer': int, 'value_float': float, 'value_boolean': bool } DATETIME_PRECISION = [ 'year', 'month', 'day', 'time', ] class Property(db.Model, IntBase): __tablename__ = 'grano_property' attribute_id = db.Column(db.Integer, db.ForeignKey('grano_attribute.id')) author_id = db.Column(db.Integer, db.ForeignKey('grano_account.id')) entity_id = db.Column(db.Unicode(), db.ForeignKey('grano_entity.id'), index=True, nullable=True) relation_id = db.Column(db.Unicode(), db.ForeignKey('grano_relation.id'), index=True, nullable=True) name = db.Column(db.Unicode(), index=True) value_string = db.Column(db.Unicode()) value_integer = db.Column(db.Integer()) value_float = db.Column(db.Float()) value_datetime = db.Column(db.DateTime()) value_datetime_precision = db.Column(db.Enum(*DATETIME_PRECISION, native_enum=False)) value_boolean = db.Column(db.Boolean()) value_file_id = db.Column(db.Integer(), db.ForeignKey('grano_file.id')) source_url = db.Column(db.Unicode()) active = db.Column(db.Boolean()) @property def value(self): # check file column first since file uses both # value_string and value_file_id if self.value_file_id is not None: return self.value_file_id for column in Attribute.DATATYPES.values(): value = getattr(self, column) if value is not None: return value @classmethod def type_column(self, value): for name, typ in VALUE_COLUMNS.items(): if isinstance(value, typ): return name return 'value_string' def to_dict_index(self): data = { 'value': self.value, 'source_url': self.source_url } if self.value_file_id is not None: data['file_url'] = self.value_string elif self.value_datetime is not None: data['value_precision'] = self.value_datetime_precision return data def to_dict_kv(self): return self.name, 
self.to_dict_index() def to_dict(self): name, data = self.to_dict_index() data['id'] = self.id data['name'] = name data['created_at'] = self.created_at data['updated_at'] = self.updated_at data['active'] = self.active return data class PropertyBase(object): @property def active_properties(self): q = [p for p in self.properties if p.active] return q def __getitem__(self, name): for prop in self.active_properties: if prop.name == name: return prop def has_property(self, name): return self[name] is not None @classmethod def _filter_property(cls, q, name, value, only_active=True, alias=None): if alias is None: alias = cls Prop = aliased(Property) q = q.join(Prop, alias.properties) q = q.filter(Prop.name == name) column = getattr(Prop, Property.type_column(value)) q = q.filter(column == value) if only_active: q = q.filter(Prop.active == True) # noqa return q
from datetime import datetime

from sqlalchemy.orm import aliased

from grano.core import db
from grano.model.common import IntBase
from grano.model.attribute import Attribute

# Maps each typed value_* column to the Python type it stores; used by
# Property.type_column to pick the storage column for a given value.
VALUE_COLUMNS = {
    'value_string': basestring,
    'value_datetime': datetime,
    'value_integer': int,
    'value_float': float,
    'value_boolean': bool
}

# Allowed granularities for value_datetime_precision.
DATETIME_PRECISION = [
    'year',
    'month',
    'day',
    'time',
]


class Property(db.Model, IntBase):
    """A single (possibly inactive/historical) value of an attribute.

    A property belongs to either an entity or a relation, and stores its
    value in exactly one of the typed ``value_*`` columns.
    """

    __tablename__ = 'grano_property'

    attribute_id = db.Column(db.Integer, db.ForeignKey('grano_attribute.id'))
    author_id = db.Column(db.Integer, db.ForeignKey('grano_account.id'))
    entity_id = db.Column(db.Unicode(), db.ForeignKey('grano_entity.id'),
                          index=True, nullable=True)
    relation_id = db.Column(db.Unicode(), db.ForeignKey('grano_relation.id'),
                            index=True, nullable=True)

    name = db.Column(db.Unicode(), index=True)
    value_string = db.Column(db.Unicode())
    value_integer = db.Column(db.Integer())
    value_float = db.Column(db.Float())
    value_datetime = db.Column(db.DateTime())
    value_datetime_precision = db.Column(db.Enum(*DATETIME_PRECISION,
                                                 native_enum=False))
    value_boolean = db.Column(db.Boolean())
    value_file_id = db.Column(db.Integer(), db.ForeignKey('grano_file.id'))

    source_url = db.Column(db.Unicode())
    active = db.Column(db.Boolean())

    @property
    def value(self):
        """Return the value stored in whichever typed column is set."""
        # check file column first since file uses both
        # value_string and value_file_id
        if self.value_file_id is not None:
            return self.value_file_id
        for column in Attribute.DATATYPES.values():
            value = getattr(self, column)
            if value is not None:
                return value

    @classmethod
    def type_column(cls, value):
        """Return the name of the value_* column used to store ``value``.

        Falls back to 'value_string' for unknown types.
        """
        for name, typ in VALUE_COLUMNS.items():
            if isinstance(value, typ):
                return name
        return 'value_string'

    def to_dict_index(self):
        """Return the indexable dict form: value, source and metadata."""
        data = {
            'value': self.value,
            'source_url': self.source_url
        }
        if self.value_file_id is not None:
            data['file_url'] = self.value_string
        elif self.value_datetime is not None:
            data['value_precision'] = self.value_datetime_precision
        return data

    def to_dict_kv(self):
        """Return a (name, index-dict) pair for this property."""
        return self.name, self.to_dict_index()

    def to_dict(self):
        """Return the full API dict representation of this property."""
        # BUG FIX: this previously unpacked to_dict_index() — a plain
        # dict — into (name, data); to_dict_kv() is the (name, dict)
        # pair that the unpacking expects.
        name, data = self.to_dict_kv()
        data['id'] = self.id
        data['name'] = name
        data['created_at'] = self.created_at
        data['updated_at'] = self.updated_at
        data['active'] = self.active
        return data


class PropertyBase(object):
    """Mixin for models that own a ``properties`` collection."""

    @property
    def active_properties(self):
        """Return only the currently-active properties."""
        q = [p for p in self.properties if p.active]
        return q

    def __getitem__(self, name):
        """Return the first active property called ``name``, or None."""
        for prop in self.active_properties:
            if prop.name == name:
                return prop

    def has_property(self, name):
        """True if an active property called ``name`` exists."""
        return self[name] is not None

    @classmethod
    def _filter_property(cls, q, name, value, only_active=True, alias=None):
        """Filter query ``q`` to rows having property ``name`` == ``value``.

        The comparison targets the value_* column matching ``value``'s
        Python type; pass ``only_active=False`` to include historical
        property values.
        """
        if alias is None:
            alias = cls
        Prop = aliased(Property)
        q = q.join(Prop, alias.properties)
        q = q.filter(Prop.name == name)
        column = getattr(Prop, Property.type_column(value))
        q = q.filter(column == value)
        if only_active:
            q = q.filter(Prop.active == True)  # noqa
        return q
en
0.759136
# check file column first since file uses both # value_string and value_file_id # noqa
2.136096
2
web/addons/board/controllers.py
diogocs1/comps
1
6625285
# -*- coding: utf-8 -*-
from xml.etree import ElementTree

from openerp.addons.web.controllers.main import load_actions_from_ir_values
from openerp.http import Controller, route, request


class Board(Controller):
    """HTTP controller for dashboard manipulation."""

    @route('/board/add_to_dashboard', type='json', auth='user')
    def add_to_dashboard(self, menu_id, action_id, context_to_save, domain,
                         view_mode, name=''):
        """Prepend an <action> node to the user's dashboard view.

        Returns the id of the created ir.ui.view.custom record, or
        False when the target menu does not resolve to a board form.
        """
        # FIXME move this method to board.board model
        actions = load_actions_from_ir_values('action', 'tree_but_open',
                                              [('ir.ui.menu', menu_id)],
                                              False)
        if not actions:
            return False

        action = actions[0][2]
        view_id, view_type = action['views'][0]
        # Maybe should check the content instead of model board.board ?
        if action['res_model'] != 'board.board' or view_type != 'form':
            return False

        board = request.session.model(
            action['res_model']).fields_view_get(view_id, 'form')
        if not board or 'arch' not in board:
            return False

        root = ElementTree.fromstring(board['arch'])
        column = root.find('./board/column')
        if column is None:
            return False

        # Insert the new dashboard entry as the first action of the
        # first column, then persist the whole arch as a user override.
        entry = ElementTree.Element('action', {
            'name': str(action_id),
            'string': name,
            'view_mode': view_mode,
            'context': str(context_to_save),
            'domain': str(domain)
        })
        column.insert(0, entry)
        new_arch = ElementTree.tostring(root, 'utf-8')
        return request.session.model('ir.ui.view.custom').create({
            'user_id': request.session.uid,
            'ref_id': view_id,
            'arch': new_arch
        }, request.context)
# -*- coding: utf-8 -*- from xml.etree import ElementTree from openerp.addons.web.controllers.main import load_actions_from_ir_values from openerp.http import Controller, route, request class Board(Controller): @route('/board/add_to_dashboard', type='json', auth='user') def add_to_dashboard(self, menu_id, action_id, context_to_save, domain, view_mode, name=''): # FIXME move this method to board.board model dashboard_action = load_actions_from_ir_values('action', 'tree_but_open', [('ir.ui.menu', menu_id)], False) if dashboard_action: action = dashboard_action[0][2] if action['res_model'] == 'board.board' and action['views'][0][1] == 'form': # Maybe should check the content instead of model board.board ? view_id = action['views'][0][0] board = request.session.model(action['res_model']).fields_view_get(view_id, 'form') if board and 'arch' in board: xml = ElementTree.fromstring(board['arch']) column = xml.find('./board/column') if column is not None: new_action = ElementTree.Element('action', { 'name': str(action_id), 'string': name, 'view_mode': view_mode, 'context': str(context_to_save), 'domain': str(domain) }) column.insert(0, new_action) arch = ElementTree.tostring(xml, 'utf-8') return request.session.model('ir.ui.view.custom').create({ 'user_id': request.session.uid, 'ref_id': view_id, 'arch': arch }, request.context) return False
en
0.778578
# -*- coding: utf-8 -*- # FIXME move this method to board.board model # Maybe should check the content instead of model board.board ?
1.993318
2
lessinline/services/admin.py
skpatro23/lessinline
0
6625286
<reponame>skpatro23/lessinline<filename>lessinline/services/admin.py from django.contrib import admin from lessinline.services.models import Service, Slot class SlotAdmin(admin.TabularInline): model = Slot extra = 1 @admin.register(Service) class ServiceAdmin(admin.ModelAdmin): list_display = ['name', 'business', 'price', 'is_open'] inlines = [SlotAdmin]
from django.contrib import admin

from lessinline.services.models import Service, Slot


class SlotAdmin(admin.TabularInline):
    """Inline editor so a service's slots can be managed on its page."""

    model = Slot
    extra = 1


class ServiceAdmin(admin.ModelAdmin):
    """Admin configuration for Service records."""

    list_display = ['name', 'business', 'price', 'is_open']
    inlines = [SlotAdmin]


# Equivalent to decorating ServiceAdmin with @admin.register(Service).
admin.site.register(Service, ServiceAdmin)
none
1
1.984408
2
tests/_utils.py
nikitanovosibirsk/vedro-allure-reporter
1
6625287
from argparse import Namespace
from contextlib import contextmanager
from pathlib import Path
from typing import Any, Dict, List, Optional
from unittest.mock import Mock, patch
from uuid import uuid4

import pytest
from allure_commons.logger import AllureMemoryLogger
from vedro.core import Dispatcher, ScenarioResult, StepResult
from vedro.plugins.director.rich.test_utils import (make_path,
                                                    make_random_name,
                                                    make_vscenario)

__all__ = ("plugin_manager_", "logger_", "logger_factory_", "dispatcher",
           "make_parsed_args", "logger", "patch_uuid", "make_test_case",
           "make_scenario_result",)


@pytest.fixture()
def plugin_manager_() -> Mock:
    """Stand-in plugin manager."""
    return Mock()


@pytest.fixture()
def logger_() -> Mock:
    """Stand-in allure logger instance."""
    return Mock()


@pytest.fixture()
def logger_factory_(logger_) -> Mock:
    """Factory that always produces the `logger_` mock."""
    return Mock(side_effect=Mock(return_value=logger_))


@pytest.fixture()
def dispatcher() -> Dispatcher:
    """Fresh vedro event dispatcher."""
    return Dispatcher()


@pytest.fixture()
def logger() -> AllureMemoryLogger:
    """In-memory allure logger for asserting reporter output."""
    return AllureMemoryLogger()


def make_parsed_args(*, allure_report_dir: str,
                     allure_attach_scope: bool = False) -> Namespace:
    """Build the argparse namespace the plugin reads its options from."""
    return Namespace(allure_report_dir=allure_report_dir,
                     allure_attach_scope=allure_attach_scope)


@contextmanager
def patch_uuid(uuid: Optional[str] = None):
    """Patch allure's uuid4 so generated test-case ids are predictable."""
    fixed = str(uuid4()) if uuid is None else uuid
    with patch("allure_commons.utils.uuid4", Mock(return_value=fixed)):
        yield fixed


def make_scenario_result(path: Optional[Path] = None,
                         subject: Optional[str] = None) -> ScenarioResult:
    """Create a ScenarioResult for a synthetic scenario."""
    scenario_path = make_path("namespace") if path is None else path
    scenario_subject = make_random_name() if subject is None else subject
    vscenario = make_vscenario(path=scenario_path, subject=scenario_subject)
    return ScenarioResult(vscenario)


def make_test_case(uuid: str, scenario_result: ScenarioResult,
                   steps: Optional[List[StepResult]] = None) -> Dict[str, Any]:
    """Build the allure test-case dict the reporter is expected to emit."""
    scenario = scenario_result.scenario
    test_case: Dict[str, Any] = {
        "uuid": uuid,
        "name": scenario.subject,
        "status": scenario_result.status.value.lower(),
        "start": int(scenario_result.started_at * 1000),
        "stop": int(scenario_result.ended_at * 1000),
        "historyId": scenario.unique_id,
        "testCaseId": scenario.unique_id,
        "labels": [
            {
                "name": "package",
                "value": "scenarios.namespace"
            },
            {
                "name": "suite",
                "value": "scenarios",
            }
        ]
    }
    if steps:
        test_case["steps"] = [
            {
                "name": step.step_name,
                "status": step.status.value.lower(),
                "start": int(step.started_at * 1000),
                "stop": int(step.ended_at * 1000),
            }
            for step in steps
        ]
    return test_case
<filename>tests/_utils.py from argparse import Namespace from contextlib import contextmanager from pathlib import Path from typing import Any, Dict, List, Optional from unittest.mock import Mock, patch from uuid import uuid4 import pytest from allure_commons.logger import AllureMemoryLogger from vedro.core import Dispatcher, ScenarioResult, StepResult from vedro.plugins.director.rich.test_utils import make_path, make_random_name, make_vscenario __all__ = ("plugin_manager_", "logger_", "logger_factory_", "dispatcher", "make_parsed_args", "logger", "patch_uuid", "make_test_case", "make_scenario_result",) @pytest.fixture() def plugin_manager_() -> Mock: return Mock() @pytest.fixture() def logger_() -> Mock: return Mock() @pytest.fixture() def logger_factory_(logger_) -> Mock: return Mock(side_effect=Mock(return_value=logger_)) @pytest.fixture() def dispatcher() -> Dispatcher: return Dispatcher() @pytest.fixture() def logger() -> AllureMemoryLogger: return AllureMemoryLogger() def make_parsed_args(*, allure_report_dir: str, allure_attach_scope: bool = False) -> Namespace: return Namespace(allure_report_dir=allure_report_dir, allure_attach_scope=allure_attach_scope) @contextmanager def patch_uuid(uuid: Optional[str] = None): if uuid is None: uuid = str(uuid4()) with patch("allure_commons.utils.uuid4", Mock(return_value=uuid)): yield uuid def make_scenario_result(path: Optional[Path] = None, subject: Optional[str] = None) -> ScenarioResult: if path is None: path = make_path("namespace") if subject is None: subject = make_random_name() vscenario = make_vscenario(path=path, subject=subject) return ScenarioResult(vscenario) def make_test_case(uuid: str, scenario_result: ScenarioResult, steps: Optional[List[StepResult]] = None) -> Dict[str, Any]: test_case = { "uuid": uuid, "name": scenario_result.scenario.subject, "status": scenario_result.status.value.lower(), "start": int(scenario_result.started_at * 1000), "stop": int(scenario_result.ended_at * 1000), "historyId": 
scenario_result.scenario.unique_id, "testCaseId": scenario_result.scenario.unique_id, "labels": [ { "name": "package", "value": "scenarios.namespace" }, { "name": "suite", "value": "scenarios", } ] } if steps: test_case["steps"] = [] for step_result in steps: test_case["steps"].append({ "name": step_result.step_name, "status": step_result.status.value.lower(), "start": int(step_result.started_at * 1000), "stop": int(step_result.ended_at * 1000), }) return test_case
none
1
2.185705
2
ARM_STM32/Serial-CAN-UI/UI.py
fuszenecker/ARM
1
6625288
<filename>ARM_STM32/Serial-CAN-UI/UI.py #!/usr/bin/python # ----------------------------------------------------------------------------- # This program is the user interface of the Serial-CAN converter. # Python, PyTk and PySerial must be installed on the host computer. # ----------------------------------------------------------------------------- import Tkinter from Tkinter import * import serial from serial import * import threading from threading import * import time from time import * # ----------------------------------------------------------------------------- # The USART baudrate used during the communication. # ----------------------------------------------------------------------------- usart_baudrate = 1200 ttydev = "/dev/ttyUSB2"; # ----------------------------------------------------------------------------- # Insert a new line into the LOG window. The inserted line will be the last # one in the window. # ----------------------------------------------------------------------------- def log(line): global text text.insert(END, line) text.see(END) # ----------------------------------------------------------------------------- # This routine will be called periodically by the TIMER module. # The function checks is there are characters waiting in the USART RX FIFO. # If there are, the characters will be inserted into the LOG window. # ----------------------------------------------------------------------------- def print_tty(): global tty if tty.inWaiting() > 1: line = tty.readline() log(line) timer = Timer(0.1, print_tty) timer.start() # ----------------------------------------------------------------------------- # This function is the callback function of the SET FILTER button. # It converts the ID value to integer, and sends the appropriate string to # the serial-CAN converter. 
# ----------------------------------------------------------------------------- def set_filter_func(): global tty global set_filter_number, set_filter_mask, set_filter_id filter = int(set_filter_number.get()) log("f%0X%s\n" % (filter, set_filter_mask.get())) tty.write("f%0X%s\n" % (filter, set_filter_mask.get())) sleep(1) log("i%0X%s\n" % (filter, set_filter_id.get())) tty.write("i%0X%s\n" % (filter, set_filter_id.get())) # ----------------------------------------------------------------------------- # This function is the callback function of the SET BAUDRATE button. # It converts the baudrate value to integer, and sends the appropriate string # to the serial-CAN converter. # ----------------------------------------------------------------------------- def set_baudrate_func(): global tty global set_baudrate_number baudrate = int(set_baudrate_number.get()) log("b %08X\n" % (baudrate)) tty.write("b %08X\n" % (baudrate)) # ----------------------------------------------------------------------------- # This function is the callback function of the SEND MESSAGE button. # It converts the length value to integer, and sends the appropriate string # to the serial-CAN converter. # ----------------------------------------------------------------------------- def send_func(): global tty global send_length, send_id, send_data len = int(send_length.get()) log("s%01X%s%s\n" % (len, send_id.get(), send_data.get())) tty.write("s%01X%s%s\n" % (len, send_id.get(), send_data.get())) # ----------------------------------------------------------------------------- # This function is to "initialize" the converter. Really, it asks the # version string. 
# ----------------------------------------------------------------------------- def init_func(): global tty log("v\n") tty.write("v\n") # ----------------------------------------------------------------------------- # Main function of the program # ----------------------------------------------------------------------------- def main(): global root, text, timer global set_baudrate_number global set_filter_number, set_filter_mask, set_filter_id global send_length, send_id, send_data root = Tkinter.Tk() root.configure({"width": 500, "height": 400}) root.title("Serial-CAN converter's user interface") text = Text() text.grid(row = 0, column = 0, columnspan = 7) log("*** INITIALIZING SERIAL-CAN CONVERTER ***\n") # Set baud rate set_baudrate_button = Button(root, text="Set baud rate", width = 12, command=set_baudrate_func) set_baudrate_button.grid(row = 1, column = 0, columnspan = 1, sticky = W) Label(root, text="Baud rate:").grid(row = 1, column = 1, sticky = E) set_baudrate_number = Entry(root, width = 6) set_baudrate_number.grid(row = 1, column = 2, columnspan = 1, sticky = W) Label(root, text="bit/s").grid(row = 1, column = 3, sticky = W) # Set filter and CAN ID set_filter_button = Button(root, text="Set filter", width = 12, command=set_filter_func) set_filter_button.grid(row = 2, column = 0, columnspan = 1, sticky = W) Label(root, text="Filter ID (dec):").grid(row = 2, column = 1, sticky = E) set_filter_number = Entry(root, width = 6) set_filter_number.grid(row = 2, column = 2, columnspan = 1, sticky = W) Label(root, text="Mask: 0x").grid(row = 2, column = 3, sticky = E) set_filter_mask = Entry(root, width=10) set_filter_mask.grid(row = 2, column = 4, columnspan = 1, sticky = W) Label(root, text="ID: 0x").grid(row = 2, column = 5, sticky = E) set_filter_id = Entry(root, width=10) set_filter_id.grid(row = 2, column = 6, columnspan = 1, sticky = W) # Send message send_button = Button(root, text="Send message", width = 12, command=send_func) send_button.grid(row = 3, 
column = 0, columnspan = 1, sticky = W) Label(root, text="Length (dec):").grid(row = 3, column = 1, sticky = E) send_length = Entry(root, width = 6) send_length.grid(row = 3, column = 2, columnspan = 1, sticky = W) Label(root, text="ID: 0x").grid(row = 3, column = 3, sticky = E) send_id = Entry(root, width=10) send_id.grid(row = 3, column = 4, columnspan = 1, sticky = W) Label(root, text="Data: 0x").grid(row = 3, column = 5, sticky = E) send_data = Entry(root, width=17) send_data.grid(row = 3, column = 6, columnspan = 1, sticky = W) timer = Timer(0.1, print_tty) timer.start() # Initializing device init_func() # Main loop root.mainloop() # ----------------------------------------------------------------------------- # Open serial device and set initial parameters: # 115200 bit/sec, 8 bit length, no parity check, 1 stop bit. # Then sets the USART baud rate that will be used during the communication. # ----------------------------------------------------------------------------- tty = serial.Serial( port = ttydev, # port = 0, parity = serial.PARITY_NONE, bytesize = serial.EIGHTBITS, stopbits = serial.STOPBITS_ONE, timeout = 1, xonxoff = 0, rtscts = 0, baudrate = 1200 ) tty.write("u %08X\n" % (usart_baudrate)) tty.close() tty = serial.Serial( port = ttydev, # port = 0, parity = serial.PARITY_NONE, bytesize = serial.EIGHTBITS, stopbits = serial.STOPBITS_ONE, timeout = 1, xonxoff = 0, rtscts = 0, baudrate = usart_baudrate ) # ----------------------------------------------------------------------------- # Start "main()" function... # ----------------------------------------------------------------------------- main() # ---- END OF PYTHON CODE -----------------------------------------------------
<filename>ARM_STM32/Serial-CAN-UI/UI.py #!/usr/bin/python # ----------------------------------------------------------------------------- # This program is the user interface of the Serial-CAN converter. # Python, PyTk and PySerial must be installed on the host computer. # ----------------------------------------------------------------------------- import Tkinter from Tkinter import * import serial from serial import * import threading from threading import * import time from time import * # ----------------------------------------------------------------------------- # The USART baudrate used during the communication. # ----------------------------------------------------------------------------- usart_baudrate = 1200 ttydev = "/dev/ttyUSB2"; # ----------------------------------------------------------------------------- # Insert a new line into the LOG window. The inserted line will be the last # one in the window. # ----------------------------------------------------------------------------- def log(line): global text text.insert(END, line) text.see(END) # ----------------------------------------------------------------------------- # This routine will be called periodically by the TIMER module. # The function checks is there are characters waiting in the USART RX FIFO. # If there are, the characters will be inserted into the LOG window. # ----------------------------------------------------------------------------- def print_tty(): global tty if tty.inWaiting() > 1: line = tty.readline() log(line) timer = Timer(0.1, print_tty) timer.start() # ----------------------------------------------------------------------------- # This function is the callback function of the SET FILTER button. # It converts the ID value to integer, and sends the appropriate string to # the serial-CAN converter. 
# ----------------------------------------------------------------------------- def set_filter_func(): global tty global set_filter_number, set_filter_mask, set_filter_id filter = int(set_filter_number.get()) log("f%0X%s\n" % (filter, set_filter_mask.get())) tty.write("f%0X%s\n" % (filter, set_filter_mask.get())) sleep(1) log("i%0X%s\n" % (filter, set_filter_id.get())) tty.write("i%0X%s\n" % (filter, set_filter_id.get())) # ----------------------------------------------------------------------------- # This function is the callback function of the SET BAUDRATE button. # It converts the baudrate value to integer, and sends the appropriate string # to the serial-CAN converter. # ----------------------------------------------------------------------------- def set_baudrate_func(): global tty global set_baudrate_number baudrate = int(set_baudrate_number.get()) log("b %08X\n" % (baudrate)) tty.write("b %08X\n" % (baudrate)) # ----------------------------------------------------------------------------- # This function is the callback function of the SEND MESSAGE button. # It converts the length value to integer, and sends the appropriate string # to the serial-CAN converter. # ----------------------------------------------------------------------------- def send_func(): global tty global send_length, send_id, send_data len = int(send_length.get()) log("s%01X%s%s\n" % (len, send_id.get(), send_data.get())) tty.write("s%01X%s%s\n" % (len, send_id.get(), send_data.get())) # ----------------------------------------------------------------------------- # This function is to "initialize" the converter. Really, it asks the # version string. 
# ----------------------------------------------------------------------------- def init_func(): global tty log("v\n") tty.write("v\n") # ----------------------------------------------------------------------------- # Main function of the program # ----------------------------------------------------------------------------- def main(): global root, text, timer global set_baudrate_number global set_filter_number, set_filter_mask, set_filter_id global send_length, send_id, send_data root = Tkinter.Tk() root.configure({"width": 500, "height": 400}) root.title("Serial-CAN converter's user interface") text = Text() text.grid(row = 0, column = 0, columnspan = 7) log("*** INITIALIZING SERIAL-CAN CONVERTER ***\n") # Set baud rate set_baudrate_button = Button(root, text="Set baud rate", width = 12, command=set_baudrate_func) set_baudrate_button.grid(row = 1, column = 0, columnspan = 1, sticky = W) Label(root, text="Baud rate:").grid(row = 1, column = 1, sticky = E) set_baudrate_number = Entry(root, width = 6) set_baudrate_number.grid(row = 1, column = 2, columnspan = 1, sticky = W) Label(root, text="bit/s").grid(row = 1, column = 3, sticky = W) # Set filter and CAN ID set_filter_button = Button(root, text="Set filter", width = 12, command=set_filter_func) set_filter_button.grid(row = 2, column = 0, columnspan = 1, sticky = W) Label(root, text="Filter ID (dec):").grid(row = 2, column = 1, sticky = E) set_filter_number = Entry(root, width = 6) set_filter_number.grid(row = 2, column = 2, columnspan = 1, sticky = W) Label(root, text="Mask: 0x").grid(row = 2, column = 3, sticky = E) set_filter_mask = Entry(root, width=10) set_filter_mask.grid(row = 2, column = 4, columnspan = 1, sticky = W) Label(root, text="ID: 0x").grid(row = 2, column = 5, sticky = E) set_filter_id = Entry(root, width=10) set_filter_id.grid(row = 2, column = 6, columnspan = 1, sticky = W) # Send message send_button = Button(root, text="Send message", width = 12, command=send_func) send_button.grid(row = 3, 
column = 0, columnspan = 1, sticky = W) Label(root, text="Length (dec):").grid(row = 3, column = 1, sticky = E) send_length = Entry(root, width = 6) send_length.grid(row = 3, column = 2, columnspan = 1, sticky = W) Label(root, text="ID: 0x").grid(row = 3, column = 3, sticky = E) send_id = Entry(root, width=10) send_id.grid(row = 3, column = 4, columnspan = 1, sticky = W) Label(root, text="Data: 0x").grid(row = 3, column = 5, sticky = E) send_data = Entry(root, width=17) send_data.grid(row = 3, column = 6, columnspan = 1, sticky = W) timer = Timer(0.1, print_tty) timer.start() # Initializing device init_func() # Main loop root.mainloop() # ----------------------------------------------------------------------------- # Open serial device and set initial parameters: # 115200 bit/sec, 8 bit length, no parity check, 1 stop bit. # Then sets the USART baud rate that will be used during the communication. # ----------------------------------------------------------------------------- tty = serial.Serial( port = ttydev, # port = 0, parity = serial.PARITY_NONE, bytesize = serial.EIGHTBITS, stopbits = serial.STOPBITS_ONE, timeout = 1, xonxoff = 0, rtscts = 0, baudrate = 1200 ) tty.write("u %08X\n" % (usart_baudrate)) tty.close() tty = serial.Serial( port = ttydev, # port = 0, parity = serial.PARITY_NONE, bytesize = serial.EIGHTBITS, stopbits = serial.STOPBITS_ONE, timeout = 1, xonxoff = 0, rtscts = 0, baudrate = usart_baudrate ) # ----------------------------------------------------------------------------- # Start "main()" function... # ----------------------------------------------------------------------------- main() # ---- END OF PYTHON CODE -----------------------------------------------------
en
0.391245
#!/usr/bin/python # ----------------------------------------------------------------------------- # This program is the user interface of the Serial-CAN converter. # Python, PyTk and PySerial must be installed on the host computer. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # The USART baudrate used during the communication. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Insert a new line into the LOG window. The inserted line will be the last # one in the window. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # This routine will be called periodically by the TIMER module. # The function checks is there are characters waiting in the USART RX FIFO. # If there are, the characters will be inserted into the LOG window. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # This function is the callback function of the SET FILTER button. # It converts the ID value to integer, and sends the appropriate string to # the serial-CAN converter. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # This function is the callback function of the SET BAUDRATE button. # It converts the baudrate value to integer, and sends the appropriate string # to the serial-CAN converter. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # This function is the callback function of the SEND MESSAGE button. 
# It converts the length value to integer, and sends the appropriate string # to the serial-CAN converter. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # This function is to "initialize" the converter. Really, it asks the # version string. # ----------------------------------------------------------------------------- # ----------------------------------------------------------------------------- # Main function of the program # ----------------------------------------------------------------------------- # Set baud rate # Set filter and CAN ID # Send message # Initializing device # Main loop # ----------------------------------------------------------------------------- # Open serial device and set initial parameters: # 115200 bit/sec, 8 bit length, no parity check, 1 stop bit. # Then sets the USART baud rate that will be used during the communication. # ----------------------------------------------------------------------------- # port = 0, # port = 0, # ----------------------------------------------------------------------------- # Start "main()" function... # ----------------------------------------------------------------------------- # ---- END OF PYTHON CODE -----------------------------------------------------
2.784543
3
Models/support_vector_regression.py
ayorkshireworrall/Regression-Models-Evaluator
0
6625289
# Support Vector Regression (SVR) from .regression import Regression class SupportVectorRegression(Regression): def __init__(self, dataset): super().__init__(dataset) from sklearn.preprocessing import StandardScaler self.sc_X = StandardScaler() self.sc_y = StandardScaler() self.X_train = self.sc_X.fit_transform(self.X_train) self.y_train = self.sc_y.fit_transform(self.y_train) from sklearn.svm import SVR self.regressor = SVR(kernel = 'rbf') def score(self): y_pred = self.sc_y.inverse_transform(self.regressor.predict(self.sc_X.transform(self.X_test))) from sklearn.metrics import r2_score return r2_score(self.y_test, y_pred) def predict(self, X): return self.sc_y.inverse_transform(self.regressor.predict(self.sc_X.transform(X))) def extract_variables(self, dataset): X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values y = y.reshape(len(y),1) return X, y
# Support Vector Regression (SVR) from .regression import Regression class SupportVectorRegression(Regression): def __init__(self, dataset): super().__init__(dataset) from sklearn.preprocessing import StandardScaler self.sc_X = StandardScaler() self.sc_y = StandardScaler() self.X_train = self.sc_X.fit_transform(self.X_train) self.y_train = self.sc_y.fit_transform(self.y_train) from sklearn.svm import SVR self.regressor = SVR(kernel = 'rbf') def score(self): y_pred = self.sc_y.inverse_transform(self.regressor.predict(self.sc_X.transform(self.X_test))) from sklearn.metrics import r2_score return r2_score(self.y_test, y_pred) def predict(self, X): return self.sc_y.inverse_transform(self.regressor.predict(self.sc_X.transform(X))) def extract_variables(self, dataset): X = dataset.iloc[:, :-1].values y = dataset.iloc[:, -1].values y = y.reshape(len(y),1) return X, y
en
0.509768
# Support Vector Regression (SVR)
2.949407
3
scripts/automation/regression/stateless_tests/stl_examples_test.py
alialnu/trex-core
0
6625290
#!/router/bin/python from .stl_general_test import CStlGeneral_Test, CTRexScenario import os, sys from misc_methods import run_command class STLExamples_Test(CStlGeneral_Test): """This class defines the IMIX testcase of the TRex traffic generator""" def explicitSetUp(self): # examples connect by their own if self.is_connected(): CTRexScenario.stl_trex.disconnect() def explicitTearDown(self): # connect back at end of tests if not self.is_connected(): self.stl_trex.connect() def test_stl_examples(self): #Work around for trex-405. Remove when it is resolved rx_port = CTRexScenario.stl_ports_map['bi'][0] port_info = CTRexScenario.stl_trex.get_port_info(ports = rx_port)[0] drv_name = port_info['driver'] if drv_name == 'net_mlx5' and 'VM' in self.modes: self.skip('Can not run on mlx VM currently - see trex-405 for details') examples_dir = '../trex_control_plane/stl/examples' examples_to_test = [ 'stl_imix.py', ] for example in examples_to_test: self.explicitSetUp() return_code, stdout, stderr = run_command("sh -c 'cd %s; %s %s -s %s'" % (examples_dir, sys.executable, example, CTRexScenario.configuration.trex['trex_name'])) self.explicitTearDown() assert return_code == 0, 'example %s failed.\nstdout: %s\nstderr: %s' % (return_code, stdout, stderr)
#!/router/bin/python from .stl_general_test import CStlGeneral_Test, CTRexScenario import os, sys from misc_methods import run_command class STLExamples_Test(CStlGeneral_Test): """This class defines the IMIX testcase of the TRex traffic generator""" def explicitSetUp(self): # examples connect by their own if self.is_connected(): CTRexScenario.stl_trex.disconnect() def explicitTearDown(self): # connect back at end of tests if not self.is_connected(): self.stl_trex.connect() def test_stl_examples(self): #Work around for trex-405. Remove when it is resolved rx_port = CTRexScenario.stl_ports_map['bi'][0] port_info = CTRexScenario.stl_trex.get_port_info(ports = rx_port)[0] drv_name = port_info['driver'] if drv_name == 'net_mlx5' and 'VM' in self.modes: self.skip('Can not run on mlx VM currently - see trex-405 for details') examples_dir = '../trex_control_plane/stl/examples' examples_to_test = [ 'stl_imix.py', ] for example in examples_to_test: self.explicitSetUp() return_code, stdout, stderr = run_command("sh -c 'cd %s; %s %s -s %s'" % (examples_dir, sys.executable, example, CTRexScenario.configuration.trex['trex_name'])) self.explicitTearDown() assert return_code == 0, 'example %s failed.\nstdout: %s\nstderr: %s' % (return_code, stdout, stderr)
en
0.877913
#!/router/bin/python This class defines the IMIX testcase of the TRex traffic generator # examples connect by their own # connect back at end of tests #Work around for trex-405. Remove when it is resolved
2.051669
2
robust_optim/train.py
shaun95/google-research
1
6625291
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Train and test a robust model with the implicit bias of an optimizer.""" import copy from absl import app from absl import flags from absl import logging import cvxpy as cp import jax from jax import numpy as jnp from jax.flatten_util import ravel_pytree from ml_collections.config_flags import config_flags import numpy as np import scipy.linalg import robust_optim.adversarial as adversarial import robust_optim.data as data_loader import robust_optim.model as model from robust_optim.norm import norm_f from robust_optim.norm import norm_type_dual import robust_optim.optim as optim import robust_optim.summary as summary_tools FLAGS = flags.FLAGS config_flags.DEFINE_config_file('config', None, 'Config file name.') def evaluate_risks(data, predict_f, loss_f, model_param): """Returns the risk of a model for various loss functions. Args: data: An array of data samples for approximating the risk. predict_f: Function that predicts labels given input. loss_f: Function that outputs model's specific loss function. model_param: Model parameters. Returns: Dictionary of risks for following loss functions: (model's loss, 0/1, adversarial risk wrt a single norm-ball). 
""" inputs, labels = data pred = predict_f(model_param, inputs) loss = loss_f(model_param, inputs, labels) zero_one_risk = (1 - (pred == labels)).mean() return { 'loss': loss, 'zero_one': zero_one_risk, } def evaluate_adversarial_risk(data, predict_f, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config, rng_key): """Evaluating adversarial risk by looping over epsilon. Args: data: An array of data samples for approximating the risk. predict_f: Function that predicts labels given input. loss_adv_f: The loss function. This loss has to be specific to the model to tackle gradient masking. dloss_adv_dx: The gradient function of the adversarial loss w.r.t. the input. Ideally, we will have multiple loss functions even on different layers of network. This loss has to be specific to the model to tackle gradient masking. model_param: Model parameters. normalize_f: A function to normalize the weights of the model. config: Dictionary of hyperparameters. rng_key: JAX random number generator key. Returns: Dictionary adversarial risk wrt a range of norm-balls. 
""" _, labels = data # If config.adv.eps_from_cvxpy, eps is reset after min-norm solution is found eps_iter, eps_tot = config.adv.eps_iter, config.adv.eps_tot config_new = copy.deepcopy(config.adv) adv_risk = [] adv_eps = [] for i in jnp.arange(0, 1.05, 0.05): config_new.eps_iter = float(eps_iter * i) config_new.eps_tot = float(eps_tot * i) x_adv_multi = adversarial.find_adversarial_samples_multi_attack( data, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config_new, rng_key) correct_label = jnp.zeros(1) for x_adv in x_adv_multi: pred_adv = predict_f(model_param, x_adv) correct_label += (pred_adv == labels) / len(x_adv_multi) adv_risk += [float((1 - correct_label).mean())] adv_eps += [config_new.eps_tot] return {'adv/%s' % config.adv.norm_type: (adv_eps, adv_risk)} def train(model_param, train_test_data, predict_f, loss_f, loss_adv_f, linearize_f, normalize_f, loss_and_prox_op, summary, config, rng_key): """Train a model and log risks.""" dloss_dw = jax.grad(loss_f, argnums=0) dloss_adv_dx = jax.grad(loss_adv_f, argnums=1) train_data = train_test_data[0] xtrain, ytrain = train_data # Precompute min-norm solutions if config.enable_cvxpy: min_norm_w = {} for norm_type in config.available_norm_types: min_norm_w[norm_type] = compute_min_norm_solution(xtrain, ytrain, norm_type) if config.adv.eps_from_cvxpy: dual_norm = norm_type_dual(config.adv.norm_type) wcomp = min_norm_w[dual_norm] wnorm = norm_f(wcomp, dual_norm) margin = 1. 
/ wnorm config.adv.eps_tot = config.adv.eps_iter = float(2 * margin) if config['optim']['name'] == 'cvxpy': norm_type = config['optim']['norm'] cvxpy_sol = compute_min_norm_solution(xtrain, ytrain, norm_type) model_param = jnp.array(cvxpy_sol) # Train loop optim_step, optim_options = optim.get_optimizer_step(config['optim']) niters = optim_options['niters'] for step in range(1, niters): # Take one optimization step if config['optim']['name'] != 'cvxpy': if config['optim']['adv_train']['enable']: # Adversarial training rng_key, rng_subkey = jax.random.split(rng_key) x_adv = adversarial.find_adversarial_samples(train_data, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config.optim.adv_train, rng_key) train_data_new = x_adv, ytrain else: # Standard training train_data_new = train_data if config['optim']['name'] == 'fista': model_param, optim_options = optim_step(train_data_new, loss_and_prox_op, model_param, optim_options) else: model_param, optim_options = optim_step(train_data_new, loss_f, model_param, optim_options) # Log risks and other statistics if (step + 1) % config.log_interval == 0: # Evaluate risk on train/test sets for do_train in [True, False]: data = train_test_data[0] if do_train else train_test_data[1] prefix = 'risk/train' if do_train else 'risk/test' risk = evaluate_risks(data, predict_f, loss_f, model_param) for rname, rvalue in risk.items(): summary.scalar('%s/%s' % (prefix, rname), rvalue, step=step) rng_key, rng_subkey = jax.random.split(rng_key) risk = evaluate_adversarial_risk(data, predict_f, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config, rng_subkey) for rname, rvalue in risk.items(): summary.array('%s/%s' % (prefix, rname), rvalue, step=step) grad = dloss_dw(model_param, xtrain, ytrain) grad_ravel, _ = ravel_pytree(grad) model_param_ravel, _ = ravel_pytree(model_param) for norm_type in config.available_norm_types: # Log the norm of the gradient w.r.t. 
various norms if not norm_type.startswith('dft'): summary.scalar( 'grad/norm/' + norm_type, norm_f(grad_ravel, norm_type), step=step) # Log weight norm if not norm_type.startswith('dft'): wnorm = norm_f(model_param_ravel, norm_type) summary.scalar('weight/norm/' + norm_type, wnorm, step=step) # Log margin for the equivalent linearized single layer model linear_param = linearize_f(model_param) min_loss = jnp.min(ytrain * (linear_param.T @ xtrain)) wcomp = linear_param / min_loss wnorm = norm_f(wcomp, norm_type) margin = jnp.sign(min_loss) * 1 / wnorm summary.scalar('margin/' + norm_type, margin, step=step) summary.scalar('weight/linear/norm/' + norm_type, wnorm, step=step) # Cosine similarity between the current params and min-norm solution if config.enable_cvxpy: def cos_sim(a, b): return jnp.dot(a, b) / (jnp.linalg.norm(a) * jnp.linalg.norm(b)) min_norm_w_ravel, _ = ravel_pytree(min_norm_w[norm_type]) cs = cos_sim(linear_param.flatten(), min_norm_w_ravel) summary.scalar('csim_to_wmin/' + norm_type, cs, step=step) if 'step_size' in optim_options: summary.scalar('optim/step_size', optim_options['step_size'], step=step) logging.info('Epoch: [%d/%d]\t%s', step + 1, niters, summary.last_scalars_to_str(config.log_keys)) logging.flush() summary.flush() def compute_min_norm_solution(x, y, norm_type): """Compute the min-norm solution using a convex-program solver.""" w = cp.Variable((x.shape[0], 1)) if norm_type == 'linf': # compute minimal L_infinity solution constraints = [cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm_inf(w)), constraints) elif norm_type == 'l2': # compute minimal L_2 solution constraints = [cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm2(w)), constraints) elif norm_type == 'l1': # compute minimal L_1 solution constraints = [cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm1(w)), constraints) elif norm_type[0] == 'l': # compute minimal Lp solution p = float(norm_type[1:]) constraints = 
[cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.pnorm(w, p)), constraints) elif norm_type == 'dft1': w = cp.Variable((x.shape[0], 1), complex=True) # compute minimal Fourier L1 norm (||F(w)||_1) solution dft = scipy.linalg.dft(x.shape[0]) / np.sqrt(x.shape[0]) constraints = [cp.multiply(y, (cp.real(w).T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm1(dft @ w)), constraints) prob.solve(verbose=True) logging.info('Min %s-norm solution found (norm=%.4f)', norm_type, float(norm_f(w.value, norm_type))) return cp.real(w).value def main_with_config(config): logging.info(str(config.log_dir)) summary = summary_tools.SummaryWriter(config.log_dir, config.available_norm_types) logging.info(str(config)) summary.object('config', config) rng_key = jax.random.PRNGKey(config.seed) rng_subkey = jax.random.split(rng_key, 3) model_ret = model.get_model_functions(rng_subkey[0], config.dim, **config.model) (model_param, predict_f, loss_f, loss_adv_f, linearize_f, normalize_f, loss_and_prox_op) = model_ret train_test_generator = data_loader.get_train_test_generator(config.dataset) train_test_data = train_test_generator(config, rng_subkey[1]) train(model_param, train_test_data, predict_f, loss_f, loss_adv_f, linearize_f, normalize_f, loss_and_prox_op, summary, config, rng_subkey[2]) def main(_): config = FLAGS.config main_with_config(config) if __name__ == '__main__': app.run(main)
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Train and test a robust model with the implicit bias of an optimizer.""" import copy from absl import app from absl import flags from absl import logging import cvxpy as cp import jax from jax import numpy as jnp from jax.flatten_util import ravel_pytree from ml_collections.config_flags import config_flags import numpy as np import scipy.linalg import robust_optim.adversarial as adversarial import robust_optim.data as data_loader import robust_optim.model as model from robust_optim.norm import norm_f from robust_optim.norm import norm_type_dual import robust_optim.optim as optim import robust_optim.summary as summary_tools FLAGS = flags.FLAGS config_flags.DEFINE_config_file('config', None, 'Config file name.') def evaluate_risks(data, predict_f, loss_f, model_param): """Returns the risk of a model for various loss functions. Args: data: An array of data samples for approximating the risk. predict_f: Function that predicts labels given input. loss_f: Function that outputs model's specific loss function. model_param: Model parameters. Returns: Dictionary of risks for following loss functions: (model's loss, 0/1, adversarial risk wrt a single norm-ball). 
""" inputs, labels = data pred = predict_f(model_param, inputs) loss = loss_f(model_param, inputs, labels) zero_one_risk = (1 - (pred == labels)).mean() return { 'loss': loss, 'zero_one': zero_one_risk, } def evaluate_adversarial_risk(data, predict_f, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config, rng_key): """Evaluating adversarial risk by looping over epsilon. Args: data: An array of data samples for approximating the risk. predict_f: Function that predicts labels given input. loss_adv_f: The loss function. This loss has to be specific to the model to tackle gradient masking. dloss_adv_dx: The gradient function of the adversarial loss w.r.t. the input. Ideally, we will have multiple loss functions even on different layers of network. This loss has to be specific to the model to tackle gradient masking. model_param: Model parameters. normalize_f: A function to normalize the weights of the model. config: Dictionary of hyperparameters. rng_key: JAX random number generator key. Returns: Dictionary adversarial risk wrt a range of norm-balls. 
""" _, labels = data # If config.adv.eps_from_cvxpy, eps is reset after min-norm solution is found eps_iter, eps_tot = config.adv.eps_iter, config.adv.eps_tot config_new = copy.deepcopy(config.adv) adv_risk = [] adv_eps = [] for i in jnp.arange(0, 1.05, 0.05): config_new.eps_iter = float(eps_iter * i) config_new.eps_tot = float(eps_tot * i) x_adv_multi = adversarial.find_adversarial_samples_multi_attack( data, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config_new, rng_key) correct_label = jnp.zeros(1) for x_adv in x_adv_multi: pred_adv = predict_f(model_param, x_adv) correct_label += (pred_adv == labels) / len(x_adv_multi) adv_risk += [float((1 - correct_label).mean())] adv_eps += [config_new.eps_tot] return {'adv/%s' % config.adv.norm_type: (adv_eps, adv_risk)} def train(model_param, train_test_data, predict_f, loss_f, loss_adv_f, linearize_f, normalize_f, loss_and_prox_op, summary, config, rng_key): """Train a model and log risks.""" dloss_dw = jax.grad(loss_f, argnums=0) dloss_adv_dx = jax.grad(loss_adv_f, argnums=1) train_data = train_test_data[0] xtrain, ytrain = train_data # Precompute min-norm solutions if config.enable_cvxpy: min_norm_w = {} for norm_type in config.available_norm_types: min_norm_w[norm_type] = compute_min_norm_solution(xtrain, ytrain, norm_type) if config.adv.eps_from_cvxpy: dual_norm = norm_type_dual(config.adv.norm_type) wcomp = min_norm_w[dual_norm] wnorm = norm_f(wcomp, dual_norm) margin = 1. 
/ wnorm config.adv.eps_tot = config.adv.eps_iter = float(2 * margin) if config['optim']['name'] == 'cvxpy': norm_type = config['optim']['norm'] cvxpy_sol = compute_min_norm_solution(xtrain, ytrain, norm_type) model_param = jnp.array(cvxpy_sol) # Train loop optim_step, optim_options = optim.get_optimizer_step(config['optim']) niters = optim_options['niters'] for step in range(1, niters): # Take one optimization step if config['optim']['name'] != 'cvxpy': if config['optim']['adv_train']['enable']: # Adversarial training rng_key, rng_subkey = jax.random.split(rng_key) x_adv = adversarial.find_adversarial_samples(train_data, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config.optim.adv_train, rng_key) train_data_new = x_adv, ytrain else: # Standard training train_data_new = train_data if config['optim']['name'] == 'fista': model_param, optim_options = optim_step(train_data_new, loss_and_prox_op, model_param, optim_options) else: model_param, optim_options = optim_step(train_data_new, loss_f, model_param, optim_options) # Log risks and other statistics if (step + 1) % config.log_interval == 0: # Evaluate risk on train/test sets for do_train in [True, False]: data = train_test_data[0] if do_train else train_test_data[1] prefix = 'risk/train' if do_train else 'risk/test' risk = evaluate_risks(data, predict_f, loss_f, model_param) for rname, rvalue in risk.items(): summary.scalar('%s/%s' % (prefix, rname), rvalue, step=step) rng_key, rng_subkey = jax.random.split(rng_key) risk = evaluate_adversarial_risk(data, predict_f, loss_adv_f, dloss_adv_dx, model_param, normalize_f, config, rng_subkey) for rname, rvalue in risk.items(): summary.array('%s/%s' % (prefix, rname), rvalue, step=step) grad = dloss_dw(model_param, xtrain, ytrain) grad_ravel, _ = ravel_pytree(grad) model_param_ravel, _ = ravel_pytree(model_param) for norm_type in config.available_norm_types: # Log the norm of the gradient w.r.t. 
various norms if not norm_type.startswith('dft'): summary.scalar( 'grad/norm/' + norm_type, norm_f(grad_ravel, norm_type), step=step) # Log weight norm if not norm_type.startswith('dft'): wnorm = norm_f(model_param_ravel, norm_type) summary.scalar('weight/norm/' + norm_type, wnorm, step=step) # Log margin for the equivalent linearized single layer model linear_param = linearize_f(model_param) min_loss = jnp.min(ytrain * (linear_param.T @ xtrain)) wcomp = linear_param / min_loss wnorm = norm_f(wcomp, norm_type) margin = jnp.sign(min_loss) * 1 / wnorm summary.scalar('margin/' + norm_type, margin, step=step) summary.scalar('weight/linear/norm/' + norm_type, wnorm, step=step) # Cosine similarity between the current params and min-norm solution if config.enable_cvxpy: def cos_sim(a, b): return jnp.dot(a, b) / (jnp.linalg.norm(a) * jnp.linalg.norm(b)) min_norm_w_ravel, _ = ravel_pytree(min_norm_w[norm_type]) cs = cos_sim(linear_param.flatten(), min_norm_w_ravel) summary.scalar('csim_to_wmin/' + norm_type, cs, step=step) if 'step_size' in optim_options: summary.scalar('optim/step_size', optim_options['step_size'], step=step) logging.info('Epoch: [%d/%d]\t%s', step + 1, niters, summary.last_scalars_to_str(config.log_keys)) logging.flush() summary.flush() def compute_min_norm_solution(x, y, norm_type): """Compute the min-norm solution using a convex-program solver.""" w = cp.Variable((x.shape[0], 1)) if norm_type == 'linf': # compute minimal L_infinity solution constraints = [cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm_inf(w)), constraints) elif norm_type == 'l2': # compute minimal L_2 solution constraints = [cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm2(w)), constraints) elif norm_type == 'l1': # compute minimal L_1 solution constraints = [cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm1(w)), constraints) elif norm_type[0] == 'l': # compute minimal Lp solution p = float(norm_type[1:]) constraints = 
[cp.multiply(y, (w.T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.pnorm(w, p)), constraints) elif norm_type == 'dft1': w = cp.Variable((x.shape[0], 1), complex=True) # compute minimal Fourier L1 norm (||F(w)||_1) solution dft = scipy.linalg.dft(x.shape[0]) / np.sqrt(x.shape[0]) constraints = [cp.multiply(y, (cp.real(w).T @ x)) >= 1] prob = cp.Problem(cp.Minimize(cp.norm1(dft @ w)), constraints) prob.solve(verbose=True) logging.info('Min %s-norm solution found (norm=%.4f)', norm_type, float(norm_f(w.value, norm_type))) return cp.real(w).value def main_with_config(config): logging.info(str(config.log_dir)) summary = summary_tools.SummaryWriter(config.log_dir, config.available_norm_types) logging.info(str(config)) summary.object('config', config) rng_key = jax.random.PRNGKey(config.seed) rng_subkey = jax.random.split(rng_key, 3) model_ret = model.get_model_functions(rng_subkey[0], config.dim, **config.model) (model_param, predict_f, loss_f, loss_adv_f, linearize_f, normalize_f, loss_and_prox_op) = model_ret train_test_generator = data_loader.get_train_test_generator(config.dataset) train_test_data = train_test_generator(config, rng_subkey[1]) train(model_param, train_test_data, predict_f, loss_f, loss_adv_f, linearize_f, normalize_f, loss_and_prox_op, summary, config, rng_subkey[2]) def main(_): config = FLAGS.config main_with_config(config) if __name__ == '__main__': app.run(main)
en
0.806573
# coding=utf-8 # Copyright 2022 The Google Research Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. Train and test a robust model with the implicit bias of an optimizer. Returns the risk of a model for various loss functions. Args: data: An array of data samples for approximating the risk. predict_f: Function that predicts labels given input. loss_f: Function that outputs model's specific loss function. model_param: Model parameters. Returns: Dictionary of risks for following loss functions: (model's loss, 0/1, adversarial risk wrt a single norm-ball). Evaluating adversarial risk by looping over epsilon. Args: data: An array of data samples for approximating the risk. predict_f: Function that predicts labels given input. loss_adv_f: The loss function. This loss has to be specific to the model to tackle gradient masking. dloss_adv_dx: The gradient function of the adversarial loss w.r.t. the input. Ideally, we will have multiple loss functions even on different layers of network. This loss has to be specific to the model to tackle gradient masking. model_param: Model parameters. normalize_f: A function to normalize the weights of the model. config: Dictionary of hyperparameters. rng_key: JAX random number generator key. Returns: Dictionary adversarial risk wrt a range of norm-balls. # If config.adv.eps_from_cvxpy, eps is reset after min-norm solution is found Train a model and log risks. 
# Precompute min-norm solutions # Train loop # Take one optimization step # Adversarial training # Standard training # Log risks and other statistics # Evaluate risk on train/test sets # Log the norm of the gradient w.r.t. various norms # Log weight norm # Log margin for the equivalent linearized single layer model # Cosine similarity between the current params and min-norm solution Compute the min-norm solution using a convex-program solver. # compute minimal L_infinity solution # compute minimal L_2 solution # compute minimal L_1 solution # compute minimal Lp solution # compute minimal Fourier L1 norm (||F(w)||_1) solution
2.220421
2
ooni/utils/__init__.py
Acidburn0zzz/ooni-probe
2
6625292
<filename>ooni/utils/__init__.py import shutil import string import random import signal import errno import gzip import os from datetime import datetime, timedelta from zipfile import ZipFile from twisted.python.filepath import FilePath from twisted.python.runtime import platform class Storage(dict): """ A Storage object is like a dictionary except `obj.foo` can be used in addition to `obj['foo']`. >>> o = Storage(a=1) >>> o.a 1 >>> o['a'] 1 >>> o.a = 2 >>> o['a'] 2 >>> del o.a >>> o.a None """ def __getattr__(self, key): try: return self[key] except KeyError: return None def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError, k: raise AttributeError(k) def __repr__(self): return '<Storage ' + dict.__repr__(self) + '>' def __getstate__(self): return dict(self) def __setstate__(self, value): for (k, v) in value.items(): self[k] = v def checkForRoot(): from ooni import errors if os.getuid() != 0: raise errors.InsufficientPrivileges def randomSTR(length, num=True): """ Returns a random all uppercase alfa-numerical (if num True) string long length """ chars = string.ascii_uppercase if num: chars += string.digits return ''.join(random.choice(chars) for x in range(length)) def randomstr(length, num=True): """ Returns a random all lowercase alfa-numerical (if num True) string long length """ chars = string.ascii_lowercase if num: chars += string.digits return ''.join(random.choice(chars) for x in range(length)) def randomStr(length, num=True): """ Returns a random a mixed lowercase, uppercase, alfanumerical (if num True) string long length """ chars = string.ascii_lowercase + string.ascii_uppercase if num: chars += string.digits return ''.join(random.choice(chars) for x in range(length)) def randomDate(start, end): """ From: http://stackoverflow.com/a/553448 """ delta = end - start int_delta = (delta.days * 24 * 60 * 60) random_second = random.randrange(int_delta) return start + timedelta(seconds=random_second) 
LONG_DATE = "%Y-%m-%d %H:%M:%S" SHORT_DATE = "%Y%m%dT%H%M%SZ" def generate_filename(test_details, prefix=None, extension=None): """ Returns a filename for every test execution. It's used to assure that all files of a certain test have a common basename but different extension. """ kwargs = {} filename_format = "" if prefix is not None: kwargs["prefix"] = prefix filename_format += "{prefix}-" filename_format += "{timestamp}-{probe_cc}-{probe_asn}-{test_name}" if extension is not None: kwargs["extension"] = extension filename_format += ".{extension}" kwargs['test_name'] = test_details['test_name'] kwargs['probe_cc'] = test_details.get('probe_cc', 'ZZ') kwargs['probe_asn'] = test_details.get('probe_asn', 'AS0') kwargs['timestamp'] = datetime.strptime(test_details['test_start_time'], LONG_DATE).strftime(SHORT_DATE) return filename_format.format(**kwargs) def sanitize_options(options): """ Strips all possible user identifying information from the ooniprobe test options. Currently only strips leading directories from filepaths. """ sanitized_options = [] for option in options: if isinstance(option, str): option = os.path.basename(option) sanitized_options.append(option) return sanitized_options def rename(src, dst): # Best effort atomic renaming if platform.isWindows() and os.path.exists(dst): os.unlink(dst) os.rename(src, dst) def unzip(filename, dst): assert filename.endswith('.zip') dst_path = os.path.join( dst, os.path.basename(filename).replace(".zip", "") ) with open(filename) as zfp: zip_file = ZipFile(zfp) zip_file.extractall(dst_path) return dst_path def gunzip(file_path): """ gunzip a file in place. 
""" tmp_location = FilePath(file_path).temporarySibling() in_file = gzip.open(file_path) with tmp_location.open('w') as out_file: shutil.copyfileobj(in_file, out_file) in_file.close() rename(tmp_location.path, file_path) def get_ooni_root(): script = os.path.join(__file__, '..') return os.path.dirname(os.path.realpath(script)) def is_process_running(pid): try: os.kill(pid, 0) running = True except OSError as ose: if ose.errno == errno.EPERM: running = True elif ose.errno == errno.ESRCH: running = False else: raise return running def mkdir_p(path): """ Like makedirs, but it also ignores EEXIST errors, unless it exists but isn't a directory. """ try: os.makedirs(path) except OSError as ose: if ose.errno != errno.EEXIST: raise if not os.path.isdir(path): raise
<filename>ooni/utils/__init__.py import shutil import string import random import signal import errno import gzip import os from datetime import datetime, timedelta from zipfile import ZipFile from twisted.python.filepath import FilePath from twisted.python.runtime import platform class Storage(dict): """ A Storage object is like a dictionary except `obj.foo` can be used in addition to `obj['foo']`. >>> o = Storage(a=1) >>> o.a 1 >>> o['a'] 1 >>> o.a = 2 >>> o['a'] 2 >>> del o.a >>> o.a None """ def __getattr__(self, key): try: return self[key] except KeyError: return None def __setattr__(self, key, value): self[key] = value def __delattr__(self, key): try: del self[key] except KeyError, k: raise AttributeError(k) def __repr__(self): return '<Storage ' + dict.__repr__(self) + '>' def __getstate__(self): return dict(self) def __setstate__(self, value): for (k, v) in value.items(): self[k] = v def checkForRoot(): from ooni import errors if os.getuid() != 0: raise errors.InsufficientPrivileges def randomSTR(length, num=True): """ Returns a random all uppercase alfa-numerical (if num True) string long length """ chars = string.ascii_uppercase if num: chars += string.digits return ''.join(random.choice(chars) for x in range(length)) def randomstr(length, num=True): """ Returns a random all lowercase alfa-numerical (if num True) string long length """ chars = string.ascii_lowercase if num: chars += string.digits return ''.join(random.choice(chars) for x in range(length)) def randomStr(length, num=True): """ Returns a random a mixed lowercase, uppercase, alfanumerical (if num True) string long length """ chars = string.ascii_lowercase + string.ascii_uppercase if num: chars += string.digits return ''.join(random.choice(chars) for x in range(length)) def randomDate(start, end): """ From: http://stackoverflow.com/a/553448 """ delta = end - start int_delta = (delta.days * 24 * 60 * 60) random_second = random.randrange(int_delta) return start + timedelta(seconds=random_second) 
LONG_DATE = "%Y-%m-%d %H:%M:%S" SHORT_DATE = "%Y%m%dT%H%M%SZ" def generate_filename(test_details, prefix=None, extension=None): """ Returns a filename for every test execution. It's used to assure that all files of a certain test have a common basename but different extension. """ kwargs = {} filename_format = "" if prefix is not None: kwargs["prefix"] = prefix filename_format += "{prefix}-" filename_format += "{timestamp}-{probe_cc}-{probe_asn}-{test_name}" if extension is not None: kwargs["extension"] = extension filename_format += ".{extension}" kwargs['test_name'] = test_details['test_name'] kwargs['probe_cc'] = test_details.get('probe_cc', 'ZZ') kwargs['probe_asn'] = test_details.get('probe_asn', 'AS0') kwargs['timestamp'] = datetime.strptime(test_details['test_start_time'], LONG_DATE).strftime(SHORT_DATE) return filename_format.format(**kwargs) def sanitize_options(options): """ Strips all possible user identifying information from the ooniprobe test options. Currently only strips leading directories from filepaths. """ sanitized_options = [] for option in options: if isinstance(option, str): option = os.path.basename(option) sanitized_options.append(option) return sanitized_options def rename(src, dst): # Best effort atomic renaming if platform.isWindows() and os.path.exists(dst): os.unlink(dst) os.rename(src, dst) def unzip(filename, dst): assert filename.endswith('.zip') dst_path = os.path.join( dst, os.path.basename(filename).replace(".zip", "") ) with open(filename) as zfp: zip_file = ZipFile(zfp) zip_file.extractall(dst_path) return dst_path def gunzip(file_path): """ gunzip a file in place. 
""" tmp_location = FilePath(file_path).temporarySibling() in_file = gzip.open(file_path) with tmp_location.open('w') as out_file: shutil.copyfileobj(in_file, out_file) in_file.close() rename(tmp_location.path, file_path) def get_ooni_root(): script = os.path.join(__file__, '..') return os.path.dirname(os.path.realpath(script)) def is_process_running(pid): try: os.kill(pid, 0) running = True except OSError as ose: if ose.errno == errno.EPERM: running = True elif ose.errno == errno.ESRCH: running = False else: raise return running def mkdir_p(path): """ Like makedirs, but it also ignores EEXIST errors, unless it exists but isn't a directory. """ try: os.makedirs(path) except OSError as ose: if ose.errno != errno.EEXIST: raise if not os.path.isdir(path): raise
en
0.658231
A Storage object is like a dictionary except `obj.foo` can be used in addition to `obj['foo']`. >>> o = Storage(a=1) >>> o.a 1 >>> o['a'] 1 >>> o.a = 2 >>> o['a'] 2 >>> del o.a >>> o.a None Returns a random all uppercase alfa-numerical (if num True) string long length Returns a random all lowercase alfa-numerical (if num True) string long length Returns a random a mixed lowercase, uppercase, alfanumerical (if num True) string long length From: http://stackoverflow.com/a/553448 Returns a filename for every test execution. It's used to assure that all files of a certain test have a common basename but different extension. Strips all possible user identifying information from the ooniprobe test options. Currently only strips leading directories from filepaths. # Best effort atomic renaming gunzip a file in place. Like makedirs, but it also ignores EEXIST errors, unless it exists but isn't a directory.
2.530457
3
py/libs/assetexchange_maya/__init__.py
ddesmond/assetexchange
0
6625293
from .mainthread import * from .plugin import *
from .mainthread import * from .plugin import *
none
1
1.088691
1
research/instahide_attack_2020/step_4_final_graph.py
andrewyguo/privacy
0
6625294
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import multiprocessing as mp import pickle import random import numpy as np labels = np.load("data/label.npy") nextgraph = np.load("data/nextgraph.npy") assigned = [[] for _ in range(5000)] lambdas = [[] for _ in range(5000)] for i in range(100): order = (np.argsort(nextgraph[:,i])) correct = (labels[order[:20]]>0).sum(axis=0).argmax() # Let's create the final graph # Instead of doing a full bipartite matching, let's just greedily # choose the closest 80 candidates for each encoded image to pair # together can call it a day. # This is within a percent or two of doing that, and much easier. # Also record the lambdas based on which image it coresponds to, # but if they share a label then just guess it's an even 50/50 split. for x in order[:80]: if labels[x][correct] > 0 and len(assigned[x]) < 2: assigned[x].append(i) if np.sum(labels[x]>0) == 1: # the same label was mixed in twice. punt. lambdas[x].append(labels[x][correct]/2) else: lambdas[x].append(labels[x][correct]) np.save("data/predicted_pairings_80.npy", assigned) np.save("data/predicted_lambdas_80.npy", lambdas)
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== import multiprocessing as mp import pickle import random import numpy as np labels = np.load("data/label.npy") nextgraph = np.load("data/nextgraph.npy") assigned = [[] for _ in range(5000)] lambdas = [[] for _ in range(5000)] for i in range(100): order = (np.argsort(nextgraph[:,i])) correct = (labels[order[:20]]>0).sum(axis=0).argmax() # Let's create the final graph # Instead of doing a full bipartite matching, let's just greedily # choose the closest 80 candidates for each encoded image to pair # together can call it a day. # This is within a percent or two of doing that, and much easier. # Also record the lambdas based on which image it coresponds to, # but if they share a label then just guess it's an even 50/50 split. for x in order[:80]: if labels[x][correct] > 0 and len(assigned[x]) < 2: assigned[x].append(i) if np.sum(labels[x]>0) == 1: # the same label was mixed in twice. punt. lambdas[x].append(labels[x][correct]/2) else: lambdas[x].append(labels[x][correct]) np.save("data/predicted_pairings_80.npy", assigned) np.save("data/predicted_lambdas_80.npy", lambdas)
en
0.891049
# Copyright 2020 Google LLC # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== # Let's create the final graph # Instead of doing a full bipartite matching, let's just greedily # choose the closest 80 candidates for each encoded image to pair # together can call it a day. # This is within a percent or two of doing that, and much easier. # Also record the lambdas based on which image it coresponds to, # but if they share a label then just guess it's an even 50/50 split. # the same label was mixed in twice. punt.
2.473983
2
posthog/api/insight.py
msnitish/posthog
0
6625295
import json from typing import Any, Dict, Type from django.db.models import OuterRef, QuerySet, Subquery from django.db.models.query_utils import Q from django.http import HttpResponse from django.utils.text import slugify from django.utils.timezone import now from django_filters.rest_framework import DjangoFilterBackend from drf_spectacular.utils import OpenApiResponse from rest_framework import exceptions, request, serializers, status, viewsets from rest_framework.decorators import action from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from rest_framework.settings import api_settings from rest_framework_csv import renderers as csvrenderers from sentry_sdk import capture_exception from ee.clickhouse.queries.funnels import ClickhouseFunnelTimeToConvert, ClickhouseFunnelTrends from ee.clickhouse.queries.funnels.utils import get_funnel_order_class from ee.clickhouse.queries.paths.paths import ClickhousePaths from ee.clickhouse.queries.retention.clickhouse_retention import ClickhouseRetention from ee.clickhouse.queries.stickiness.clickhouse_stickiness import ClickhouseStickiness from ee.clickhouse.queries.trends.clickhouse_trends import ClickhouseTrends from posthog.api.documentation import extend_schema from posthog.api.insight_serializers import ( FunnelSerializer, FunnelStepsResultsSerializer, TrendResultsSerializer, TrendSerializer, ) from posthog.api.routing import StructuredViewSetMixin from posthog.api.shared import UserBasicSerializer from posthog.api.tagged_item import TaggedItemSerializerMixin, TaggedItemViewSetMixin from posthog.api.utils import format_paginated_url from posthog.constants import ( BREAKDOWN_VALUES_LIMIT, FROM_DASHBOARD, INSIGHT, INSIGHT_FUNNELS, INSIGHT_PATHS, INSIGHT_STICKINESS, PATHS_INCLUDE_EVENT_TYPES, TRENDS_STICKINESS, FunnelVizType, ) from posthog.decorators import cached_function from posthog.helpers.multi_property_breakdown import protect_old_clients_from_multi_property_default 
from posthog.models import Filter, Insight, Team
from posthog.models.dashboard import Dashboard
from posthog.models.filters import RetentionFilter
from posthog.models.filters.path_filter import PathFilter
from posthog.models.filters.stickiness_filter import StickinessFilter
from posthog.models.insight import InsightViewed
from posthog.permissions import ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission
from posthog.queries.util import get_earliest_timestamp
from posthog.settings import SITE_URL
from posthog.tasks.update_cache import update_dashboard_item_cache
from posthog.utils import get_safe_cache, relative_date_parse, should_refresh, str_to_bool


class InsightBasicSerializer(serializers.ModelSerializer):
    """
    Simplified serializer to speed response times when loading large amounts of objects.

    Used for list/retrieve when the client passes ?basic=1 (see
    InsightViewSet.get_serializer_class). Creation through this serializer is
    deliberately disabled.
    """

    class Meta:
        model = Insight
        fields = [
            "id",
            "short_id",
            "name",
            "filters",
            "dashboard",
            "color",
            "description",
            "last_refresh",
            "refreshing",
            "saved",
            "updated_at",
        ]
        read_only_fields = ("short_id", "updated_at")

    def create(self, validated_data: Dict, *args: Any, **kwargs: Any) -> Any:
        # This serializer is read-oriented; writes go through InsightSerializer.
        raise NotImplementedError()

    def to_representation(self, instance):
        """Serialize the insight, replacing raw filters with dashboard-aware filters."""
        representation = super().to_representation(instance)
        representation["filters"] = instance.dashboard_filters()
        return representation


class InsightSerializer(TaggedItemSerializerMixin, InsightBasicSerializer):
    """
    Full read/write serializer for insights.

    Adds computed fields on top of InsightBasicSerializer: the cached query
    `result`, its `last_refresh` timestamp, audit users, and the requesting
    user's effective privilege level.
    """

    result = serializers.SerializerMethodField()
    last_refresh = serializers.SerializerMethodField()
    created_by = UserBasicSerializer(read_only=True)
    last_modified_by = UserBasicSerializer(read_only=True)
    effective_privilege_level = serializers.SerializerMethodField()

    class Meta:
        model = Insight
        fields = [
            "id",
            "short_id",
            "name",
            "derived_name",
            "filters",
            "filters_hash",
            "order",
            "deleted",
            "dashboard",
            "layouts",
            "color",
            "last_refresh",
            "refreshing",
            "result",
            "created_at",
            "created_by",
            "description",
            "updated_at",
            "tags",
            "favorited",
            "saved",
            "last_modified_at",
            "last_modified_by",
            "is_sample",
            "effective_restriction_level",
            "effective_privilege_level",
        ]
        read_only_fields = (
            "created_at",
            "created_by",
            "last_modified_at",
            "last_modified_by",
            "short_id",
            "updated_at",
            "is_sample",
            "effective_restriction_level",
            "effective_privilege_level",
        )

    def create(self, validated_data: Dict, *args: Any, **kwargs: Any) -> Insight:
        """Create an insight, optionally attached to a dashboard of the same team.

        Raises serializers.ValidationError if a dashboard from another team is given.
        """
        request = self.context["request"]
        team = Team.objects.get(id=self.context["team_id"])
        validated_data.pop("last_refresh", None)  # last_refresh sometimes gets sent if dashboard_item is duplicated
        tags = validated_data.pop("tags", None)  # tags are created separately as global tag relationships

        if not validated_data.get("dashboard", None):
            # Standalone insight: the requesting user is both creator and last modifier.
            dashboard_item = Insight.objects.create(
                team=team, created_by=request.user, last_modified_by=request.user, **validated_data
            )
        elif validated_data["dashboard"].team == team:
            # Dashboard-attached insight: honor an explicit created_by (e.g. duplication),
            # defaulting to the requesting user.
            created_by = validated_data.pop("created_by", request.user)
            dashboard_item = Insight.objects.create(
                team=team, last_refresh=now(), created_by=created_by, last_modified_by=created_by, **validated_data
            )
        else:
            raise serializers.ValidationError("Dashboard not found")

        # Manual tag creation since this create method doesn't call super()
        self._attempt_set_tags(tags, dashboard_item)
        return dashboard_item

    def update(self, instance: Insight, validated_data: Dict, **kwargs) -> Insight:
        """Update an insight, stamping last_modified_* only for material field changes."""
        # Remove is_sample if it's set as user has altered the sample configuration
        validated_data["is_sample"] = False
        if validated_data.keys() & Insight.MATERIAL_INSIGHT_FIELDS:
            instance.last_modified_at = now()
            instance.last_modified_by = self.context["request"].user
        return super().update(instance, validated_data)

    def get_result(self, insight: Insight):
        """Return the cached query result for the insight, or None.

        Recomputes synchronously when the request asks for a refresh
        (should_refresh). Returns None when no filters are set, the cache is
        empty, or the cached entry is still being computed (has a task_id).
        """
        if not insight.filters:
            return None
        if should_refresh(self.context["request"]):
            return update_dashboard_item_cache(insight, None)

        result = get_safe_cache(insight.filters_hash)
        if not result or result.get("task_id", None):
            return None
        # Data might not be defined if there is still cached results from before moving from 'results' to 'data'
        return result.get("result")

    def get_last_refresh(self, insight: Insight):
        """Return when the insight's result was last computed.

        Side effect: if the cache no longer holds a result, the stale
        last_refresh is cleared on the model so clients don't see a timestamp
        for data that no longer exists.
        """
        if should_refresh(self.context["request"]):
            return now()

        result = self.get_result(insight)
        if result is not None:
            return insight.last_refresh
        if insight.last_refresh is not None:
            # Update last_refresh without updating "updated_at" (insight edit date)
            insight.last_refresh = None
            insight.save()
        return None

    def get_effective_privilege_level(self, insight: Insight) -> Dashboard.PrivilegeLevel:
        """Return the requesting user's privilege level for this insight."""
        return insight.get_effective_privilege_level(self.context["request"].user.id)

    def to_representation(self, instance: Insight):
        """Serialize the insight, applying the dashboard context's filter overrides."""
        representation = super().to_representation(instance)
        representation["filters"] = instance.dashboard_filters(dashboard=self.context.get("dashboard"))
        return representation


class InsightViewSet(TaggedItemViewSetMixin, StructuredViewSetMixin, viewsets.ModelViewSet):
    """CRUD endpoints for insights plus calculated-result endpoints
    (trend/funnel/retention/path) backed by the ClickHouse query classes."""

    queryset = Insight.objects.all().prefetch_related(
        "dashboard", "dashboard__team", "dashboard__team__organization", "created_by"
    )
    serializer_class = InsightSerializer
    permission_classes = [IsAuthenticated, ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission]
    renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (csvrenderers.CSVRenderer,)
    filter_backends = [DjangoFilterBackend]
    filterset_fields = ["short_id", "created_by"]
    include_in_docs = True

    def get_serializer_class(self) -> Type[serializers.BaseSerializer]:
        """Use the lightweight serializer for list/retrieve when ?basic=1 is passed."""
        if (self.action == "list" or self.action == "retrieve") and str_to_bool(
            self.request.query_params.get("basic", "0"),
        ):
            return InsightBasicSerializer
        return super().get_serializer_class()

    def get_queryset(self) -> QuerySet:
        """Build the base queryset: hide deleted items on list, apply request
        filters, and order by ?order= (default: the model's "order" field)."""
        queryset = super().get_queryset()
        if self.action == "list":
            queryset = queryset.filter(deleted=False)
            queryset = self._filter_request(self.request, queryset)

        order = self.request.GET.get("order", None)
        if order:
            if order == "-my_last_viewed_at":
                # Ordering by the requesting user's own view history needs an annotation.
                queryset = self._annotate_with_my_last_viewed_at(queryset).order_by("-my_last_viewed_at")
            else:
                queryset = queryset.order_by(order)
        else:
            queryset = queryset.order_by("order")

        return queryset

    def _annotate_with_my_last_viewed_at(self, queryset: QuerySet) -> QuerySet:
        """Annotate each insight with when the requesting user last viewed it
        (via a correlated InsightViewed subquery). Raises NotAuthenticated for
        anonymous users since the annotation is per-user."""
        if self.request.user.is_authenticated:
            insight_viewed = InsightViewed.objects.filter(
                team=self.team, user=self.request.user, insight_id=OuterRef("id")
            )
            return queryset.annotate(my_last_viewed_at=Subquery(insight_viewed.values("last_viewed_at")[:1]))
        raise exceptions.NotAuthenticated()

    def _filter_request(self, request: request.Request, queryset: QuerySet) -> QuerySet:
        """Apply supported GET query params (saved, my_last_viewed, user,
        favorited, date_from/date_to, insight, search) as queryset filters;
        unknown params are ignored."""
        filters = request.GET.dict()

        for key in filters:
            if key == "saved":
                if str_to_bool(request.GET["saved"]):
                    # Dashboard-attached insights count as saved.
                    queryset = queryset.filter(Q(saved=True) | Q(dashboard__isnull=False))
                else:
                    queryset = queryset.filter(Q(saved=False))
            elif key == "my_last_viewed":
                if str_to_bool(request.GET["my_last_viewed"]):
                    queryset = self._annotate_with_my_last_viewed_at(queryset).filter(my_last_viewed_at__isnull=False)
            elif key == "user":
                queryset = queryset.filter(created_by=request.user)
            elif key == "favorited":
                queryset = queryset.filter(Q(favorited=True))
            elif key == "date_from":
                # date_from/date_to filter on modification time, not creation time.
                queryset = queryset.filter(last_modified_at__gt=relative_date_parse(request.GET["date_from"]))
            elif key == "date_to":
                queryset = queryset.filter(last_modified_at__lt=relative_date_parse(request.GET["date_to"]))
            elif key == INSIGHT:
                queryset = queryset.filter(filters__insight=request.GET[INSIGHT])
            elif key == "search":
                queryset = queryset.filter(
                    Q(name__icontains=request.GET["search"]) | Q(derived_name__icontains=request.GET["search"])
                )
        return queryset

    @action(methods=["patch"], detail=False)
    def layouts(self, request, **kwargs):
        """Dashboard item layouts.

        Bulk-updates the `layouts` field for each item in request.data["items"]
        and returns the refreshed collection.
        """
        queryset = self.get_queryset()
        for data in request.data["items"]:
            queryset.filter(pk=data["id"]).update(layouts=data["layouts"])
        serializer = self.get_serializer(queryset.all(), many=True)
        return Response(serializer.data)

    # ******************************************
    # Calculated Insight Endpoints
    # /projects/:id/insights/trend
    # /projects/:id/insights/funnel
    # /projects/:id/insights/retention
    # /projects/:id/insights/path
    #
    # Request parameters and caching are handled here and passed onto respective .queries classes
    # ******************************************

    # ******************************************
    # /projects/:id/insights/trend
    #
    # params:
    # - from_dashboard: (string) determines trend is being retrieved from dashboard item to update dashboard_item metadata
    # - shown_as: (string: Volume, Stickiness) specifies the trend aggregation type
    # - **shared filter types
    # ******************************************
    @extend_schema(
        request=TrendSerializer,
        methods=["POST"],
        tags=["trend"],
        operation_id="Trends",
        responses=TrendResultsSerializer,
    )
    @action(methods=["GET", "POST"], detail=False)
    def trend(self, request: request.Request, *args: Any, **kwargs: Any):
        """Compute trend results; supports paginated JSON and CSV export."""
        try:
            serializer = TrendSerializer(request=request)
            serializer.is_valid(raise_exception=True)
        except Exception as e:
            # Validation problems are reported to Sentry but do not block the
            # request — NOTE(review): this intentionally(?) swallows validation
            # errors; confirm before tightening.
            capture_exception(e)

        result = self.calculate_trends(request)
        filter = Filter(request=request, team=self.team)
        # Offer a "next" page link only when the breakdown limit was hit.
        next = (
            format_paginated_url(request, filter.offset, BREAKDOWN_VALUES_LIMIT)
            if len(result["result"]) >= BREAKDOWN_VALUES_LIMIT
            else None
        )
        if self.request.accepted_renderer.format == "csv":
            # Flatten each series into one CSV row: series label + one column per x-axis label.
            csvexport = []
            for item in result["result"]:
                line = {"series": item["label"]}
                for index, data in enumerate(item["data"]):
                    line[item["labels"][index]] = data
                csvexport.append(line)
            renderer = csvrenderers.CSVRenderer()
            renderer.header = csvexport[0].keys()
            export = renderer.render(csvexport)
            if request.GET.get("export_insight_id"):
                # Prepend a link back to the insight as the first line of the file.
                export = "{}/insights/{}/\n".format(SITE_URL, request.GET["export_insight_id"]).encode() + export
            response = HttpResponse(export)
            response[
                "Content-Disposition"
            ] = 'attachment; filename="{name} ({date_from} {date_to}) from PostHog.csv"'.format(
                name=slugify(request.GET.get("export_name", "export")),
                date_from=filter.date_from.strftime("%Y-%m-%d -") if filter.date_from else "up until",
                date_to=filter.date_to.strftime("%Y-%m-%d"),
            )
            return response
        return Response({**result, "next": next})

    @cached_function
    def calculate_trends(self, request: request.Request) -> Dict[str, Any]:
        """Run the trends (or stickiness) query for the request's filter.

        Result caching is handled by @cached_function (posthog.decorators) —
        presumably keyed on the filter hash; see the decorator for details.
        """
        team = self.team
        filter = Filter(request=request, team=self.team)

        if filter.insight == INSIGHT_STICKINESS or filter.shown_as == TRENDS_STICKINESS:
            stickiness_filter = StickinessFilter(
                request=request, team=team, get_earliest_timestamp=get_earliest_timestamp
            )
            result = ClickhouseStickiness().run(stickiness_filter, team)
        else:
            trends_query = ClickhouseTrends()
            result = trends_query.run(filter, team)

        # Bump the source dashboard's last_refresh when called from a dashboard.
        self._refresh_dashboard(request=request)
        return {"result": result}

    # ******************************************
    # /projects/:id/insights/funnel
    # The funnel endpoint is asynchronously processed. When a request is received, the endpoint will
    # call an async task with an id that can be continually polled for 3 minutes.
    #
    # params:
    # - refresh: (dict) specifies cache to force refresh or poll
    # - from_dashboard: (dict) determines funnel is being retrieved from dashboard item to update dashboard_item metadata
    # - **shared filter types
    # ******************************************
    @extend_schema(
        request=FunnelSerializer,
        responses=OpenApiResponse(
            response=FunnelStepsResultsSerializer,
            description="Note, if funnel_viz_type is set the response will be different.",
        ),
        methods=["POST"],
        tags=["funnel"],
        operation_id="Funnels",
    )
    @action(methods=["GET", "POST"], detail=False)
    def funnel(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        """Compute funnel results, normalizing the payload for older clients."""
        try:
            serializer = FunnelSerializer(request=request)
            serializer.is_valid(raise_exception=True)
        except Exception as e:
            # Same best-effort validation reporting as in trend() above.
            capture_exception(e)

        funnel = self.calculate_funnel(request)

        funnel["result"] = protect_old_clients_from_multi_property_default(request.data, funnel["result"])

        return Response(funnel)

    @cached_function
    def calculate_funnel(self, request: request.Request) -> Dict[str, Any]:
        """Dispatch to the funnel query class matching the requested viz type
        (trends, time-to-convert, or ordered steps). Cached via @cached_function."""
        team = self.team
        filter = Filter(request=request, data={"insight": INSIGHT_FUNNELS}, team=self.team)

        if filter.funnel_viz_type == FunnelVizType.TRENDS:
            return {"result": ClickhouseFunnelTrends(team=team, filter=filter).run()}
        elif filter.funnel_viz_type == FunnelVizType.TIME_TO_CONVERT:
            return {"result": ClickhouseFunnelTimeToConvert(team=team, filter=filter).run()}
        else:
            funnel_order_class = get_funnel_order_class(filter)
            return {"result": funnel_order_class(team=team, filter=filter).run()}

    # ******************************************
    # /projects/:id/insights/retention
    # params:
    # - start_entity: (dict) specifies id and type of the entity to focus retention on
    # - **shared filter types
    # ******************************************
    @action(methods=["GET"], detail=False)
    def retention(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        """Compute retention results for the request's filter."""
        result = self.calculate_retention(request)
        return Response(result)

    @cached_function
    def calculate_retention(self, request: request.Request) -> Dict[str, Any]:
        """Run the retention query, defaulting the window to the last 11 days
        when no date_from is supplied. Cached via @cached_function."""
        team = self.team
        data = {}
        if not request.GET.get("date_from"):
            data.update({"date_from": "-11d"})
        filter = RetentionFilter(data=data, request=request, team=self.team)
        base_uri = request.build_absolute_uri("/")
        result = ClickhouseRetention(base_uri=base_uri).run(filter, team)
        return {"result": result}

    # ******************************************
    # /projects/:id/insights/path
    # params:
    # - start: (string) specifies the name of the starting property or element
    # - request_type: (string: $pageview, $autocapture, $screen, custom_event) specifies the path type
    # - **shared filter types
    # ******************************************
    @action(methods=["GET", "POST"], detail=False)
    def path(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        """Compute path results for the request's filter."""
        result = self.calculate_path(request)
        return Response(result)

    @cached_function
    def calculate_path(self, request: request.Request) -> Dict[str, Any]:
        """Run the paths query, optionally scoped by an embedded funnel filter
        (accepted as a JSON string or object). Cached via @cached_function."""
        team = self.team
        filter = PathFilter(request=request, data={"insight": INSIGHT_PATHS}, team=self.team)

        funnel_filter = None
        funnel_filter_data = request.GET.get("funnel_filter") or request.data.get("funnel_filter")
        if funnel_filter_data:
            if isinstance(funnel_filter_data, str):
                funnel_filter_data = json.loads(funnel_filter_data)
            funnel_filter = Filter(data={"insight": INSIGHT_FUNNELS, **funnel_filter_data}, team=self.team)

        #  backwards compatibility
        if filter.path_type:
            filter = filter.with_data({PATHS_INCLUDE_EVENT_TYPES: [filter.path_type]})
        resp = ClickhousePaths(filter=filter, team=team, funnel_filter=funnel_filter).run()

        return {"result": resp}

    # Checks if a dashboard id has been set and if so, update the refresh date
    def _refresh_dashboard(self, request) -> None:
        # NOTE(review): FROM_DASHBOARD carries the *insight* pk of the dashboard
        # item being refreshed (it is used to filter Insight objects) — confirm
        # the naming against callers.
        dashboard_id = request.GET.get(FROM_DASHBOARD, None)
        if dashboard_id:
            Insight.objects.filter(pk=dashboard_id).update(last_refresh=now())

    # ******************************************
    # /projects/:id/insights/:short_id/viewed
    # Creates or updates an InsightViewed object for the user/insight combo
    # ******************************************
    @action(methods=["POST"], detail=True)
    def viewed(self, request: request.Request, *args: Any, **kwargs: Any) -> Response:
        """Record that the requesting user viewed this insight (idempotent upsert)."""
        InsightViewed.objects.update_or_create(
            team=self.team, user=request.user, insight=self.get_object(), defaults={"last_viewed_at": now()}
        )
        return Response(status=status.HTTP_201_CREATED)


class LegacyInsightViewSet(InsightViewSet):
    # Same endpoints mounted under the pre-/projects/:id URL scheme.
    legacy_team_compatibility = True
import json from typing import Any, Dict, Type from django.db.models import OuterRef, QuerySet, Subquery from django.db.models.query_utils import Q from django.http import HttpResponse from django.utils.text import slugify from django.utils.timezone import now from django_filters.rest_framework import DjangoFilterBackend from drf_spectacular.utils import OpenApiResponse from rest_framework import exceptions, request, serializers, status, viewsets from rest_framework.decorators import action from rest_framework.permissions import IsAuthenticated from rest_framework.response import Response from rest_framework.settings import api_settings from rest_framework_csv import renderers as csvrenderers from sentry_sdk import capture_exception from ee.clickhouse.queries.funnels import ClickhouseFunnelTimeToConvert, ClickhouseFunnelTrends from ee.clickhouse.queries.funnels.utils import get_funnel_order_class from ee.clickhouse.queries.paths.paths import ClickhousePaths from ee.clickhouse.queries.retention.clickhouse_retention import ClickhouseRetention from ee.clickhouse.queries.stickiness.clickhouse_stickiness import ClickhouseStickiness from ee.clickhouse.queries.trends.clickhouse_trends import ClickhouseTrends from posthog.api.documentation import extend_schema from posthog.api.insight_serializers import ( FunnelSerializer, FunnelStepsResultsSerializer, TrendResultsSerializer, TrendSerializer, ) from posthog.api.routing import StructuredViewSetMixin from posthog.api.shared import UserBasicSerializer from posthog.api.tagged_item import TaggedItemSerializerMixin, TaggedItemViewSetMixin from posthog.api.utils import format_paginated_url from posthog.constants import ( BREAKDOWN_VALUES_LIMIT, FROM_DASHBOARD, INSIGHT, INSIGHT_FUNNELS, INSIGHT_PATHS, INSIGHT_STICKINESS, PATHS_INCLUDE_EVENT_TYPES, TRENDS_STICKINESS, FunnelVizType, ) from posthog.decorators import cached_function from posthog.helpers.multi_property_breakdown import protect_old_clients_from_multi_property_default 
from posthog.models import Filter, Insight, Team from posthog.models.dashboard import Dashboard from posthog.models.filters import RetentionFilter from posthog.models.filters.path_filter import PathFilter from posthog.models.filters.stickiness_filter import StickinessFilter from posthog.models.insight import InsightViewed from posthog.permissions import ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission from posthog.queries.util import get_earliest_timestamp from posthog.settings import SITE_URL from posthog.tasks.update_cache import update_dashboard_item_cache from posthog.utils import get_safe_cache, relative_date_parse, should_refresh, str_to_bool class InsightBasicSerializer(serializers.ModelSerializer): """ Simplified serializer to speed response times when loading large amounts of objects. """ class Meta: model = Insight fields = [ "id", "short_id", "name", "filters", "dashboard", "color", "description", "last_refresh", "refreshing", "saved", "updated_at", ] read_only_fields = ("short_id", "updated_at") def create(self, validated_data: Dict, *args: Any, **kwargs: Any) -> Any: raise NotImplementedError() def to_representation(self, instance): representation = super().to_representation(instance) representation["filters"] = instance.dashboard_filters() return representation class InsightSerializer(TaggedItemSerializerMixin, InsightBasicSerializer): result = serializers.SerializerMethodField() last_refresh = serializers.SerializerMethodField() created_by = UserBasicSerializer(read_only=True) last_modified_by = UserBasicSerializer(read_only=True) effective_privilege_level = serializers.SerializerMethodField() class Meta: model = Insight fields = [ "id", "short_id", "name", "derived_name", "filters", "filters_hash", "order", "deleted", "dashboard", "layouts", "color", "last_refresh", "refreshing", "result", "created_at", "created_by", "description", "updated_at", "tags", "favorited", "saved", "last_modified_at", "last_modified_by", "is_sample", 
"effective_restriction_level", "effective_privilege_level", ] read_only_fields = ( "created_at", "created_by", "last_modified_at", "last_modified_by", "short_id", "updated_at", "is_sample", "effective_restriction_level", "effective_privilege_level", ) def create(self, validated_data: Dict, *args: Any, **kwargs: Any) -> Insight: request = self.context["request"] team = Team.objects.get(id=self.context["team_id"]) validated_data.pop("last_refresh", None) # last_refresh sometimes gets sent if dashboard_item is duplicated tags = validated_data.pop("tags", None) # tags are created separately as global tag relationships if not validated_data.get("dashboard", None): dashboard_item = Insight.objects.create( team=team, created_by=request.user, last_modified_by=request.user, **validated_data ) elif validated_data["dashboard"].team == team: created_by = validated_data.pop("created_by", request.user) dashboard_item = Insight.objects.create( team=team, last_refresh=now(), created_by=created_by, last_modified_by=created_by, **validated_data ) else: raise serializers.ValidationError("Dashboard not found") # Manual tag creation since this create method doesn't call super() self._attempt_set_tags(tags, dashboard_item) return dashboard_item def update(self, instance: Insight, validated_data: Dict, **kwargs) -> Insight: # Remove is_sample if it's set as user has altered the sample configuration validated_data["is_sample"] = False if validated_data.keys() & Insight.MATERIAL_INSIGHT_FIELDS: instance.last_modified_at = now() instance.last_modified_by = self.context["request"].user return super().update(instance, validated_data) def get_result(self, insight: Insight): if not insight.filters: return None if should_refresh(self.context["request"]): return update_dashboard_item_cache(insight, None) result = get_safe_cache(insight.filters_hash) if not result or result.get("task_id", None): return None # Data might not be defined if there is still cached results from before moving from 
'results' to 'data' return result.get("result") def get_last_refresh(self, insight: Insight): if should_refresh(self.context["request"]): return now() result = self.get_result(insight) if result is not None: return insight.last_refresh if insight.last_refresh is not None: # Update last_refresh without updating "updated_at" (insight edit date) insight.last_refresh = None insight.save() return None def get_effective_privilege_level(self, insight: Insight) -> Dashboard.PrivilegeLevel: return insight.get_effective_privilege_level(self.context["request"].user.id) def to_representation(self, instance: Insight): representation = super().to_representation(instance) representation["filters"] = instance.dashboard_filters(dashboard=self.context.get("dashboard")) return representation class InsightViewSet(TaggedItemViewSetMixin, StructuredViewSetMixin, viewsets.ModelViewSet): queryset = Insight.objects.all().prefetch_related( "dashboard", "dashboard__team", "dashboard__team__organization", "created_by" ) serializer_class = InsightSerializer permission_classes = [IsAuthenticated, ProjectMembershipNecessaryPermissions, TeamMemberAccessPermission] renderer_classes = tuple(api_settings.DEFAULT_RENDERER_CLASSES) + (csvrenderers.CSVRenderer,) filter_backends = [DjangoFilterBackend] filterset_fields = ["short_id", "created_by"] include_in_docs = True def get_serializer_class(self) -> Type[serializers.BaseSerializer]: if (self.action == "list" or self.action == "retrieve") and str_to_bool( self.request.query_params.get("basic", "0"), ): return InsightBasicSerializer return super().get_serializer_class() def get_queryset(self) -> QuerySet: queryset = super().get_queryset() if self.action == "list": queryset = queryset.filter(deleted=False) queryset = self._filter_request(self.request, queryset) order = self.request.GET.get("order", None) if order: if order == "-my_last_viewed_at": queryset = self._annotate_with_my_last_viewed_at(queryset).order_by("-my_last_viewed_at") else: queryset = 
queryset.order_by(order) else: queryset = queryset.order_by("order") return queryset def _annotate_with_my_last_viewed_at(self, queryset: QuerySet) -> QuerySet: if self.request.user.is_authenticated: insight_viewed = InsightViewed.objects.filter( team=self.team, user=self.request.user, insight_id=OuterRef("id") ) return queryset.annotate(my_last_viewed_at=Subquery(insight_viewed.values("last_viewed_at")[:1])) raise exceptions.NotAuthenticated() def _filter_request(self, request: request.Request, queryset: QuerySet) -> QuerySet: filters = request.GET.dict() for key in filters: if key == "saved": if str_to_bool(request.GET["saved"]): queryset = queryset.filter(Q(saved=True) | Q(dashboard__isnull=False)) else: queryset = queryset.filter(Q(saved=False)) elif key == "my_last_viewed": if str_to_bool(request.GET["my_last_viewed"]): queryset = self._annotate_with_my_last_viewed_at(queryset).filter(my_last_viewed_at__isnull=False) elif key == "user": queryset = queryset.filter(created_by=request.user) elif key == "favorited": queryset = queryset.filter(Q(favorited=True)) elif key == "date_from": queryset = queryset.filter(last_modified_at__gt=relative_date_parse(request.GET["date_from"])) elif key == "date_to": queryset = queryset.filter(last_modified_at__lt=relative_date_parse(request.GET["date_to"])) elif key == INSIGHT: queryset = queryset.filter(filters__insight=request.GET[INSIGHT]) elif key == "search": queryset = queryset.filter( Q(name__icontains=request.GET["search"]) | Q(derived_name__icontains=request.GET["search"]) ) return queryset @action(methods=["patch"], detail=False) def layouts(self, request, **kwargs): """Dashboard item layouts.""" queryset = self.get_queryset() for data in request.data["items"]: queryset.filter(pk=data["id"]).update(layouts=data["layouts"]) serializer = self.get_serializer(queryset.all(), many=True) return Response(serializer.data) # ****************************************** # Calculated Insight Endpoints # /projects/:id/insights/trend 
# /projects/:id/insights/funnel # /projects/:id/insights/retention # /projects/:id/insights/path # # Request parameteres and caching are handled here and passed onto respective .queries classes # ****************************************** # ****************************************** # /projects/:id/insights/trend # # params: # - from_dashboard: (string) determines trend is being retrieved from dashboard item to update dashboard_item metadata # - shown_as: (string: Volume, Stickiness) specifies the trend aggregation type # - **shared filter types # ****************************************** @extend_schema( request=TrendSerializer, methods=["POST"], tags=["trend"], operation_id="Trends", responses=TrendResultsSerializer, ) @action(methods=["GET", "POST"], detail=False) def trend(self, request: request.Request, *args: Any, **kwargs: Any): try: serializer = TrendSerializer(request=request) serializer.is_valid(raise_exception=True) except Exception as e: capture_exception(e) result = self.calculate_trends(request) filter = Filter(request=request, team=self.team) next = ( format_paginated_url(request, filter.offset, BREAKDOWN_VALUES_LIMIT) if len(result["result"]) >= BREAKDOWN_VALUES_LIMIT else None ) if self.request.accepted_renderer.format == "csv": csvexport = [] for item in result["result"]: line = {"series": item["label"]} for index, data in enumerate(item["data"]): line[item["labels"][index]] = data csvexport.append(line) renderer = csvrenderers.CSVRenderer() renderer.header = csvexport[0].keys() export = renderer.render(csvexport) if request.GET.get("export_insight_id"): export = "{}/insights/{}/\n".format(SITE_URL, request.GET["export_insight_id"]).encode() + export response = HttpResponse(export) response[ "Content-Disposition" ] = 'attachment; filename="{name} ({date_from} {date_to}) from PostHog.csv"'.format( name=slugify(request.GET.get("export_name", "export")), date_from=filter.date_from.strftime("%Y-%m-%d -") if filter.date_from else "up until", 
date_to=filter.date_to.strftime("%Y-%m-%d"), ) return response return Response({**result, "next": next}) @cached_function def calculate_trends(self, request: request.Request) -> Dict[str, Any]: team = self.team filter = Filter(request=request, team=self.team) if filter.insight == INSIGHT_STICKINESS or filter.shown_as == TRENDS_STICKINESS: stickiness_filter = StickinessFilter( request=request, team=team, get_earliest_timestamp=get_earliest_timestamp ) result = ClickhouseStickiness().run(stickiness_filter, team) else: trends_query = ClickhouseTrends() result = trends_query.run(filter, team) self._refresh_dashboard(request=request) return {"result": result} # ****************************************** # /projects/:id/insights/funnel # The funnel endpoint is asynchronously processed. When a request is received, the endpoint will # call an async task with an id that can be continually polled for 3 minutes. # # params: # - refresh: (dict) specifies cache to force refresh or poll # - from_dashboard: (dict) determines funnel is being retrieved from dashboard item to update dashboard_item metadata # - **shared filter types # ****************************************** @extend_schema( request=FunnelSerializer, responses=OpenApiResponse( response=FunnelStepsResultsSerializer, description="Note, if funnel_viz_type is set the response will be different.", ), methods=["POST"], tags=["funnel"], operation_id="Funnels", ) @action(methods=["GET", "POST"], detail=False) def funnel(self, request: request.Request, *args: Any, **kwargs: Any) -> Response: try: serializer = FunnelSerializer(request=request) serializer.is_valid(raise_exception=True) except Exception as e: capture_exception(e) funnel = self.calculate_funnel(request) funnel["result"] = protect_old_clients_from_multi_property_default(request.data, funnel["result"]) return Response(funnel) @cached_function def calculate_funnel(self, request: request.Request) -> Dict[str, Any]: team = self.team filter = Filter(request=request, 
data={"insight": INSIGHT_FUNNELS}, team=self.team) if filter.funnel_viz_type == FunnelVizType.TRENDS: return {"result": ClickhouseFunnelTrends(team=team, filter=filter).run()} elif filter.funnel_viz_type == FunnelVizType.TIME_TO_CONVERT: return {"result": ClickhouseFunnelTimeToConvert(team=team, filter=filter).run()} else: funnel_order_class = get_funnel_order_class(filter) return {"result": funnel_order_class(team=team, filter=filter).run()} # ****************************************** # /projects/:id/insights/retention # params: # - start_entity: (dict) specifies id and type of the entity to focus retention on # - **shared filter types # ****************************************** @action(methods=["GET"], detail=False) def retention(self, request: request.Request, *args: Any, **kwargs: Any) -> Response: result = self.calculate_retention(request) return Response(result) @cached_function def calculate_retention(self, request: request.Request) -> Dict[str, Any]: team = self.team data = {} if not request.GET.get("date_from"): data.update({"date_from": "-11d"}) filter = RetentionFilter(data=data, request=request, team=self.team) base_uri = request.build_absolute_uri("/") result = ClickhouseRetention(base_uri=base_uri).run(filter, team) return {"result": result} # ****************************************** # /projects/:id/insights/path # params: # - start: (string) specifies the name of the starting property or element # - request_type: (string: $pageview, $autocapture, $screen, custom_event) specifies the path type # - **shared filter types # ****************************************** @action(methods=["GET", "POST"], detail=False) def path(self, request: request.Request, *args: Any, **kwargs: Any) -> Response: result = self.calculate_path(request) return Response(result) @cached_function def calculate_path(self, request: request.Request) -> Dict[str, Any]: team = self.team filter = PathFilter(request=request, data={"insight": INSIGHT_PATHS}, team=self.team) 
funnel_filter = None funnel_filter_data = request.GET.get("funnel_filter") or request.data.get("funnel_filter") if funnel_filter_data: if isinstance(funnel_filter_data, str): funnel_filter_data = json.loads(funnel_filter_data) funnel_filter = Filter(data={"insight": INSIGHT_FUNNELS, **funnel_filter_data}, team=self.team) # backwards compatibility if filter.path_type: filter = filter.with_data({PATHS_INCLUDE_EVENT_TYPES: [filter.path_type]}) resp = ClickhousePaths(filter=filter, team=team, funnel_filter=funnel_filter).run() return {"result": resp} # Checks if a dashboard id has been set and if so, update the refresh date def _refresh_dashboard(self, request) -> None: dashboard_id = request.GET.get(FROM_DASHBOARD, None) if dashboard_id: Insight.objects.filter(pk=dashboard_id).update(last_refresh=now()) # ****************************************** # /projects/:id/insights/:short_id/viewed # Creates or updates an InsightViewed object for the user/insight combo # ****************************************** @action(methods=["POST"], detail=True) def viewed(self, request: request.Request, *args: Any, **kwargs: Any) -> Response: InsightViewed.objects.update_or_create( team=self.team, user=request.user, insight=self.get_object(), defaults={"last_viewed_at": now()} ) return Response(status=status.HTTP_201_CREATED) class LegacyInsightViewSet(InsightViewSet): legacy_team_compatibility = True
en
0.725334
Simplified serializer to speed response times when loading large amounts of objects. # last_refresh sometimes gets sent if dashboard_item is duplicated # tags are created separately as global tag relationships # Manual tag creation since this create method doesn't call super() # Remove is_sample if it's set as user has altered the sample configuration # Data might not be defined if there is still cached results from before moving from 'results' to 'data' # Update last_refresh without updating "updated_at" (insight edit date) Dashboard item layouts. # ****************************************** # Calculated Insight Endpoints # /projects/:id/insights/trend # /projects/:id/insights/funnel # /projects/:id/insights/retention # /projects/:id/insights/path # # Request parameteres and caching are handled here and passed onto respective .queries classes # ****************************************** # ****************************************** # /projects/:id/insights/trend # # params: # - from_dashboard: (string) determines trend is being retrieved from dashboard item to update dashboard_item metadata # - shown_as: (string: Volume, Stickiness) specifies the trend aggregation type # - **shared filter types # ****************************************** # ****************************************** # /projects/:id/insights/funnel # The funnel endpoint is asynchronously processed. When a request is received, the endpoint will # call an async task with an id that can be continually polled for 3 minutes. 
# # params: # - refresh: (dict) specifies cache to force refresh or poll # - from_dashboard: (dict) determines funnel is being retrieved from dashboard item to update dashboard_item metadata # - **shared filter types # ****************************************** # ****************************************** # /projects/:id/insights/retention # params: # - start_entity: (dict) specifies id and type of the entity to focus retention on # - **shared filter types # ****************************************** # ****************************************** # /projects/:id/insights/path # params: # - start: (string) specifies the name of the starting property or element # - request_type: (string: $pageview, $autocapture, $screen, custom_event) specifies the path type # - **shared filter types # ****************************************** # backwards compatibility # Checks if a dashboard id has been set and if so, update the refresh date # ****************************************** # /projects/:id/insights/:short_id/viewed # Creates or updates an InsightViewed object for the user/insight combo # ******************************************
1.442327
1
src/solana/transaction.py
albert-vo-crypto/solana-py
0
6625296
<filename>src/solana/transaction.py """Library to package an atomic sequence of instructions to a transaction.""" from __future__ import annotations from dataclasses import dataclass from sys import maxsize from typing import Any, Dict, List, NamedTuple, NewType, Optional, Union from based58 import b58decode, b58encode from nacl.exceptions import BadSignatureError # type: ignore from nacl.signing import VerifyKey # type: ignore from solana.blockhash import Blockhash from solana.keypair import Keypair from solana.message import CompiledInstruction, Message, MessageArgs, MessageHeader from solana.publickey import PublicKey from solana.utils import shortvec_encoding as shortvec TransactionSignature = NewType("TransactionSignature", str) """Type for TransactionSignature.""" PACKET_DATA_SIZE = 1280 - 40 - 8 """Constant for maximum over-the-wire size of a Transaction.""" SIG_LENGTH = 64 """Constant for standard length of a signature.""" @dataclass class AccountMeta: """Account metadata dataclass.""" pubkey: PublicKey """An account's public key.""" is_signer: bool """True if an instruction requires a transaction signature matching `pubkey`""" is_writable: bool """True if the `pubkey` can be loaded as a read-write account.""" class TransactionInstruction(NamedTuple): """Transaction Instruction class.""" keys: List[AccountMeta] """Public keys to include in this transaction Boolean represents whether this pubkey needs to sign the transaction. """ program_id: PublicKey """Program Id to execute.""" data: bytes = bytes(0) """Program input.""" class NonceInformation(NamedTuple): """NonceInformation to be used to build a Transaction.""" nonce: Blockhash """The current Nonce blockhash.""" nonce_instruction: TransactionInstruction """AdvanceNonceAccount Instruction.""" @dataclass class SigPubkeyPair: """Pair of signature and corresponding public key.""" pubkey: PublicKey signature: Optional[bytes] = None class Transaction: """Transaction class to represent an atomic transaction. 
Args: recent_blockhash: A recent transaction id. nonce_info: Nonce information. If populated, transaction will use a durable Nonce hash instead of a `recent_blockhash`. signatures: Signatures for the transaction. Typically created by invoking the `sign()` method. fee_payer: The transaction fee payer. """ # Default (empty) signature __DEFAULT_SIG = bytes(64) def __init__( self, recent_blockhash: Optional[Blockhash] = None, nonce_info: Optional[NonceInformation] = None, signatures: Optional[List[SigPubkeyPair]] = None, fee_payer: Optional[PublicKey] = None, ) -> None: """Init transaction object.""" self.fee_payer = fee_payer self.instructions: List[TransactionInstruction] = [] self.signatures: List[SigPubkeyPair] = signatures if signatures else [] self.recent_blockhash, self.nonce_info = recent_blockhash, nonce_info def __eq__(self, other: Any) -> bool: """Equality defintion for Transactions.""" if not isinstance(other, Transaction): return False return ( self.recent_blockhash == other.recent_blockhash and self.nonce_info == other.nonce_info and self.signatures == other.signatures and self.instructions == other.instructions ) def signature(self) -> Optional[bytes]: """The first (payer) Transaction signature. Returns: The payer signature. """ return None if not self.signatures else self.signatures[0].signature def add(self, *args: Union[Transaction, TransactionInstruction]) -> Transaction: """Add one or more instructions to this Transaction. Args: *args: The instructions to add to this Transaction. If a `Transaction` is passsed, the instructions will be extracted from it. Returns: The transaction with the added instructions. """ for arg in args: if isinstance(arg, Transaction): self.instructions.extend(arg.instructions) elif isinstance(arg, TransactionInstruction): self.instructions.append(arg) else: raise ValueError("invalid instruction:", arg) return self def compile_message(self) -> Message: # pylint: disable=too-many-locals """Compile transaction data. 
Returns: The compiled message. """ if self.nonce_info and self.instructions[0] != self.nonce_info.nonce_instruction: self.recent_blockhash = self.nonce_info.nonce self.instructions = [self.nonce_info.nonce_instruction] + self.instructions if not self.recent_blockhash: raise AttributeError("transaction recentBlockhash required") if len(self.instructions) < 1: raise AttributeError("no instructions provided") fee_payer = self.fee_payer if not fee_payer and len(self.signatures) > 0 and self.signatures[0].pubkey: # Use implicit fee payer fee_payer = self.signatures[0].pubkey if not fee_payer: raise AttributeError("transaction feePayer required") account_metas, program_ids = [], [] for instr in self.instructions: if not instr.program_id: raise AttributeError("invalid instruction:", instr) account_metas.extend(instr.keys) if str(instr.program_id) not in program_ids: program_ids.append(str(instr.program_id)) # Append programID account metas. for pg_id in program_ids: account_metas.append(AccountMeta(PublicKey(pg_id), False, False)) # Sort. Prioritizing first by signer, then by writable and converting from set to list. 
account_metas.sort(key=lambda account: (not account.is_signer, not account.is_writable)) # Cull duplicate accounts fee_payer_idx = maxsize seen: Dict[str, int] = {} uniq_metas: List[AccountMeta] = [] for sig in self.signatures: pubkey = str(sig.pubkey) if pubkey in seen: uniq_metas[seen[pubkey]].is_signer = True else: uniq_metas.append(AccountMeta(sig.pubkey, True, True)) seen[pubkey] = len(uniq_metas) - 1 if sig.pubkey == fee_payer: fee_payer_idx = min(fee_payer_idx, seen[pubkey]) for a_m in account_metas: pubkey = str(a_m.pubkey) if pubkey in seen: idx = seen[pubkey] uniq_metas[idx].is_writable = uniq_metas[idx].is_writable or a_m.is_writable else: uniq_metas.append(a_m) seen[pubkey] = len(uniq_metas) - 1 if a_m.pubkey == fee_payer: fee_payer_idx = min(fee_payer_idx, seen[pubkey]) # Move fee payer to the front if fee_payer_idx == maxsize: uniq_metas = [AccountMeta(fee_payer, True, True)] + uniq_metas else: uniq_metas = ( [uniq_metas[fee_payer_idx]] + uniq_metas[:fee_payer_idx] + uniq_metas[fee_payer_idx + 1 :] # noqa: E203 ) # Split out signing from nonsigning keys and count readonlys signed_keys: List[str] = [] unsigned_keys: List[str] = [] num_required_signatures = num_readonly_signed_accounts = num_readonly_unsigned_accounts = 0 for a_m in uniq_metas: if a_m.is_signer: signed_keys.append(str(a_m.pubkey)) num_required_signatures += 1 num_readonly_signed_accounts += int(not a_m.is_writable) else: num_readonly_unsigned_accounts += int(not a_m.is_writable) unsigned_keys.append(str(a_m.pubkey)) # Initialize signature array, if needed if not self.signatures: self.signatures = [SigPubkeyPair(pubkey=PublicKey(key), signature=None) for key in signed_keys] account_keys: List[str] = signed_keys + unsigned_keys account_indices: Dict[str, int] = {str(key): i for i, key in enumerate(account_keys)} compiled_instructions: List[CompiledInstruction] = [ CompiledInstruction( accounts=[account_indices[str(a_m.pubkey)] for a_m in instr.keys], 
program_id_index=account_indices[str(instr.program_id)], data=b58encode(instr.data), ) for instr in self.instructions ] return Message( MessageArgs( header=MessageHeader( num_required_signatures=num_required_signatures, num_readonly_signed_accounts=num_readonly_signed_accounts, num_readonly_unsigned_accounts=num_readonly_unsigned_accounts, ), account_keys=account_keys, instructions=compiled_instructions, recent_blockhash=self.recent_blockhash, ) ) def serialize_message(self) -> bytes: """Get raw transaction data that need to be covered by signatures. Returns: The serialized message. """ return self.compile_message().serialize() def sign_partial(self, *partial_signers: Union[PublicKey, Keypair]) -> None: """Partially sign a Transaction with the specified accounts. The `Keypair` inputs will be used to sign the Transaction immediately, while any `PublicKey` inputs will be referenced in the signed Transaction but need to be filled in later by calling `addSigner()` with the matching `Keypair`. All the caveats from the `sign` method apply to `signPartial` """ def partial_signer_pubkey(account_or_pubkey: Union[PublicKey, Keypair]): return account_or_pubkey.public_key if isinstance(account_or_pubkey, Keypair) else account_or_pubkey signatures: List[SigPubkeyPair] = [ SigPubkeyPair(pubkey=partial_signer_pubkey(partial_signer)) for partial_signer in partial_signers ] self.signatures = signatures sign_data = self.serialize_message() for idx, partial_signer in enumerate(partial_signers): if isinstance(partial_signer, Keypair): sig = partial_signer.sign(sign_data).signature if len(sig) != SIG_LENGTH: raise RuntimeError("signature has invalid length", sig) self.signatures[idx].signature = sig def sign(self, *signers: Keypair) -> None: """Sign the Transaction with the specified accounts. Multiple signatures may be applied to a Transaction. The first signature is considered "primary" and is used when testing for Transaction confirmation. 
Transaction fields should not be modified after the first call to `sign`, as doing so may invalidate the signature and cause the Transaction to be rejected. The Transaction must be assigned a valid `recentBlockhash` before invoking this method. """ self.sign_partial(*signers) def add_signature(self, pubkey: PublicKey, signature: bytes) -> None: """Add an externally created signature to a transaction.""" if len(signature) != SIG_LENGTH: raise ValueError("signature has invalid length", signature) idx = next((i for i, sig_pair in enumerate(self.signatures) if sig_pair.pubkey == pubkey), None) if idx is None: raise ValueError("unknown signer: ", str(pubkey)) self.signatures[idx].signature = signature def add_signer(self, signer: Keypair) -> None: """Fill in a signature for a partially signed Transaction. The `signer` must be the corresponding `Keypair` for a `PublicKey` that was previously provided to `signPartial` """ signed_msg = signer.sign(self.serialize_message()) self.add_signature(signer.public_key, signed_msg.signature) def verify_signatures(self) -> bool: """Verify signatures of a complete, signed Transaction. Returns: a bool indicating if the signatures are correct or not. """ return self.__verify_signatures(self.serialize_message()) def __verify_signatures(self, signed_data: bytes) -> bool: for sig_pair in self.signatures: if not sig_pair.signature: return False try: VerifyKey(bytes(sig_pair.pubkey)).verify(signed_data, sig_pair.signature) except BadSignatureError: return False return True def serialize(self) -> bytes: """Serialize the Transaction in the wire format. The Transaction must have a valid `signature` before invoking this method. 
Example: >>> from solana.keypair import Keypair >>> from solana.blockhash import Blockhash >>> from solana.publickey import PublicKey >>> from solana.system_program import transfer, TransferParams >>> seed = bytes(PublicKey(1)) >>> sender, receiver = Keypair.from_seed(seed), PublicKey(2) >>> transfer_tx = Transaction().add(transfer(TransferParams(from_pubkey=sender.public_key, to_pubkey=receiver, lamports=1000))) >>> transfer_tx.recent_blockhash = Blockhash(str(PublicKey(3))) >>> transfer_tx.sign(sender) >>> transfer_tx.serialize().hex() '019d53be8af3a7c30f86c1092d2c3ea61d270c0cfa275a23ba504674c8fbbb724827b23b42dc8e08019e23120f1b6f40f9799355ce54185b4415be37ca2cee6e0e010001034cb5abf6ad79fbf5abbccafcc269d85cd2651ed4b885b5869f241aedf0a5ba2900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000301020200010c02000000e803000000000000' Returns: The serialized transaction. 
""" # noqa: E501 pylint: disable=line-too-long if not self.signatures: raise AttributeError("transaction has not been signed") sign_data = self.serialize_message() if not self.__verify_signatures(sign_data): raise AttributeError("transaction has not been signed correctly") return self.__serialize(sign_data) def __serialize(self, signed_data: bytes) -> bytes: if len(self.signatures) >= SIG_LENGTH * 4: raise AttributeError("too many signatures to encode") wire_transaction = bytearray() # Encode signature count signature_count = shortvec.encode_length(len(self.signatures)) wire_transaction.extend(signature_count) # Encode signatures for sig_pair in self.signatures: if sig_pair.signature and len(sig_pair.signature) != SIG_LENGTH: raise RuntimeError("signature has invalid length", sig_pair.signature) if not sig_pair.signature: wire_transaction.extend(bytearray(SIG_LENGTH)) else: wire_transaction.extend(sig_pair.signature) # Encode signed data wire_transaction.extend(signed_data) if len(wire_transaction) > PACKET_DATA_SIZE: raise RuntimeError(f"transaction too large: {len(wire_transaction)} > {PACKET_DATA_SIZE}") return bytes(wire_transaction) @staticmethod def deserialize(raw_transaction: bytes) -> Transaction: """Parse a wire transaction into a Transaction object. Example: >>> raw_transaction = bytes.fromhex( ... '019d53be8af3a7c30f86c1092d2c3ea61d270c0cfa2' ... '75a23ba504674c8fbbb724827b23b42dc8e08019e23' ... '120f1b6f40f9799355ce54185b4415be37ca2cee6e0' ... 'e010001034cb5abf6ad79fbf5abbccafcc269d85cd2' ... '651ed4b885b5869f241aedf0a5ba290000000000000' ... '0000000000000000000000000000000000000000000' ... '0000000200000000000000000000000000000000000' ... '0000000000000000000000000000000000000000000' ... '0000000000000000000000000000000000000000000' ... '000000301020200010c02000000e803000000000000' ... ) >>> type(Transaction.deserialize(raw_transaction)) <class 'solana.transaction.Transaction'> Returns: The deserialized transaction. 
""" signatures = [] signature_count, offset = shortvec.decode_length(raw_transaction) for _ in range(signature_count): signatures.append(b58encode(raw_transaction[offset : offset + SIG_LENGTH])) # noqa: E203 offset += SIG_LENGTH return Transaction.populate(Message.deserialize(raw_transaction[offset:]), signatures) @staticmethod def populate(message: Message, signatures: List[bytes]) -> Transaction: """Populate Transaction object from message and signatures. Example: >>> raw_message = bytes.fromhex( ... '0200030500000000000000000000000000000000000000000000' ... '0000000000000000000100000000000000000000000000000000' ... '0000000000000000000000000000000200000000000000000000' ... '0000000000000000000000000000000000000000000300000000' ... '0000000000000000000000000000000000000000000000000000' ... '0004000000000000000000000000000000000000000000000000' ... '0000000000000005c49ae77603782054f17a9decea43b444eba0' ... 'edb12c6f1d31c6e0e4a84bf052eb010403010203050909090909' ... ) >>> from based58 import b58encode >>> from solana.message import Message >>> msg = Message.deserialize(raw_message) >>> signatures = [b58encode(bytes([1] * SIG_LENGTH)), b58encode(bytes([2] * SIG_LENGTH))] >>> type(Transaction.populate(msg, signatures)) <class 'solana.transaction.Transaction'> Returns: The populated transaction. 
""" transaction = Transaction(recent_blockhash=message.recent_blockhash) for idx, sig in enumerate(signatures): signature = None if sig == b58encode(Transaction.__DEFAULT_SIG) else b58decode(sig) transaction.signatures.append(SigPubkeyPair(pubkey=message.account_keys[idx], signature=signature)) for instr in message.instructions: account_metas: List[AccountMeta] = [] for acc_idx in instr.accounts: pubkey = message.account_keys[acc_idx] is_signer = any((pubkey == sigkeypair.pubkey for sigkeypair in transaction.signatures)) account_metas.append( AccountMeta(pubkey=pubkey, is_signer=is_signer, is_writable=message.is_account_writable(acc_idx)) ) program_id = message.account_keys[instr.program_id_index] transaction.instructions.append( TransactionInstruction(keys=account_metas, program_id=program_id, data=b58decode(instr.data)) ) return transaction
<filename>src/solana/transaction.py """Library to package an atomic sequence of instructions to a transaction.""" from __future__ import annotations from dataclasses import dataclass from sys import maxsize from typing import Any, Dict, List, NamedTuple, NewType, Optional, Union from based58 import b58decode, b58encode from nacl.exceptions import BadSignatureError # type: ignore from nacl.signing import VerifyKey # type: ignore from solana.blockhash import Blockhash from solana.keypair import Keypair from solana.message import CompiledInstruction, Message, MessageArgs, MessageHeader from solana.publickey import PublicKey from solana.utils import shortvec_encoding as shortvec TransactionSignature = NewType("TransactionSignature", str) """Type for TransactionSignature.""" PACKET_DATA_SIZE = 1280 - 40 - 8 """Constant for maximum over-the-wire size of a Transaction.""" SIG_LENGTH = 64 """Constant for standard length of a signature.""" @dataclass class AccountMeta: """Account metadata dataclass.""" pubkey: PublicKey """An account's public key.""" is_signer: bool """True if an instruction requires a transaction signature matching `pubkey`""" is_writable: bool """True if the `pubkey` can be loaded as a read-write account.""" class TransactionInstruction(NamedTuple): """Transaction Instruction class.""" keys: List[AccountMeta] """Public keys to include in this transaction Boolean represents whether this pubkey needs to sign the transaction. """ program_id: PublicKey """Program Id to execute.""" data: bytes = bytes(0) """Program input.""" class NonceInformation(NamedTuple): """NonceInformation to be used to build a Transaction.""" nonce: Blockhash """The current Nonce blockhash.""" nonce_instruction: TransactionInstruction """AdvanceNonceAccount Instruction.""" @dataclass class SigPubkeyPair: """Pair of signature and corresponding public key.""" pubkey: PublicKey signature: Optional[bytes] = None class Transaction: """Transaction class to represent an atomic transaction. 
Args: recent_blockhash: A recent transaction id. nonce_info: Nonce information. If populated, transaction will use a durable Nonce hash instead of a `recent_blockhash`. signatures: Signatures for the transaction. Typically created by invoking the `sign()` method. fee_payer: The transaction fee payer. """ # Default (empty) signature __DEFAULT_SIG = bytes(64) def __init__( self, recent_blockhash: Optional[Blockhash] = None, nonce_info: Optional[NonceInformation] = None, signatures: Optional[List[SigPubkeyPair]] = None, fee_payer: Optional[PublicKey] = None, ) -> None: """Init transaction object.""" self.fee_payer = fee_payer self.instructions: List[TransactionInstruction] = [] self.signatures: List[SigPubkeyPair] = signatures if signatures else [] self.recent_blockhash, self.nonce_info = recent_blockhash, nonce_info def __eq__(self, other: Any) -> bool: """Equality defintion for Transactions.""" if not isinstance(other, Transaction): return False return ( self.recent_blockhash == other.recent_blockhash and self.nonce_info == other.nonce_info and self.signatures == other.signatures and self.instructions == other.instructions ) def signature(self) -> Optional[bytes]: """The first (payer) Transaction signature. Returns: The payer signature. """ return None if not self.signatures else self.signatures[0].signature def add(self, *args: Union[Transaction, TransactionInstruction]) -> Transaction: """Add one or more instructions to this Transaction. Args: *args: The instructions to add to this Transaction. If a `Transaction` is passsed, the instructions will be extracted from it. Returns: The transaction with the added instructions. """ for arg in args: if isinstance(arg, Transaction): self.instructions.extend(arg.instructions) elif isinstance(arg, TransactionInstruction): self.instructions.append(arg) else: raise ValueError("invalid instruction:", arg) return self def compile_message(self) -> Message: # pylint: disable=too-many-locals """Compile transaction data. 
Returns: The compiled message. """ if self.nonce_info and self.instructions[0] != self.nonce_info.nonce_instruction: self.recent_blockhash = self.nonce_info.nonce self.instructions = [self.nonce_info.nonce_instruction] + self.instructions if not self.recent_blockhash: raise AttributeError("transaction recentBlockhash required") if len(self.instructions) < 1: raise AttributeError("no instructions provided") fee_payer = self.fee_payer if not fee_payer and len(self.signatures) > 0 and self.signatures[0].pubkey: # Use implicit fee payer fee_payer = self.signatures[0].pubkey if not fee_payer: raise AttributeError("transaction feePayer required") account_metas, program_ids = [], [] for instr in self.instructions: if not instr.program_id: raise AttributeError("invalid instruction:", instr) account_metas.extend(instr.keys) if str(instr.program_id) not in program_ids: program_ids.append(str(instr.program_id)) # Append programID account metas. for pg_id in program_ids: account_metas.append(AccountMeta(PublicKey(pg_id), False, False)) # Sort. Prioritizing first by signer, then by writable and converting from set to list. 
account_metas.sort(key=lambda account: (not account.is_signer, not account.is_writable)) # Cull duplicate accounts fee_payer_idx = maxsize seen: Dict[str, int] = {} uniq_metas: List[AccountMeta] = [] for sig in self.signatures: pubkey = str(sig.pubkey) if pubkey in seen: uniq_metas[seen[pubkey]].is_signer = True else: uniq_metas.append(AccountMeta(sig.pubkey, True, True)) seen[pubkey] = len(uniq_metas) - 1 if sig.pubkey == fee_payer: fee_payer_idx = min(fee_payer_idx, seen[pubkey]) for a_m in account_metas: pubkey = str(a_m.pubkey) if pubkey in seen: idx = seen[pubkey] uniq_metas[idx].is_writable = uniq_metas[idx].is_writable or a_m.is_writable else: uniq_metas.append(a_m) seen[pubkey] = len(uniq_metas) - 1 if a_m.pubkey == fee_payer: fee_payer_idx = min(fee_payer_idx, seen[pubkey]) # Move fee payer to the front if fee_payer_idx == maxsize: uniq_metas = [AccountMeta(fee_payer, True, True)] + uniq_metas else: uniq_metas = ( [uniq_metas[fee_payer_idx]] + uniq_metas[:fee_payer_idx] + uniq_metas[fee_payer_idx + 1 :] # noqa: E203 ) # Split out signing from nonsigning keys and count readonlys signed_keys: List[str] = [] unsigned_keys: List[str] = [] num_required_signatures = num_readonly_signed_accounts = num_readonly_unsigned_accounts = 0 for a_m in uniq_metas: if a_m.is_signer: signed_keys.append(str(a_m.pubkey)) num_required_signatures += 1 num_readonly_signed_accounts += int(not a_m.is_writable) else: num_readonly_unsigned_accounts += int(not a_m.is_writable) unsigned_keys.append(str(a_m.pubkey)) # Initialize signature array, if needed if not self.signatures: self.signatures = [SigPubkeyPair(pubkey=PublicKey(key), signature=None) for key in signed_keys] account_keys: List[str] = signed_keys + unsigned_keys account_indices: Dict[str, int] = {str(key): i for i, key in enumerate(account_keys)} compiled_instructions: List[CompiledInstruction] = [ CompiledInstruction( accounts=[account_indices[str(a_m.pubkey)] for a_m in instr.keys], 
program_id_index=account_indices[str(instr.program_id)], data=b58encode(instr.data), ) for instr in self.instructions ] return Message( MessageArgs( header=MessageHeader( num_required_signatures=num_required_signatures, num_readonly_signed_accounts=num_readonly_signed_accounts, num_readonly_unsigned_accounts=num_readonly_unsigned_accounts, ), account_keys=account_keys, instructions=compiled_instructions, recent_blockhash=self.recent_blockhash, ) ) def serialize_message(self) -> bytes: """Get raw transaction data that need to be covered by signatures. Returns: The serialized message. """ return self.compile_message().serialize() def sign_partial(self, *partial_signers: Union[PublicKey, Keypair]) -> None: """Partially sign a Transaction with the specified accounts. The `Keypair` inputs will be used to sign the Transaction immediately, while any `PublicKey` inputs will be referenced in the signed Transaction but need to be filled in later by calling `addSigner()` with the matching `Keypair`. All the caveats from the `sign` method apply to `signPartial` """ def partial_signer_pubkey(account_or_pubkey: Union[PublicKey, Keypair]): return account_or_pubkey.public_key if isinstance(account_or_pubkey, Keypair) else account_or_pubkey signatures: List[SigPubkeyPair] = [ SigPubkeyPair(pubkey=partial_signer_pubkey(partial_signer)) for partial_signer in partial_signers ] self.signatures = signatures sign_data = self.serialize_message() for idx, partial_signer in enumerate(partial_signers): if isinstance(partial_signer, Keypair): sig = partial_signer.sign(sign_data).signature if len(sig) != SIG_LENGTH: raise RuntimeError("signature has invalid length", sig) self.signatures[idx].signature = sig def sign(self, *signers: Keypair) -> None: """Sign the Transaction with the specified accounts. Multiple signatures may be applied to a Transaction. The first signature is considered "primary" and is used when testing for Transaction confirmation. 
Transaction fields should not be modified after the first call to `sign`, as doing so may invalidate the signature and cause the Transaction to be rejected. The Transaction must be assigned a valid `recentBlockhash` before invoking this method. """ self.sign_partial(*signers) def add_signature(self, pubkey: PublicKey, signature: bytes) -> None: """Add an externally created signature to a transaction.""" if len(signature) != SIG_LENGTH: raise ValueError("signature has invalid length", signature) idx = next((i for i, sig_pair in enumerate(self.signatures) if sig_pair.pubkey == pubkey), None) if idx is None: raise ValueError("unknown signer: ", str(pubkey)) self.signatures[idx].signature = signature def add_signer(self, signer: Keypair) -> None: """Fill in a signature for a partially signed Transaction. The `signer` must be the corresponding `Keypair` for a `PublicKey` that was previously provided to `signPartial` """ signed_msg = signer.sign(self.serialize_message()) self.add_signature(signer.public_key, signed_msg.signature) def verify_signatures(self) -> bool: """Verify signatures of a complete, signed Transaction. Returns: a bool indicating if the signatures are correct or not. """ return self.__verify_signatures(self.serialize_message()) def __verify_signatures(self, signed_data: bytes) -> bool: for sig_pair in self.signatures: if not sig_pair.signature: return False try: VerifyKey(bytes(sig_pair.pubkey)).verify(signed_data, sig_pair.signature) except BadSignatureError: return False return True def serialize(self) -> bytes: """Serialize the Transaction in the wire format. The Transaction must have a valid `signature` before invoking this method. 
Example: >>> from solana.keypair import Keypair >>> from solana.blockhash import Blockhash >>> from solana.publickey import PublicKey >>> from solana.system_program import transfer, TransferParams >>> seed = bytes(PublicKey(1)) >>> sender, receiver = Keypair.from_seed(seed), PublicKey(2) >>> transfer_tx = Transaction().add(transfer(TransferParams(from_pubkey=sender.public_key, to_pubkey=receiver, lamports=1000))) >>> transfer_tx.recent_blockhash = Blockhash(str(PublicKey(3))) >>> transfer_tx.sign(sender) >>> transfer_tx.serialize().hex() '019d53be8af3a7c30f86c1092d2c3ea61d270c0cfa275a23ba504674c8fbbb724827b23b42dc8e08019e23120f1b6f40f9799355ce54185b4415be37ca2cee6e0e010001034cb5abf6ad79fbf5abbccafcc269d85cd2651ed4b885b5869f241aedf0a5ba2900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000301020200010c02000000e803000000000000' Returns: The serialized transaction. 
""" # noqa: E501 pylint: disable=line-too-long if not self.signatures: raise AttributeError("transaction has not been signed") sign_data = self.serialize_message() if not self.__verify_signatures(sign_data): raise AttributeError("transaction has not been signed correctly") return self.__serialize(sign_data) def __serialize(self, signed_data: bytes) -> bytes: if len(self.signatures) >= SIG_LENGTH * 4: raise AttributeError("too many signatures to encode") wire_transaction = bytearray() # Encode signature count signature_count = shortvec.encode_length(len(self.signatures)) wire_transaction.extend(signature_count) # Encode signatures for sig_pair in self.signatures: if sig_pair.signature and len(sig_pair.signature) != SIG_LENGTH: raise RuntimeError("signature has invalid length", sig_pair.signature) if not sig_pair.signature: wire_transaction.extend(bytearray(SIG_LENGTH)) else: wire_transaction.extend(sig_pair.signature) # Encode signed data wire_transaction.extend(signed_data) if len(wire_transaction) > PACKET_DATA_SIZE: raise RuntimeError(f"transaction too large: {len(wire_transaction)} > {PACKET_DATA_SIZE}") return bytes(wire_transaction) @staticmethod def deserialize(raw_transaction: bytes) -> Transaction: """Parse a wire transaction into a Transaction object. Example: >>> raw_transaction = bytes.fromhex( ... '019d53be8af3a7c30f86c1092d2c3ea61d270c0cfa2' ... '75a23ba504674c8fbbb724827b23b42dc8e08019e23' ... '120f1b6f40f9799355ce54185b4415be37ca2cee6e0' ... 'e010001034cb5abf6ad79fbf5abbccafcc269d85cd2' ... '651ed4b885b5869f241aedf0a5ba290000000000000' ... '0000000000000000000000000000000000000000000' ... '0000000200000000000000000000000000000000000' ... '0000000000000000000000000000000000000000000' ... '0000000000000000000000000000000000000000000' ... '000000301020200010c02000000e803000000000000' ... ) >>> type(Transaction.deserialize(raw_transaction)) <class 'solana.transaction.Transaction'> Returns: The deserialized transaction. 
""" signatures = [] signature_count, offset = shortvec.decode_length(raw_transaction) for _ in range(signature_count): signatures.append(b58encode(raw_transaction[offset : offset + SIG_LENGTH])) # noqa: E203 offset += SIG_LENGTH return Transaction.populate(Message.deserialize(raw_transaction[offset:]), signatures) @staticmethod def populate(message: Message, signatures: List[bytes]) -> Transaction: """Populate Transaction object from message and signatures. Example: >>> raw_message = bytes.fromhex( ... '0200030500000000000000000000000000000000000000000000' ... '0000000000000000000100000000000000000000000000000000' ... '0000000000000000000000000000000200000000000000000000' ... '0000000000000000000000000000000000000000000300000000' ... '0000000000000000000000000000000000000000000000000000' ... '0004000000000000000000000000000000000000000000000000' ... '0000000000000005c49ae77603782054f17a9decea43b444eba0' ... 'edb12c6f1d31c6e0e4a84bf052eb010403010203050909090909' ... ) >>> from based58 import b58encode >>> from solana.message import Message >>> msg = Message.deserialize(raw_message) >>> signatures = [b58encode(bytes([1] * SIG_LENGTH)), b58encode(bytes([2] * SIG_LENGTH))] >>> type(Transaction.populate(msg, signatures)) <class 'solana.transaction.Transaction'> Returns: The populated transaction. 
""" transaction = Transaction(recent_blockhash=message.recent_blockhash) for idx, sig in enumerate(signatures): signature = None if sig == b58encode(Transaction.__DEFAULT_SIG) else b58decode(sig) transaction.signatures.append(SigPubkeyPair(pubkey=message.account_keys[idx], signature=signature)) for instr in message.instructions: account_metas: List[AccountMeta] = [] for acc_idx in instr.accounts: pubkey = message.account_keys[acc_idx] is_signer = any((pubkey == sigkeypair.pubkey for sigkeypair in transaction.signatures)) account_metas.append( AccountMeta(pubkey=pubkey, is_signer=is_signer, is_writable=message.is_account_writable(acc_idx)) ) program_id = message.account_keys[instr.program_id_index] transaction.instructions.append( TransactionInstruction(keys=account_metas, program_id=program_id, data=b58decode(instr.data)) ) return transaction
en
0.675439
Library to package an atomic sequence of instructions to a transaction. # type: ignore # type: ignore Type for TransactionSignature. Constant for maximum over-the-wire size of a Transaction. Constant for standard length of a signature. Account metadata dataclass. An account's public key. True if an instruction requires a transaction signature matching `pubkey` True if the `pubkey` can be loaded as a read-write account. Transaction Instruction class. Public keys to include in this transaction Boolean represents whether this pubkey needs to sign the transaction. Program Id to execute. Program input. NonceInformation to be used to build a Transaction. The current Nonce blockhash. AdvanceNonceAccount Instruction. Pair of signature and corresponding public key. Transaction class to represent an atomic transaction. Args: recent_blockhash: A recent transaction id. nonce_info: Nonce information. If populated, transaction will use a durable Nonce hash instead of a `recent_blockhash`. signatures: Signatures for the transaction. Typically created by invoking the `sign()` method. fee_payer: The transaction fee payer. # Default (empty) signature Init transaction object. Equality defintion for Transactions. The first (payer) Transaction signature. Returns: The payer signature. Add one or more instructions to this Transaction. Args: *args: The instructions to add to this Transaction. If a `Transaction` is passsed, the instructions will be extracted from it. Returns: The transaction with the added instructions. # pylint: disable=too-many-locals Compile transaction data. Returns: The compiled message. # Use implicit fee payer # Append programID account metas. # Sort. Prioritizing first by signer, then by writable and converting from set to list. # Cull duplicate accounts # Move fee payer to the front # noqa: E203 # Split out signing from nonsigning keys and count readonlys # Initialize signature array, if needed Get raw transaction data that need to be covered by signatures. 
Returns: The serialized message. Partially sign a Transaction with the specified accounts. The `Keypair` inputs will be used to sign the Transaction immediately, while any `PublicKey` inputs will be referenced in the signed Transaction but need to be filled in later by calling `addSigner()` with the matching `Keypair`. All the caveats from the `sign` method apply to `signPartial` Sign the Transaction with the specified accounts. Multiple signatures may be applied to a Transaction. The first signature is considered "primary" and is used when testing for Transaction confirmation. Transaction fields should not be modified after the first call to `sign`, as doing so may invalidate the signature and cause the Transaction to be rejected. The Transaction must be assigned a valid `recentBlockhash` before invoking this method. Add an externally created signature to a transaction. Fill in a signature for a partially signed Transaction. The `signer` must be the corresponding `Keypair` for a `PublicKey` that was previously provided to `signPartial` Verify signatures of a complete, signed Transaction. Returns: a bool indicating if the signatures are correct or not. Serialize the Transaction in the wire format. The Transaction must have a valid `signature` before invoking this method. 
Example: >>> from solana.keypair import Keypair >>> from solana.blockhash import Blockhash >>> from solana.publickey import PublicKey >>> from solana.system_program import transfer, TransferParams >>> seed = bytes(PublicKey(1)) >>> sender, receiver = Keypair.from_seed(seed), PublicKey(2) >>> transfer_tx = Transaction().add(transfer(TransferParams(from_pubkey=sender.public_key, to_pubkey=receiver, lamports=1000))) >>> transfer_tx.recent_blockhash = Blockhash(str(PublicKey(3))) >>> transfer_tx.sign(sender) >>> transfer_tx.serialize().hex() '019d53be8af3a7c30f86c1092d2c3ea61d270c0cfa275a23ba504674c8fbbb724827b23b42dc8e08019e23120f1b6f40f9799355ce54185b4415be37ca2cee6e0e010001034cb5abf6ad79fbf5abbccafcc269d85cd2651ed4b885b5869f241aedf0a5ba2900000000000000000000000000000000000000000000000000000000000000020000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000000301020200010c02000000e803000000000000' Returns: The serialized transaction. # noqa: E501 pylint: disable=line-too-long # Encode signature count # Encode signatures # Encode signed data Parse a wire transaction into a Transaction object. Example: >>> raw_transaction = bytes.fromhex( ... '019d53be8af3a7c30f86c1092d2c3ea61d270c0cfa2' ... '75a23ba504674c8fbbb724827b23b42dc8e08019e23' ... '120f1b6f40f9799355ce54185b4415be37ca2cee6e0' ... 'e010001034cb5abf6ad79fbf5abbccafcc269d85cd2' ... '651ed4b885b5869f241aedf0a5ba290000000000000' ... '0000000000000000000000000000000000000000000' ... '0000000200000000000000000000000000000000000' ... '0000000000000000000000000000000000000000000' ... '0000000000000000000000000000000000000000000' ... '000000301020200010c02000000e803000000000000' ... ) >>> type(Transaction.deserialize(raw_transaction)) <class 'solana.transaction.Transaction'> Returns: The deserialized transaction. # noqa: E203 Populate Transaction object from message and signatures. Example: >>> raw_message = bytes.fromhex( ... 
'0200030500000000000000000000000000000000000000000000' ... '0000000000000000000100000000000000000000000000000000' ... '0000000000000000000000000000000200000000000000000000' ... '0000000000000000000000000000000000000000000300000000' ... '0000000000000000000000000000000000000000000000000000' ... '0004000000000000000000000000000000000000000000000000' ... '0000000000000005c49ae77603782054f17a9decea43b444eba0' ... 'edb12c6f1d31c6e0e4a84bf052eb010403010203050909090909' ... ) >>> from based58 import b58encode >>> from solana.message import Message >>> msg = Message.deserialize(raw_message) >>> signatures = [b58encode(bytes([1] * SIG_LENGTH)), b58encode(bytes([2] * SIG_LENGTH))] >>> type(Transaction.populate(msg, signatures)) <class 'solana.transaction.Transaction'> Returns: The populated transaction.
2.472402
2
wykan/models/swimlane.py
MobiusM/Wykan
2
6625297
<gh_stars>1-10 from . import _WekanObject class Swimlane(_WekanObject): def __init__(self, id: str): super().__init__(id)
from . import _WekanObject class Swimlane(_WekanObject): def __init__(self, id: str): super().__init__(id)
none
1
1.786143
2
data.py
ofirbartal100/AMNLP_CoReference_Project
0
6625298
from transformers.tokenization_bart import BartTokenizer from utils import extract_clusters2, extract_mentions_to_predicted_clusters_from_clusters, extract_clusters_for_decode from metrics import CorefEvaluator, MentionEvaluator from torch.utils.data import Dataset from utils import flatten_list_of_lists import json import logging import os import pickle from collections import namedtuple from tqdm import tqdm import torch import random # from consts import SPEAKER_START, SPEAKER_END, NULL_ID_FOR_COREF NULL_ID_FOR_COREF = 0 MENTION_START, MENTION_END = '<extra_id_0>', '<extra_id_1>' CorefExample = namedtuple( "CorefExample", ["token_ids", "clusters", "augmented_labels"]) logger = logging.getLogger(__name__) class CorefDataset(Dataset): def __init__(self, file_path, tokenizer, max_seq_length=-1): self.tokenizer = tokenizer logger.info(f"Reading dataset from {file_path}") examples, self.max_mention_num, self.max_cluster_size, self.max_num_clusters = self._parse_jsonlines(file_path) self.max_seq_length = max_seq_length # self.examples, self.lengths, self.num_examples_filtered = self._tokenize(examples) self.examples, self.lengths, self.num_examples_filtered = self.extended_tokenize(examples) logger.info(f"Finished preprocessing Coref dataset. 
{len(self.examples)} examples were extracted, {self.num_examples_filtered} were filtered due to sequence length.") def _parse_jsonlines(self, file_path): examples = [] max_mention_num = -1 max_cluster_size = -1 max_num_clusters = -1 with open(file_path, 'r') as f: for line in f: d = json.loads(line.strip()) doc_key = d["doc_key"] input_words = flatten_list_of_lists(d["sentences"]) clusters = d["clusters"] max_mention_num = max(max_mention_num, len( flatten_list_of_lists(clusters))) max_cluster_size = max(max_cluster_size, max( len(cluster) for cluster in clusters) if clusters else 0) max_num_clusters = max( max_num_clusters, len(clusters) if clusters else 0) speakers = flatten_list_of_lists(d["speakers"]) examples.append((doc_key, input_words, clusters, speakers)) return examples, max_mention_num, max_cluster_size, max_num_clusters def _augment_data(self, tokens, clusters): if isinstance(self.tokenizer ,BartTokenizer): start_mention = 50265 else: start_mention = 32000 end_mention = start_mention+1 or_token = start_mention+2 target = [[] for t in range(len(tokens) + 1)] sorted_clusters = [ sorted(c, key=lambda cc: cc[0]) for c in clusters] sorted_clusters = sorted(sorted_clusters, key=lambda c: c[0][0]) for idx, cluster in enumerate(sorted_clusters): for m in cluster: if(m[0] == m[1]): target[m[0]].append(('se', idx+start_mention+3)) else: target[m[0]].append(('s', idx+start_mention+3)) target[m[1]].append(('e', idx+start_mention+3)) i = 0 j = 0 augmented_data = [] for i in range(len(tokens)): for mi, m in enumerate(target[i]): if mi > 0: augmented_data.append(or_token) if 'e' in m: augmented_data.append(m[1]) augmented_data.append(end_mention) for mi, m in enumerate(target[i]): if mi > 0: augmented_data.append(or_token) if 's' in m: augmented_data.append(start_mention) augmented_data.append(m[1]) augmented_data.append(tokens[i]) for mi, m in enumerate(target[-1]): # if there are mentions that end in the end of the scentence if mi > 0: 
augmented_data.append(or_token) if 'e' in m: augmented_data.append(m[1]) augmented_data.append(end_mention) f1 = self.eval_augmentation(augmented_data, clusters) if f1 < 0.99: print("augmentation bug") return augmented_data def eval_augmentation(self, augmentation, gold_clusters): mention_evaluator = MentionEvaluator() coref_evaluator = CorefEvaluator() gold_clusters = extract_clusters2(gold_clusters) mention_to_gold_clusters = extract_mentions_to_predicted_clusters_from_clusters( gold_clusters) gold_mentions = list(mention_to_gold_clusters.keys()) predicted_clusters = extract_clusters2( self.reverse_augmentation(augmentation)) # need our clusters mention_to_predicted_clusters = extract_mentions_to_predicted_clusters_from_clusters( predicted_clusters) predicted_mentions = list(mention_to_predicted_clusters.keys()) mention_evaluator.update(predicted_mentions, gold_mentions) coref_evaluator.update(predicted_clusters, gold_clusters,mention_to_predicted_clusters, mention_to_gold_clusters) mention_precision, mentions_recall, mention_f1 = mention_evaluator.get_prf() prec, rec, f1 = coref_evaluator.get_prf() return f1 def reverse_augmentation(self, tokens): if isinstance(self.tokenizer ,BartTokenizer): start_mention = 50265 else: start_mention = 32000 end_mention = start_mention+1 clusters_dict = {} i = 0 ci = 0 while i < len(tokens): if tokens[i] == start_mention and i+1 < len(tokens) and tokens[i+1] >= start_mention+3: # valid cluster id #(id if tokens[i+1] in clusters_dict: clusters_dict[tokens[i+1]] += (('s', ci),) else: clusters_dict[tokens[i+1]] = (('s', ci),) i += 1 elif tokens[i] >= start_mention+3 and i+1 < len(tokens) and tokens[i+1] == end_mention: # id) if tokens[i] in clusters_dict: clusters_dict[tokens[i]] += (('e', ci),) else: clusters_dict[tokens[i]] = (('e', ci),) i += 1 elif tokens[i] < start_mention: ci += 1 i += 1 clusters = [] for key in clusters_dict: mentions = clusters_dict[key] cluster = [] stack = [] for m in mentions: if 'se' in m: 
cluster.append((m[1], m[1])) elif 's' in m: stack.append(m[1]) elif 'e' in m: e = m[1] if len(stack) > 0: s = stack.pop() if e == s and len(stack) > 0: cluster.append((stack.pop(), e)) stack.append(s) else: cluster.append((s, e)) if len(cluster)>0: clusters.append(tuple(cluster)) return clusters def _tokenize(self, examples):#normal tokanization coref_examples = [] lengths = [] num_examples_filtered = 0 for doc_key, words, clusters, speakers in tqdm(examples): word_idx_to_start_token_idx = dict() word_idx_to_end_token_idx = dict() end_token_idx_to_word_idx = [] token_ids = [] for idx, word in enumerate(words): word_idx_to_start_token_idx[idx] = len(token_ids) tokenized = self.tokenizer.encode( " " + word, add_special_tokens=False) for _ in range(len(tokenized)): end_token_idx_to_word_idx.append(idx) token_ids.extend(tokenized) word_idx_to_end_token_idx[idx] = len(token_ids) # token_ids.append(self.tokenizer.eos_token_id) new_clusters = [ [(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for start, end in cluster] for cluster in clusters] if new_clusters != []: augmented = self._augment_data(token_ids, new_clusters) if 0 < self.max_seq_length < len(augmented): num_examples_filtered += 1 continue lengths.append(len(augmented)) if new_clusters != []: coref_examples.append(((doc_key, end_token_idx_to_word_idx), CorefExample( token_ids=token_ids, clusters=new_clusters, augmented_labels=augmented))) return coref_examples, lengths, num_examples_filtered def __len__(self): return len(self.examples) def __getitem__(self, item): return self.examples[item] def pad_clusters_inside(self, clusters): return [cluster + [(NULL_ID_FOR_COREF, NULL_ID_FOR_COREF)] * (self.max_cluster_size - len(cluster)) for cluster in clusters] def pad_clusters_outside(self, clusters): return clusters + [[]] * (self.max_num_clusters - len(clusters)) def pad_clusters(self, clusters): clusters = self.pad_clusters_outside(clusters) clusters = self.pad_clusters_inside(clusters) return 
clusters def pad_batch(self, batch, max_length): max_length = max_length +1 # for eos token padded_batch = [] for example in batch: encoded_dict = self.tokenizer.encode_plus( example[0], padding='max_length', max_length=max_length, return_tensors='pt',truncation = True) clusters = self.pad_clusters(example.clusters) # add padding to augmented data as well encoded_dict_augmented = self.tokenizer.encode_plus( example[2], padding='max_length', max_length=max_length, return_tensors='pt',truncation = True) example = (encoded_dict["input_ids"], encoded_dict['attention_mask'], torch.tensor( clusters), encoded_dict_augmented["input_ids"], encoded_dict_augmented['attention_mask']) padded_batch.append(example) tensored_batch = tuple(torch.stack([example[i].squeeze( ) for example in padded_batch], dim=0) for i in range(len(example))) return tensored_batch def extended_tokenize(self, examples):#extended tokenization seperetor = '.' coref_examples = [] lengths = [] num_examples_filtered = 0 for doc_key, words, clusters, speakers in tqdm(examples): word_idx_to_start_token_idx = dict() word_idx_to_end_token_idx = dict() end_token_idx_to_word_idx = [] token_ids = [] seperators = [] for idx, word in enumerate(words): word_idx_to_start_token_idx[idx] = len(token_ids) tokenized = self.tokenizer.encode( " " + word, add_special_tokens=False) if word==seperetor: sep=[len(end_token_idx_to_word_idx)] for _ in range(len(tokenized)): end_token_idx_to_word_idx.append(idx) if word==seperetor: sep.append(len(end_token_idx_to_word_idx)) seperators.append(sep) token_ids.extend(tokenized) word_idx_to_end_token_idx[idx] = len(token_ids) base_clusters = [[(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for start, end in cluster] for cluster in clusters] token_ids = self.extend_token_ids(token_ids,seperators) for token_id in token_ids: trimmed_clusters = trim_clusters(base_clusters, token_id[1], token_id[2]) new_clusters = trimmed_clusters if new_clusters != []: augmented = 
self._augment_data(token_id[0], new_clusters) if 0 < self.max_seq_length < len(augmented): num_examples_filtered += 1 continue lengths.append(len(token_id[0])) if new_clusters != []: coref_examples.append(((doc_key, end_token_idx_to_word_idx), CorefExample( token_ids=token_id[0], clusters=new_clusters, augmented_labels=augmented))) return coref_examples, lengths, num_examples_filtered def extend_token_ids(self,token_ids, idxs): # eos = token_ids[-1] min_span_size = 3 sentences = [] start = 0 for idx in idxs: end = idx[1] sentences.append((token_ids[start:end],start,end)) start = end extended_tokens = [(token_ids, 0, len(token_ids))] num_scentences = len(sentences) for i in range(num_scentences-min_span_size+1): token_sentence = sentences[i][0].copy() sub = random.randint(i+min_span_size,num_scentences) for j in range(i+1,sub): token_sentence.extend(sentences[j][0].copy()) # token_sentence.append(eos) extended_tokens.append((token_sentence, sentences[i][1], sentences[j][2]+1)) return extended_tokens def get_dataset(args, tokenizer, evaluate=False): read_from_cache, file_path = False, '' if evaluate and os.path.exists(args.predict_file_cache): file_path = args.predict_file_cache read_from_cache = True # read_from_cache = False elif (not evaluate) and os.path.exists(args.train_file_cache): file_path = args.train_file_cache read_from_cache = True # read_from_cache = False # read_from_cache = False if read_from_cache: logger.info(f"Reading dataset from {file_path}") with open(file_path, 'rb') as f: a = pickle.load(f) a.tokenizer.vocab_file = tokenizer.vocab_file return a file_path, cache_path = (args.predict_file, args.predict_file_cache) if evaluate else ( args.train_file, args.train_file_cache) # file_path, cache_path = (args.predict_file, args.predict_file_cache) coref_dataset = CorefDataset(file_path, tokenizer, max_seq_length=args.max_seq_length) with open(cache_path, 'wb') as f: pickle.dump(coref_dataset, f) return coref_dataset def trim_clusters(clusters, start, 
end): return list(filter(lambda lst: len(lst)>1 , [list(filter(lambda x: x[1] < (end-start) and x[0] >= 0,map(lambda x: (x[0]-start, x[1]-start), cluster))) for cluster in clusters])) def find_sub_list(sl,l): results=[] sll=len(sl) for ind in (i for i,e in enumerate(l) if e==sl[0]): if l[ind:ind+sll]==sl: results.append((ind,ind+sll-1)) return results
from transformers.tokenization_bart import BartTokenizer from utils import extract_clusters2, extract_mentions_to_predicted_clusters_from_clusters, extract_clusters_for_decode from metrics import CorefEvaluator, MentionEvaluator from torch.utils.data import Dataset from utils import flatten_list_of_lists import json import logging import os import pickle from collections import namedtuple from tqdm import tqdm import torch import random # from consts import SPEAKER_START, SPEAKER_END, NULL_ID_FOR_COREF NULL_ID_FOR_COREF = 0 MENTION_START, MENTION_END = '<extra_id_0>', '<extra_id_1>' CorefExample = namedtuple( "CorefExample", ["token_ids", "clusters", "augmented_labels"]) logger = logging.getLogger(__name__) class CorefDataset(Dataset): def __init__(self, file_path, tokenizer, max_seq_length=-1): self.tokenizer = tokenizer logger.info(f"Reading dataset from {file_path}") examples, self.max_mention_num, self.max_cluster_size, self.max_num_clusters = self._parse_jsonlines(file_path) self.max_seq_length = max_seq_length # self.examples, self.lengths, self.num_examples_filtered = self._tokenize(examples) self.examples, self.lengths, self.num_examples_filtered = self.extended_tokenize(examples) logger.info(f"Finished preprocessing Coref dataset. 
{len(self.examples)} examples were extracted, {self.num_examples_filtered} were filtered due to sequence length.") def _parse_jsonlines(self, file_path): examples = [] max_mention_num = -1 max_cluster_size = -1 max_num_clusters = -1 with open(file_path, 'r') as f: for line in f: d = json.loads(line.strip()) doc_key = d["doc_key"] input_words = flatten_list_of_lists(d["sentences"]) clusters = d["clusters"] max_mention_num = max(max_mention_num, len( flatten_list_of_lists(clusters))) max_cluster_size = max(max_cluster_size, max( len(cluster) for cluster in clusters) if clusters else 0) max_num_clusters = max( max_num_clusters, len(clusters) if clusters else 0) speakers = flatten_list_of_lists(d["speakers"]) examples.append((doc_key, input_words, clusters, speakers)) return examples, max_mention_num, max_cluster_size, max_num_clusters def _augment_data(self, tokens, clusters): if isinstance(self.tokenizer ,BartTokenizer): start_mention = 50265 else: start_mention = 32000 end_mention = start_mention+1 or_token = start_mention+2 target = [[] for t in range(len(tokens) + 1)] sorted_clusters = [ sorted(c, key=lambda cc: cc[0]) for c in clusters] sorted_clusters = sorted(sorted_clusters, key=lambda c: c[0][0]) for idx, cluster in enumerate(sorted_clusters): for m in cluster: if(m[0] == m[1]): target[m[0]].append(('se', idx+start_mention+3)) else: target[m[0]].append(('s', idx+start_mention+3)) target[m[1]].append(('e', idx+start_mention+3)) i = 0 j = 0 augmented_data = [] for i in range(len(tokens)): for mi, m in enumerate(target[i]): if mi > 0: augmented_data.append(or_token) if 'e' in m: augmented_data.append(m[1]) augmented_data.append(end_mention) for mi, m in enumerate(target[i]): if mi > 0: augmented_data.append(or_token) if 's' in m: augmented_data.append(start_mention) augmented_data.append(m[1]) augmented_data.append(tokens[i]) for mi, m in enumerate(target[-1]): # if there are mentions that end in the end of the scentence if mi > 0: 
augmented_data.append(or_token) if 'e' in m: augmented_data.append(m[1]) augmented_data.append(end_mention) f1 = self.eval_augmentation(augmented_data, clusters) if f1 < 0.99: print("augmentation bug") return augmented_data def eval_augmentation(self, augmentation, gold_clusters): mention_evaluator = MentionEvaluator() coref_evaluator = CorefEvaluator() gold_clusters = extract_clusters2(gold_clusters) mention_to_gold_clusters = extract_mentions_to_predicted_clusters_from_clusters( gold_clusters) gold_mentions = list(mention_to_gold_clusters.keys()) predicted_clusters = extract_clusters2( self.reverse_augmentation(augmentation)) # need our clusters mention_to_predicted_clusters = extract_mentions_to_predicted_clusters_from_clusters( predicted_clusters) predicted_mentions = list(mention_to_predicted_clusters.keys()) mention_evaluator.update(predicted_mentions, gold_mentions) coref_evaluator.update(predicted_clusters, gold_clusters,mention_to_predicted_clusters, mention_to_gold_clusters) mention_precision, mentions_recall, mention_f1 = mention_evaluator.get_prf() prec, rec, f1 = coref_evaluator.get_prf() return f1 def reverse_augmentation(self, tokens): if isinstance(self.tokenizer ,BartTokenizer): start_mention = 50265 else: start_mention = 32000 end_mention = start_mention+1 clusters_dict = {} i = 0 ci = 0 while i < len(tokens): if tokens[i] == start_mention and i+1 < len(tokens) and tokens[i+1] >= start_mention+3: # valid cluster id #(id if tokens[i+1] in clusters_dict: clusters_dict[tokens[i+1]] += (('s', ci),) else: clusters_dict[tokens[i+1]] = (('s', ci),) i += 1 elif tokens[i] >= start_mention+3 and i+1 < len(tokens) and tokens[i+1] == end_mention: # id) if tokens[i] in clusters_dict: clusters_dict[tokens[i]] += (('e', ci),) else: clusters_dict[tokens[i]] = (('e', ci),) i += 1 elif tokens[i] < start_mention: ci += 1 i += 1 clusters = [] for key in clusters_dict: mentions = clusters_dict[key] cluster = [] stack = [] for m in mentions: if 'se' in m: 
cluster.append((m[1], m[1])) elif 's' in m: stack.append(m[1]) elif 'e' in m: e = m[1] if len(stack) > 0: s = stack.pop() if e == s and len(stack) > 0: cluster.append((stack.pop(), e)) stack.append(s) else: cluster.append((s, e)) if len(cluster)>0: clusters.append(tuple(cluster)) return clusters def _tokenize(self, examples):#normal tokanization coref_examples = [] lengths = [] num_examples_filtered = 0 for doc_key, words, clusters, speakers in tqdm(examples): word_idx_to_start_token_idx = dict() word_idx_to_end_token_idx = dict() end_token_idx_to_word_idx = [] token_ids = [] for idx, word in enumerate(words): word_idx_to_start_token_idx[idx] = len(token_ids) tokenized = self.tokenizer.encode( " " + word, add_special_tokens=False) for _ in range(len(tokenized)): end_token_idx_to_word_idx.append(idx) token_ids.extend(tokenized) word_idx_to_end_token_idx[idx] = len(token_ids) # token_ids.append(self.tokenizer.eos_token_id) new_clusters = [ [(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for start, end in cluster] for cluster in clusters] if new_clusters != []: augmented = self._augment_data(token_ids, new_clusters) if 0 < self.max_seq_length < len(augmented): num_examples_filtered += 1 continue lengths.append(len(augmented)) if new_clusters != []: coref_examples.append(((doc_key, end_token_idx_to_word_idx), CorefExample( token_ids=token_ids, clusters=new_clusters, augmented_labels=augmented))) return coref_examples, lengths, num_examples_filtered def __len__(self): return len(self.examples) def __getitem__(self, item): return self.examples[item] def pad_clusters_inside(self, clusters): return [cluster + [(NULL_ID_FOR_COREF, NULL_ID_FOR_COREF)] * (self.max_cluster_size - len(cluster)) for cluster in clusters] def pad_clusters_outside(self, clusters): return clusters + [[]] * (self.max_num_clusters - len(clusters)) def pad_clusters(self, clusters): clusters = self.pad_clusters_outside(clusters) clusters = self.pad_clusters_inside(clusters) return 
clusters def pad_batch(self, batch, max_length): max_length = max_length +1 # for eos token padded_batch = [] for example in batch: encoded_dict = self.tokenizer.encode_plus( example[0], padding='max_length', max_length=max_length, return_tensors='pt',truncation = True) clusters = self.pad_clusters(example.clusters) # add padding to augmented data as well encoded_dict_augmented = self.tokenizer.encode_plus( example[2], padding='max_length', max_length=max_length, return_tensors='pt',truncation = True) example = (encoded_dict["input_ids"], encoded_dict['attention_mask'], torch.tensor( clusters), encoded_dict_augmented["input_ids"], encoded_dict_augmented['attention_mask']) padded_batch.append(example) tensored_batch = tuple(torch.stack([example[i].squeeze( ) for example in padded_batch], dim=0) for i in range(len(example))) return tensored_batch def extended_tokenize(self, examples):#extended tokenization seperetor = '.' coref_examples = [] lengths = [] num_examples_filtered = 0 for doc_key, words, clusters, speakers in tqdm(examples): word_idx_to_start_token_idx = dict() word_idx_to_end_token_idx = dict() end_token_idx_to_word_idx = [] token_ids = [] seperators = [] for idx, word in enumerate(words): word_idx_to_start_token_idx[idx] = len(token_ids) tokenized = self.tokenizer.encode( " " + word, add_special_tokens=False) if word==seperetor: sep=[len(end_token_idx_to_word_idx)] for _ in range(len(tokenized)): end_token_idx_to_word_idx.append(idx) if word==seperetor: sep.append(len(end_token_idx_to_word_idx)) seperators.append(sep) token_ids.extend(tokenized) word_idx_to_end_token_idx[idx] = len(token_ids) base_clusters = [[(word_idx_to_start_token_idx[start], word_idx_to_end_token_idx[end]) for start, end in cluster] for cluster in clusters] token_ids = self.extend_token_ids(token_ids,seperators) for token_id in token_ids: trimmed_clusters = trim_clusters(base_clusters, token_id[1], token_id[2]) new_clusters = trimmed_clusters if new_clusters != []: augmented = 
self._augment_data(token_id[0], new_clusters) if 0 < self.max_seq_length < len(augmented): num_examples_filtered += 1 continue lengths.append(len(token_id[0])) if new_clusters != []: coref_examples.append(((doc_key, end_token_idx_to_word_idx), CorefExample( token_ids=token_id[0], clusters=new_clusters, augmented_labels=augmented))) return coref_examples, lengths, num_examples_filtered def extend_token_ids(self,token_ids, idxs): # eos = token_ids[-1] min_span_size = 3 sentences = [] start = 0 for idx in idxs: end = idx[1] sentences.append((token_ids[start:end],start,end)) start = end extended_tokens = [(token_ids, 0, len(token_ids))] num_scentences = len(sentences) for i in range(num_scentences-min_span_size+1): token_sentence = sentences[i][0].copy() sub = random.randint(i+min_span_size,num_scentences) for j in range(i+1,sub): token_sentence.extend(sentences[j][0].copy()) # token_sentence.append(eos) extended_tokens.append((token_sentence, sentences[i][1], sentences[j][2]+1)) return extended_tokens def get_dataset(args, tokenizer, evaluate=False): read_from_cache, file_path = False, '' if evaluate and os.path.exists(args.predict_file_cache): file_path = args.predict_file_cache read_from_cache = True # read_from_cache = False elif (not evaluate) and os.path.exists(args.train_file_cache): file_path = args.train_file_cache read_from_cache = True # read_from_cache = False # read_from_cache = False if read_from_cache: logger.info(f"Reading dataset from {file_path}") with open(file_path, 'rb') as f: a = pickle.load(f) a.tokenizer.vocab_file = tokenizer.vocab_file return a file_path, cache_path = (args.predict_file, args.predict_file_cache) if evaluate else ( args.train_file, args.train_file_cache) # file_path, cache_path = (args.predict_file, args.predict_file_cache) coref_dataset = CorefDataset(file_path, tokenizer, max_seq_length=args.max_seq_length) with open(cache_path, 'wb') as f: pickle.dump(coref_dataset, f) return coref_dataset def trim_clusters(clusters, start, 
end): return list(filter(lambda lst: len(lst)>1 , [list(filter(lambda x: x[1] < (end-start) and x[0] >= 0,map(lambda x: (x[0]-start, x[1]-start), cluster))) for cluster in clusters])) def find_sub_list(sl,l): results=[] sll=len(sl) for ind in (i for i,e in enumerate(l) if e==sl[0]): if l[ind:ind+sll]==sl: results.append((ind,ind+sll-1)) return results
en
0.512766
# from consts import SPEAKER_START, SPEAKER_END, NULL_ID_FOR_COREF # self.examples, self.lengths, self.num_examples_filtered = self._tokenize(examples) # if there are mentions that end in the end of the scentence # need our clusters # valid cluster id #(id # id) #normal tokanization # token_ids.append(self.tokenizer.eos_token_id) # for eos token # add padding to augmented data as well #extended tokenization # eos = token_ids[-1] # token_sentence.append(eos) # read_from_cache = False # read_from_cache = False # read_from_cache = False # file_path, cache_path = (args.predict_file, args.predict_file_cache)
2.354894
2
terradem/massbalance.py
VAW-SwissTerra/terradem
1
6625299
"""Tools to calculate mass balance and convert appropriately from volume.""" from __future__ import annotations import json import os import pathlib import warnings from typing import Any, Callable import geopandas as gpd import numpy as np import pandas as pd import rasterio as rio import shapely from tqdm import tqdm import terradem.dem_tools import terradem.files import terradem.metadata ICE_DENSITY_CONVERSION = 0.85 ICE_DENSITY_ERROR = 0.06 STANDARD_START_YEAR = 1931 STANDARD_END_YEAR = 2016 def read_mb_index() -> pd.DataFrame: data = pd.read_csv( terradem.files.INPUT_FILE_PATHS["massbalance_index"], delim_whitespace=True, skiprows=2, index_col=0, ) data.index.name = "year" return data def match_zones() -> Callable[[float, float, float, float], tuple[float, str]]: mb = read_mb_index().cumsum() standard_mb = pd.Series( index=mb.columns, data=np.diff(mb.T[[STANDARD_START_YEAR, STANDARD_END_YEAR]], axis=1).ravel(), ) zones = sorted(mb.columns, key=lambda x: len(x), reverse=True) lk50_outlines = gpd.read_file(terradem.files.INPUT_FILE_PATHS["lk50_outlines"]) for zone in zones: matches = [] for i, character in enumerate(zone): matches.append(lk50_outlines[f"RivLevel{i}"] == str(character)) all_matches = np.all(matches, axis=0) lk50_outlines.loc[all_matches, "zone"] = zone # Zone A55 is not covered by the zones, so hardcode this to be A54 instead. 
lk50_outlines.loc[ (lk50_outlines["RivLevel0"] == "A") & (lk50_outlines["RivLevel1"] == "5") & (lk50_outlines["RivLevel2"] == "5"), "zone", ] = "A54" lk50_outlines["easting"] = lk50_outlines.geometry.centroid.x lk50_outlines["northing"] = lk50_outlines.geometry.centroid.y def get_mb_factor(easting: float, northing: float, start_year: float, end_year: float) -> tuple[float, str]: # Calculate the distance between the point and each lk50_outline centroid distance = np.linalg.norm( [lk50_outlines["easting"] - easting, lk50_outlines["northing"] - northing], axis=0, ) # Find the closest lk50 outline min_distance_idx = np.argwhere(distance == distance.min()).ravel()[0] # Extract the representative zone for the closest lk50 outline. mb_zone = lk50_outlines.iloc[min_distance_idx]["zone"] # Calculate the mass balance of that zone for the given start and end year actual_mb = mb.loc[int(end_year), mb_zone] - mb.loc[int(start_year), mb_zone] # Calculate the conversion factor to the STANDARD_START_YEAR--STANDARD_END_YEAR factor = standard_mb[mb_zone] / actual_mb return factor, zone return get_mb_factor def get_volume_change() -> None: glacier_indices_ds = rio.open(terradem.files.TEMP_FILES["lk50_rasterized"]) ddem_versions = { "non_interp": terradem.files.TEMP_FILES["ddem_coreg_tcorr"], "norm-regional-national": terradem.files.TEMP_FILES["ddem_coreg_tcorr_national-interp-extrap"], "norm-regional-sgi1-subregion": terradem.files.TEMP_FILES["ddem_coreg_tcorr_subregion1-interp-extrap"], "norm-regional-sgi0-subregion": terradem.files.TEMP_FILES["ddem_coreg_tcorr_subregion0-interp-extrap"], } output = pd.DataFrame( index=ddem_versions.keys(), columns=["mean", "median", "std", "area", "volume_change", "coverage"] ) print("Reading glacier mask") glacier_mask = glacier_indices_ds.read(1, masked=True).filled(0) > 0 total_area = np.count_nonzero(glacier_mask) * (glacier_indices_ds.res[0] * glacier_indices_ds.res[1]) for key in tqdm(ddem_versions): ddem_ds = rio.open(ddem_versions[key]) 
ddem_values = ddem_ds.read(1, masked=True).filled(np.nan)[glacier_mask] output.loc[key] = { "mean": np.nanmean(ddem_values), "median": np.nanmedian(ddem_values), "std": np.nanstd(ddem_values), "area": total_area, "volume_change": np.nanmean(ddem_values) * total_area, "coverage": np.count_nonzero(np.isfinite(ddem_values)) / np.count_nonzero(glacier_mask), } print(output) output.to_csv("temp/volume_change.csv") def get_corrections(): mb_index = read_mb_index().cumsum() dirpath = pathlib.Path(terradem.files.TEMP_SUBDIRS["tcorr_meta_coreg"]) data_list: list[dict[str, Any]] = [] for filepath in dirpath.iterdir(): with open(filepath) as infile: data = json.load(infile) data["station"] = filepath.stem data_list.append(data) corrections = pd.DataFrame(data_list).set_index("station") corrections["start_date"] = pd.to_datetime(corrections["start_date"]) for zone, data in corrections.groupby("sgi_zone", as_index=False): corrections.loc[data.index, "masschange_standard"] = ( mb_index.loc[STANDARD_START_YEAR, zone] - mb_index.loc[STANDARD_END_YEAR, zone] ) corrections.loc[data.index, "masschange_actual"] = ( mb_index.loc[data["start_date"].dt.year.values, zone].values - mb_index.loc[data["end_year"].astype(int), zone].values ) def get_masschanges(easting: float, northing: float) -> tuple[float, float]: distances = np.argmin( np.linalg.norm([corrections["easting"] - easting, corrections["northing"] - northing], axis=0) ) return corrections.iloc[distances]["masschange_standard"], corrections.iloc[distances]["masschange_actual"] return get_masschanges def get_start_and_end_years(): mb_index = read_mb_index().cumsum() dirpath = pathlib.Path(terradem.files.TEMP_SUBDIRS["tcorr_meta_coreg"]) data_list: list[dict[str, Any]] = [] for filepath in dirpath.iterdir(): with open(filepath) as infile: data = json.load(infile) data["station"] = filepath.stem data_list.append(data) corrections = pd.DataFrame(data_list).set_index("station") corrections["start_date"] = 
pd.to_datetime(corrections["start_date"]) def get_start_and_end_year(easting: float, northing: float) -> tuple[float, float]: distances = np.argmin( np.linalg.norm([corrections["easting"] - easting, corrections["northing"] - northing], axis=0) ) return ( corrections.iloc[distances]["start_date"].year + corrections.iloc[distances]["start_date"].month / 12 + corrections.iloc[distances]["start_date"].day / 364.75, corrections.iloc[distances]["end_year"], ) return get_start_and_end_year def temporal_corr_error_model(): stochastic_yearly_error = 0.2 # m/a w.e. masschange_model = get_corrections() def error_model(easting: float, northing: float): standard, actual = masschange_model(easting, northing) return np.sqrt( (((2 * stochastic_yearly_error ** 2) / standard ** 2) + ((2 * stochastic_yearly_error ** 2) / actual ** 2)) * (standard / actual) ** 2 ) return error_model def match_sgi_ids(): sgi_2016 = gpd.read_file(terradem.files.INPUT_FILE_PATHS["sgi_2016"]) sgi_2016["name_lower"] = sgi_2016["name"].str.lower().fillna("") data_dir = pathlib.Path("data/external/mass_balance") warnings.filterwarnings("ignore", category=shapely.errors.ShapelyDeprecationWarning) result_data = [] ids = { "seewijnen": "B52-22", "corbassiere": "B83-03", "murtel": "E23-16", "gietro": "B82-14", "findelen": "B56-03", } results = pd.DataFrame(columns=["sgi-id", "year", "dh", "dm"]) for filepath in filter(lambda s: "longterm" in str(s), data_dir.iterdir()): name = filepath.stem.replace("_longterm", "") if name in ids: match = sgi_2016.loc[sgi_2016["sgi-id"] == ids[name]].iloc[0] else: name = { "ugrindelwald": "<NAME>", }.get(name, None) or name try: match = ( sgi_2016[sgi_2016["name_lower"].str.findall(f".*{name}.*").apply(len) > 0] .sort_values("area_km2") .iloc[-1] ) except IndexError: warnings.warn(f"Cannot find {name}") continue data = ( pd.read_csv(filepath, skiprows=1, delim_whitespace=True, na_values=[-99.0]) .rename(columns={"Year": "year", "B_a(mw.e.)": "dh"}) .ffill() ) data["dm"] = 
(data["Area(km2)"] * 1e6) * data["dh"] data["sgi-id"] = match["sgi-id"] result_data.append(data[["sgi-id", "year", "dh", "dm"]]) continue results = pd.concat(result_data).set_index(["sgi-id", "year"]).squeeze() glacier_wise_dh = pd.read_csv(terradem.files.TEMP_FILES["glacier_wise_dh"]) matthias_dh: pd.DataFrame = ( results.loc[ (results.index.get_level_values(1) >= STANDARD_START_YEAR) & (results.index.get_level_values(1) <= STANDARD_END_YEAR) ] .groupby(level=0) .cumsum() .groupby(level=0) .last() ) glacier_wise_dh.index = glacier_wise_dh["sgi_id"].apply(terradem.utilities.sgi_1973_to_2016) glacier_wise_dh["dm_err_tonswe"] = (glacier_wise_dh["dm_tons_we"] / glacier_wise_dh["dh_m_we"]) * glacier_wise_dh[ "dh_err_mwe" ] glacier_wise_dh.loc[glacier_wise_dh.index == "B36-26", ["dh_m_we", "dm_tons_we"]] *= 0.86 # A correction factor for including Mittelaletschgletscher matthias_dh = matthias_dh.merge( glacier_wise_dh[["dh_m_we", "dm_tons_we", "dh_err_mwe", "dm_err_tonswe"]], left_index=True, right_index=True ).rename( columns={ "dh_m_we": "geodetic_dh", "dh_err_mwe": "geodetic_dh_err", "dm_tons_we": "geodetic_dm", "dm_err_tonswe": "geodetic_dm_err", "dh": "glaciological_dh", "dm": "glaciological_dm", } ) matthias_dh[["glaciological_dh", "glaciological_dm"]] /= STANDARD_END_YEAR - STANDARD_START_YEAR # matthias_dh["geodetic_dh"] = glacier_wise_dh.loc[matthias_dh.index, "dh_m_we"].values * (STANDARD_END_YEAR - STANDARD_START_YEAR) import matplotlib.pyplot as plt for i, col in enumerate(["dh", "dm"]): plt.subplot(1,2, i + 1) sign = -1 if col == "dm" else 1 plt.errorbar(matthias_dh[f"geodetic_{col}"] * sign, matthias_dh[f"glaciological_{col}"] * sign, xerr=matthias_dh[f"geodetic_{col}_err"] * 2, marker="s", lw=0, elinewidth=2, ecolor="black") minval = matthias_dh[[f"geodetic_{col}", f"glaciological_{col}"]].min().min() plt.plot([minval * sign, 0], [minval * sign, 0]) plt.title(f"{STANDARD_START_YEAR}$-${STANDARD_END_YEAR}") if col == "dm m": plt.xscale("log") 
plt.yscale("log") plt.xlabel(f"Geodetic MB ({'m' if col == 'dh' else 'tons'} w.e. a⁻¹)") plt.ylabel(f"Matthias's MB ({'m' if col == 'dh' else 'tons'} w.e. a⁻¹)") plt.show() return plt.subplot(122) plt.scatter(-matthias_dh["geodetic_dm"], -matthias_dh["glaciological_dm"]) minval = matthias_dh[["geodetic_dm", "glaciological_dm"]].min().min() plt.plot([-minval, 0], [-minval, 0]) plt.title(f"{STANDARD_START_YEAR}$-${STANDARD_END_YEAR}") plt.xlabel(r"Geodetic MB (tons w.e. a$^{-1}$)") plt.ylabel(r"Matthias's MB (tons w.e. a$^{-1}$)") plt.show() print(glacier_wise_dh) print(matthias_dh)
"""Tools to calculate mass balance and convert appropriately from volume.""" from __future__ import annotations import json import os import pathlib import warnings from typing import Any, Callable import geopandas as gpd import numpy as np import pandas as pd import rasterio as rio import shapely from tqdm import tqdm import terradem.dem_tools import terradem.files import terradem.metadata ICE_DENSITY_CONVERSION = 0.85 ICE_DENSITY_ERROR = 0.06 STANDARD_START_YEAR = 1931 STANDARD_END_YEAR = 2016 def read_mb_index() -> pd.DataFrame: data = pd.read_csv( terradem.files.INPUT_FILE_PATHS["massbalance_index"], delim_whitespace=True, skiprows=2, index_col=0, ) data.index.name = "year" return data def match_zones() -> Callable[[float, float, float, float], tuple[float, str]]: mb = read_mb_index().cumsum() standard_mb = pd.Series( index=mb.columns, data=np.diff(mb.T[[STANDARD_START_YEAR, STANDARD_END_YEAR]], axis=1).ravel(), ) zones = sorted(mb.columns, key=lambda x: len(x), reverse=True) lk50_outlines = gpd.read_file(terradem.files.INPUT_FILE_PATHS["lk50_outlines"]) for zone in zones: matches = [] for i, character in enumerate(zone): matches.append(lk50_outlines[f"RivLevel{i}"] == str(character)) all_matches = np.all(matches, axis=0) lk50_outlines.loc[all_matches, "zone"] = zone # Zone A55 is not covered by the zones, so hardcode this to be A54 instead. 
lk50_outlines.loc[ (lk50_outlines["RivLevel0"] == "A") & (lk50_outlines["RivLevel1"] == "5") & (lk50_outlines["RivLevel2"] == "5"), "zone", ] = "A54" lk50_outlines["easting"] = lk50_outlines.geometry.centroid.x lk50_outlines["northing"] = lk50_outlines.geometry.centroid.y def get_mb_factor(easting: float, northing: float, start_year: float, end_year: float) -> tuple[float, str]: # Calculate the distance between the point and each lk50_outline centroid distance = np.linalg.norm( [lk50_outlines["easting"] - easting, lk50_outlines["northing"] - northing], axis=0, ) # Find the closest lk50 outline min_distance_idx = np.argwhere(distance == distance.min()).ravel()[0] # Extract the representative zone for the closest lk50 outline. mb_zone = lk50_outlines.iloc[min_distance_idx]["zone"] # Calculate the mass balance of that zone for the given start and end year actual_mb = mb.loc[int(end_year), mb_zone] - mb.loc[int(start_year), mb_zone] # Calculate the conversion factor to the STANDARD_START_YEAR--STANDARD_END_YEAR factor = standard_mb[mb_zone] / actual_mb return factor, zone return get_mb_factor def get_volume_change() -> None: glacier_indices_ds = rio.open(terradem.files.TEMP_FILES["lk50_rasterized"]) ddem_versions = { "non_interp": terradem.files.TEMP_FILES["ddem_coreg_tcorr"], "norm-regional-national": terradem.files.TEMP_FILES["ddem_coreg_tcorr_national-interp-extrap"], "norm-regional-sgi1-subregion": terradem.files.TEMP_FILES["ddem_coreg_tcorr_subregion1-interp-extrap"], "norm-regional-sgi0-subregion": terradem.files.TEMP_FILES["ddem_coreg_tcorr_subregion0-interp-extrap"], } output = pd.DataFrame( index=ddem_versions.keys(), columns=["mean", "median", "std", "area", "volume_change", "coverage"] ) print("Reading glacier mask") glacier_mask = glacier_indices_ds.read(1, masked=True).filled(0) > 0 total_area = np.count_nonzero(glacier_mask) * (glacier_indices_ds.res[0] * glacier_indices_ds.res[1]) for key in tqdm(ddem_versions): ddem_ds = rio.open(ddem_versions[key]) 
ddem_values = ddem_ds.read(1, masked=True).filled(np.nan)[glacier_mask] output.loc[key] = { "mean": np.nanmean(ddem_values), "median": np.nanmedian(ddem_values), "std": np.nanstd(ddem_values), "area": total_area, "volume_change": np.nanmean(ddem_values) * total_area, "coverage": np.count_nonzero(np.isfinite(ddem_values)) / np.count_nonzero(glacier_mask), } print(output) output.to_csv("temp/volume_change.csv") def get_corrections(): mb_index = read_mb_index().cumsum() dirpath = pathlib.Path(terradem.files.TEMP_SUBDIRS["tcorr_meta_coreg"]) data_list: list[dict[str, Any]] = [] for filepath in dirpath.iterdir(): with open(filepath) as infile: data = json.load(infile) data["station"] = filepath.stem data_list.append(data) corrections = pd.DataFrame(data_list).set_index("station") corrections["start_date"] = pd.to_datetime(corrections["start_date"]) for zone, data in corrections.groupby("sgi_zone", as_index=False): corrections.loc[data.index, "masschange_standard"] = ( mb_index.loc[STANDARD_START_YEAR, zone] - mb_index.loc[STANDARD_END_YEAR, zone] ) corrections.loc[data.index, "masschange_actual"] = ( mb_index.loc[data["start_date"].dt.year.values, zone].values - mb_index.loc[data["end_year"].astype(int), zone].values ) def get_masschanges(easting: float, northing: float) -> tuple[float, float]: distances = np.argmin( np.linalg.norm([corrections["easting"] - easting, corrections["northing"] - northing], axis=0) ) return corrections.iloc[distances]["masschange_standard"], corrections.iloc[distances]["masschange_actual"] return get_masschanges def get_start_and_end_years(): mb_index = read_mb_index().cumsum() dirpath = pathlib.Path(terradem.files.TEMP_SUBDIRS["tcorr_meta_coreg"]) data_list: list[dict[str, Any]] = [] for filepath in dirpath.iterdir(): with open(filepath) as infile: data = json.load(infile) data["station"] = filepath.stem data_list.append(data) corrections = pd.DataFrame(data_list).set_index("station") corrections["start_date"] = 
pd.to_datetime(corrections["start_date"]) def get_start_and_end_year(easting: float, northing: float) -> tuple[float, float]: distances = np.argmin( np.linalg.norm([corrections["easting"] - easting, corrections["northing"] - northing], axis=0) ) return ( corrections.iloc[distances]["start_date"].year + corrections.iloc[distances]["start_date"].month / 12 + corrections.iloc[distances]["start_date"].day / 364.75, corrections.iloc[distances]["end_year"], ) return get_start_and_end_year def temporal_corr_error_model(): stochastic_yearly_error = 0.2 # m/a w.e. masschange_model = get_corrections() def error_model(easting: float, northing: float): standard, actual = masschange_model(easting, northing) return np.sqrt( (((2 * stochastic_yearly_error ** 2) / standard ** 2) + ((2 * stochastic_yearly_error ** 2) / actual ** 2)) * (standard / actual) ** 2 ) return error_model def match_sgi_ids(): sgi_2016 = gpd.read_file(terradem.files.INPUT_FILE_PATHS["sgi_2016"]) sgi_2016["name_lower"] = sgi_2016["name"].str.lower().fillna("") data_dir = pathlib.Path("data/external/mass_balance") warnings.filterwarnings("ignore", category=shapely.errors.ShapelyDeprecationWarning) result_data = [] ids = { "seewijnen": "B52-22", "corbassiere": "B83-03", "murtel": "E23-16", "gietro": "B82-14", "findelen": "B56-03", } results = pd.DataFrame(columns=["sgi-id", "year", "dh", "dm"]) for filepath in filter(lambda s: "longterm" in str(s), data_dir.iterdir()): name = filepath.stem.replace("_longterm", "") if name in ids: match = sgi_2016.loc[sgi_2016["sgi-id"] == ids[name]].iloc[0] else: name = { "ugrindelwald": "<NAME>", }.get(name, None) or name try: match = ( sgi_2016[sgi_2016["name_lower"].str.findall(f".*{name}.*").apply(len) > 0] .sort_values("area_km2") .iloc[-1] ) except IndexError: warnings.warn(f"Cannot find {name}") continue data = ( pd.read_csv(filepath, skiprows=1, delim_whitespace=True, na_values=[-99.0]) .rename(columns={"Year": "year", "B_a(mw.e.)": "dh"}) .ffill() ) data["dm"] = 
(data["Area(km2)"] * 1e6) * data["dh"] data["sgi-id"] = match["sgi-id"] result_data.append(data[["sgi-id", "year", "dh", "dm"]]) continue results = pd.concat(result_data).set_index(["sgi-id", "year"]).squeeze() glacier_wise_dh = pd.read_csv(terradem.files.TEMP_FILES["glacier_wise_dh"]) matthias_dh: pd.DataFrame = ( results.loc[ (results.index.get_level_values(1) >= STANDARD_START_YEAR) & (results.index.get_level_values(1) <= STANDARD_END_YEAR) ] .groupby(level=0) .cumsum() .groupby(level=0) .last() ) glacier_wise_dh.index = glacier_wise_dh["sgi_id"].apply(terradem.utilities.sgi_1973_to_2016) glacier_wise_dh["dm_err_tonswe"] = (glacier_wise_dh["dm_tons_we"] / glacier_wise_dh["dh_m_we"]) * glacier_wise_dh[ "dh_err_mwe" ] glacier_wise_dh.loc[glacier_wise_dh.index == "B36-26", ["dh_m_we", "dm_tons_we"]] *= 0.86 # A correction factor for including Mittelaletschgletscher matthias_dh = matthias_dh.merge( glacier_wise_dh[["dh_m_we", "dm_tons_we", "dh_err_mwe", "dm_err_tonswe"]], left_index=True, right_index=True ).rename( columns={ "dh_m_we": "geodetic_dh", "dh_err_mwe": "geodetic_dh_err", "dm_tons_we": "geodetic_dm", "dm_err_tonswe": "geodetic_dm_err", "dh": "glaciological_dh", "dm": "glaciological_dm", } ) matthias_dh[["glaciological_dh", "glaciological_dm"]] /= STANDARD_END_YEAR - STANDARD_START_YEAR # matthias_dh["geodetic_dh"] = glacier_wise_dh.loc[matthias_dh.index, "dh_m_we"].values * (STANDARD_END_YEAR - STANDARD_START_YEAR) import matplotlib.pyplot as plt for i, col in enumerate(["dh", "dm"]): plt.subplot(1,2, i + 1) sign = -1 if col == "dm" else 1 plt.errorbar(matthias_dh[f"geodetic_{col}"] * sign, matthias_dh[f"glaciological_{col}"] * sign, xerr=matthias_dh[f"geodetic_{col}_err"] * 2, marker="s", lw=0, elinewidth=2, ecolor="black") minval = matthias_dh[[f"geodetic_{col}", f"glaciological_{col}"]].min().min() plt.plot([minval * sign, 0], [minval * sign, 0]) plt.title(f"{STANDARD_START_YEAR}$-${STANDARD_END_YEAR}") if col == "dm m": plt.xscale("log") 
plt.yscale("log") plt.xlabel(f"Geodetic MB ({'m' if col == 'dh' else 'tons'} w.e. a⁻¹)") plt.ylabel(f"Matthias's MB ({'m' if col == 'dh' else 'tons'} w.e. a⁻¹)") plt.show() return plt.subplot(122) plt.scatter(-matthias_dh["geodetic_dm"], -matthias_dh["glaciological_dm"]) minval = matthias_dh[["geodetic_dm", "glaciological_dm"]].min().min() plt.plot([-minval, 0], [-minval, 0]) plt.title(f"{STANDARD_START_YEAR}$-${STANDARD_END_YEAR}") plt.xlabel(r"Geodetic MB (tons w.e. a$^{-1}$)") plt.ylabel(r"Matthias's MB (tons w.e. a$^{-1}$)") plt.show() print(glacier_wise_dh) print(matthias_dh)
en
0.669081
Tools to calculate mass balance and convert appropriately from volume. # Zone A55 is not covered by the zones, so hardcode this to be A54 instead. # Calculate the distance between the point and each lk50_outline centroid # Find the closest lk50 outline # Extract the representative zone for the closest lk50 outline. # Calculate the mass balance of that zone for the given start and end year # Calculate the conversion factor to the STANDARD_START_YEAR--STANDARD_END_YEAR # m/a w.e. # A correction factor for including Mittelaletschgletscher # matthias_dh["geodetic_dh"] = glacier_wise_dh.loc[matthias_dh.index, "dh_m_we"].values * (STANDARD_END_YEAR - STANDARD_START_YEAR)
2.080963
2
examples/docs_snippets/docs_snippets_tests/concepts_tests/solids_pipelines_tests/test_hooks.py
kstennettlull/dagster
0
6625300
<reponame>kstennettlull/dagster<gh_stars>0 from unittest import mock from dagster import DagsterEventType, ResourceDefinition, job, op from docs_snippets.concepts.solids_pipelines.op_hooks import ( a, notif_all, notif_all_dev, notif_all_prod, selective_notif, slack_message_on_failure, slack_message_on_success, test_my_success_hook, ) from docs_snippets.concepts.solids_pipelines.op_hooks_context import my_failure_hook def test_notif_all(): result = notif_all.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if event.event_type == DagsterEventType.HOOK_COMPLETED: assert event.step_key == "b" def test_selective_notif(): result = selective_notif.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if event.event_type == DagsterEventType.HOOK_COMPLETED: assert event.step_key == "a" def test_notif_all_dev(): result = notif_all_dev.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if event.event_type == DagsterEventType.HOOK_COMPLETED: assert event.step_key == "b" def test_notif_all_prod(): result = notif_all_prod.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if 
event.event_type == DagsterEventType.HOOK_COMPLETED: assert event.step_key == "b" def test_hook_resource(): slack_mock = mock.MagicMock() @job( resource_defs={"slack": ResourceDefinition.hardcoded_resource(slack_mock)}, ) def foo(): a.with_hooks({slack_message_on_success, slack_message_on_failure})() foo.execute_in_process() assert slack_mock.chat.post_message.call_count == 1 def test_failure_hook_solid_exception(): @op def failed_op(): raise Exception("my failure") @job(hooks={my_failure_hook}) def foo(): failed_op() result = foo.execute_in_process(raise_on_error=False) assert not result.success def test_hook_testing_example(): test_my_success_hook()
from unittest import mock from dagster import DagsterEventType, ResourceDefinition, job, op from docs_snippets.concepts.solids_pipelines.op_hooks import ( a, notif_all, notif_all_dev, notif_all_prod, selective_notif, slack_message_on_failure, slack_message_on_success, test_my_success_hook, ) from docs_snippets.concepts.solids_pipelines.op_hooks_context import my_failure_hook def test_notif_all(): result = notif_all.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if event.event_type == DagsterEventType.HOOK_COMPLETED: assert event.step_key == "b" def test_selective_notif(): result = selective_notif.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if event.event_type == DagsterEventType.HOOK_COMPLETED: assert event.step_key == "a" def test_notif_all_dev(): result = notif_all_dev.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if event.event_type == DagsterEventType.HOOK_COMPLETED: assert event.step_key == "b" def test_notif_all_prod(): result = notif_all_prod.execute_in_process( run_config={"resources": {"slack": {"config": {"token": "..."}}}}, raise_on_error=False, ) assert not result.success for event in result.all_node_events: if event.is_hook_event: if event.event_type == DagsterEventType.HOOK_SKIPPED: assert event.step_key == "a" if event.event_type == 
DagsterEventType.HOOK_COMPLETED: assert event.step_key == "b" def test_hook_resource(): slack_mock = mock.MagicMock() @job( resource_defs={"slack": ResourceDefinition.hardcoded_resource(slack_mock)}, ) def foo(): a.with_hooks({slack_message_on_success, slack_message_on_failure})() foo.execute_in_process() assert slack_mock.chat.post_message.call_count == 1 def test_failure_hook_solid_exception(): @op def failed_op(): raise Exception("my failure") @job(hooks={my_failure_hook}) def foo(): failed_op() result = foo.execute_in_process(raise_on_error=False) assert not result.success def test_hook_testing_example(): test_my_success_hook()
none
1
1.786185
2
src/flwr_experimental/ops/compute/docker_adapter_test.py
yiliucs/flower
1
6625301
# Copyright 2020 Adap GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests DockerAdapter.""" import os import time import unittest import docker from .docker_adapter import DockerAdapter, get_free_port if os.getenv("FLOWER_INTEGRATION"): class DockerAdapterIntegrationTestCase(unittest.TestCase): """Test suite for class DockerAdapter. Required docker to be available on the host machine. """ def setUp(self) -> None: """Prepare tests.""" self.name = "flower_test" self.client = docker.from_env() self.adapter = DockerAdapter(name=self.name) def tearDown(self) -> None: """Cleanup tests""" containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) for container in containers: container.remove(force=True) self.client.close() def test_create_instances(self): """Create and start an instance.""" # Execute instances = self.adapter.create_instances( num_cpu=2, num_ram=2, timeout=1, num_instance=2, gpu=False ) # Assert assert len(instances) == 2 containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) assert len(containers) == 2 def test_list_instances(self): """List all instances.""" # Prepare for _ in range(2): port = get_free_port() self.client.containers.run( "flower-sshd:latest", auto_remove=True, detach=True, ports={"22/tcp": port}, labels={"adapter_name": self.name}, ) # Execute instances = 
self.adapter.list_instances() # Assert assert len(instances) == 2, "Expected to find two instances." ports = {i[3] for i in instances} assert len(ports) == 2, "Each instance should have a distinct port." def test_terminate_instance(self): """Destroy all instances.""" # Prepare port = get_free_port() container = self.client.containers.run( "flower-sshd:latest", name=f"{self.name}_{int(time.time() * 1000)}", auto_remove=True, detach=True, ports={"22/tcp": port}, labels={"adapter_name": self.name}, ) # Execute self.adapter.terminate_instances([container.short_id]) # Assert containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) assert len(containers) == 0 def test_terminate_all_instances(self): """Destroy all instances.""" # Prepare for _ in range(2): port = get_free_port() self.client.containers.run( "flower-sshd:latest", name=f"{self.name}_{int(time.time() * 1000)}", auto_remove=True, detach=True, ports={"22/tcp": port}, ) # Execute self.adapter.terminate_all_instances() # Assert containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) assert len(containers) == 0 if __name__ == "__main__": unittest.main(verbosity=2)
# Copyright 2020 Adap GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== """Tests DockerAdapter.""" import os import time import unittest import docker from .docker_adapter import DockerAdapter, get_free_port if os.getenv("FLOWER_INTEGRATION"): class DockerAdapterIntegrationTestCase(unittest.TestCase): """Test suite for class DockerAdapter. Required docker to be available on the host machine. """ def setUp(self) -> None: """Prepare tests.""" self.name = "flower_test" self.client = docker.from_env() self.adapter = DockerAdapter(name=self.name) def tearDown(self) -> None: """Cleanup tests""" containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) for container in containers: container.remove(force=True) self.client.close() def test_create_instances(self): """Create and start an instance.""" # Execute instances = self.adapter.create_instances( num_cpu=2, num_ram=2, timeout=1, num_instance=2, gpu=False ) # Assert assert len(instances) == 2 containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) assert len(containers) == 2 def test_list_instances(self): """List all instances.""" # Prepare for _ in range(2): port = get_free_port() self.client.containers.run( "flower-sshd:latest", auto_remove=True, detach=True, ports={"22/tcp": port}, labels={"adapter_name": self.name}, ) # Execute instances = 
self.adapter.list_instances() # Assert assert len(instances) == 2, "Expected to find two instances." ports = {i[3] for i in instances} assert len(ports) == 2, "Each instance should have a distinct port." def test_terminate_instance(self): """Destroy all instances.""" # Prepare port = get_free_port() container = self.client.containers.run( "flower-sshd:latest", name=f"{self.name}_{int(time.time() * 1000)}", auto_remove=True, detach=True, ports={"22/tcp": port}, labels={"adapter_name": self.name}, ) # Execute self.adapter.terminate_instances([container.short_id]) # Assert containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) assert len(containers) == 0 def test_terminate_all_instances(self): """Destroy all instances.""" # Prepare for _ in range(2): port = get_free_port() self.client.containers.run( "flower-sshd:latest", name=f"{self.name}_{int(time.time() * 1000)}", auto_remove=True, detach=True, ports={"22/tcp": port}, ) # Execute self.adapter.terminate_all_instances() # Assert containers = self.client.containers.list( filters={"label": f"adapter_name={self.name}"} ) assert len(containers) == 0 if __name__ == "__main__": unittest.main(verbosity=2)
en
0.784339
# Copyright 2020 Adap GmbH. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # ============================================================================== Tests DockerAdapter. Test suite for class DockerAdapter. Required docker to be available on the host machine. Prepare tests. Cleanup tests Create and start an instance. # Execute # Assert List all instances. # Prepare # Execute # Assert Destroy all instances. # Prepare # Execute # Assert Destroy all instances. # Prepare # Execute # Assert
1.897403
2
comentarios/forms.py
gabrielmachado395/Galart
0
6625302
from django.forms import ModelForm from .models import Comentario class FormComentario(ModelForm): def clean(self): data = self.cleaned_data nome = data.get('nome_comentario') email = data.get('email_comentario') comentario = data.get('comentario') if any(numero.isdigit() for numero in nome): self.add_error( 'nome_comentario', 'Nome não pode ter números.' ) class Meta: model = Comentario fields = ('nome_comentario', 'email_comentario', 'comentario')
from django.forms import ModelForm from .models import Comentario class FormComentario(ModelForm): def clean(self): data = self.cleaned_data nome = data.get('nome_comentario') email = data.get('email_comentario') comentario = data.get('comentario') if any(numero.isdigit() for numero in nome): self.add_error( 'nome_comentario', 'Nome não pode ter números.' ) class Meta: model = Comentario fields = ('nome_comentario', 'email_comentario', 'comentario')
none
1
2.567818
3
parebrick/characters/balanced.py
ctlab/parallel-rearrangements
3
6625303
from bg.grimm import GRIMMReader from bg.tree import BGTree from bg.genome import BGGenome from bg.vertices import BGVertex import numpy as np import os import csv get_colors_by_edge = lambda e: e.multicolor.multicolors def white_proportion(colors): return np.mean(list(map(lambda c: c == 0, colors))) def get_character_by_edge(bg, edge, genomes, neighbour_index): def get_neighbour_with_genome(v, genome): return neighbour_index[(v, genome)] def get_genome_character_state_by_edge(genome): if cnt[BGGenome(genome)] == 1: return 0 else: v1, v2 = edge.vertex1.name, edge.vertex2.name if v1 > v2: v1, v2 = v2, v1 try: v1_neighbour = get_neighbour_with_genome(v1, genome) v2_neighbour = get_neighbour_with_genome(v2, genome) except KeyError: return 2 if bg.get_edge_by_two_vertices(v1_neighbour, v2_neighbour): pair = (v1_neighbour, v2_neighbour) if pair not in possible_edges: possible_edges.append(pair) return 3 + possible_edges.index(pair) else: return 1 cnt = get_colors_by_edge(edge) possible_edges = [] return {genome: get_genome_character_state_by_edge(genome) for genome in genomes}, possible_edges def construct_vertex_genome_index(bg): neighbour_index = {} for v in bg.bg: for edge in bg.get_edges_by_vertex(v): colors = get_colors_by_edge(edge) for color in colors: neighbour_index[(str(v), color.name)] = edge.vertex2 return neighbour_index def get_characters(grimm_file, genomes, logger): bg = GRIMMReader.get_breakpoint_graph(open(grimm_file)) logger.info('Breakpoint graph parsed') logger.info(f'Edges in breakpoint graph: {len(list(bg.edges()))}') characters = [] # consistency_checker = TreeConsistencyChecker(tree_file) for i, component_bg in enumerate(bg.connected_components_subgraphs()): nodes_len = len(list(component_bg.nodes())) if nodes_len == 2: continue logger.info(f'Getting characters from breakpoint graph component, size={len(component_bg.bg)}') neighbour_index = construct_vertex_genome_index(component_bg) for i_edge, edge in enumerate(component_bg.edges()): v1, v2 = 
edge.vertex1.name, edge.vertex2.name if v1 > v2: v1, v2 = v2, v1 genome_colors, neighbour_edges = get_character_by_edge(component_bg, edge, genomes, neighbour_index) if white_proportion(genome_colors.values()) < 0.5: continue labels = ['adjacency exists', 'complex break of adjacency', 'some block is not presented'] + \ [f'inversion with {v1n}-{v2n}' for (v1n, v2n) in neighbour_edges] characters.append((v1, v2, genome_colors, labels)) return characters def get_characters_stats_balanced(characters, tree_holder, distance_between_blocks): ans = [] for v1, v2, genome_colors, labels in characters: tree_holder.count_innovations_fitch(genome_colors, count_second_color=False) b1, b2 = int(v1[:-1]), int(v2[:-1]) if b1 > b2: b1, b2 = b2, b1 score_rear, count_rear, count_all_rear = tree_holder.count_parallel_rearrangements(skip_grey=True) score_break, count_break = tree_holder.count_parallel_breakpoints() white_strains = [strain for strain, color in genome_colors.items() if color == 0] mean_break_length = np.mean([distance_between_blocks[(b1, b2)][strain] for strain in white_strains]) ans.append([f'{v1}–{v2}', int(mean_break_length), score_rear, count_rear, count_all_rear, score_break, count_break, count_break <= 1]) return ans def write_stats_csv_balanced(stats, stats_file): rows = [['id', 'adjacency', 'mean_break_length_nucleotide', 'parallel_rear_score', 'number_of_inconsistent_colors', 'number_of_parallel_events', 'parallel_break_score', 'number_of_parallel_breaks', 'tree_consistent']] + \ [[i+1] + stat for i, stat in enumerate(stats)] with open(stats_file, 'w') as f: wtr = csv.writer(f) wtr.writerows(rows) def write_characters_csv_balanced(characters, folder): os.makedirs(folder, exist_ok=True) fill_length = len(str(len(characters))) for i, (v1, v2, genome_colors, labels) in enumerate(characters): rows = [['strain', 'character_state', 'character_state_annotation']] + \ [[strain, color, labels[color]] for strain, color in genome_colors.items()] with open(folder + 
f'id_{str(i + 1).zfill(fill_length)}_edge_{v1}-{v2}.csv', 'w') as f: wtr = csv.writer(f) wtr.writerows(rows) def write_trees_balanced(characters, folder, show_branch_support, tree_holder, colors): os.makedirs(folder, exist_ok=True) fill_length = len(str(len(characters))) for i, (v1, v2, genome_colors, labels) in enumerate(characters): tree_holder.count_innovations_fitch(genome_colors) tree_holder.draw(folder + f'id_{str(i + 1).zfill(fill_length)}_edge_{v1}-{v2}.pdf', legend_labels=labels, show_branch_support=show_branch_support, colors=colors)
from bg.grimm import GRIMMReader from bg.tree import BGTree from bg.genome import BGGenome from bg.vertices import BGVertex import numpy as np import os import csv get_colors_by_edge = lambda e: e.multicolor.multicolors def white_proportion(colors): return np.mean(list(map(lambda c: c == 0, colors))) def get_character_by_edge(bg, edge, genomes, neighbour_index): def get_neighbour_with_genome(v, genome): return neighbour_index[(v, genome)] def get_genome_character_state_by_edge(genome): if cnt[BGGenome(genome)] == 1: return 0 else: v1, v2 = edge.vertex1.name, edge.vertex2.name if v1 > v2: v1, v2 = v2, v1 try: v1_neighbour = get_neighbour_with_genome(v1, genome) v2_neighbour = get_neighbour_with_genome(v2, genome) except KeyError: return 2 if bg.get_edge_by_two_vertices(v1_neighbour, v2_neighbour): pair = (v1_neighbour, v2_neighbour) if pair not in possible_edges: possible_edges.append(pair) return 3 + possible_edges.index(pair) else: return 1 cnt = get_colors_by_edge(edge) possible_edges = [] return {genome: get_genome_character_state_by_edge(genome) for genome in genomes}, possible_edges def construct_vertex_genome_index(bg): neighbour_index = {} for v in bg.bg: for edge in bg.get_edges_by_vertex(v): colors = get_colors_by_edge(edge) for color in colors: neighbour_index[(str(v), color.name)] = edge.vertex2 return neighbour_index def get_characters(grimm_file, genomes, logger): bg = GRIMMReader.get_breakpoint_graph(open(grimm_file)) logger.info('Breakpoint graph parsed') logger.info(f'Edges in breakpoint graph: {len(list(bg.edges()))}') characters = [] # consistency_checker = TreeConsistencyChecker(tree_file) for i, component_bg in enumerate(bg.connected_components_subgraphs()): nodes_len = len(list(component_bg.nodes())) if nodes_len == 2: continue logger.info(f'Getting characters from breakpoint graph component, size={len(component_bg.bg)}') neighbour_index = construct_vertex_genome_index(component_bg) for i_edge, edge in enumerate(component_bg.edges()): v1, v2 = 
edge.vertex1.name, edge.vertex2.name if v1 > v2: v1, v2 = v2, v1 genome_colors, neighbour_edges = get_character_by_edge(component_bg, edge, genomes, neighbour_index) if white_proportion(genome_colors.values()) < 0.5: continue labels = ['adjacency exists', 'complex break of adjacency', 'some block is not presented'] + \ [f'inversion with {v1n}-{v2n}' for (v1n, v2n) in neighbour_edges] characters.append((v1, v2, genome_colors, labels)) return characters def get_characters_stats_balanced(characters, tree_holder, distance_between_blocks): ans = [] for v1, v2, genome_colors, labels in characters: tree_holder.count_innovations_fitch(genome_colors, count_second_color=False) b1, b2 = int(v1[:-1]), int(v2[:-1]) if b1 > b2: b1, b2 = b2, b1 score_rear, count_rear, count_all_rear = tree_holder.count_parallel_rearrangements(skip_grey=True) score_break, count_break = tree_holder.count_parallel_breakpoints() white_strains = [strain for strain, color in genome_colors.items() if color == 0] mean_break_length = np.mean([distance_between_blocks[(b1, b2)][strain] for strain in white_strains]) ans.append([f'{v1}–{v2}', int(mean_break_length), score_rear, count_rear, count_all_rear, score_break, count_break, count_break <= 1]) return ans def write_stats_csv_balanced(stats, stats_file): rows = [['id', 'adjacency', 'mean_break_length_nucleotide', 'parallel_rear_score', 'number_of_inconsistent_colors', 'number_of_parallel_events', 'parallel_break_score', 'number_of_parallel_breaks', 'tree_consistent']] + \ [[i+1] + stat for i, stat in enumerate(stats)] with open(stats_file, 'w') as f: wtr = csv.writer(f) wtr.writerows(rows) def write_characters_csv_balanced(characters, folder): os.makedirs(folder, exist_ok=True) fill_length = len(str(len(characters))) for i, (v1, v2, genome_colors, labels) in enumerate(characters): rows = [['strain', 'character_state', 'character_state_annotation']] + \ [[strain, color, labels[color]] for strain, color in genome_colors.items()] with open(folder + 
f'id_{str(i + 1).zfill(fill_length)}_edge_{v1}-{v2}.csv', 'w') as f: wtr = csv.writer(f) wtr.writerows(rows) def write_trees_balanced(characters, folder, show_branch_support, tree_holder, colors): os.makedirs(folder, exist_ok=True) fill_length = len(str(len(characters))) for i, (v1, v2, genome_colors, labels) in enumerate(characters): tree_holder.count_innovations_fitch(genome_colors) tree_holder.draw(folder + f'id_{str(i + 1).zfill(fill_length)}_edge_{v1}-{v2}.pdf', legend_labels=labels, show_branch_support=show_branch_support, colors=colors)
en
0.54894
# consistency_checker = TreeConsistencyChecker(tree_file)
2.392422
2
cproto/core/cproto.py
asyne/cproto
30
6625304
from __future__ import absolute_import import json from os import path try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen from cproto.core.websocket import WebSocket from cproto.domains.factory import DomainFactory ROOT_DIR = path.abspath(path.dirname(path.dirname(__file__))) class CProto(object): def __init__(self, host='127.0.0.1', port=9222): res = urlopen('http://{0}:{1}/json'.format(host, port)) url = json.loads(res.read())[0]['webSocketDebuggerUrl'] self.ws = WebSocket(on_event=self._on_event) self.ws.connect(url) with open(path.join(ROOT_DIR, 'resources/protocol.json'), 'rb') as f: data = json.loads(f.read()) for d in data['domains']: domain_name = d['domain'] # Build Domain Class from protocol DomainClass = DomainFactory(domain_name, d['commands']) # Set WebSocket attribute DomainClass.ws = self.ws # Set Domain's Class as a property setattr(self, domain_name, DomainClass()) def close(self): self.ws.close() def _on_event(self, message): domain_name, method_name = message['method'].split('.') domain = getattr(self, domain_name) if hasattr(domain, method_name): getattr(domain, method_name)(message['params'])
from __future__ import absolute_import import json from os import path try: from urllib.request import urlopen except ImportError: from urllib2 import urlopen from cproto.core.websocket import WebSocket from cproto.domains.factory import DomainFactory ROOT_DIR = path.abspath(path.dirname(path.dirname(__file__))) class CProto(object): def __init__(self, host='127.0.0.1', port=9222): res = urlopen('http://{0}:{1}/json'.format(host, port)) url = json.loads(res.read())[0]['webSocketDebuggerUrl'] self.ws = WebSocket(on_event=self._on_event) self.ws.connect(url) with open(path.join(ROOT_DIR, 'resources/protocol.json'), 'rb') as f: data = json.loads(f.read()) for d in data['domains']: domain_name = d['domain'] # Build Domain Class from protocol DomainClass = DomainFactory(domain_name, d['commands']) # Set WebSocket attribute DomainClass.ws = self.ws # Set Domain's Class as a property setattr(self, domain_name, DomainClass()) def close(self): self.ws.close() def _on_event(self, message): domain_name, method_name = message['method'].split('.') domain = getattr(self, domain_name) if hasattr(domain, method_name): getattr(domain, method_name)(message['params'])
en
0.81589
# Build Domain Class from protocol # Set WebSocket attribute # Set Domain's Class as a property
2.466406
2
sdk/python/pulumi_azure_native/securityinsights/bookmark.py
sebtelko/pulumi-azure-native
0
6625305
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** import warnings import pulumi import pulumi.runtime from typing import Any, Mapping, Optional, Sequence, Union, overload from .. import _utilities from . import outputs from ._enums import * from ._inputs import * __all__ = ['BookmarkArgs', 'Bookmark'] @pulumi.input_type class BookmarkArgs: def __init__(__self__, *, display_name: pulumi.Input[str], query: pulumi.Input[str], resource_group_name: pulumi.Input[str], workspace_name: pulumi.Input[str], bookmark_id: Optional[pulumi.Input[str]] = None, created: Optional[pulumi.Input[str]] = None, created_by: Optional[pulumi.Input['UserInfoArgs']] = None, etag: Optional[pulumi.Input[str]] = None, event_time: Optional[pulumi.Input[str]] = None, incident_info: Optional[pulumi.Input['IncidentInfoArgs']] = None, labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None, notes: Optional[pulumi.Input[str]] = None, query_end_time: Optional[pulumi.Input[str]] = None, query_result: Optional[pulumi.Input[str]] = None, query_start_time: Optional[pulumi.Input[str]] = None, updated: Optional[pulumi.Input[str]] = None, updated_by: Optional[pulumi.Input['UserInfoArgs']] = None): """ The set of arguments for constructing a Bookmark resource. :param pulumi.Input[str] display_name: The display name of the bookmark :param pulumi.Input[str] query: The query of the bookmark. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :param pulumi.Input[str] workspace_name: The name of the workspace. 
:param pulumi.Input[str] bookmark_id: Bookmark ID :param pulumi.Input[str] created: The time the bookmark was created :param pulumi.Input['UserInfoArgs'] created_by: Describes a user that created the bookmark :param pulumi.Input[str] etag: Etag of the azure resource :param pulumi.Input[str] event_time: The bookmark event time :param pulumi.Input['IncidentInfoArgs'] incident_info: Describes an incident that relates to bookmark :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this bookmark :param pulumi.Input[str] notes: The notes of the bookmark :param pulumi.Input[str] query_end_time: The end time for the query :param pulumi.Input[str] query_result: The query result of the bookmark. :param pulumi.Input[str] query_start_time: The start time for the query :param pulumi.Input[str] updated: The last time the bookmark was updated :param pulumi.Input['UserInfoArgs'] updated_by: Describes a user that updated the bookmark """ pulumi.set(__self__, "display_name", display_name) pulumi.set(__self__, "query", query) pulumi.set(__self__, "resource_group_name", resource_group_name) pulumi.set(__self__, "workspace_name", workspace_name) if bookmark_id is not None: pulumi.set(__self__, "bookmark_id", bookmark_id) if created is not None: pulumi.set(__self__, "created", created) if created_by is not None: pulumi.set(__self__, "created_by", created_by) if etag is not None: pulumi.set(__self__, "etag", etag) if event_time is not None: pulumi.set(__self__, "event_time", event_time) if incident_info is not None: pulumi.set(__self__, "incident_info", incident_info) if labels is not None: pulumi.set(__self__, "labels", labels) if notes is not None: pulumi.set(__self__, "notes", notes) if query_end_time is not None: pulumi.set(__self__, "query_end_time", query_end_time) if query_result is not None: pulumi.set(__self__, "query_result", query_result) if query_start_time is not None: pulumi.set(__self__, "query_start_time", query_start_time) if updated is 
not None: pulumi.set(__self__, "updated", updated) if updated_by is not None: pulumi.set(__self__, "updated_by", updated_by) @property @pulumi.getter(name="displayName") def display_name(self) -> pulumi.Input[str]: """ The display name of the bookmark """ return pulumi.get(self, "display_name") @display_name.setter def display_name(self, value: pulumi.Input[str]): pulumi.set(self, "display_name", value) @property @pulumi.getter def query(self) -> pulumi.Input[str]: """ The query of the bookmark. """ return pulumi.get(self, "query") @query.setter def query(self, value: pulumi.Input[str]): pulumi.set(self, "query", value) @property @pulumi.getter(name="resourceGroupName") def resource_group_name(self) -> pulumi.Input[str]: """ The name of the resource group within the user's subscription. The name is case insensitive. """ return pulumi.get(self, "resource_group_name") @resource_group_name.setter def resource_group_name(self, value: pulumi.Input[str]): pulumi.set(self, "resource_group_name", value) @property @pulumi.getter(name="workspaceName") def workspace_name(self) -> pulumi.Input[str]: """ The name of the workspace. 
""" return pulumi.get(self, "workspace_name") @workspace_name.setter def workspace_name(self, value: pulumi.Input[str]): pulumi.set(self, "workspace_name", value) @property @pulumi.getter(name="bookmarkId") def bookmark_id(self) -> Optional[pulumi.Input[str]]: """ Bookmark ID """ return pulumi.get(self, "bookmark_id") @bookmark_id.setter def bookmark_id(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "bookmark_id", value) @property @pulumi.getter def created(self) -> Optional[pulumi.Input[str]]: """ The time the bookmark was created """ return pulumi.get(self, "created") @created.setter def created(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "created", value) @property @pulumi.getter(name="createdBy") def created_by(self) -> Optional[pulumi.Input['UserInfoArgs']]: """ Describes a user that created the bookmark """ return pulumi.get(self, "created_by") @created_by.setter def created_by(self, value: Optional[pulumi.Input['UserInfoArgs']]): pulumi.set(self, "created_by", value) @property @pulumi.getter def etag(self) -> Optional[pulumi.Input[str]]: """ Etag of the azure resource """ return pulumi.get(self, "etag") @etag.setter def etag(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "etag", value) @property @pulumi.getter(name="eventTime") def event_time(self) -> Optional[pulumi.Input[str]]: """ The bookmark event time """ return pulumi.get(self, "event_time") @event_time.setter def event_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "event_time", value) @property @pulumi.getter(name="incidentInfo") def incident_info(self) -> Optional[pulumi.Input['IncidentInfoArgs']]: """ Describes an incident that relates to bookmark """ return pulumi.get(self, "incident_info") @incident_info.setter def incident_info(self, value: Optional[pulumi.Input['IncidentInfoArgs']]): pulumi.set(self, "incident_info", value) @property @pulumi.getter def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]: """ List of 
labels relevant to this bookmark """ return pulumi.get(self, "labels") @labels.setter def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]): pulumi.set(self, "labels", value) @property @pulumi.getter def notes(self) -> Optional[pulumi.Input[str]]: """ The notes of the bookmark """ return pulumi.get(self, "notes") @notes.setter def notes(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "notes", value) @property @pulumi.getter(name="queryEndTime") def query_end_time(self) -> Optional[pulumi.Input[str]]: """ The end time for the query """ return pulumi.get(self, "query_end_time") @query_end_time.setter def query_end_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "query_end_time", value) @property @pulumi.getter(name="queryResult") def query_result(self) -> Optional[pulumi.Input[str]]: """ The query result of the bookmark. """ return pulumi.get(self, "query_result") @query_result.setter def query_result(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "query_result", value) @property @pulumi.getter(name="queryStartTime") def query_start_time(self) -> Optional[pulumi.Input[str]]: """ The start time for the query """ return pulumi.get(self, "query_start_time") @query_start_time.setter def query_start_time(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "query_start_time", value) @property @pulumi.getter def updated(self) -> Optional[pulumi.Input[str]]: """ The last time the bookmark was updated """ return pulumi.get(self, "updated") @updated.setter def updated(self, value: Optional[pulumi.Input[str]]): pulumi.set(self, "updated", value) @property @pulumi.getter(name="updatedBy") def updated_by(self) -> Optional[pulumi.Input['UserInfoArgs']]: """ Describes a user that updated the bookmark """ return pulumi.get(self, "updated_by") @updated_by.setter def updated_by(self, value: Optional[pulumi.Input['UserInfoArgs']]): pulumi.set(self, "updated_by", value) class Bookmark(pulumi.CustomResource): 
    # The two @overload stubs below exist only for type checkers: they
    # describe the two legal calling conventions (keyword properties vs. a
    # single BookmarkArgs object).  The real dispatch happens in the
    # non-overloaded __init__ defined after them.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 bookmark_id: Optional[pulumi.Input[str]] = None,
                 created: Optional[pulumi.Input[str]] = None,
                 created_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 event_time: Optional[pulumi.Input[str]] = None,
                 incident_info: Optional[pulumi.Input[pulumi.InputType['IncidentInfoArgs']]] = None,
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 query: Optional[pulumi.Input[str]] = None,
                 query_end_time: Optional[pulumi.Input[str]] = None,
                 query_result: Optional[pulumi.Input[str]] = None,
                 query_start_time: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 updated: Optional[pulumi.Input[str]] = None,
                 updated_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Represents a bookmark in Azure Security Insights. API Version: 2020-01-01.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] bookmark_id: Bookmark ID
        :param pulumi.Input[str] created: The time the bookmark was created
        :param pulumi.Input[pulumi.InputType['UserInfoArgs']] created_by: Describes a user that created the bookmark
        :param pulumi.Input[str] display_name: The display name of the bookmark
        :param pulumi.Input[str] etag: Etag of the azure resource
        :param pulumi.Input[str] event_time: The bookmark event time
        :param pulumi.Input[pulumi.InputType['IncidentInfoArgs']] incident_info: Describes an incident that relates to bookmark
        :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this bookmark
        :param pulumi.Input[str] notes: The notes of the bookmark
        :param pulumi.Input[str] query: The query of the bookmark.
        :param pulumi.Input[str] query_end_time: The end time for the query
        :param pulumi.Input[str] query_result: The query result of the bookmark.
        :param pulumi.Input[str] query_start_time: The start time for the query
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
        :param pulumi.Input[str] updated: The last time the bookmark was updated
        :param pulumi.Input[pulumi.InputType['UserInfoArgs']] updated_by: Describes a user that updated the bookmark
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BookmarkArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Represents a bookmark in Azure Security Insights. API Version: 2020-01-01.

        :param str resource_name: The name of the resource.
        :param BookmarkArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
    def __init__(__self__, resource_name: str, *args, **kwargs):
        # Runtime dispatcher for the two overloads above: if the caller passed
        # a BookmarkArgs object, unpack it into keyword properties; otherwise
        # forward the keyword form unchanged.
        resource_args, opts = _utilities.get_resource_args_opts(BookmarkArgs, pulumi.ResourceOptions, *args, **kwargs)
        if resource_args is not None:
            __self__._internal_init(resource_name, opts, **resource_args.__dict__)
        else:
            __self__._internal_init(resource_name, *args, **kwargs)

    def _internal_init(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 bookmark_id: Optional[pulumi.Input[str]] = None,
                 created: Optional[pulumi.Input[str]] = None,
                 created_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 event_time: Optional[pulumi.Input[str]] = None,
                 incident_info: Optional[pulumi.Input[pulumi.InputType['IncidentInfoArgs']]] = None,
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 query: Optional[pulumi.Input[str]] = None,
                 query_end_time: Optional[pulumi.Input[str]] = None,
                 query_result: Optional[pulumi.Input[str]] = None,
                 query_start_time: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 updated: Optional[pulumi.Input[str]] = None,
                 updated_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        # Shared body for both overloads; validates options, builds the
        # property bag, and registers the resource with the Pulumi engine.
        if opts is None:
            opts = pulumi.ResourceOptions()
        if not isinstance(opts, pulumi.ResourceOptions):
            raise TypeError('Expected resource options to be a ResourceOptions instance')
        if opts.version is None:
            opts.version = _utilities.get_version()
        if opts.id is None:
            # Creating a new resource (as opposed to adopting an existing one
            # via opts.id): build the props bag and enforce required inputs.
            # The "None and not opts.urn" checks allow required properties to
            # be absent during URN-only lookups.
            if __props__ is not None:
                raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
            __props__ = BookmarkArgs.__new__(BookmarkArgs)

            __props__.__dict__["bookmark_id"] = bookmark_id
            __props__.__dict__["created"] = created
            __props__.__dict__["created_by"] = created_by
            if display_name is None and not opts.urn:
                raise TypeError("Missing required property 'display_name'")
            __props__.__dict__["display_name"] = display_name
            __props__.__dict__["etag"] = etag
            __props__.__dict__["event_time"] = event_time
            __props__.__dict__["incident_info"] = incident_info
            __props__.__dict__["labels"] = labels
            __props__.__dict__["notes"] = notes
            if query is None and not opts.urn:
                raise TypeError("Missing required property 'query'")
            __props__.__dict__["query"] = query
            __props__.__dict__["query_end_time"] = query_end_time
            __props__.__dict__["query_result"] = query_result
            __props__.__dict__["query_start_time"] = query_start_time
            if resource_group_name is None and not opts.urn:
                raise TypeError("Missing required property 'resource_group_name'")
            __props__.__dict__["resource_group_name"] = resource_group_name
            __props__.__dict__["updated"] = updated
            __props__.__dict__["updated_by"] = updated_by
            if workspace_name is None and not opts.urn:
                raise TypeError("Missing required property 'workspace_name'")
            __props__.__dict__["workspace_name"] = workspace_name
            # Output-only properties start as None; the engine fills them in.
            __props__.__dict__["name"] = None
            __props__.__dict__["type"] = None
        # Register aliases for the older azure-nextgen / versioned type tokens
        # so existing stacks are not replaced on upgrade.
        alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights:Bookmark"), pulumi.Alias(type_="azure-native:securityinsights/v20200101:Bookmark"), pulumi.Alias(type_="azure-nextgen:securityinsights/v20200101:Bookmark")])
        opts = pulumi.ResourceOptions.merge(opts, alias_opts)
        super(Bookmark, __self__).__init__(
            'azure-native:securityinsights:Bookmark',
            resource_name,
            __props__,
            opts)

    @staticmethod
    def get(resource_name: str,
            id: pulumi.Input[str],
            opts: Optional[pulumi.ResourceOptions] = None) -> 'Bookmark':
        """
        Get an existing Bookmark resource's state with the given name, id, and optional extra
        properties used to qualify the lookup.

        :param str resource_name: The unique name of the resulting resource.
        :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

        # All properties are None here: the engine populates them from the
        # existing resource's state identified by `id`.
        __props__ = BookmarkArgs.__new__(BookmarkArgs)

        __props__.__dict__["created"] = None
        __props__.__dict__["created_by"] = None
        __props__.__dict__["display_name"] = None
        __props__.__dict__["etag"] = None
        __props__.__dict__["event_time"] = None
        __props__.__dict__["incident_info"] = None
        __props__.__dict__["labels"] = None
        __props__.__dict__["name"] = None
        __props__.__dict__["notes"] = None
        __props__.__dict__["query"] = None
        __props__.__dict__["query_end_time"] = None
        __props__.__dict__["query_result"] = None
        __props__.__dict__["query_start_time"] = None
        __props__.__dict__["type"] = None
        __props__.__dict__["updated"] = None
        __props__.__dict__["updated_by"] = None
        return Bookmark(resource_name, opts=opts, __props__=__props__)

    # Output properties.  Each @property/@pulumi.getter pair exposes a value
    # resolved by the Pulumi engine after deployment; they are read-only.

    @property
    @pulumi.getter
    def created(self) -> pulumi.Output[Optional[str]]:
        """
        The time the bookmark was created
        """
        return pulumi.get(self, "created")

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> pulumi.Output[Optional['outputs.UserInfoResponse']]:
        """
        Describes a user that created the bookmark
        """
        return pulumi.get(self, "created_by")

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Output[str]:
        """
        The display name of the bookmark
        """
        return pulumi.get(self, "display_name")

    @property
    @pulumi.getter
    def etag(self) -> pulumi.Output[Optional[str]]:
        """
        Etag of the azure resource
        """
        return pulumi.get(self, "etag")

    @property
    @pulumi.getter(name="eventTime")
    def event_time(self) -> pulumi.Output[Optional[str]]:
        """
        The bookmark event time
        """
        return pulumi.get(self, "event_time")

    @property
    @pulumi.getter(name="incidentInfo")
    def incident_info(self) -> pulumi.Output[Optional['outputs.IncidentInfoResponse']]:
        """
        Describes an incident that relates to bookmark
        """
        return pulumi.get(self, "incident_info")

    @property
    @pulumi.getter
    def labels(self) -> pulumi.Output[Optional[Sequence[str]]]:
        """
        List of labels relevant to this bookmark
        """
        return pulumi.get(self, "labels")

    @property
    @pulumi.getter
    def name(self) -> pulumi.Output[str]:
        """
        Azure resource name
        """
        return pulumi.get(self, "name")

    @property
    @pulumi.getter
    def notes(self) -> pulumi.Output[Optional[str]]:
        """
        The notes of the bookmark
        """
        return pulumi.get(self, "notes")

    @property
    @pulumi.getter
    def query(self) -> pulumi.Output[str]:
        """
        The query of the bookmark.
        """
        return pulumi.get(self, "query")

    @property
    @pulumi.getter(name="queryEndTime")
    def query_end_time(self) -> pulumi.Output[Optional[str]]:
        """
        The end time for the query
        """
        return pulumi.get(self, "query_end_time")

    @property
    @pulumi.getter(name="queryResult")
    def query_result(self) -> pulumi.Output[Optional[str]]:
        """
        The query result of the bookmark.
        """
        return pulumi.get(self, "query_result")

    @property
    @pulumi.getter(name="queryStartTime")
    def query_start_time(self) -> pulumi.Output[Optional[str]]:
        """
        The start time for the query
        """
        return pulumi.get(self, "query_start_time")

    @property
    @pulumi.getter
    def type(self) -> pulumi.Output[str]:
        """
        Azure resource type
        """
        return pulumi.get(self, "type")

    @property
    @pulumi.getter
    def updated(self) -> pulumi.Output[Optional[str]]:
        """
        The last time the bookmark was updated
        """
        return pulumi.get(self, "updated")

    @property
    @pulumi.getter(name="updatedBy")
    def updated_by(self) -> pulumi.Output[Optional['outputs.UserInfoResponse']]:
        """
        Describes a user that updated the bookmark
        """
        return pulumi.get(self, "updated_by")
# coding=utf-8
# *** WARNING: this file was generated by the Pulumi SDK Generator. ***
# *** Do not edit by hand unless you're certain you know what you are doing! ***

import warnings
import pulumi
import pulumi.runtime
from typing import Any, Mapping, Optional, Sequence, Union, overload
from .. import _utilities
from . import outputs
from ._enums import *
from ._inputs import *

__all__ = ['BookmarkArgs', 'Bookmark']


@pulumi.input_type
class BookmarkArgs:
    def __init__(__self__, *,
                 display_name: pulumi.Input[str],
                 query: pulumi.Input[str],
                 resource_group_name: pulumi.Input[str],
                 workspace_name: pulumi.Input[str],
                 bookmark_id: Optional[pulumi.Input[str]] = None,
                 created: Optional[pulumi.Input[str]] = None,
                 created_by: Optional[pulumi.Input['UserInfoArgs']] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 event_time: Optional[pulumi.Input[str]] = None,
                 incident_info: Optional[pulumi.Input['IncidentInfoArgs']] = None,
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 query_end_time: Optional[pulumi.Input[str]] = None,
                 query_result: Optional[pulumi.Input[str]] = None,
                 query_start_time: Optional[pulumi.Input[str]] = None,
                 updated: Optional[pulumi.Input[str]] = None,
                 updated_by: Optional[pulumi.Input['UserInfoArgs']] = None):
        """
        The set of arguments for constructing a Bookmark resource.
        :param pulumi.Input[str] display_name: The display name of the bookmark
        :param pulumi.Input[str] query: The query of the bookmark.
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        :param pulumi.Input[str] bookmark_id: Bookmark ID
        :param pulumi.Input[str] created: The time the bookmark was created
        :param pulumi.Input['UserInfoArgs'] created_by: Describes a user that created the bookmark
        :param pulumi.Input[str] etag: Etag of the azure resource
        :param pulumi.Input[str] event_time: The bookmark event time
        :param pulumi.Input['IncidentInfoArgs'] incident_info: Describes an incident that relates to bookmark
        :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this bookmark
        :param pulumi.Input[str] notes: The notes of the bookmark
        :param pulumi.Input[str] query_end_time: The end time for the query
        :param pulumi.Input[str] query_result: The query result of the bookmark.
        :param pulumi.Input[str] query_start_time: The start time for the query
        :param pulumi.Input[str] updated: The last time the bookmark was updated
        :param pulumi.Input['UserInfoArgs'] updated_by: Describes a user that updated the bookmark
        """
        # Required properties are always set; optional ones only when the
        # caller provided a value, so pulumi.get on an unset property stays
        # distinguishable from an explicit None.
        pulumi.set(__self__, "display_name", display_name)
        pulumi.set(__self__, "query", query)
        pulumi.set(__self__, "resource_group_name", resource_group_name)
        pulumi.set(__self__, "workspace_name", workspace_name)
        if bookmark_id is not None:
            pulumi.set(__self__, "bookmark_id", bookmark_id)
        if created is not None:
            pulumi.set(__self__, "created", created)
        if created_by is not None:
            pulumi.set(__self__, "created_by", created_by)
        if etag is not None:
            pulumi.set(__self__, "etag", etag)
        if event_time is not None:
            pulumi.set(__self__, "event_time", event_time)
        if incident_info is not None:
            pulumi.set(__self__, "incident_info", incident_info)
        if labels is not None:
            pulumi.set(__self__, "labels", labels)
        if notes is not None:
            pulumi.set(__self__, "notes", notes)
        if query_end_time is not None:
            pulumi.set(__self__, "query_end_time", query_end_time)
        if query_result is not None:
            pulumi.set(__self__, "query_result", query_result)
        if query_start_time is not None:
            pulumi.set(__self__, "query_start_time", query_start_time)
        if updated is not None:
            pulumi.set(__self__, "updated", updated)
        if updated_by is not None:
            pulumi.set(__self__, "updated_by", updated_by)

    # Accessors below delegate to pulumi.get/pulumi.set so the
    # @pulumi.input_type machinery can track the underlying input values.

    @property
    @pulumi.getter(name="displayName")
    def display_name(self) -> pulumi.Input[str]:
        """
        The display name of the bookmark
        """
        return pulumi.get(self, "display_name")

    @display_name.setter
    def display_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "display_name", value)

    @property
    @pulumi.getter
    def query(self) -> pulumi.Input[str]:
        """
        The query of the bookmark.
        """
        return pulumi.get(self, "query")

    @query.setter
    def query(self, value: pulumi.Input[str]):
        pulumi.set(self, "query", value)

    @property
    @pulumi.getter(name="resourceGroupName")
    def resource_group_name(self) -> pulumi.Input[str]:
        """
        The name of the resource group within the user's subscription. The name is case insensitive.
        """
        return pulumi.get(self, "resource_group_name")

    @resource_group_name.setter
    def resource_group_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "resource_group_name", value)

    @property
    @pulumi.getter(name="workspaceName")
    def workspace_name(self) -> pulumi.Input[str]:
        """
        The name of the workspace.
        """
        return pulumi.get(self, "workspace_name")

    @workspace_name.setter
    def workspace_name(self, value: pulumi.Input[str]):
        pulumi.set(self, "workspace_name", value)

    @property
    @pulumi.getter(name="bookmarkId")
    def bookmark_id(self) -> Optional[pulumi.Input[str]]:
        """
        Bookmark ID
        """
        return pulumi.get(self, "bookmark_id")

    @bookmark_id.setter
    def bookmark_id(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "bookmark_id", value)

    @property
    @pulumi.getter
    def created(self) -> Optional[pulumi.Input[str]]:
        """
        The time the bookmark was created
        """
        return pulumi.get(self, "created")

    @created.setter
    def created(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "created", value)

    @property
    @pulumi.getter(name="createdBy")
    def created_by(self) -> Optional[pulumi.Input['UserInfoArgs']]:
        """
        Describes a user that created the bookmark
        """
        return pulumi.get(self, "created_by")

    @created_by.setter
    def created_by(self, value: Optional[pulumi.Input['UserInfoArgs']]):
        pulumi.set(self, "created_by", value)

    @property
    @pulumi.getter
    def etag(self) -> Optional[pulumi.Input[str]]:
        """
        Etag of the azure resource
        """
        return pulumi.get(self, "etag")

    @etag.setter
    def etag(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "etag", value)

    @property
    @pulumi.getter(name="eventTime")
    def event_time(self) -> Optional[pulumi.Input[str]]:
        """
        The bookmark event time
        """
        return pulumi.get(self, "event_time")

    @event_time.setter
    def event_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "event_time", value)

    @property
    @pulumi.getter(name="incidentInfo")
    def incident_info(self) -> Optional[pulumi.Input['IncidentInfoArgs']]:
        """
        Describes an incident that relates to bookmark
        """
        return pulumi.get(self, "incident_info")

    @incident_info.setter
    def incident_info(self, value: Optional[pulumi.Input['IncidentInfoArgs']]):
        pulumi.set(self, "incident_info", value)

    @property
    @pulumi.getter
    def labels(self) -> Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]:
        """
        List of labels relevant to this bookmark
        """
        return pulumi.get(self, "labels")

    @labels.setter
    def labels(self, value: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]]):
        pulumi.set(self, "labels", value)

    @property
    @pulumi.getter
    def notes(self) -> Optional[pulumi.Input[str]]:
        """
        The notes of the bookmark
        """
        return pulumi.get(self, "notes")

    @notes.setter
    def notes(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "notes", value)

    @property
    @pulumi.getter(name="queryEndTime")
    def query_end_time(self) -> Optional[pulumi.Input[str]]:
        """
        The end time for the query
        """
        return pulumi.get(self, "query_end_time")

    @query_end_time.setter
    def query_end_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query_end_time", value)

    @property
    @pulumi.getter(name="queryResult")
    def query_result(self) -> Optional[pulumi.Input[str]]:
        """
        The query result of the bookmark.
        """
        return pulumi.get(self, "query_result")

    @query_result.setter
    def query_result(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query_result", value)

    @property
    @pulumi.getter(name="queryStartTime")
    def query_start_time(self) -> Optional[pulumi.Input[str]]:
        """
        The start time for the query
        """
        return pulumi.get(self, "query_start_time")

    @query_start_time.setter
    def query_start_time(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "query_start_time", value)

    @property
    @pulumi.getter
    def updated(self) -> Optional[pulumi.Input[str]]:
        """
        The last time the bookmark was updated
        """
        return pulumi.get(self, "updated")

    @updated.setter
    def updated(self, value: Optional[pulumi.Input[str]]):
        pulumi.set(self, "updated", value)

    @property
    @pulumi.getter(name="updatedBy")
    def updated_by(self) -> Optional[pulumi.Input['UserInfoArgs']]:
        """
        Describes a user that updated the bookmark
        """
        return pulumi.get(self, "updated_by")

    @updated_by.setter
    def updated_by(self, value: Optional[pulumi.Input['UserInfoArgs']]):
        pulumi.set(self, "updated_by", value)


class Bookmark(pulumi.CustomResource):
    # The two @overload stubs below exist only for type checkers: they
    # describe the two legal calling conventions (keyword properties vs. a
    # single BookmarkArgs object).  The real dispatch happens in the
    # non-overloaded __init__ defined after them.
    @overload
    def __init__(__self__,
                 resource_name: str,
                 opts: Optional[pulumi.ResourceOptions] = None,
                 bookmark_id: Optional[pulumi.Input[str]] = None,
                 created: Optional[pulumi.Input[str]] = None,
                 created_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                 display_name: Optional[pulumi.Input[str]] = None,
                 etag: Optional[pulumi.Input[str]] = None,
                 event_time: Optional[pulumi.Input[str]] = None,
                 incident_info: Optional[pulumi.Input[pulumi.InputType['IncidentInfoArgs']]] = None,
                 labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                 notes: Optional[pulumi.Input[str]] = None,
                 query: Optional[pulumi.Input[str]] = None,
                 query_end_time: Optional[pulumi.Input[str]] = None,
                 query_result: Optional[pulumi.Input[str]] = None,
                 query_start_time: Optional[pulumi.Input[str]] = None,
                 resource_group_name: Optional[pulumi.Input[str]] = None,
                 updated: Optional[pulumi.Input[str]] = None,
                 updated_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                 workspace_name: Optional[pulumi.Input[str]] = None,
                 __props__=None):
        """
        Represents a bookmark in Azure Security Insights. API Version: 2020-01-01.

        :param str resource_name: The name of the resource.
        :param pulumi.ResourceOptions opts: Options for the resource.
        :param pulumi.Input[str] bookmark_id: Bookmark ID
        :param pulumi.Input[str] created: The time the bookmark was created
        :param pulumi.Input[pulumi.InputType['UserInfoArgs']] created_by: Describes a user that created the bookmark
        :param pulumi.Input[str] display_name: The display name of the bookmark
        :param pulumi.Input[str] etag: Etag of the azure resource
        :param pulumi.Input[str] event_time: The bookmark event time
        :param pulumi.Input[pulumi.InputType['IncidentInfoArgs']] incident_info: Describes an incident that relates to bookmark
        :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this bookmark
        :param pulumi.Input[str] notes: The notes of the bookmark
        :param pulumi.Input[str] query: The query of the bookmark.
        :param pulumi.Input[str] query_end_time: The end time for the query
        :param pulumi.Input[str] query_result: The query result of the bookmark.
        :param pulumi.Input[str] query_start_time: The start time for the query
        :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive.
        :param pulumi.Input[str] updated: The last time the bookmark was updated
        :param pulumi.Input[pulumi.InputType['UserInfoArgs']] updated_by: Describes a user that updated the bookmark
        :param pulumi.Input[str] workspace_name: The name of the workspace.
        """
        ...
    @overload
    def __init__(__self__,
                 resource_name: str,
                 args: BookmarkArgs,
                 opts: Optional[pulumi.ResourceOptions] = None):
        """
        Represents a bookmark in Azure Security Insights. API Version: 2020-01-01.

        :param str resource_name: The name of the resource.
        :param BookmarkArgs args: The arguments to use to populate this resource's properties.
        :param pulumi.ResourceOptions opts: Options for the resource.
        """
        ...
# NOTE: generated by the Pulumi SDK Generator — do not hand-edit logic.
def __init__(__self__, resource_name: str, *args, **kwargs):
    # Dispatch between the two typed overloads above: either a single
    # BookmarkArgs object or individual keyword arguments.
    resource_args, opts = _utilities.get_resource_args_opts(BookmarkArgs, pulumi.ResourceOptions, *args, **kwargs)
    if resource_args is not None:
        __self__._internal_init(resource_name, opts, **resource_args.__dict__)
    else:
        __self__._internal_init(resource_name, *args, **kwargs)

def _internal_init(__self__,
                   resource_name: str,
                   opts: Optional[pulumi.ResourceOptions] = None,
                   bookmark_id: Optional[pulumi.Input[str]] = None,
                   created: Optional[pulumi.Input[str]] = None,
                   created_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                   display_name: Optional[pulumi.Input[str]] = None,
                   etag: Optional[pulumi.Input[str]] = None,
                   event_time: Optional[pulumi.Input[str]] = None,
                   incident_info: Optional[pulumi.Input[pulumi.InputType['IncidentInfoArgs']]] = None,
                   labels: Optional[pulumi.Input[Sequence[pulumi.Input[str]]]] = None,
                   notes: Optional[pulumi.Input[str]] = None,
                   query: Optional[pulumi.Input[str]] = None,
                   query_end_time: Optional[pulumi.Input[str]] = None,
                   query_result: Optional[pulumi.Input[str]] = None,
                   query_start_time: Optional[pulumi.Input[str]] = None,
                   resource_group_name: Optional[pulumi.Input[str]] = None,
                   updated: Optional[pulumi.Input[str]] = None,
                   updated_by: Optional[pulumi.Input[pulumi.InputType['UserInfoArgs']]] = None,
                   workspace_name: Optional[pulumi.Input[str]] = None,
                   __props__=None):
    # Real constructor body shared by both overloads.
    if opts is None:
        opts = pulumi.ResourceOptions()
    if not isinstance(opts, pulumi.ResourceOptions):
        raise TypeError('Expected resource options to be a ResourceOptions instance')
    if opts.version is None:
        opts.version = _utilities.get_version()
    if opts.id is None:
        # Creating a new resource (no existing provider id): validate the
        # required properties and populate the args bag.
        if __props__ is not None:
            raise TypeError('__props__ is only valid when passed in combination with a valid opts.id to get an existing resource')
        __props__ = BookmarkArgs.__new__(BookmarkArgs)

        __props__.__dict__["bookmark_id"] = bookmark_id
        __props__.__dict__["created"] = created
        __props__.__dict__["created_by"] = created_by
        # Required unless the resource is being looked up by URN.
        if display_name is None and not opts.urn:
            raise TypeError("Missing required property 'display_name'")
        __props__.__dict__["display_name"] = display_name
        __props__.__dict__["etag"] = etag
        __props__.__dict__["event_time"] = event_time
        __props__.__dict__["incident_info"] = incident_info
        __props__.__dict__["labels"] = labels
        __props__.__dict__["notes"] = notes
        if query is None and not opts.urn:
            raise TypeError("Missing required property 'query'")
        __props__.__dict__["query"] = query
        __props__.__dict__["query_end_time"] = query_end_time
        __props__.__dict__["query_result"] = query_result
        __props__.__dict__["query_start_time"] = query_start_time
        if resource_group_name is None and not opts.urn:
            raise TypeError("Missing required property 'resource_group_name'")
        __props__.__dict__["resource_group_name"] = resource_group_name
        __props__.__dict__["updated"] = updated
        __props__.__dict__["updated_by"] = updated_by
        if workspace_name is None and not opts.urn:
            raise TypeError("Missing required property 'workspace_name'")
        __props__.__dict__["workspace_name"] = workspace_name
        # Output-only properties start as None and are filled by the engine.
        __props__.__dict__["name"] = None
        __props__.__dict__["type"] = None
    # Register aliases so state created under older type tokens still resolves.
    alias_opts = pulumi.ResourceOptions(aliases=[pulumi.Alias(type_="azure-nextgen:securityinsights:Bookmark"), pulumi.Alias(type_="azure-native:securityinsights/v20200101:Bookmark"), pulumi.Alias(type_="azure-nextgen:securityinsights/v20200101:Bookmark")])
    opts = pulumi.ResourceOptions.merge(opts, alias_opts)
    super(Bookmark, __self__).__init__(
        'azure-native:securityinsights:Bookmark',
        resource_name,
        __props__,
        opts)

@staticmethod
def get(resource_name: str,
        id: pulumi.Input[str],
        opts: Optional[pulumi.ResourceOptions] = None) -> 'Bookmark':
    """
    Get an existing Bookmark resource's state with the given name, id, and optional extra
    properties used to qualify the lookup.

    :param str resource_name: The unique name of the resulting resource.
    :param pulumi.Input[str] id: The unique provider ID of the resource to lookup.
    :param pulumi.ResourceOptions opts: Options for the resource.
    """
    opts = pulumi.ResourceOptions.merge(opts, pulumi.ResourceOptions(id=id))

    # All properties start as None; the engine fills them from existing state.
    __props__ = BookmarkArgs.__new__(BookmarkArgs)

    __props__.__dict__["created"] = None
    __props__.__dict__["created_by"] = None
    __props__.__dict__["display_name"] = None
    __props__.__dict__["etag"] = None
    __props__.__dict__["event_time"] = None
    __props__.__dict__["incident_info"] = None
    __props__.__dict__["labels"] = None
    __props__.__dict__["name"] = None
    __props__.__dict__["notes"] = None
    __props__.__dict__["query"] = None
    __props__.__dict__["query_end_time"] = None
    __props__.__dict__["query_result"] = None
    __props__.__dict__["query_start_time"] = None
    __props__.__dict__["type"] = None
    __props__.__dict__["updated"] = None
    __props__.__dict__["updated_by"] = None
    return Bookmark(resource_name, opts=opts, __props__=__props__)

@property
@pulumi.getter
def created(self) -> pulumi.Output[Optional[str]]:
    """
    The time the bookmark was created
    """
    return pulumi.get(self, "created")

@property
@pulumi.getter(name="createdBy")
def created_by(self) -> pulumi.Output[Optional['outputs.UserInfoResponse']]:
    """
    Describes a user that created the bookmark
    """
    return pulumi.get(self, "created_by")

@property
@pulumi.getter(name="displayName")
def display_name(self) -> pulumi.Output[str]:
    """
    The display name of the bookmark
    """
    return pulumi.get(self, "display_name")

@property
@pulumi.getter
def etag(self) -> pulumi.Output[Optional[str]]:
    """
    Etag of the azure resource
    """
    return pulumi.get(self, "etag")

@property
@pulumi.getter(name="eventTime")
def event_time(self) -> pulumi.Output[Optional[str]]:
    """
    The bookmark event time
    """
    return pulumi.get(self, "event_time")

@property
@pulumi.getter(name="incidentInfo")
def incident_info(self) -> pulumi.Output[Optional['outputs.IncidentInfoResponse']]:
    """
    Describes an incident that relates to bookmark
    """
    return pulumi.get(self, "incident_info")

@property
@pulumi.getter
def labels(self) -> pulumi.Output[Optional[Sequence[str]]]:
    """
    List of labels relevant to this bookmark
    """
    return pulumi.get(self, "labels")

@property
@pulumi.getter
def name(self) -> pulumi.Output[str]:
    """
    Azure resource name
    """
    return pulumi.get(self, "name")

@property
@pulumi.getter
def notes(self) -> pulumi.Output[Optional[str]]:
    """
    The notes of the bookmark
    """
    return pulumi.get(self, "notes")

@property
@pulumi.getter
def query(self) -> pulumi.Output[str]:
    """
    The query of the bookmark.
    """
    return pulumi.get(self, "query")

@property
@pulumi.getter(name="queryEndTime")
def query_end_time(self) -> pulumi.Output[Optional[str]]:
    """
    The end time for the query
    """
    return pulumi.get(self, "query_end_time")

@property
@pulumi.getter(name="queryResult")
def query_result(self) -> pulumi.Output[Optional[str]]:
    """
    The query result of the bookmark.
    """
    return pulumi.get(self, "query_result")

@property
@pulumi.getter(name="queryStartTime")
def query_start_time(self) -> pulumi.Output[Optional[str]]:
    """
    The start time for the query
    """
    return pulumi.get(self, "query_start_time")

@property
@pulumi.getter
def type(self) -> pulumi.Output[str]:
    """
    Azure resource type
    """
    return pulumi.get(self, "type")

@property
@pulumi.getter
def updated(self) -> pulumi.Output[Optional[str]]:
    """
    The last time the bookmark was updated
    """
    return pulumi.get(self, "updated")

@property
@pulumi.getter(name="updatedBy")
def updated_by(self) -> pulumi.Output[Optional['outputs.UserInfoResponse']]:
    """
    Describes a user that updated the bookmark
    """
    return pulumi.get(self, "updated_by")
en
0.702084
# coding=utf-8 # *** WARNING: this file was generated by the Pulumi SDK Generator. *** # *** Do not edit by hand unless you're certain you know what you are doing! *** The set of arguments for constructing a Bookmark resource. :param pulumi.Input[str] display_name: The display name of the bookmark :param pulumi.Input[str] query: The query of the bookmark. :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :param pulumi.Input[str] workspace_name: The name of the workspace. :param pulumi.Input[str] bookmark_id: Bookmark ID :param pulumi.Input[str] created: The time the bookmark was created :param pulumi.Input['UserInfoArgs'] created_by: Describes a user that created the bookmark :param pulumi.Input[str] etag: Etag of the azure resource :param pulumi.Input[str] event_time: The bookmark event time :param pulumi.Input['IncidentInfoArgs'] incident_info: Describes an incident that relates to bookmark :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this bookmark :param pulumi.Input[str] notes: The notes of the bookmark :param pulumi.Input[str] query_end_time: The end time for the query :param pulumi.Input[str] query_result: The query result of the bookmark. :param pulumi.Input[str] query_start_time: The start time for the query :param pulumi.Input[str] updated: The last time the bookmark was updated :param pulumi.Input['UserInfoArgs'] updated_by: Describes a user that updated the bookmark The display name of the bookmark The query of the bookmark. The name of the resource group within the user's subscription. The name is case insensitive. The name of the workspace. 
Bookmark ID The time the bookmark was created Describes a user that created the bookmark Etag of the azure resource The bookmark event time Describes an incident that relates to bookmark List of labels relevant to this bookmark The notes of the bookmark The end time for the query The query result of the bookmark. The start time for the query The last time the bookmark was updated Describes a user that updated the bookmark Represents a bookmark in Azure Security Insights. API Version: 2020-01-01. :param str resource_name: The name of the resource. :param pulumi.ResourceOptions opts: Options for the resource. :param pulumi.Input[str] bookmark_id: Bookmark ID :param pulumi.Input[str] created: The time the bookmark was created :param pulumi.Input[pulumi.InputType['UserInfoArgs']] created_by: Describes a user that created the bookmark :param pulumi.Input[str] display_name: The display name of the bookmark :param pulumi.Input[str] etag: Etag of the azure resource :param pulumi.Input[str] event_time: The bookmark event time :param pulumi.Input[pulumi.InputType['IncidentInfoArgs']] incident_info: Describes an incident that relates to bookmark :param pulumi.Input[Sequence[pulumi.Input[str]]] labels: List of labels relevant to this bookmark :param pulumi.Input[str] notes: The notes of the bookmark :param pulumi.Input[str] query: The query of the bookmark. :param pulumi.Input[str] query_end_time: The end time for the query :param pulumi.Input[str] query_result: The query result of the bookmark. :param pulumi.Input[str] query_start_time: The start time for the query :param pulumi.Input[str] resource_group_name: The name of the resource group within the user's subscription. The name is case insensitive. :param pulumi.Input[str] updated: The last time the bookmark was updated :param pulumi.Input[pulumi.InputType['UserInfoArgs']] updated_by: Describes a user that updated the bookmark :param pulumi.Input[str] workspace_name: The name of the workspace. 
Represents a bookmark in Azure Security Insights. API Version: 2020-01-01. :param str resource_name: The name of the resource. :param BookmarkArgs args: The arguments to use to populate this resource's properties. :param pulumi.ResourceOptions opts: Options for the resource. Get an existing Bookmark resource's state with the given name, id, and optional extra properties used to qualify the lookup. :param str resource_name: The unique name of the resulting resource. :param pulumi.Input[str] id: The unique provider ID of the resource to lookup. :param pulumi.ResourceOptions opts: Options for the resource. The time the bookmark was created Describes a user that created the bookmark The display name of the bookmark Etag of the azure resource The bookmark event time Describes an incident that relates to bookmark List of labels relevant to this bookmark Azure resource name The notes of the bookmark The query of the bookmark. The end time for the query The query result of the bookmark. The start time for the query Azure resource type The last time the bookmark was updated Describes a user that updated the bookmark
1.743842
2
tests/test_runner.py
idekerlab/cdapsutil
0
6625306
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
test_runner
----------------------------------

Tests for `cdapsutil.runner` module.
"""

import os
import stat
import sys
import tempfile
import shutil
import unittest

import ndex2

import cdapsutil
from cdapsutil.runner import Runner


class TestRunner(unittest.TestCase):
    """Tests for the base :py:class:`cdapsutil.runner.Runner` class."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def get_data_dir(self):
        """Return the path of the directory holding the test data files."""
        return os.path.join(os.path.dirname(__file__), 'data')

    def get_human_hiv_as_nice_cx(self):
        """Load the HIV-human PPI test network as a NiceCX network.

        :return: network loaded from ``data/hiv_human_ppi.cx``
        """
        return ndex2.create_nice_cx_from_file(os.path.join(self.get_data_dir(),
                                                           'hiv_human_ppi.cx'))

    def get_edge_dict(self, net_cx):
        """Build a mapping of source node id -> set of target node ids."""
        edge_dict = {}
        for edge_id, edge_obj in net_cx.get_edges():
            # setdefault replaces the original "check then create" pattern
            edge_dict.setdefault(edge_obj['s'], set()).add(edge_obj['t'])
        return edge_dict

    def test_get_edge_list(self):
        net_cx = self.get_human_hiv_as_nice_cx()
        edge_dict = self.get_edge_dict(net_cx)
        res = Runner._get_edge_list(net_cx)
        for entry in res.split('\n'):
            if not entry.strip():
                continue
            splitentry = entry.split('\t')
            # each line is "<source>\t<target>"; verify the pair exists
            self.assertIn(int(splitentry[1]),
                          edge_dict[int(splitentry[0])])

    def test_write_edge_list(self):
        temp_dir = tempfile.mkdtemp()
        try:
            net_cx = self.get_human_hiv_as_nice_cx()
            edge_dict = self.get_edge_dict(net_cx)
            input_edgelist = Runner._write_edge_list(net_cx, temp_dir)
            with open(input_edgelist, 'r') as f:
                for entry in f:
                    if not entry.strip():
                        continue
                    splitentry = entry.split('\t')
                    self.assertIn(int(splitentry[1]),
                                  edge_dict[int(splitentry[0])])
        finally:
            shutil.rmtree(temp_dir)

    def test_get_algorithms(self):
        # Base Runner is abstract-by-convention: get_algorithms() must raise.
        runner = Runner()
        with self.assertRaises(cdapsutil.CommunityDetectionError) as cm:
            runner.get_algorithms()
        self.assertEqual('Not implemented for this Runner', str(cm.exception))

    def test_run(self):
        # Base Runner is abstract-by-convention: run() must raise.
        runner = Runner()
        with self.assertRaises(cdapsutil.CommunityDetectionError) as cm:
            runner.run()
        self.assertEqual('Not implemented for this Runner', str(cm.exception))


if __name__ == '__main__':
    sys.exit(unittest.main())
#!/usr/bin/env python # -*- coding: utf-8 -*- """ test_runner ---------------------------------- Tests for `cdapsutil.runner` module. """ import os import stat import sys import tempfile import shutil import unittest import ndex2 import cdapsutil from cdapsutil.runner import Runner class TestRunner(unittest.TestCase): def setUp(self): pass def tearDown(self): pass def get_data_dir(self): return os.path.join(os.path.dirname(__file__), 'data') def get_human_hiv_as_nice_cx(self): """ :return: """ return ndex2.create_nice_cx_from_file(os.path.join(self.get_data_dir(), 'hiv_human_ppi.cx')) def get_edge_dict(self, net_cx): edge_dict = {} for edge_id, edge_obj in net_cx.get_edges(): if edge_obj['s'] not in edge_dict: edge_dict[edge_obj['s']] = set() edge_dict[edge_obj['s']].add(edge_obj['t']) return edge_dict def test_get_edge_list(self): net_cx = self.get_human_hiv_as_nice_cx() edge_dict = self.get_edge_dict(net_cx) res = Runner._get_edge_list(net_cx) for entry in res.split('\n'): if len(entry.strip()) == 0: continue splitentry = entry.split('\t') self.assertTrue(int(splitentry[1]) in edge_dict[int(splitentry[0])]) def test_write_edge_list(self): temp_dir = tempfile.mkdtemp() try: net_cx = self.get_human_hiv_as_nice_cx() edge_dict = self.get_edge_dict(net_cx) input_edgelist = Runner._write_edge_list(net_cx, temp_dir) with open(input_edgelist, 'r') as f: for entry in f: if len(entry.strip()) == 0: continue splitentry = entry.split('\t') self.assertTrue(int(splitentry[1]) in edge_dict[int(splitentry[0])]) finally: shutil.rmtree(temp_dir) def test_get_algorithms(self): runner = Runner() try: runner.get_algorithms() self.fail('Expected CommunityDetectionError') except cdapsutil.CommunityDetectionError as ce: self.assertEqual('Not implemented for this Runner', str(ce)) def test_run(self): runner = Runner() try: runner.run() self.fail('Expected CommunityDetectionError') except cdapsutil.CommunityDetectionError as ce: self.assertEqual('Not implemented for this Runner', str(ce)) 
if __name__ == '__main__': sys.exit(unittest.main())
en
0.304821
#!/usr/bin/env python # -*- coding: utf-8 -*- test_runner ---------------------------------- Tests for `cdapsutil.runner` module. :return:
2.371505
2
arch/api/test/dummy_test.py
chenj133/FATE
3
6625307
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from arch.api.eggroll import init, parallelize # from arch.api.cluster.mock_roll import init, parallelize import numpy as np def f(iterator): sum = 0 for k, v in iterator: sum += v return sum if __name__ == "__main__": init() _matrix = np.ones([400, 50]) _table = parallelize(_matrix, partition=40) c = _table.mapValues(lambda _x: _x) dict(c.collect()) print(list(c.collect())) _table = parallelize(["b", "a", "c"], partition=5) a = _table.mapValues(lambda _x: _x + "1") print(list(a.collect())) print(dict(a.collect())) print(list(_table.collect())) x = _table.map(lambda k, v: (v, v + "1")) print(list(x.collect())) _table = parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])], include_key=True) print(list(_table.mapValues(lambda _x: len(_x)).collect())) _table = parallelize([1, 2, 3, 4, 5], partition=2) print(list(_table.mapPartitions(f).collect())) from operator import add print(parallelize([1, 2, 3, 4, 5], partition=4).reduce(add)) x = parallelize([("a", 1), ("b", 4)], include_key=True) y = parallelize([("a", 2), ("c", 3)], include_key=True) print(list(x.join(y, lambda v1, v2: v1 + v2).collect())) x = parallelize(range(100), partition=4) print(x.sample(0.1, 81).count()) print(list(parallelize([0, 2, 3, 4, 6], partition=5).glom().collect()))
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # from arch.api.eggroll import init, parallelize # from arch.api.cluster.mock_roll import init, parallelize import numpy as np def f(iterator): sum = 0 for k, v in iterator: sum += v return sum if __name__ == "__main__": init() _matrix = np.ones([400, 50]) _table = parallelize(_matrix, partition=40) c = _table.mapValues(lambda _x: _x) dict(c.collect()) print(list(c.collect())) _table = parallelize(["b", "a", "c"], partition=5) a = _table.mapValues(lambda _x: _x + "1") print(list(a.collect())) print(dict(a.collect())) print(list(_table.collect())) x = _table.map(lambda k, v: (v, v + "1")) print(list(x.collect())) _table = parallelize([("a", ["apple", "banana", "lemon"]), ("b", ["grapes"])], include_key=True) print(list(_table.mapValues(lambda _x: len(_x)).collect())) _table = parallelize([1, 2, 3, 4, 5], partition=2) print(list(_table.mapPartitions(f).collect())) from operator import add print(parallelize([1, 2, 3, 4, 5], partition=4).reduce(add)) x = parallelize([("a", 1), ("b", 4)], include_key=True) y = parallelize([("a", 2), ("c", 3)], include_key=True) print(list(x.join(y, lambda v1, v2: v1 + v2).collect())) x = parallelize(range(100), partition=4) print(x.sample(0.1, 81).count()) print(list(parallelize([0, 2, 3, 4, 6], partition=5).glom().collect()))
en
0.820728
# # Copyright 2019 The FATE Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # from arch.api.cluster.mock_roll import init, parallelize
2.230907
2
python_crash_course/functions/make_album.py
heniu1985/Learning
0
6625308
def make_album(artist, title, tracks): """Zwraca słownik z informacją o artyście, tytule albumu i liczbie utworów""" album = {"Artysta": artist, "Tytuł": title, "Liczba utworów": int(tracks)} return album while True: print("Podaj nazwę artysty, tytuł albumu i liczbę utworów na płycie:") print("(w celu wyświetlenia słownika i wyjścia z programu wpisz 'q' w dowolnym momencie)") artist = input("Artysta: ") if artist == "q": break title = input("Tytuł: ") if title == "q": break tracks = input("Liczba utworów: ") if tracks == "q": break slownik = make_album(artist, title, tracks) print(slownik)
def make_album(artist, title, tracks): """Zwraca słownik z informacją o artyście, tytule albumu i liczbie utworów""" album = {"Artysta": artist, "Tytuł": title, "Liczba utworów": int(tracks)} return album while True: print("Podaj nazwę artysty, tytuł albumu i liczbę utworów na płycie:") print("(w celu wyświetlenia słownika i wyjścia z programu wpisz 'q' w dowolnym momencie)") artist = input("Artysta: ") if artist == "q": break title = input("Tytuł: ") if title == "q": break tracks = input("Liczba utworów: ") if tracks == "q": break slownik = make_album(artist, title, tracks) print(slownik)
pl
0.998608
Zwraca słownik z informacją o artyście, tytule albumu i liczbie utworów
3.8401
4
inselect/tests/lib/test_sparse_date.py
NaturalHistoryMuseum/inselect
128
6625309
import datetime
import unittest

from inselect.lib.inselect_error import InselectError
from inselect.lib.sparse_date import SparseDate


class TestSparseDate(unittest.TestCase):
    """Exercises SparseDate: a date whose month and/or day may be absent."""

    def test_init(self):
        self.assertRaises(ValueError, SparseDate, None, None, None)
        self.assertEqual((2012, 1, 1), tuple(SparseDate(2012, 1, 1)))
        self.assertEqual((2012, 12, 31), tuple(SparseDate(2012, 12, 31)))

    def test_as_bool(self):
        # Any constructible SparseDate is truthy, whatever its resolution.
        self.assertTrue(SparseDate(2012, None, None))
        self.assertTrue(SparseDate(2012, 1, None))
        self.assertTrue(SparseDate(2012, 1, 2))

    def test_resolution(self):
        # Resolution is the finest component that is present.
        self.assertEqual('day', SparseDate(2012, 2, 29).resolution)
        self.assertEqual('month', SparseDate(2012, 2, None).resolution)
        self.assertEqual('year', SparseDate(2012, None, None).resolution)

    def test_illegal_dates(self):
        # February in a leap year
        self.assertEqual((2012, 2, 29), tuple(SparseDate(2012, 2, 29)))

        # February in a non-leap year
        self.assertRaises(ValueError, SparseDate, 2011, 2, 29)

    def test_zero(self):
        # Zero year, month or day
        self.assertRaises(ValueError, SparseDate, 0000, None, None)
        self.assertRaises(ValueError, SparseDate, 2000, 0, None)
        self.assertRaises(ValueError, SparseDate, 2000, 1, 0)

    def test_invalid_month(self):
        self.assertRaises(ValueError, SparseDate, 2012, 0, 1)
        self.assertRaises(ValueError, SparseDate, 2012, -1, 1)
        self.assertRaises(ValueError, SparseDate, 2012, 13, 1)

    def test_invalid_day(self):
        self.assertRaises(ValueError, SparseDate, 2012, 1, -1)
        self.assertRaises(ValueError, SparseDate, 2012, 1, 32)

    def test_missing_year(self):
        # A month or day without a year is meaningless.
        self.assertRaises(ValueError, SparseDate, None, 1, None)
        self.assertRaises(ValueError, SparseDate, None, 1, 1)

    def test_missing_month(self):
        # A day without a month is meaningless.
        self.assertRaises(ValueError, SparseDate, 2012, None, 1)

    def test_noninteger_values(self):
        self.assertRaises(ValueError, SparseDate, 2012.0, None, None)
        self.assertRaises(ValueError, SparseDate, 2012, 1.0, None)
        self.assertRaises(ValueError, SparseDate, 2012, 1, 1.0)

    def test_downsample(self):
        # Downsampling discards components finer than the target resolution;
        # it is an error to downsample to a finer resolution than held.
        self.assertEqual(SparseDate(2012, 5, 1).downsample('day'),
                         SparseDate(2012, 5, 1))
        self.assertEqual(SparseDate(2012, 5, 1).downsample('month'),
                         SparseDate(2012, 5, None))
        self.assertEqual(SparseDate(2012, 5, 1).downsample('year'),
                         SparseDate(2012, None, None))

        with self.assertRaises(ValueError):
            SparseDate(2012, 1, None).downsample('day')

        with self.assertRaises(ValueError):
            SparseDate(2012, None, None).downsample('month')

        with self.assertRaises(ValueError):
            SparseDate(2012, 1, None).downsample('x')

        with self.assertRaises(ValueError):
            SparseDate(2012, 1, None).downsample('')

    def test_downsample_to_common(self):
        # All dates are reduced to the coarsest resolution in the group.
        a = SparseDate(2012, 6, 1)
        self.assertEqual([SparseDate(2012, 6, 1)],
                         list(SparseDate.downsample_to_common([a])))

        b = SparseDate(2012, 1, None)
        self.assertEqual([SparseDate(2012, 6, None), SparseDate(2012, 1, None)],
                         list(SparseDate.downsample_to_common([a, b])))

        c = SparseDate(2012, None, None)
        self.assertEqual([SparseDate(2012, None, None),
                          SparseDate(2012, None, None),
                          SparseDate(2012, None, None)],
                         list(SparseDate.downsample_to_common([a, b, c])))

    def test_comparison(self):
        # Equal
        self.assertEqual(SparseDate(2012, None, None), SparseDate(2012, None, None))
        self.assertEqual(SparseDate(2012, 1, None), SparseDate(2012, 1, None))
        self.assertEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 1))

        # Not equal
        self.assertNotEqual(SparseDate(2012, None, None), SparseDate(2011, None, None))
        self.assertNotEqual(SparseDate(2012, 1, None), SparseDate(2012, 2, None))
        self.assertNotEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 2))

        # Greater
        self.assertGreater(SparseDate(2013, None, None), SparseDate(2012, None, None))
        self.assertGreaterEqual(SparseDate(2012, None, None), SparseDate(2012, None, None))
        self.assertGreaterEqual(SparseDate(2013, None, None), SparseDate(2012, None, None))
        self.assertGreater(SparseDate(2012, 2, None), SparseDate(2012, 1, None))
        self.assertGreaterEqual(SparseDate(2012, 1, None), SparseDate(2012, 1, None))
        self.assertGreaterEqual(SparseDate(2012, 2, None), SparseDate(2012, 1, None))
        self.assertGreater(SparseDate(2012, 1, 2), SparseDate(2012, 1, 1))
        self.assertGreaterEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 1))
        self.assertGreaterEqual(SparseDate(2012, 1, 2), SparseDate(2012, 1, 1))

        # Less
        self.assertLess(SparseDate(2012, None, None), SparseDate(2013, None, None))
        self.assertLessEqual(SparseDate(2012, None, None), SparseDate(2012, None, None))
        self.assertLessEqual(SparseDate(2012, None, None), SparseDate(2013, None, None))
        self.assertLess(SparseDate(2012, 1, None), SparseDate(2012, 2, None))
        self.assertLessEqual(SparseDate(2012, 1, None), SparseDate(2012, 1, None))
        self.assertLessEqual(SparseDate(2012, 1, None), SparseDate(2012, 2, None))
        self.assertLess(SparseDate(2012, 1, 1), SparseDate(2012, 1, 2))
        self.assertLessEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 1))
        self.assertLessEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 2))

        # Can't compare SparseDates of different resolutions
        with self.assertRaises(InselectError):
            SparseDate(2012, None, None) > SparseDate(2012, 1, None)

        with self.assertRaises(InselectError):
            SparseDate(2012, 1, None) > SparseDate(2012, 1, 1)

        # Equality against non-SparseDate objects raises NotImplementedError.
        a = SparseDate(2012, 1, None)
        with self.assertRaises(NotImplementedError):
            a == ''

        with self.assertRaises(NotImplementedError):
            a == 1

    def test_range(self):
        # earliest()/latest() expand a sparse date to its bounding full dates.
        self.assertEqual(SparseDate(2012, 1, 1), SparseDate(2012, None, None).earliest())
        self.assertEqual(SparseDate(2012, 12, 31), SparseDate(2012, None, None).latest())
        self.assertEqual(SparseDate(2012, 2, 1), SparseDate(2012, 2, None).earliest())
        self.assertEqual(SparseDate(2012, 2, 29), SparseDate(2012, 2, None).latest())
        self.assertEqual(SparseDate(2012, 2, 5), SparseDate(2012, 2, 5).earliest())
        self.assertEqual(SparseDate(2012, 2, 5), SparseDate(2012, 2, 5).latest())

    def test_as_date(self):
        # Only day-resolution dates convert to datetime.date.
        self.assertEqual(datetime.date(2012, 8, 1), SparseDate(2012, 8, 1).as_date())

        with self.assertRaises(InselectError):
            SparseDate(2012, 8, None).as_date()

    def test_hash(self):
        a, b, c = SparseDate(2012, 8, 1), SparseDate(2012, 8, None), SparseDate(2012, None, None)
        self.assertEqual(hash(a), hash(SparseDate(2012, 8, 1)))
        self.assertNotEqual(hash(a), hash(b))
        self.assertNotEqual(hash(a), hash(c))
        self.assertNotEqual(hash(b), hash(c))

    def test_str(self):
        a = SparseDate(2012, 8, 1)
        b = SparseDate(2012, 8, None)
        c = SparseDate(2012, None, None)
        self.assertEqual('2012-8-1', str(a))
        self.assertEqual('2012-8-None', str(b))
        self.assertEqual('2012-None-None', str(c))
        # repr() must round-trip through eval.
        self.assertEqual(eval(repr(a)), a)
        self.assertEqual(eval(repr(b)), b)
        self.assertEqual(eval(repr(c)), c)


if __name__ == '__main__':
    unittest.main()
import datetime import unittest from inselect.lib.inselect_error import InselectError from inselect.lib.sparse_date import SparseDate class TestSparseDate(unittest.TestCase): def test_init(self): self.assertRaises(ValueError, SparseDate, None, None, None) self.assertEqual((2012, 1, 1), tuple(SparseDate(2012, 1, 1))) self.assertEqual((2012, 12, 31), tuple(SparseDate(2012, 12, 31))) def test_as_bool(self): self.assertTrue(SparseDate(2012, None, None)) self.assertTrue(SparseDate(2012, 1, None)) self.assertTrue(SparseDate(2012, 1, 2)) def test_resolution(self): self.assertEqual('day', SparseDate(2012, 2, 29).resolution) self.assertEqual('month', SparseDate(2012, 2, None).resolution) self.assertEqual('year', SparseDate(2012, None, None).resolution) def test_illegal_dates(self): # February in a leap year self.assertEqual((2012, 2, 29), tuple(SparseDate(2012, 2, 29))) # February in a non-leap year self.assertRaises(ValueError, SparseDate, 2011, 2, 29) def test_zero(self): # Zero year, month or day self.assertRaises(ValueError, SparseDate, 0000, None, None) self.assertRaises(ValueError, SparseDate, 2000, 0, None) self.assertRaises(ValueError, SparseDate, 2000, 1, 0) def test_invalid_month(self): self.assertRaises(ValueError, SparseDate, 2012, 0, 1) self.assertRaises(ValueError, SparseDate, 2012, -1, 1) self.assertRaises(ValueError, SparseDate, 2012, 13, 1) def test_invalid_day(self): self.assertRaises(ValueError, SparseDate, 2012, 1, -1) self.assertRaises(ValueError, SparseDate, 2012, 1, 32) def test_missing_year(self): self.assertRaises(ValueError, SparseDate, None, 1, None) self.assertRaises(ValueError, SparseDate, None, 1, 1) def test_missing_month(self): self.assertRaises(ValueError, SparseDate, 2012, None, 1) def test_noninteger_values(self): self.assertRaises(ValueError, SparseDate, 2012.0, None, None) self.assertRaises(ValueError, SparseDate, 2012, 1.0, None) self.assertRaises(ValueError, SparseDate, 2012, 1, 1.0) def test_downsample(self): 
self.assertEqual(SparseDate(2012, 5, 1).downsample('day'), SparseDate(2012, 5, 1)) self.assertEqual(SparseDate(2012, 5, 1).downsample('month'), SparseDate(2012, 5, None)) self.assertEqual(SparseDate(2012, 5, 1).downsample('year'), SparseDate(2012, None, None)) with self.assertRaises(ValueError): SparseDate(2012, 1, None).downsample('day') with self.assertRaises(ValueError): SparseDate(2012, None, None).downsample('month') with self.assertRaises(ValueError): SparseDate(2012, 1, None).downsample('x') with self.assertRaises(ValueError): SparseDate(2012, 1, None).downsample('') def test_downsample_to_common(self): a = SparseDate(2012, 6, 1) self.assertEqual([SparseDate(2012, 6, 1)], list(SparseDate.downsample_to_common([a]))) b = SparseDate(2012, 1, None) self.assertEqual([SparseDate(2012, 6, None), SparseDate(2012, 1, None)], list(SparseDate.downsample_to_common([a, b]))) c = SparseDate(2012, None, None) self.assertEqual([SparseDate(2012, None, None), SparseDate(2012, None, None), SparseDate(2012, None, None)], list(SparseDate.downsample_to_common([a, b, c]))) def test_comparison(self): # Equal self.assertEqual(SparseDate(2012, None, None), SparseDate(2012, None, None)) self.assertEqual(SparseDate(2012, 1, None), SparseDate(2012, 1, None)) self.assertEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 1)) # Not equal self.assertNotEqual(SparseDate(2012, None, None), SparseDate(2011, None, None)) self.assertNotEqual(SparseDate(2012, 1, None), SparseDate(2012, 2, None)) self.assertNotEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 2)) # Greater self.assertGreater(SparseDate(2013, None, None), SparseDate(2012, None, None)) self.assertGreaterEqual(SparseDate(2012, None, None), SparseDate(2012, None, None)) self.assertGreaterEqual(SparseDate(2013, None, None), SparseDate(2012, None, None)) self.assertGreater(SparseDate(2012, 2, None), SparseDate(2012, 1, None)) self.assertGreaterEqual(SparseDate(2012, 1, None), SparseDate(2012, 1, None)) 
self.assertGreaterEqual(SparseDate(2012, 2, None), SparseDate(2012, 1, None)) self.assertGreater(SparseDate(2012, 1, 2), SparseDate(2012, 1, 1)) self.assertGreaterEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 1)) self.assertGreaterEqual(SparseDate(2012, 1, 2), SparseDate(2012, 1, 1)) # Less self.assertLess(SparseDate(2012, None, None), SparseDate(2013, None, None)) self.assertLessEqual(SparseDate(2012, None, None), SparseDate(2012, None, None)) self.assertLessEqual(SparseDate(2012, None, None), SparseDate(2013, None, None)) self.assertLess(SparseDate(2012, 1, None), SparseDate(2012, 2, None)) self.assertLessEqual(SparseDate(2012, 1, None), SparseDate(2012, 1, None)) self.assertLessEqual(SparseDate(2012, 1, None), SparseDate(2012, 2, None)) self.assertLess(SparseDate(2012, 1, 1), SparseDate(2012, 1, 2)) self.assertLessEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 1)) self.assertLessEqual(SparseDate(2012, 1, 1), SparseDate(2012, 1, 2)) # Can't compare SparseDates of different resolutions with self.assertRaises(InselectError): SparseDate(2012, None, None) > SparseDate(2012, 1, None) with self.assertRaises(InselectError): SparseDate(2012, 1, None) > SparseDate(2012, 1, 1) a = SparseDate(2012, 1, None) with self.assertRaises(NotImplementedError): a == '' with self.assertRaises(NotImplementedError): a == 1 def test_range(self): self.assertEqual(SparseDate(2012, 1, 1), SparseDate(2012, None, None).earliest()) self.assertEqual(SparseDate(2012, 12, 31), SparseDate(2012, None, None).latest()) self.assertEqual(SparseDate(2012, 2, 1), SparseDate(2012, 2, None).earliest()) self.assertEqual(SparseDate(2012, 2, 29), SparseDate(2012, 2, None).latest()) self.assertEqual(SparseDate(2012, 2, 5), SparseDate(2012, 2, 5).earliest()) self.assertEqual(SparseDate(2012, 2, 5), SparseDate(2012, 2, 5).latest()) def test_as_date(self): self.assertEqual(datetime.date(2012, 8, 1), SparseDate(2012, 8, 1).as_date()) with self.assertRaises(InselectError): SparseDate(2012, 8, None).as_date() 
def test_hash(self): a, b, c = SparseDate(2012, 8, 1), SparseDate(2012, 8, None), SparseDate(2012, None, None) self.assertEqual(hash(a), hash(SparseDate(2012, 8, 1))) self.assertNotEqual(hash(a), hash(b)) self.assertNotEqual(hash(a), hash(c)) self.assertNotEqual(hash(b), hash(c)) def test_str(self): a = SparseDate(2012, 8, 1) b = SparseDate(2012, 8, None) c = SparseDate(2012, None, None) self.assertEqual('2012-8-1', str(a)) self.assertEqual('2012-8-None', str(b)) self.assertEqual('2012-None-None', str(c)) self.assertEqual(eval(repr(a)), a) self.assertEqual(eval(repr(b)), b) self.assertEqual(eval(repr(c)), c) if __name__ == '__main__': unittest.main()
en
0.870359
# February in a leap year # February in a non-leap year # Zero year, month or day # Equal # Not equal # Greater # Less # Can't compare SparseDates of different resolutions
2.932548
3
runs/kubernetes/start_haproxy_bak.py
Ruilkyu/kubernetes_start
2
6625310
<reponame>Ruilkyu/kubernetes_start<filename>runs/kubernetes/start_haproxy_bak.py """ 时间:2020/6/13 作者:lurui 功能:在master部署并启动haproxy """ import os import subprocess import time def start_haproxy(): basedir = os.path.dirname(os.path.dirname(os.getcwd())) haproxy_path = basedir + '/deploy/haproxy' masterpath = basedir + '/ansible/hosts/master_hosts' print("Sir,Starting Copy Haproxy Config!") try: copy_haproxy_cfg = subprocess.check_output('''ansible master -i {0} -m copy -a "src={1}/cfg/haproxy.cfg dest=/etc/haproxy/"'''.format(masterpath, haproxy_path), shell=True) print(copy_haproxy_cfg.decode()) except Exception as e: print(e) print("Sir,Copy Haproxy Config Has Completed!") print("Sir,Starting Copy Haproxy Svc!") try: copy_haproxy_svc = subprocess.check_output( '''ansible master -i {0} -m copy -a "src={1}/svc/haproxy.service dest=/usr/lib/systemd/system/"'''.format(masterpath, haproxy_path), shell=True) print(copy_haproxy_svc.decode()) except Exception as e: print(e) print("Sir,Copy Haproxy Svc Has Completed!") print("Sir,Starting Copy Haproxy Bin!") try: copy_haproxy_bin = subprocess.check_output( '''ansible master -i {0} -m copy -a "src={1}/bin dest=/tmp/"'''.format(masterpath, haproxy_path), shell=True) print(copy_haproxy_bin.decode()) add_haproxy_bin = subprocess.check_output('''ansible master -i {0} -m shell -a "systemctl stop haproxy && cd /tmp/bin && chmod +x * && cp * /usr/sbin/ && rm -rf /tmp/*"'''.format(masterpath), shell=True) print(add_haproxy_bin.decode()) except Exception as e: print(e) print("Sir,Copy Haproxy Bin Has Completed!") time.sleep(5) print("Sir,Starting Start Haproxy!") try: start_haproxy = subprocess.check_output('''ansible master -i {0} -m shell -a "systemctl daemon-reload && systemctl enable haproxy && systemctl restart haproxy"'''.format(masterpath), shell=True) print(start_haproxy.decode()) except Exception as e: print(e) print("Sir,Start Haproxy Has Completed!") start_haproxy()
""" 时间:2020/6/13 作者:lurui 功能:在master部署并启动haproxy """ import os import subprocess import time def start_haproxy(): basedir = os.path.dirname(os.path.dirname(os.getcwd())) haproxy_path = basedir + '/deploy/haproxy' masterpath = basedir + '/ansible/hosts/master_hosts' print("Sir,Starting Copy Haproxy Config!") try: copy_haproxy_cfg = subprocess.check_output('''ansible master -i {0} -m copy -a "src={1}/cfg/haproxy.cfg dest=/etc/haproxy/"'''.format(masterpath, haproxy_path), shell=True) print(copy_haproxy_cfg.decode()) except Exception as e: print(e) print("Sir,Copy Haproxy Config Has Completed!") print("Sir,Starting Copy Haproxy Svc!") try: copy_haproxy_svc = subprocess.check_output( '''ansible master -i {0} -m copy -a "src={1}/svc/haproxy.service dest=/usr/lib/systemd/system/"'''.format(masterpath, haproxy_path), shell=True) print(copy_haproxy_svc.decode()) except Exception as e: print(e) print("Sir,Copy Haproxy Svc Has Completed!") print("Sir,Starting Copy Haproxy Bin!") try: copy_haproxy_bin = subprocess.check_output( '''ansible master -i {0} -m copy -a "src={1}/bin dest=/tmp/"'''.format(masterpath, haproxy_path), shell=True) print(copy_haproxy_bin.decode()) add_haproxy_bin = subprocess.check_output('''ansible master -i {0} -m shell -a "systemctl stop haproxy && cd /tmp/bin && chmod +x * && cp * /usr/sbin/ && rm -rf /tmp/*"'''.format(masterpath), shell=True) print(add_haproxy_bin.decode()) except Exception as e: print(e) print("Sir,Copy Haproxy Bin Has Completed!") time.sleep(5) print("Sir,Starting Start Haproxy!") try: start_haproxy = subprocess.check_output('''ansible master -i {0} -m shell -a "systemctl daemon-reload && systemctl enable haproxy && systemctl restart haproxy"'''.format(masterpath), shell=True) print(start_haproxy.decode()) except Exception as e: print(e) print("Sir,Start Haproxy Has Completed!") start_haproxy()
en
0.339652
时间:2020/6/13 作者:lurui 功能:在master部署并启动haproxy ansible master -i {0} -m copy -a "src={1}/cfg/haproxy.cfg dest=/etc/haproxy/" ansible master -i {0} -m copy -a "src={1}/svc/haproxy.service dest=/usr/lib/systemd/system/" ansible master -i {0} -m copy -a "src={1}/bin dest=/tmp/" ansible master -i {0} -m shell -a "systemctl stop haproxy && cd /tmp/bin && chmod +x * && cp * /usr/sbin/ && rm -rf /tmp/*" ansible master -i {0} -m shell -a "systemctl daemon-reload && systemctl enable haproxy && systemctl restart haproxy"
2.339352
2
CrashCourse/birthday.py
axetang/AxePython
1
6625311
<filename>CrashCourse/birthday.py<gh_stars>1-10 age = 23 message = "Happy " + str(age) + "rd Birthday!" print(message, 3/2, 3/2.0) # this is comment
<filename>CrashCourse/birthday.py<gh_stars>1-10 age = 23 message = "Happy " + str(age) + "rd Birthday!" print(message, 3/2, 3/2.0) # this is comment
en
0.952623
# this is comment
2.841553
3
main.py
PracowniaProg/minesweeper
0
6625312
<filename>main.py import sys from PyQt5.QtWidgets import QApplication from source.gui import MainWidget # Starting the application if __name__ == '__main__': app = QApplication(sys.argv) w = MainWidget() w.setWindowTitle("Minesweeper") w.show() sys.exit(app.exec_())
<filename>main.py import sys from PyQt5.QtWidgets import QApplication from source.gui import MainWidget # Starting the application if __name__ == '__main__': app = QApplication(sys.argv) w = MainWidget() w.setWindowTitle("Minesweeper") w.show() sys.exit(app.exec_())
en
0.814578
# Starting the application
2.738351
3
ibis/tests/expr/test_case.py
GrapeBaBa/ibis
1
6625313
<reponame>GrapeBaBa/ibis import ibis import ibis.expr.datatypes as dt import ibis.expr.operations as ops import ibis.expr.types as ir from ibis.tests.util import assert_equal, assert_pickle_roundtrip def test_ifelse(table): bools = table.g.isnull() result = bools.ifelse("foo", "bar") assert isinstance(result, ir.StringColumn) def test_simple_case_expr(table): case1, result1 = "foo", table.a case2, result2 = "bar", table.c default_result = table.b expr1 = table.g.lower().cases( [(case1, result1), (case2, result2)], default=default_result ) expr2 = ( table.g.lower() .case() .when(case1, result1) .when(case2, result2) .else_(default_result) .end() ) assert_equal(expr1, expr2) assert isinstance(expr1, ir.IntegerColumn) def test_multiple_case_expr(table): case1 = table.a == 5 case2 = table.b == 128 case3 = table.c == 1000 result1 = table.f result2 = table.b * 2 result3 = table.e default = table.d expr = ( ibis.case() .when(case1, result1) .when(case2, result2) .when(case3, result3) .else_(default) .end() ) op = expr.op() assert isinstance(expr, ir.FloatingColumn) assert isinstance(op, ops.SearchedCase) assert op.default is default def test_pickle_multiple_case_node(table): case1 = table.a == 5 case2 = table.b == 128 case3 = table.c == 1000 result1 = table.f result2 = table.b * 2 result3 = table.e default = table.d expr = ( ibis.case() .when(case1, result1) .when(case2, result2) .when(case3, result3) .else_(default) .end() ) op = expr.op() assert_pickle_roundtrip(op) def test_simple_case_null_else(table): expr = table.g.case().when("foo", "bar").end() op = expr.op() assert isinstance(expr, ir.StringColumn) assert isinstance(op.default, ir.ValueExpr) assert isinstance(op.default.op(), ops.Cast) assert op.default.op().to == dt.string def test_multiple_case_null_else(table): expr = ibis.case().when(table.g == "foo", "bar").end() op = expr.op() assert isinstance(expr, ir.StringColumn) assert isinstance(op.default, ir.ValueExpr) assert isinstance(op.default.op(), ops.Cast) 
assert op.default.op().to == dt.string def test_case_mixed_type(): t0 = ibis.table( [('one', 'string'), ('two', 'double'), ('three', 'int32')], name='my_data', ) expr = ( t0.three.case() .when(0, 'low') .when(1, 'high') .else_('null') .end() .name('label') ) result = t0[expr] assert result['label'].type().equals(dt.string)
import ibis import ibis.expr.datatypes as dt import ibis.expr.operations as ops import ibis.expr.types as ir from ibis.tests.util import assert_equal, assert_pickle_roundtrip def test_ifelse(table): bools = table.g.isnull() result = bools.ifelse("foo", "bar") assert isinstance(result, ir.StringColumn) def test_simple_case_expr(table): case1, result1 = "foo", table.a case2, result2 = "bar", table.c default_result = table.b expr1 = table.g.lower().cases( [(case1, result1), (case2, result2)], default=default_result ) expr2 = ( table.g.lower() .case() .when(case1, result1) .when(case2, result2) .else_(default_result) .end() ) assert_equal(expr1, expr2) assert isinstance(expr1, ir.IntegerColumn) def test_multiple_case_expr(table): case1 = table.a == 5 case2 = table.b == 128 case3 = table.c == 1000 result1 = table.f result2 = table.b * 2 result3 = table.e default = table.d expr = ( ibis.case() .when(case1, result1) .when(case2, result2) .when(case3, result3) .else_(default) .end() ) op = expr.op() assert isinstance(expr, ir.FloatingColumn) assert isinstance(op, ops.SearchedCase) assert op.default is default def test_pickle_multiple_case_node(table): case1 = table.a == 5 case2 = table.b == 128 case3 = table.c == 1000 result1 = table.f result2 = table.b * 2 result3 = table.e default = table.d expr = ( ibis.case() .when(case1, result1) .when(case2, result2) .when(case3, result3) .else_(default) .end() ) op = expr.op() assert_pickle_roundtrip(op) def test_simple_case_null_else(table): expr = table.g.case().when("foo", "bar").end() op = expr.op() assert isinstance(expr, ir.StringColumn) assert isinstance(op.default, ir.ValueExpr) assert isinstance(op.default.op(), ops.Cast) assert op.default.op().to == dt.string def test_multiple_case_null_else(table): expr = ibis.case().when(table.g == "foo", "bar").end() op = expr.op() assert isinstance(expr, ir.StringColumn) assert isinstance(op.default, ir.ValueExpr) assert isinstance(op.default.op(), ops.Cast) assert op.default.op().to 
== dt.string def test_case_mixed_type(): t0 = ibis.table( [('one', 'string'), ('two', 'double'), ('three', 'int32')], name='my_data', ) expr = ( t0.three.case() .when(0, 'low') .when(1, 'high') .else_('null') .end() .name('label') ) result = t0[expr] assert result['label'].type().equals(dt.string)
none
1
2.32357
2
hiargparse/alternatives/__init__.py
KKawamura1/hiargparse
4
6625314
<gh_stars>1-10 from .arg_parse import ArgumentParser from .namespace import Namespace
from .arg_parse import ArgumentParser from .namespace import Namespace
none
1
1.230838
1
app/modules/User/routes/userACPBlueprint.py
VadymHutei/ukubuka
0
6625315
<filename>app/modules/User/routes/userACPBlueprint.py from flask import Blueprint, request from modules.Language.requestDecorators import languageRedirect from modules.Session.requestDecorators import withSession from modules.User.controllers.UserACPController import UserACPController userACPBlueprint = Blueprint('userACPBlueprint', __name__, url_prefix='/<string:language>/acp/users') userACPController = UserACPController() @userACPBlueprint.route('', methods=['GET']) @languageRedirect @withSession def usersACPRoute(): return userACPController.usersPageAction() @userACPBlueprint.route('/edit', methods=['GET', 'POST']) @languageRedirect @withSession def editUserACPRoute(): if request.method == 'GET': return userACPController.editUserPageAction() elif request.method == 'POST': return userACPController.editUserAction() @userACPBlueprint.route('/block', methods=['GET']) @languageRedirect @withSession def blockUserACPRoute(): return userACPController.blockUserAction() @userACPBlueprint.route('/unblock', methods=['GET']) @languageRedirect @withSession def unblockUserACPRoute(): return userACPController.unblockUserAction() @userACPBlueprint.route('/delete', methods=['GET']) @languageRedirect @withSession def deleteUserACPRoute(): return userACPController.deleteUserAction()
<filename>app/modules/User/routes/userACPBlueprint.py from flask import Blueprint, request from modules.Language.requestDecorators import languageRedirect from modules.Session.requestDecorators import withSession from modules.User.controllers.UserACPController import UserACPController userACPBlueprint = Blueprint('userACPBlueprint', __name__, url_prefix='/<string:language>/acp/users') userACPController = UserACPController() @userACPBlueprint.route('', methods=['GET']) @languageRedirect @withSession def usersACPRoute(): return userACPController.usersPageAction() @userACPBlueprint.route('/edit', methods=['GET', 'POST']) @languageRedirect @withSession def editUserACPRoute(): if request.method == 'GET': return userACPController.editUserPageAction() elif request.method == 'POST': return userACPController.editUserAction() @userACPBlueprint.route('/block', methods=['GET']) @languageRedirect @withSession def blockUserACPRoute(): return userACPController.blockUserAction() @userACPBlueprint.route('/unblock', methods=['GET']) @languageRedirect @withSession def unblockUserACPRoute(): return userACPController.unblockUserAction() @userACPBlueprint.route('/delete', methods=['GET']) @languageRedirect @withSession def deleteUserACPRoute(): return userACPController.deleteUserAction()
none
1
2.27075
2
masterCategory.py
bKolisnik/Condition-CNN
1
6625316
<filename>masterCategory.py import tensorflow as tf from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Flatten, Input, Conv2D, concatenate from tensorflow.keras.layers import BatchNormalization, Dropout from tensorflow.keras.models import Model from tensorflow.keras.optimizers import SGD import numpy as np import tensorflow.keras.backend as K from tensorflow.keras.callbacks import Callback, ModelCheckpoint import sys class MasterCategory: '''This model is based off of VGG16 with the addition of BatchNorm layers and then branching ''' def __init__(self, label): self.master_classes=4 self.sub_classes=21 self.art_classes=45 input_image = Input(shape=(224,224,3),name="InputImg") #--- block 1 --- x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_image) x = BatchNormalization()(x) x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) #--- block 2 --- x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) x = BatchNormalization()(x) x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) #--- block 3 --- x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) x = BatchNormalization()(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) #--- block 4 --- x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) x = BatchNormalization()(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', 
name='block4_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) #--- block 5 masterCategory --- x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1_mas')(x) x = BatchNormalization()(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2_mas')(x) x = BatchNormalization()(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3_mas')(x) x = BatchNormalization()(x) #--- masterCategory prediction--- x = Flatten(name='flatten')(x) x = Dense(256, activation='relu', name='fc_mas')(x) x = BatchNormalization()(x) x = Dropout(0.5)(x) x = Dense(256, activation='relu', name='fc2_mas')(x) x = BatchNormalization()(x) x = Dropout(0.5)(x) pred = Dense(self.master_classes, activation='softmax', name='master_output')(x) model = Model( inputs=input_image, outputs=pred, name="Baseline_masterCategory_CNN") trainable_params= np.sum([K.count_params(w) for w in model.trainable_weights]) #trainable_params = tf.keras.backend.count_params(model.trainable_weights) print("Trainable paramaters: "+str(trainable_params)) #Keras will automaticall use categorical accuracy when accuracy is used. model.compile(optimizer=SGD(lr=0.001, momentum=0.9), loss='categorical_crossentropy', metrics=['categorical_accuracy']) checkpoint = ModelCheckpoint("../weights/"+label+"_best_weights.h5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True,mode='auto') self.cbks = [checkpoint] self.model = model
<filename>masterCategory.py import tensorflow as tf from tensorflow.keras.applications.resnet50 import ResNet50 from tensorflow.keras.applications.vgg16 import VGG16 from tensorflow.keras.applications.inception_v3 import InceptionV3 from tensorflow.keras.layers import Dense, GlobalAveragePooling2D, MaxPooling2D, Flatten, Input, Conv2D, concatenate from tensorflow.keras.layers import BatchNormalization, Dropout from tensorflow.keras.models import Model from tensorflow.keras.optimizers import SGD import numpy as np import tensorflow.keras.backend as K from tensorflow.keras.callbacks import Callback, ModelCheckpoint import sys class MasterCategory: '''This model is based off of VGG16 with the addition of BatchNorm layers and then branching ''' def __init__(self, label): self.master_classes=4 self.sub_classes=21 self.art_classes=45 input_image = Input(shape=(224,224,3),name="InputImg") #--- block 1 --- x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv1')(input_image) x = BatchNormalization()(x) x = Conv2D(64, (3, 3), activation='relu', padding='same', name='block1_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block1_pool')(x) #--- block 2 --- x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv1')(x) x = BatchNormalization()(x) x = Conv2D(128, (3, 3), activation='relu', padding='same', name='block2_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block2_pool')(x) #--- block 3 --- x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv1')(x) x = BatchNormalization()(x) x = Conv2D(256, (3, 3), activation='relu', padding='same', name='block3_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block3_pool')(x) #--- block 4 --- x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block4_conv1')(x) x = BatchNormalization()(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', 
name='block4_conv2')(x) x = BatchNormalization()(x) x = MaxPooling2D((2, 2), strides=(2, 2), name='block4_pool')(x) #--- block 5 masterCategory --- x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv1_mas')(x) x = BatchNormalization()(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv2_mas')(x) x = BatchNormalization()(x) x = Conv2D(512, (3, 3), activation='relu', padding='same', name='block5_conv3_mas')(x) x = BatchNormalization()(x) #--- masterCategory prediction--- x = Flatten(name='flatten')(x) x = Dense(256, activation='relu', name='fc_mas')(x) x = BatchNormalization()(x) x = Dropout(0.5)(x) x = Dense(256, activation='relu', name='fc2_mas')(x) x = BatchNormalization()(x) x = Dropout(0.5)(x) pred = Dense(self.master_classes, activation='softmax', name='master_output')(x) model = Model( inputs=input_image, outputs=pred, name="Baseline_masterCategory_CNN") trainable_params= np.sum([K.count_params(w) for w in model.trainable_weights]) #trainable_params = tf.keras.backend.count_params(model.trainable_weights) print("Trainable paramaters: "+str(trainable_params)) #Keras will automaticall use categorical accuracy when accuracy is used. model.compile(optimizer=SGD(lr=0.001, momentum=0.9), loss='categorical_crossentropy', metrics=['categorical_accuracy']) checkpoint = ModelCheckpoint("../weights/"+label+"_best_weights.h5", monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=True,mode='auto') self.cbks = [checkpoint] self.model = model
en
0.685959
This model is based off of VGG16 with the addition of BatchNorm layers and then branching #--- block 1 --- #--- block 2 --- #--- block 3 --- #--- block 4 --- #--- block 5 masterCategory --- #--- masterCategory prediction--- #trainable_params = tf.keras.backend.count_params(model.trainable_weights) #Keras will automaticall use categorical accuracy when accuracy is used.
2.537768
3
src/lbzdisc/cogs.py
deafmute1/listenbrainz-disc
1
6625317
<reponame>deafmute1/listenbrainz-disc #stdlib from enum import Enum from typing import Iterable, Tuple, Union, Optional import logging import importlib.metadata from datetime import datetime #self import lbzdisc.utils as utils from lbzdisc.data import DataManager #pypi from discord.ext import commands import discord import pylistenbrainz import musicbrainzngs musicbrainzngs.set_useragent('listenbrainz-disc', importlib.metadata.version('listenbrainz-disc') , contact='https://github.com/deafmute1/listenbrainz-disc') modes = ['PLAIN', 'PLAINFULL', 'EMBED', 'EMBEDFULL'] plain_view_modes = ('PLAIN', 'PLAINFULL') embed_view_modes = ('EMBED', 'EMBEDFULL') class Core(commands.Cog): def __init__(self, bot:commands.Bot, data: DataManager) -> None: self.bot = bot self.data = data self.lbz = pylistenbrainz.ListenBrainz() async def _format_no_data(self, text: str) -> Union[str, discord.Embed, None]: content, embed = None, None mode = self.data.get_global_option('mode') if mode in plain_view_modes: content = text elif mode in embed_view_modes: embed = discord.Embed(description = text) return content, embed async def _get_cover_url(self, listen: pylistenbrainz.Listen) -> Union[str, discord.Embed]: try: if listen.release_group_mbid is not None: images = musicbrainzngs.get_release_group_image_list(listen.release_group_mbid)['images'] elif listen.release_mbid is not None: images = musicbrainzngs.get_image_list(listen.release_mbid)['images'] else: res = musicbrainzngs.search_releases(listen.release_name, artist=listen.artist_name)['release-list'] if res is not None and res != []: images = musicbrainzngs.get_image_list(res[0])['images'] for image in images: if image['front']: return image['thumbnails']['250'] except Exception: logging.exception("Failed to retrieve cover url from musicbrainz API") async def _format_listens(self, header: str, footer: str, listens: list, ctx: commands.Context) -> Union[str, discord.Embed, None]: content = None embed = None mode = 
self.data.get_global_option('mode') if mode == 'PLAIN': content = header + \ ''.join([f'{i+1}: **{e.track_name}** by {e.artist_name} | {e.release_name}\n' for i, e in enumerate(listens)]) + \ footer elif mode == 'PLAINFULL': content = header + \ ''.join([f'{i+1}: **{e.track_name}** by {e.artist_name} | {e.release_name}\n at {datetime.fromtimestamp(e.listened_at)}' for i, e in enumerate(listens)]) + \ footer elif mode in embed_view_modes: uid = self.data.get_user_by_id(ctx.author.id) embed = discord.Embed() embed.set_author( name = header, url = f'https://listenbrainz.org/user/{uid}/', icon_url = ctx.author.avatar_url ) embed.set_footer(text = footer) url = await self._get_cover_url(listens[0]) if url is not None: logging.debug("Got thumbnail url from musicbrainz: {url}") embed.set_thumbnail(url = url) for i, e in enumerate(listens): field = f'By {e.artist_name} | {e.release_name}' if mode == 'EMBEDFULL': field += f'\n At {datetime.fromtimestamp(e.listened_at)}' embed.add_field(name=f'**{i+1}: {e.track_name} **', value=field) return (content, embed) @commands.command( aliases = ['lbz','lb','r', 'listens'], description = 'Get recent listens for user', usage = 'lbz <NUMBER OF LISTENS=2> <LISTENBRAINZ USER=!set_user>' ) async def get_listens(self, ctx: commands.Context, count: Optional[int] = 2, *, user: Optional[str] = None) -> None: if user is None: user = self.data.get_user_by_id(ctx.author.id) if user is None: await ctx.send(f'You neither specified a user, nor have a user set using set_user.') content, embed = await self._format_listens( f'Recent listens for {utils.nick_or_name(ctx.author)}: \n', f'{utils.nick_or_name(ctx.author)} has {self.lbz.get_user_listen_count(user)} listens', self.lbz.get_listens(user, count=count), ctx ) if embed is not None: await ctx.send(embed = embed) else: await ctx.send(content=content, embed = embed) @commands.command( alias = ['np', 'playing'], description = 'Get current listen for user', usage = 'np <LISTENBRAINZ USER=!set_user>' ) 
async def now_playing(self, ctx: commands.Context, *, user: Optional[str] = None) -> None: if user is None: user = self.data.get_user_by_id(ctx.author.id) if user is None: await ctx.send(f'You neither specified a user, nor have a user set using set_user.') return listen = self.lbz.get_playing_now(user) logging.debug(f'Got listens: {listen}') if listen is None: content, embed = await self._format_no_data(f'Nothing currently playing for {utils.nick_or_name(ctx.author)}') else: content, embed = await self._format_listens( f'Now playing for {utils.nick_or_name(ctx.author)}', f'{utils.nick_or_name(ctx.author)} has {self.lbz.get_user_listen_count(user)} listens', [listen], ctx ) if content is None: await ctx.send(embed=embed) else: await ctx.send(content=content, embed = embed) @commands.command(description = 'Set your listenbrainz username') async def set_user(self, ctx: commands.Context, *, arg: str): await self.data.set_user_for_id(ctx.author.id, arg) await ctx.send(f'Set {utils.nick_or_name(ctx.author)}\'s listenbrainz user to {self.data.get_user_by_id(ctx.author.id)}') @commands.command(description = '[ADMIN] Set reply mode (overrides env settings). \ Valid values: ' + ','.join(modes) ) @commands.has_permissions(administrator = True) async def set_mode(self, ctx:commands.Context, *, mode: str): mode = mode.upper() if mode not in modes: await ctx.send('Invalid mode. Valid values are: ' + ','.join(modes)) return await self.data.set_global_option('mode', mode) set_mode = self.data.get_global_option('mode') await ctx.send(f'Set mode to {set_mode}')
#stdlib from enum import Enum from typing import Iterable, Tuple, Union, Optional import logging import importlib.metadata from datetime import datetime #self import lbzdisc.utils as utils from lbzdisc.data import DataManager #pypi from discord.ext import commands import discord import pylistenbrainz import musicbrainzngs musicbrainzngs.set_useragent('listenbrainz-disc', importlib.metadata.version('listenbrainz-disc') , contact='https://github.com/deafmute1/listenbrainz-disc') modes = ['PLAIN', 'PLAINFULL', 'EMBED', 'EMBEDFULL'] plain_view_modes = ('PLAIN', 'PLAINFULL') embed_view_modes = ('EMBED', 'EMBEDFULL') class Core(commands.Cog): def __init__(self, bot:commands.Bot, data: DataManager) -> None: self.bot = bot self.data = data self.lbz = pylistenbrainz.ListenBrainz() async def _format_no_data(self, text: str) -> Union[str, discord.Embed, None]: content, embed = None, None mode = self.data.get_global_option('mode') if mode in plain_view_modes: content = text elif mode in embed_view_modes: embed = discord.Embed(description = text) return content, embed async def _get_cover_url(self, listen: pylistenbrainz.Listen) -> Union[str, discord.Embed]: try: if listen.release_group_mbid is not None: images = musicbrainzngs.get_release_group_image_list(listen.release_group_mbid)['images'] elif listen.release_mbid is not None: images = musicbrainzngs.get_image_list(listen.release_mbid)['images'] else: res = musicbrainzngs.search_releases(listen.release_name, artist=listen.artist_name)['release-list'] if res is not None and res != []: images = musicbrainzngs.get_image_list(res[0])['images'] for image in images: if image['front']: return image['thumbnails']['250'] except Exception: logging.exception("Failed to retrieve cover url from musicbrainz API") async def _format_listens(self, header: str, footer: str, listens: list, ctx: commands.Context) -> Union[str, discord.Embed, None]: content = None embed = None mode = self.data.get_global_option('mode') if mode == 'PLAIN': 
content = header + \ ''.join([f'{i+1}: **{e.track_name}** by {e.artist_name} | {e.release_name}\n' for i, e in enumerate(listens)]) + \ footer elif mode == 'PLAINFULL': content = header + \ ''.join([f'{i+1}: **{e.track_name}** by {e.artist_name} | {e.release_name}\n at {datetime.fromtimestamp(e.listened_at)}' for i, e in enumerate(listens)]) + \ footer elif mode in embed_view_modes: uid = self.data.get_user_by_id(ctx.author.id) embed = discord.Embed() embed.set_author( name = header, url = f'https://listenbrainz.org/user/{uid}/', icon_url = ctx.author.avatar_url ) embed.set_footer(text = footer) url = await self._get_cover_url(listens[0]) if url is not None: logging.debug("Got thumbnail url from musicbrainz: {url}") embed.set_thumbnail(url = url) for i, e in enumerate(listens): field = f'By {e.artist_name} | {e.release_name}' if mode == 'EMBEDFULL': field += f'\n At {datetime.fromtimestamp(e.listened_at)}' embed.add_field(name=f'**{i+1}: {e.track_name} **', value=field) return (content, embed) @commands.command( aliases = ['lbz','lb','r', 'listens'], description = 'Get recent listens for user', usage = 'lbz <NUMBER OF LISTENS=2> <LISTENBRAINZ USER=!set_user>' ) async def get_listens(self, ctx: commands.Context, count: Optional[int] = 2, *, user: Optional[str] = None) -> None: if user is None: user = self.data.get_user_by_id(ctx.author.id) if user is None: await ctx.send(f'You neither specified a user, nor have a user set using set_user.') content, embed = await self._format_listens( f'Recent listens for {utils.nick_or_name(ctx.author)}: \n', f'{utils.nick_or_name(ctx.author)} has {self.lbz.get_user_listen_count(user)} listens', self.lbz.get_listens(user, count=count), ctx ) if embed is not None: await ctx.send(embed = embed) else: await ctx.send(content=content, embed = embed) @commands.command( alias = ['np', 'playing'], description = 'Get current listen for user', usage = 'np <LISTENBRAINZ USER=!set_user>' ) async def now_playing(self, ctx: commands.Context, *, 
user: Optional[str] = None) -> None: if user is None: user = self.data.get_user_by_id(ctx.author.id) if user is None: await ctx.send(f'You neither specified a user, nor have a user set using set_user.') return listen = self.lbz.get_playing_now(user) logging.debug(f'Got listens: {listen}') if listen is None: content, embed = await self._format_no_data(f'Nothing currently playing for {utils.nick_or_name(ctx.author)}') else: content, embed = await self._format_listens( f'Now playing for {utils.nick_or_name(ctx.author)}', f'{utils.nick_or_name(ctx.author)} has {self.lbz.get_user_listen_count(user)} listens', [listen], ctx ) if content is None: await ctx.send(embed=embed) else: await ctx.send(content=content, embed = embed) @commands.command(description = 'Set your listenbrainz username') async def set_user(self, ctx: commands.Context, *, arg: str): await self.data.set_user_for_id(ctx.author.id, arg) await ctx.send(f'Set {utils.nick_or_name(ctx.author)}\'s listenbrainz user to {self.data.get_user_by_id(ctx.author.id)}') @commands.command(description = '[ADMIN] Set reply mode (overrides env settings). \ Valid values: ' + ','.join(modes) ) @commands.has_permissions(administrator = True) async def set_mode(self, ctx:commands.Context, *, mode: str): mode = mode.upper() if mode not in modes: await ctx.send('Invalid mode. Valid values are: ' + ','.join(modes)) return await self.data.set_global_option('mode', mode) set_mode = self.data.get_global_option('mode') await ctx.send(f'Set mode to {set_mode}')
en
0.511567
#stdlib #self #pypi
2.194691
2
t.py
Jie-OY/-
2
6625318
# -*- coding: utf-8 -*-
"""
@author: W@I@S@E
@contact: <EMAIL>
@site: http://hfutoyj.cn/
@file: t.py
@time: 2017/9/8 13:02
"""
# NOTE(review): both imports below are unused in this scratch file.
import copy
from collections import defaultdict

# Collect the first character of every string, then deduplicate via set().
l = ['ab', 'abc', 'b', 'bc']
tmp = []
for i in l:
    tmp.append(i[0])
r = set(tmp)
print(r)  # e.g. {'a', 'b'} (set order is unspecified)

# Unused dict; presumably left over from experimentation.
d = {1: 2, 3: 4}

# Demonstrates for/else semantics: the inner for-loop always completes
# without `break`, so its `else` branch always runs and `continue` restarts
# the outer `while True`.  As a result print('1') is unreachable and this
# loop spins forever — the script never terminates past this point.
while True:
    for i in range(10):
        pass
    else:
        continue
    print('1')
<filename>t.py # -*- coding: utf-8 -*- """ @author: W@I@S@E @contact: <EMAIL> @site: http://hfutoyj.cn/ @file: t.py @time: 2017/9/8 13:02 """ import copy from collections import defaultdict l = ['ab', 'abc', 'b', 'bc'] tmp = [] for i in l: tmp.append(i[0]) r = set(tmp) print(r) d = {1: 2, 3: 4} while True: for i in range(10): pass else: continue print('1')
en
0.315418
# -*- coding: utf-8 -*- @author: W@I@S@E @contact: <EMAIL> @site: http://hfutoyj.cn/ @file: t.py @time: 2017/9/8 13:02
2.963952
3
ucsmsdk/mometa/firmware/FirmwareBootDefinition.py
anoop1984/python_sdk
0
6625319
"""This module contains the general information for FirmwareBootDefinition ManagedObject."""
# NOTE: auto-generated UCS SDK metadata module — the tables below mirror the
# UCS Manager object model and should not be edited by hand.
import sys, os
from ...ucsmo import ManagedObject
from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta
from ...ucsmeta import VersionMeta


class FirmwareBootDefinitionConsts():
    # Allowed values for the read-only `type` property (one constant per
    # firmware endpoint kind).
    TYPE_ADAPTOR = "adaptor"
    TYPE_BLADE_BIOS = "blade-bios"
    TYPE_BLADE_CONTROLLER = "blade-controller"
    TYPE_BOARD_CONTROLLER = "board-controller"
    TYPE_CATALOG = "catalog"
    TYPE_CMC = "cmc"
    TYPE_DEBUG_PLUG_IN = "debug-plug-in"
    TYPE_DIAG = "diag"
    TYPE_FEX = "fex"
    TYPE_FLEXFLASH_CONTROLLER = "flexflash-controller"
    TYPE_GRAPHICS_CARD = "graphics-card"
    TYPE_HOST_HBA = "host-hba"
    TYPE_HOST_HBA_OPTIONROM = "host-hba-optionrom"
    TYPE_HOST_NIC = "host-nic"
    TYPE_HOST_NIC_OPTIONROM = "host-nic-optionrom"
    TYPE_IOCARD = "iocard"
    TYPE_LOCAL_DISK = "local-disk"
    TYPE_MGMT_EXT = "mgmt-ext"
    TYPE_PSU = "psu"
    TYPE_SAS_EXPANDER = "sas-expander"
    TYPE_STORAGE_CONTROLLER = "storage-controller"
    TYPE_STORAGE_NODE_CONTROLLER = "storage-node-controller"
    TYPE_SWITCH = "switch"
    TYPE_SWITCH_KERNEL = "switch-kernel"
    TYPE_SWITCH_SOFTWARE = "switch-software"
    TYPE_SYSTEM = "system"
    TYPE_UNSPECIFIED = "unspecified"


class FirmwareBootDefinition(ManagedObject):
    """This is FirmwareBootDefinition class."""

    consts = FirmwareBootDefinitionConsts()
    naming_props = set([])

    # Class-level metadata: class/XML names, rn prefix ("fw-boot-def"),
    # first supported version, access, parent/child class lists, and the
    # supported operations (read-only: "Get").
    mo_meta = MoMeta("FirmwareBootDefinition", "firmwareBootDefinition", "fw-boot-def", VersionMeta.Version101e, "InputOutput", 0x1f, [], ["admin"], [u'adaptorHostEthIf', u'adaptorHostFcIf', u'biosUnit', u'capabilityCatalogue', u'capabilityMgmtExtension', u'equipmentPsu', u'graphicsCard', u'mgmtController', u'storageController', u'storageLocalDisk', u'storageSasExpander'], [u'firmwareBootUnit', u'firmwareUcscInfo'], ["Get"])

    # Per-property metadata: access level, dirty-mask bit, length limits,
    # validation regex and/or enumerated value set.
    prop_meta = {
        "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []),
        "dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []),
        "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []),
        "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []),
        "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []),
        "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "blade-bios", "blade-controller", "board-controller", "catalog", "cmc", "debug-plug-in", "diag", "fex", "flexflash-controller", "graphics-card", "host-hba", "host-hba-optionrom", "host-nic", "host-nic-optionrom", "iocard", "local-disk", "mgmt-ext", "psu", "sas-expander", "storage-controller", "storage-node-controller", "switch", "switch-kernel", "switch-software", "system", "unspecified"], []),
    }

    # Maps XML attribute names to Python attribute names.
    prop_map = {
        "childAction": "child_action",
        "dn": "dn",
        "rn": "rn",
        "sacl": "sacl",
        "status": "status",
        "type": "type",
    }

    def __init__(self, parent_mo_or_dn, **kwargs):
        """Create the MO under `parent_mo_or_dn`; extra kwargs set properties."""
        self._dirty_mask = 0
        self.child_action = None
        self.sacl = None
        self.status = None
        self.type = None

        ManagedObject.__init__(self, "FirmwareBootDefinition", parent_mo_or_dn, **kwargs)
"""This module contains the general information for FirmwareBootDefinition ManagedObject.""" import sys, os from ...ucsmo import ManagedObject from ...ucscoremeta import UcsVersion, MoPropertyMeta, MoMeta from ...ucsmeta import VersionMeta class FirmwareBootDefinitionConsts(): TYPE_ADAPTOR = "adaptor" TYPE_BLADE_BIOS = "blade-bios" TYPE_BLADE_CONTROLLER = "blade-controller" TYPE_BOARD_CONTROLLER = "board-controller" TYPE_CATALOG = "catalog" TYPE_CMC = "cmc" TYPE_DEBUG_PLUG_IN = "debug-plug-in" TYPE_DIAG = "diag" TYPE_FEX = "fex" TYPE_FLEXFLASH_CONTROLLER = "flexflash-controller" TYPE_GRAPHICS_CARD = "graphics-card" TYPE_HOST_HBA = "host-hba" TYPE_HOST_HBA_OPTIONROM = "host-hba-optionrom" TYPE_HOST_NIC = "host-nic" TYPE_HOST_NIC_OPTIONROM = "host-nic-optionrom" TYPE_IOCARD = "iocard" TYPE_LOCAL_DISK = "local-disk" TYPE_MGMT_EXT = "mgmt-ext" TYPE_PSU = "psu" TYPE_SAS_EXPANDER = "sas-expander" TYPE_STORAGE_CONTROLLER = "storage-controller" TYPE_STORAGE_NODE_CONTROLLER = "storage-node-controller" TYPE_SWITCH = "switch" TYPE_SWITCH_KERNEL = "switch-kernel" TYPE_SWITCH_SOFTWARE = "switch-software" TYPE_SYSTEM = "system" TYPE_UNSPECIFIED = "unspecified" class FirmwareBootDefinition(ManagedObject): """This is FirmwareBootDefinition class.""" consts = FirmwareBootDefinitionConsts() naming_props = set([]) mo_meta = MoMeta("FirmwareBootDefinition", "firmwareBootDefinition", "fw-boot-def", VersionMeta.Version101e, "InputOutput", 0x1f, [], ["admin"], [u'adaptorHostEthIf', u'adaptorHostFcIf', u'biosUnit', u'capabilityCatalogue', u'capabilityMgmtExtension', u'equipmentPsu', u'graphicsCard', u'mgmtController', u'storageController', u'storageLocalDisk', u'storageSasExpander'], [u'firmwareBootUnit', u'firmwareUcscInfo'], ["Get"]) prop_meta = { "child_action": MoPropertyMeta("child_action", "childAction", "string", VersionMeta.Version101e, MoPropertyMeta.INTERNAL, 0x2, None, None, r"""((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1}""", [], []), 
"dn": MoPropertyMeta("dn", "dn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x4, 0, 256, None, [], []), "rn": MoPropertyMeta("rn", "rn", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, 0x8, 0, 256, None, [], []), "sacl": MoPropertyMeta("sacl", "sacl", "string", VersionMeta.Version302a, MoPropertyMeta.READ_ONLY, None, None, None, r"""((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1}""", [], []), "status": MoPropertyMeta("status", "status", "string", VersionMeta.Version101e, MoPropertyMeta.READ_WRITE, 0x10, None, None, r"""((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}""", [], []), "type": MoPropertyMeta("type", "type", "string", VersionMeta.Version101e, MoPropertyMeta.READ_ONLY, None, None, None, None, ["adaptor", "blade-bios", "blade-controller", "board-controller", "catalog", "cmc", "debug-plug-in", "diag", "fex", "flexflash-controller", "graphics-card", "host-hba", "host-hba-optionrom", "host-nic", "host-nic-optionrom", "iocard", "local-disk", "mgmt-ext", "psu", "sas-expander", "storage-controller", "storage-node-controller", "switch", "switch-kernel", "switch-software", "system", "unspecified"], []), } prop_map = { "childAction": "child_action", "dn": "dn", "rn": "rn", "sacl": "sacl", "status": "status", "type": "type", } def __init__(self, parent_mo_or_dn, **kwargs): self._dirty_mask = 0 self.child_action = None self.sacl = None self.status = None self.type = None ManagedObject.__init__(self, "FirmwareBootDefinition", parent_mo_or_dn, **kwargs)
en
0.468669
This module contains the general information for FirmwareBootDefinition ManagedObject. This is FirmwareBootDefinition class. ((deleteAll|ignore|deleteNonPresent),){0,2}(deleteAll|ignore|deleteNonPresent){0,1} ((none|del|mod|addchild|cascade),){0,4}(none|del|mod|addchild|cascade){0,1} ((removed|created|modified|deleted),){0,3}(removed|created|modified|deleted){0,1}
2.04085
2
py2/ex31.py
iamovrhere/lpthw
0
6625320
# Python 2 text-adventure exercise (LPTHW ex31): nested if/elif/else
# branching driven by raw_input.
prompt = "> "
print "You enter a dark room with two doors. Do you go through door #1 or door #2"

door = raw_input(prompt)

# Door 1: bear encounter with a second decision.
if door == "1":
    print "There's a giant bear here eating cheese cake. What do you do?"
    print "1. Take the cake"
    print "2. Scream at the bear"

    bear = raw_input(prompt)

    if bear == "1":
        print "You grab it. That takes the cake! Haha, I slay me."
        print "Oh wait, no. That's the bear"
        print "The bear eats your face off. Good job!"
    elif bear == "2":
        print "The bear eats your legs off. Weird motivation"
    else:
        # Any other answer is echoed back via %-formatting.
        print "Well, doing %s is probably better? Bear gets bored and runs off" % bear
# Door 2: abyss encounter; answers "1" and "2" share one outcome.
elif door == "2":
    print "You stare into the endless abyss, the abyss stares back"
    print "1. Blueberries"
    print "2. Yellow jacket clothespins"
    print "3. Understanding revolvers yelling melodies"

    insanity = raw_input(prompt)

    if insanity == "1" or insanity == "2":
        print "Your body survives powered by the mind of jello. Good job!"
    else:
        print "The insanity rots your eyes into a pool of jello. Good job!"
# Any input other than "1" or "2" at the first prompt ends the game.
else:
    print "You stumble around and fall on a knife and die. Good job!"
prompt = "> " print "You enter a dark room with two doors. Do you go through door #1 or door #2" door = raw_input(prompt) if door == "1": print "There's a giant bear here eating cheese cake. What do you do?" print "1. Take the cake" print "2. Scream at the bear" bear = raw_input(prompt) if bear == "1": print "You grab it. That takes the cake! Haha, I slay me." print "Oh wait, no. That's the bear" print "The bear eats your face off. Good job!" elif bear == "2": print "The bear eats your legs off. Weird motivation" else: print "Well, doing %s is probably better? Bear gets bored and runs off" % bear elif door == "2": print "You stare into the endless abyss, the abyss stares back" print "1. Blueberries" print "2. Yellow jacket clothespins" print "3. Understanding revolvers yelling melodies" insanity = raw_input(prompt) if insanity == "1" or insanity == "2": print "Your body survives powered by the mind of jello. Good job!" else: print "The insanity rots your eyes into a pool of jello. Good job!" else: print "You stumble around and fall on a knife and die. Good job!"
nl
0.506358
#1 or door #2"
3.80525
4
bsd2/vagrant-ansible/ansible/lib/ansible/runner/lookup_plugins/env.py
dlab-berkeley/collaboratool-archive
1
6625321
# (c) 2012, <NAME> <jpmens(at)gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible import utils, errors import os class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if isinstance(terms, basestring): terms = [ terms ] ret = [] for term in terms: var = term.split()[0] ret.append(os.getenv(var, '')) return ret
# (c) 2012, <NAME> <jpmens(at)gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>. from ansible import utils, errors import os class LookupModule(object): def __init__(self, basedir=None, **kwargs): self.basedir = basedir def run(self, terms, inject=None, **kwargs): terms = utils.listify_lookup_plugin_terms(terms, self.basedir, inject) if isinstance(terms, basestring): terms = [ terms ] ret = [] for term in terms: var = term.split()[0] ret.append(os.getenv(var, '')) return ret
en
0.878479
# (c) 2012, <NAME> <jpmens(at)gmail.com> # # This file is part of Ansible # # Ansible is free software: you can redistribute it and/or modify # it under the terms of the GNU General Public License as published by # the Free Software Foundation, either version 3 of the License, or # (at your option) any later version. # # Ansible is distributed in the hope that it will be useful, # but WITHOUT ANY WARRANTY; without even the implied warranty of # MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the # GNU General Public License for more details. # # You should have received a copy of the GNU General Public License # along with Ansible. If not, see <http://www.gnu.org/licenses/>.
1.980739
2
immvis/grpc/mappers/kmeans_analysis_mapping.py
imdavi/immvis-server
0
6625322
from ..proto.immvis_pb2 import KMeansAnalysisResponse, NormalisedDataset
from pandas import DataFrame


def map_to_k_means_analysis_response(k_means_analysis_result) -> KMeansAnalysisResponse:
    """Map a k-means analysis result to its gRPC response message.

    Args:
        k_means_analysis_result: result of the k-means analysis;
            # assumes this is a (labels_mapping, centroids) pair of
            # DataFrames — TODO confirm against the analysis producer.

    Returns:
        KMeansAnalysisResponse with both DataFrames converted to
        NormalisedDataset messages.
    """
    # BUG FIX: the original body referenced the undefined names
    # `labels_mapping` and `centroids` (NameError on every call) and never
    # used the parameter; unpack them from the argument instead.
    labels_mapping, centroids = k_means_analysis_result
    return KMeansAnalysisResponse(
        labelsMapping=_map_to_data_frame(labels_mapping),
        centroids=_map_to_data_frame(centroids)
    )


def _map_to_data_frame(data_frame: DataFrame) -> NormalisedDataset:
    """Convert a DataFrame to a NormalisedDataset message.

    Not implemented yet — intentionally returns None (proto message fields
    passed None are treated as unset).
    """
    pass
<filename>immvis/grpc/mappers/kmeans_analysis_mapping.py from ..proto.immvis_pb2 import KMeansAnalysisResponse, NormalisedDataset from pandas import DataFrame def map_to_k_means_analysis_response(k_means_analysis_result) -> KMeansAnalysisResponse: return KMeansAnalysisResponse( labelsMapping=_map_to_data_frame(labels_mapping), centroids=_map_to_data_frame(centroids) ) def _map_to_data_frame(data_frame: DataFrame) -> NormalisedDataset: pass
none
1
2.248303
2
paddlex/paddleseg/models/sfnet.py
cheneyveron/PaddleX
3,655
6625323
<reponame>cheneyveron/PaddleX<filename>paddlex/paddleseg/models/sfnet.py # Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle import paddle.nn as nn import paddle.nn.functional as F from paddlex.paddleseg.models import layers from paddlex.paddleseg.cvlibs import manager from paddlex.paddleseg.utils import utils @manager.MODELS.add_component class SFNet(nn.Layer): """ The SFNet implementation based on PaddlePaddle. The original article refers to <NAME>, et al. "Semantic Flow for Fast and Accurate Scene Parsing" (https://arxiv.org/pdf/2002.10120.pdf). Args: num_classes (int): The unique number of target classes. backbone (Paddle.nn.Layer): Backbone network, currently support Resnet50/101. backbone_indices (tuple): Four values in the tuple indicate the indices of output of backbone. enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss. Default: False. align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. pretrained (str, optional): The path or url of pretrained model. Default: None. 
""" def __init__(self, num_classes, backbone, backbone_indices, enable_auxiliary_loss=False, align_corners=False, pretrained=None): super(SFNet, self).__init__() self.backbone = backbone self.backbone_indices = backbone_indices self.in_channels = [ self.backbone.feat_channels[i] for i in backbone_indices ] self.align_corners = align_corners self.pretrained = pretrained self.enable_auxiliary_loss = enable_auxiliary_loss if self.backbone.layers == 18: fpn_dim = 128 inplane_head = 512 fpn_inplanes = [64, 128, 256, 512] else: fpn_dim = 256 inplane_head = 2048 fpn_inplanes = [256, 512, 1024, 2048] self.head = SFNetHead( inplane=inplane_head, num_class=num_classes, fpn_inplanes=fpn_inplanes, fpn_dim=fpn_dim, enable_auxiliary_loss=self.enable_auxiliary_loss) self.init_weight() def forward(self, x): feats = self.backbone(x) feats = [feats[i] for i in self.backbone_indices] logit_list = self.head(feats) logit_list = [ F.interpolate( logit, x.shape[2:], mode='bilinear', align_corners=self.align_corners) for logit in logit_list ] return logit_list def init_weight(self): if self.pretrained is not None: utils.load_entire_model(self, self.pretrained) class SFNetHead(nn.Layer): """ The SFNetHead implementation. Args: inplane (int): Input channels of PPM module. num_class (int): The unique number of target classes. fpn_inplanes (list): The feature channels from backbone. fpn_dim (int, optional): The input channels of FAM module. Default: 256. enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss. Default: False. 
""" def __init__(self, inplane, num_class, fpn_inplanes, fpn_dim=256, enable_auxiliary_loss=False): super(SFNetHead, self).__init__() self.ppm = layers.PPModule( in_channels=inplane, out_channels=fpn_dim, bin_sizes=(1, 2, 3, 6), dim_reduction=True, align_corners=True) self.enable_auxiliary_loss = enable_auxiliary_loss self.fpn_in = [] for fpn_inplane in fpn_inplanes[:-1]: self.fpn_in.append( nn.Sequential( nn.Conv2D(fpn_inplane, fpn_dim, 1), layers.SyncBatchNorm(fpn_dim), nn.ReLU())) self.fpn_in = nn.LayerList(self.fpn_in) self.fpn_out = [] self.fpn_out_align = [] self.dsn = [] for i in range(len(fpn_inplanes) - 1): self.fpn_out.append( nn.Sequential( layers.ConvBNReLU( fpn_dim, fpn_dim, 3, bias_attr=False))) self.fpn_out_align.append( AlignedModule( inplane=fpn_dim, outplane=fpn_dim // 2)) if self.enable_auxiliary_loss: self.dsn.append( nn.Sequential( layers.AuxLayer(fpn_dim, fpn_dim, num_class))) self.fpn_out = nn.LayerList(self.fpn_out) self.fpn_out_align = nn.LayerList(self.fpn_out_align) if self.enable_auxiliary_loss: self.dsn = nn.LayerList(self.dsn) self.conv_last = nn.Sequential( layers.ConvBNReLU( len(fpn_inplanes) * fpn_dim, fpn_dim, 3, bias_attr=False), nn.Conv2D( fpn_dim, num_class, kernel_size=1)) def forward(self, conv_out): psp_out = self.ppm(conv_out[-1]) f = psp_out fpn_feature_list = [psp_out] out = [] for i in reversed(range(len(conv_out) - 1)): conv_x = conv_out[i] conv_x = self.fpn_in[i](conv_x) f = self.fpn_out_align[i]([conv_x, f]) f = conv_x + f fpn_feature_list.append(self.fpn_out[i](f)) if self.enable_auxiliary_loss: out.append(self.dsn[i](f)) fpn_feature_list.reverse() output_size = fpn_feature_list[0].shape[2:] fusion_list = [fpn_feature_list[0]] for i in range(1, len(fpn_feature_list)): fusion_list.append( F.interpolate( fpn_feature_list[i], output_size, mode='bilinear', align_corners=True)) fusion_out = paddle.concat(fusion_list, 1) x = self.conv_last(fusion_out) if self.enable_auxiliary_loss: out.append(x) return out else: return [x] 
class AlignedModule(nn.Layer):
    """
    The FAM module implementation.

    Takes a (low_feature, h_feature) pair, predicts a 2-channel "semantic
    flow" field at the low-level feature's resolution, and warps the
    high-level feature with it.

    Args:
        inplane (int): Input channles of FAM module.
        outplane (int): Output channels of FAN module.
        kernel_size (int, optional): Kernel size of semantic flow convolution layer. Default: 3.
    """

    def __init__(self, inplane, outplane, kernel_size=3):
        super(AlignedModule, self).__init__()
        # 1x1 projections bring both feature maps to a common channel width
        # before the flow is predicted.
        self.down_h = nn.Conv2D(inplane, outplane, 1, bias_attr=False)
        self.down_l = nn.Conv2D(inplane, outplane, 1, bias_attr=False)
        # Predicts the 2-channel (x, y) flow offsets from the concatenation
        # of the two projected feature maps.
        self.flow_make = nn.Conv2D(
            outplane * 2, 2, kernel_size=kernel_size, padding=1,
            bias_attr=False)

    def flow_warp(self, inputs, flow, size):
        """Sample `inputs` at positions offset by `flow`, at resolution `size`.

        Builds a regular grid in grid_sample's normalized [-1, 1] coordinate
        space and shifts it by the flow, which is scaled from pixel units
        into that space by dividing by (out_w, out_h).
        """
        out_h, out_w = size
        n, c, h, w = inputs.shape  # c/h/w are unpacked but unused below

        # (1, 1, 1, 2) tensor used to normalize pixel-space flow offsets.
        norm = paddle.to_tensor([[[[out_w, out_h]]]]).astype('float32')
        # h: row coordinates repeated per column; w: column coordinates
        # repeated per row — together a (out_h, out_w) mesh in [-1, 1].
        h = paddle.linspace(-1.0, 1.0, out_h).reshape([-1, 1]).tile([1, out_w])
        w = paddle.linspace(-1.0, 1.0, out_w).tile([out_h, 1])
        # Stack into (out_h, out_w, 2) with (x, y) ordering, then broadcast
        # to the batch: (n, out_h, out_w, 2).
        grid = paddle.concat([paddle.unsqueeze(w, 2),
                              paddle.unsqueeze(h, 2)], 2)
        grid = grid.tile([n, 1, 1, 1]).astype('float32')
        # flow is NCHW; transpose to NHWC so it lines up with the grid.
        grid = grid + flow.transpose([0, 2, 3, 1]) / norm

        output = F.grid_sample(inputs, grid)
        return output

    def forward(self, x):
        """x is a (low_feature, h_feature) pair; returns the warped h_feature."""
        low_feature, h_feature = x
        h_feature_orign = h_feature  # keep the unprojected feature for warping
        h, w = low_feature.shape[2:]
        size = (h, w)
        low_feature = self.down_l(low_feature)
        h_feature = self.down_h(h_feature)
        # Upsample the (projected) high-level feature to the low-level
        # feature's spatial size before predicting the flow.
        h_feature = F.interpolate(
            h_feature, size=size, mode='bilinear', align_corners=True)
        flow = self.flow_make(paddle.concat([h_feature, low_feature], 1))
        # Warp the ORIGINAL (full-channel) high-level feature with the flow.
        h_feature = self.flow_warp(h_feature_orign, flow, size=size)
        return h_feature
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import paddle import paddle.nn as nn import paddle.nn.functional as F from paddlex.paddleseg.models import layers from paddlex.paddleseg.cvlibs import manager from paddlex.paddleseg.utils import utils @manager.MODELS.add_component class SFNet(nn.Layer): """ The SFNet implementation based on PaddlePaddle. The original article refers to <NAME>, et al. "Semantic Flow for Fast and Accurate Scene Parsing" (https://arxiv.org/pdf/2002.10120.pdf). Args: num_classes (int): The unique number of target classes. backbone (Paddle.nn.Layer): Backbone network, currently support Resnet50/101. backbone_indices (tuple): Four values in the tuple indicate the indices of output of backbone. enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss. Default: False. align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. pretrained (str, optional): The path or url of pretrained model. Default: None. 
""" def __init__(self, num_classes, backbone, backbone_indices, enable_auxiliary_loss=False, align_corners=False, pretrained=None): super(SFNet, self).__init__() self.backbone = backbone self.backbone_indices = backbone_indices self.in_channels = [ self.backbone.feat_channels[i] for i in backbone_indices ] self.align_corners = align_corners self.pretrained = pretrained self.enable_auxiliary_loss = enable_auxiliary_loss if self.backbone.layers == 18: fpn_dim = 128 inplane_head = 512 fpn_inplanes = [64, 128, 256, 512] else: fpn_dim = 256 inplane_head = 2048 fpn_inplanes = [256, 512, 1024, 2048] self.head = SFNetHead( inplane=inplane_head, num_class=num_classes, fpn_inplanes=fpn_inplanes, fpn_dim=fpn_dim, enable_auxiliary_loss=self.enable_auxiliary_loss) self.init_weight() def forward(self, x): feats = self.backbone(x) feats = [feats[i] for i in self.backbone_indices] logit_list = self.head(feats) logit_list = [ F.interpolate( logit, x.shape[2:], mode='bilinear', align_corners=self.align_corners) for logit in logit_list ] return logit_list def init_weight(self): if self.pretrained is not None: utils.load_entire_model(self, self.pretrained) class SFNetHead(nn.Layer): """ The SFNetHead implementation. Args: inplane (int): Input channels of PPM module. num_class (int): The unique number of target classes. fpn_inplanes (list): The feature channels from backbone. fpn_dim (int, optional): The input channels of FAM module. Default: 256. enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss. Default: False. 
""" def __init__(self, inplane, num_class, fpn_inplanes, fpn_dim=256, enable_auxiliary_loss=False): super(SFNetHead, self).__init__() self.ppm = layers.PPModule( in_channels=inplane, out_channels=fpn_dim, bin_sizes=(1, 2, 3, 6), dim_reduction=True, align_corners=True) self.enable_auxiliary_loss = enable_auxiliary_loss self.fpn_in = [] for fpn_inplane in fpn_inplanes[:-1]: self.fpn_in.append( nn.Sequential( nn.Conv2D(fpn_inplane, fpn_dim, 1), layers.SyncBatchNorm(fpn_dim), nn.ReLU())) self.fpn_in = nn.LayerList(self.fpn_in) self.fpn_out = [] self.fpn_out_align = [] self.dsn = [] for i in range(len(fpn_inplanes) - 1): self.fpn_out.append( nn.Sequential( layers.ConvBNReLU( fpn_dim, fpn_dim, 3, bias_attr=False))) self.fpn_out_align.append( AlignedModule( inplane=fpn_dim, outplane=fpn_dim // 2)) if self.enable_auxiliary_loss: self.dsn.append( nn.Sequential( layers.AuxLayer(fpn_dim, fpn_dim, num_class))) self.fpn_out = nn.LayerList(self.fpn_out) self.fpn_out_align = nn.LayerList(self.fpn_out_align) if self.enable_auxiliary_loss: self.dsn = nn.LayerList(self.dsn) self.conv_last = nn.Sequential( layers.ConvBNReLU( len(fpn_inplanes) * fpn_dim, fpn_dim, 3, bias_attr=False), nn.Conv2D( fpn_dim, num_class, kernel_size=1)) def forward(self, conv_out): psp_out = self.ppm(conv_out[-1]) f = psp_out fpn_feature_list = [psp_out] out = [] for i in reversed(range(len(conv_out) - 1)): conv_x = conv_out[i] conv_x = self.fpn_in[i](conv_x) f = self.fpn_out_align[i]([conv_x, f]) f = conv_x + f fpn_feature_list.append(self.fpn_out[i](f)) if self.enable_auxiliary_loss: out.append(self.dsn[i](f)) fpn_feature_list.reverse() output_size = fpn_feature_list[0].shape[2:] fusion_list = [fpn_feature_list[0]] for i in range(1, len(fpn_feature_list)): fusion_list.append( F.interpolate( fpn_feature_list[i], output_size, mode='bilinear', align_corners=True)) fusion_out = paddle.concat(fusion_list, 1) x = self.conv_last(fusion_out) if self.enable_auxiliary_loss: out.append(x) return out else: return [x] 
class AlignedModule(nn.Layer): """ The FAM module implementation. Args: inplane (int): Input channles of FAM module. outplane (int): Output channels of FAN module. kernel_size (int, optional): Kernel size of semantic flow convolution layer. Default: 3. """ def __init__(self, inplane, outplane, kernel_size=3): super(AlignedModule, self).__init__() self.down_h = nn.Conv2D(inplane, outplane, 1, bias_attr=False) self.down_l = nn.Conv2D(inplane, outplane, 1, bias_attr=False) self.flow_make = nn.Conv2D( outplane * 2, 2, kernel_size=kernel_size, padding=1, bias_attr=False) def flow_warp(self, inputs, flow, size): out_h, out_w = size n, c, h, w = inputs.shape norm = paddle.to_tensor([[[[out_w, out_h]]]]).astype('float32') h = paddle.linspace(-1.0, 1.0, out_h).reshape([-1, 1]).tile([1, out_w]) w = paddle.linspace(-1.0, 1.0, out_w).tile([out_h, 1]) grid = paddle.concat([paddle.unsqueeze(w, 2), paddle.unsqueeze(h, 2)], 2) grid = grid.tile([n, 1, 1, 1]).astype('float32') grid = grid + flow.transpose([0, 2, 3, 1]) / norm output = F.grid_sample(inputs, grid) return output def forward(self, x): low_feature, h_feature = x h_feature_orign = h_feature h, w = low_feature.shape[2:] size = (h, w) low_feature = self.down_l(low_feature) h_feature = self.down_h(h_feature) h_feature = F.interpolate( h_feature, size=size, mode='bilinear', align_corners=True) flow = self.flow_make(paddle.concat([h_feature, low_feature], 1)) h_feature = self.flow_warp(h_feature_orign, flow, size=size) return h_feature
en
0.709685
# Copyright (c) 2021 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. The SFNet implementation based on PaddlePaddle. The original article refers to <NAME>, et al. "Semantic Flow for Fast and Accurate Scene Parsing" (https://arxiv.org/pdf/2002.10120.pdf). Args: num_classes (int): The unique number of target classes. backbone (Paddle.nn.Layer): Backbone network, currently support Resnet50/101. backbone_indices (tuple): Four values in the tuple indicate the indices of output of backbone. enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss. Default: False. align_corners (bool, optional): An argument of F.interpolate. It should be set to False when the feature size is even, e.g. 1024x512, otherwise it is True, e.g. 769x769. Default: False. pretrained (str, optional): The path or url of pretrained model. Default: None. The SFNetHead implementation. Args: inplane (int): Input channels of PPM module. num_class (int): The unique number of target classes. fpn_inplanes (list): The feature channels from backbone. fpn_dim (int, optional): The input channels of FAM module. Default: 256. enable_auxiliary_loss (bool, optional): A bool value indicates whether adding auxiliary loss. Default: False. The FAM module implementation. Args: inplane (int): Input channles of FAM module. outplane (int): Output channels of FAN module. kernel_size (int, optional): Kernel size of semantic flow convolution layer. Default: 3.
2.197904
2
zerver/forms.py
rhencke/zulip
0
6625324
<gh_stars>0 from django import forms from django.conf import settings from django.contrib.auth import authenticate from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \ PasswordResetForm from django.core.exceptions import ValidationError from django.urls import reverse from django.core.validators import validate_email from django.db.models.query import QuerySet from django.utils.translation import ugettext as _ from django.contrib.auth.tokens import default_token_generator from django.contrib.sites.shortcuts import get_current_site from django.utils.http import urlsafe_base64_encode from django.utils.encoding import force_bytes from django.contrib.auth.tokens import PasswordResetTokenGenerator from django.http import HttpRequest from jinja2 import Markup as mark_safe from zerver.lib.actions import do_change_password, email_not_system_bot, \ validate_email_for_realm from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain from zerver.lib.request import JsonableError from zerver.lib.send_email import send_email, FromAddress from zerver.lib.subdomains import get_subdomain, user_matches_subdomain, is_root_domain_available from zerver.lib.users import check_full_name from zerver.models import Realm, get_active_user, UserProfile, get_realm, email_to_domain, \ email_allowed_for_realm, DisposableEmailError, DomainNotAllowedForRealmError, \ EmailContainsPlusError from zproject.backends import email_auth_enabled, email_belongs_to_ldap import logging import re import DNS from typing import Any, Callable, List, Optional, Dict from two_factor.forms import AuthenticationTokenForm as TwoFactorAuthenticationTokenForm from two_factor.utils import totp_digits MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \ u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \ u'If you want to sign up an alias for Zulip, ' + \ u'<a href="mailto:<EMAIL>">contact us</a>.' 
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \ "organization associated with this subdomain. " + \ "Please contact %s with any questions!" % (FromAddress.SUPPORT,) def email_is_not_mit_mailing_list(email: str) -> None: """Prevent MIT mailing lists from signing up for Zulip""" if "@mit.edu" in email: username = email.rsplit("@", 1)[0] # Check whether the user exists and can get mail. try: DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT) except DNS.Base.ServerError as e: if e.rcode == DNS.Status.NXDOMAIN: raise ValidationError(mark_safe(MIT_VALIDATION_ERROR)) else: raise AssertionError("Unexpected DNS error") def check_subdomain_available(subdomain: str, from_management_command: bool=False) -> None: error_strings = { 'too short': _("Subdomain needs to have length 3 or greater."), 'extremal dash': _("Subdomain cannot start or end with a '-'."), 'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."), 'unavailable': _("Subdomain unavailable. 
Please choose a different one.")} if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN: if is_root_domain_available(): return raise ValidationError(error_strings['unavailable']) if subdomain[0] == '-' or subdomain[-1] == '-': raise ValidationError(error_strings['extremal dash']) if not re.match('^[a-z0-9-]*$', subdomain): raise ValidationError(error_strings['bad character']) if from_management_command: return if len(subdomain) < 3: raise ValidationError(error_strings['too short']) if is_reserved_subdomain(subdomain) or \ get_realm(subdomain) is not None: raise ValidationError(error_strings['unavailable']) class RegistrationForm(forms.Form): MAX_PASSWORD_LENGTH = 100 full_name = forms.CharField(max_length=UserProfile.MAX_NAME_LENGTH) # The required-ness of the password field gets overridden if it isn't # actually required for a realm password = forms.CharField(widget=forms.PasswordInput, max_length=MAX_PASSWORD_LENGTH) realm_subdomain = forms.CharField(max_length=Realm.MAX_REALM_SUBDOMAIN_LENGTH, required=False) def __init__(self, *args: Any, **kwargs: Any) -> None: # Since the superclass doesn't except random extra kwargs, we # remove it from the kwargs dict before initializing. 
self.realm_creation = kwargs['realm_creation'] del kwargs['realm_creation'] super().__init__(*args, **kwargs) if settings.TERMS_OF_SERVICE: self.fields['terms'] = forms.BooleanField(required=True) self.fields['realm_name'] = forms.CharField( max_length=Realm.MAX_REALM_NAME_LENGTH, required=self.realm_creation) def clean_full_name(self) -> str: try: return check_full_name(self.cleaned_data['full_name']) except JsonableError as e: raise ValidationError(e.msg) def clean_realm_subdomain(self) -> str: if not self.realm_creation: # This field is only used if realm_creation return "" subdomain = self.cleaned_data['realm_subdomain'] if 'realm_in_root_domain' in self.data: subdomain = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN check_subdomain_available(subdomain) return subdomain class ToSForm(forms.Form): terms = forms.BooleanField(required=True) class HomepageForm(forms.Form): email = forms.EmailField() def __init__(self, *args: Any, **kwargs: Any) -> None: self.realm = kwargs.pop('realm', None) self.from_multiuse_invite = kwargs.pop('from_multiuse_invite', False) super().__init__(*args, **kwargs) def clean_email(self) -> str: """Returns the email if and only if the user's email address is allowed to join the realm they are trying to join.""" email = self.cleaned_data['email'] # Otherwise, the user is trying to join a specific realm. 
realm = self.realm from_multiuse_invite = self.from_multiuse_invite if realm is None: raise ValidationError(_("The organization you are trying to " "join using {email} does not " "exist.").format(email=email)) if not from_multiuse_invite and realm.invite_required: raise ValidationError(_("Please request an invite for {email} " "from the organization " "administrator.").format(email=email)) try: email_allowed_for_realm(email, realm) except DomainNotAllowedForRealmError: raise ValidationError( _("Your email address, {email}, is not in one of the domains " "that are allowed to register for accounts in this organization.").format( string_id=realm.string_id, email=email)) except DisposableEmailError: raise ValidationError(_("Please use your real email address.")) except EmailContainsPlusError: raise ValidationError(_("Email addresses containing + are not allowed in this organization.")) validate_email_for_realm(realm, email) if realm.is_zephyr_mirror_realm: email_is_not_mit_mailing_list(email) return email def email_is_not_disposable(email: str) -> None: if is_disposable_domain(email_to_domain(email)): raise ValidationError(_("Please use your real email address.")) class RealmCreationForm(forms.Form): # This form determines whether users can create a new realm. 
email = forms.EmailField(validators=[email_not_system_bot, email_is_not_disposable]) class LoggingSetPasswordForm(SetPasswordForm): def save(self, commit: bool=True) -> UserProfile: do_change_password(self.user, self.cleaned_data['<PASSWORD>_password1'], commit=commit) return self.user class ZulipPasswordResetForm(PasswordResetForm): def save(self, domain_override: Optional[bool]=None, subject_template_name: str='registration/password_reset_subject.txt', email_template_name: str='registration/password_reset_email.html', use_https: bool=False, token_generator: PasswordResetTokenGenerator=default_token_generator, from_email: Optional[str]=None, request: HttpRequest=None, html_email_template_name: Optional[str]=None, extra_email_context: Optional[Dict[str, Any]]=None ) -> None: """ If the email address has an account in the target realm, generates a one-use only link for resetting password and sends to the user. We send a different email if an associated account does not exist in the database, or an account does exist, but not in the realm. Note: We ignore protocol and the various email template arguments (those are an artifact of using Django's password reset framework). """ email = self.cleaned_data["email"] realm = get_realm(get_subdomain(request)) if not email_auth_enabled(realm): logging.info("Password reset attempted for %s even though password auth is disabled." % (email,)) return if email_belongs_to_ldap(realm, email): # TODO: Ideally, we'd provide a user-facing error here # about the fact that they aren't allowed to have a # password in the Zulip server and should change it in LDAP. 
logging.info("Password reset not allowed for user in LDAP domain") return if realm.deactivated: logging.info("Realm is deactivated") return user = None # type: Optional[UserProfile] try: user = get_active_user(email, realm) except UserProfile.DoesNotExist: pass context = { 'email': email, 'realm_uri': realm.uri, } if user is not None: token = token_generator.make_token(user) uid = urlsafe_base64_encode(force_bytes(user.id)).decode('ascii') endpoint = reverse('django.contrib.auth.views.password_reset_confirm', kwargs=dict(uidb64=uid, token=token)) context['no_account_in_realm'] = False context['reset_url'] = "{}{}".format(user.realm.uri, endpoint) send_email('zerver/emails/password_reset', to_user_id=user.id, from_name="Zulip Account Security", from_address=FromAddress.tokenized_no_reply_address(), context=context) else: context['no_account_in_realm'] = True accounts = UserProfile.objects.filter(email__iexact=email) if accounts: context['accounts'] = accounts context['multiple_accounts'] = accounts.count() != 1 send_email('zerver/emails/password_reset', to_email=email, from_name="Zulip Account Security", from_address=FromAddress.tokenized_no_reply_address(), context=context) class CreateUserForm(forms.Form): full_name = forms.CharField(max_length=100) email = forms.EmailField() class OurAuthenticationForm(AuthenticationForm): def clean(self) -> Dict[str, Any]: username = self.cleaned_data.get('username') password = self.cleaned_data.get('password') if username is not None and password: subdomain = get_subdomain(self.request) realm = get_realm(subdomain) return_data = {} # type: Dict[str, Any] self.user_cache = authenticate(self.request, username=username, password=password, realm=realm, return_data=return_data) if return_data.get("inactive_realm"): raise AssertionError("Programming error: inactive realm in authentication form") if return_data.get("inactive_user") and not return_data.get("is_mirror_dummy"): # We exclude mirror dummy accounts here. 
They should be treated as the # user never having had an account, so we let them fall through to the # normal invalid_login case below. error_msg = ( u"Your account is no longer active. " u"Please contact your organization administrator to reactivate it.") raise ValidationError(mark_safe(error_msg)) if return_data.get("invalid_subdomain"): logging.warning("User %s attempted to password login to wrong subdomain %s" % (username, subdomain)) raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR)) if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', params={'username': self.username_field.verbose_name}, ) self.confirm_login_allowed(self.user_cache) return self.cleaned_data def add_prefix(self, field_name: str) -> str: """Disable prefix, since Zulip doesn't use this Django forms feature (and django-two-factor does use it), and we'd like both to be happy with this form. """ return field_name class AuthenticationTokenForm(TwoFactorAuthenticationTokenForm): """ We add this form to update the widget of otp_token. The default widget is an input element whose type is a number, which doesn't stylistically match our theme. """ otp_token = forms.IntegerField(label=_("Token"), min_value=1, max_value=int('9' * totp_digits()), widget=forms.TextInput) class MultiEmailField(forms.Field): def to_python(self, emails: str) -> List[str]: """Normalize data to a list of strings.""" if not emails: return [] return [email.strip() for email in emails.split(',')] def validate(self, emails: List[str]) -> None: """Check if value consists only of valid emails.""" super().validate(emails) for email in emails: validate_email(email) class FindMyTeamForm(forms.Form): emails = MultiEmailField( help_text=_("Add up to 10 comma-separated email addresses.")) def clean_emails(self) -> List[str]: emails = self.cleaned_data['emails'] if len(emails) > 10: raise forms.ValidationError(_("Please enter at most 10 emails.")) return emails
from django import forms from django.conf import settings from django.contrib.auth import authenticate from django.contrib.auth.forms import SetPasswordForm, AuthenticationForm, \ PasswordResetForm from django.core.exceptions import ValidationError from django.urls import reverse from django.core.validators import validate_email from django.db.models.query import QuerySet from django.utils.translation import ugettext as _ from django.contrib.auth.tokens import default_token_generator from django.contrib.sites.shortcuts import get_current_site from django.utils.http import urlsafe_base64_encode from django.utils.encoding import force_bytes from django.contrib.auth.tokens import PasswordResetTokenGenerator from django.http import HttpRequest from jinja2 import Markup as mark_safe from zerver.lib.actions import do_change_password, email_not_system_bot, \ validate_email_for_realm from zerver.lib.name_restrictions import is_reserved_subdomain, is_disposable_domain from zerver.lib.request import JsonableError from zerver.lib.send_email import send_email, FromAddress from zerver.lib.subdomains import get_subdomain, user_matches_subdomain, is_root_domain_available from zerver.lib.users import check_full_name from zerver.models import Realm, get_active_user, UserProfile, get_realm, email_to_domain, \ email_allowed_for_realm, DisposableEmailError, DomainNotAllowedForRealmError, \ EmailContainsPlusError from zproject.backends import email_auth_enabled, email_belongs_to_ldap import logging import re import DNS from typing import Any, Callable, List, Optional, Dict from two_factor.forms import AuthenticationTokenForm as TwoFactorAuthenticationTokenForm from two_factor.utils import totp_digits MIT_VALIDATION_ERROR = u'That user does not exist at MIT or is a ' + \ u'<a href="https://ist.mit.edu/email-lists">mailing list</a>. ' + \ u'If you want to sign up an alias for Zulip, ' + \ u'<a href="mailto:<EMAIL>">contact us</a>.' 
WRONG_SUBDOMAIN_ERROR = "Your Zulip account is not a member of the " + \ "organization associated with this subdomain. " + \ "Please contact %s with any questions!" % (FromAddress.SUPPORT,) def email_is_not_mit_mailing_list(email: str) -> None: """Prevent MIT mailing lists from signing up for Zulip""" if "@mit.edu" in email: username = email.rsplit("@", 1)[0] # Check whether the user exists and can get mail. try: DNS.dnslookup("%s.pobox.ns.athena.mit.edu" % username, DNS.Type.TXT) except DNS.Base.ServerError as e: if e.rcode == DNS.Status.NXDOMAIN: raise ValidationError(mark_safe(MIT_VALIDATION_ERROR)) else: raise AssertionError("Unexpected DNS error") def check_subdomain_available(subdomain: str, from_management_command: bool=False) -> None: error_strings = { 'too short': _("Subdomain needs to have length 3 or greater."), 'extremal dash': _("Subdomain cannot start or end with a '-'."), 'bad character': _("Subdomain can only have lowercase letters, numbers, and '-'s."), 'unavailable': _("Subdomain unavailable. 
Please choose a different one.")} if subdomain == Realm.SUBDOMAIN_FOR_ROOT_DOMAIN: if is_root_domain_available(): return raise ValidationError(error_strings['unavailable']) if subdomain[0] == '-' or subdomain[-1] == '-': raise ValidationError(error_strings['extremal dash']) if not re.match('^[a-z0-9-]*$', subdomain): raise ValidationError(error_strings['bad character']) if from_management_command: return if len(subdomain) < 3: raise ValidationError(error_strings['too short']) if is_reserved_subdomain(subdomain) or \ get_realm(subdomain) is not None: raise ValidationError(error_strings['unavailable']) class RegistrationForm(forms.Form): MAX_PASSWORD_LENGTH = 100 full_name = forms.CharField(max_length=UserProfile.MAX_NAME_LENGTH) # The required-ness of the password field gets overridden if it isn't # actually required for a realm password = forms.CharField(widget=forms.PasswordInput, max_length=MAX_PASSWORD_LENGTH) realm_subdomain = forms.CharField(max_length=Realm.MAX_REALM_SUBDOMAIN_LENGTH, required=False) def __init__(self, *args: Any, **kwargs: Any) -> None: # Since the superclass doesn't except random extra kwargs, we # remove it from the kwargs dict before initializing. 
self.realm_creation = kwargs['realm_creation'] del kwargs['realm_creation'] super().__init__(*args, **kwargs) if settings.TERMS_OF_SERVICE: self.fields['terms'] = forms.BooleanField(required=True) self.fields['realm_name'] = forms.CharField( max_length=Realm.MAX_REALM_NAME_LENGTH, required=self.realm_creation) def clean_full_name(self) -> str: try: return check_full_name(self.cleaned_data['full_name']) except JsonableError as e: raise ValidationError(e.msg) def clean_realm_subdomain(self) -> str: if not self.realm_creation: # This field is only used if realm_creation return "" subdomain = self.cleaned_data['realm_subdomain'] if 'realm_in_root_domain' in self.data: subdomain = Realm.SUBDOMAIN_FOR_ROOT_DOMAIN check_subdomain_available(subdomain) return subdomain class ToSForm(forms.Form): terms = forms.BooleanField(required=True) class HomepageForm(forms.Form): email = forms.EmailField() def __init__(self, *args: Any, **kwargs: Any) -> None: self.realm = kwargs.pop('realm', None) self.from_multiuse_invite = kwargs.pop('from_multiuse_invite', False) super().__init__(*args, **kwargs) def clean_email(self) -> str: """Returns the email if and only if the user's email address is allowed to join the realm they are trying to join.""" email = self.cleaned_data['email'] # Otherwise, the user is trying to join a specific realm. 
realm = self.realm from_multiuse_invite = self.from_multiuse_invite if realm is None: raise ValidationError(_("The organization you are trying to " "join using {email} does not " "exist.").format(email=email)) if not from_multiuse_invite and realm.invite_required: raise ValidationError(_("Please request an invite for {email} " "from the organization " "administrator.").format(email=email)) try: email_allowed_for_realm(email, realm) except DomainNotAllowedForRealmError: raise ValidationError( _("Your email address, {email}, is not in one of the domains " "that are allowed to register for accounts in this organization.").format( string_id=realm.string_id, email=email)) except DisposableEmailError: raise ValidationError(_("Please use your real email address.")) except EmailContainsPlusError: raise ValidationError(_("Email addresses containing + are not allowed in this organization.")) validate_email_for_realm(realm, email) if realm.is_zephyr_mirror_realm: email_is_not_mit_mailing_list(email) return email def email_is_not_disposable(email: str) -> None: if is_disposable_domain(email_to_domain(email)): raise ValidationError(_("Please use your real email address.")) class RealmCreationForm(forms.Form): # This form determines whether users can create a new realm. 
email = forms.EmailField(validators=[email_not_system_bot, email_is_not_disposable]) class LoggingSetPasswordForm(SetPasswordForm): def save(self, commit: bool=True) -> UserProfile: do_change_password(self.user, self.cleaned_data['<PASSWORD>_password1'], commit=commit) return self.user class ZulipPasswordResetForm(PasswordResetForm): def save(self, domain_override: Optional[bool]=None, subject_template_name: str='registration/password_reset_subject.txt', email_template_name: str='registration/password_reset_email.html', use_https: bool=False, token_generator: PasswordResetTokenGenerator=default_token_generator, from_email: Optional[str]=None, request: HttpRequest=None, html_email_template_name: Optional[str]=None, extra_email_context: Optional[Dict[str, Any]]=None ) -> None: """ If the email address has an account in the target realm, generates a one-use only link for resetting password and sends to the user. We send a different email if an associated account does not exist in the database, or an account does exist, but not in the realm. Note: We ignore protocol and the various email template arguments (those are an artifact of using Django's password reset framework). """ email = self.cleaned_data["email"] realm = get_realm(get_subdomain(request)) if not email_auth_enabled(realm): logging.info("Password reset attempted for %s even though password auth is disabled." % (email,)) return if email_belongs_to_ldap(realm, email): # TODO: Ideally, we'd provide a user-facing error here # about the fact that they aren't allowed to have a # password in the Zulip server and should change it in LDAP. 
logging.info("Password reset not allowed for user in LDAP domain") return if realm.deactivated: logging.info("Realm is deactivated") return user = None # type: Optional[UserProfile] try: user = get_active_user(email, realm) except UserProfile.DoesNotExist: pass context = { 'email': email, 'realm_uri': realm.uri, } if user is not None: token = token_generator.make_token(user) uid = urlsafe_base64_encode(force_bytes(user.id)).decode('ascii') endpoint = reverse('django.contrib.auth.views.password_reset_confirm', kwargs=dict(uidb64=uid, token=token)) context['no_account_in_realm'] = False context['reset_url'] = "{}{}".format(user.realm.uri, endpoint) send_email('zerver/emails/password_reset', to_user_id=user.id, from_name="Zulip Account Security", from_address=FromAddress.tokenized_no_reply_address(), context=context) else: context['no_account_in_realm'] = True accounts = UserProfile.objects.filter(email__iexact=email) if accounts: context['accounts'] = accounts context['multiple_accounts'] = accounts.count() != 1 send_email('zerver/emails/password_reset', to_email=email, from_name="Zulip Account Security", from_address=FromAddress.tokenized_no_reply_address(), context=context) class CreateUserForm(forms.Form): full_name = forms.CharField(max_length=100) email = forms.EmailField() class OurAuthenticationForm(AuthenticationForm): def clean(self) -> Dict[str, Any]: username = self.cleaned_data.get('username') password = self.cleaned_data.get('password') if username is not None and password: subdomain = get_subdomain(self.request) realm = get_realm(subdomain) return_data = {} # type: Dict[str, Any] self.user_cache = authenticate(self.request, username=username, password=password, realm=realm, return_data=return_data) if return_data.get("inactive_realm"): raise AssertionError("Programming error: inactive realm in authentication form") if return_data.get("inactive_user") and not return_data.get("is_mirror_dummy"): # We exclude mirror dummy accounts here. 
They should be treated as the # user never having had an account, so we let them fall through to the # normal invalid_login case below. error_msg = ( u"Your account is no longer active. " u"Please contact your organization administrator to reactivate it.") raise ValidationError(mark_safe(error_msg)) if return_data.get("invalid_subdomain"): logging.warning("User %s attempted to password login to wrong subdomain %s" % (username, subdomain)) raise ValidationError(mark_safe(WRONG_SUBDOMAIN_ERROR)) if self.user_cache is None: raise forms.ValidationError( self.error_messages['invalid_login'], code='invalid_login', params={'username': self.username_field.verbose_name}, ) self.confirm_login_allowed(self.user_cache) return self.cleaned_data def add_prefix(self, field_name: str) -> str: """Disable prefix, since Zulip doesn't use this Django forms feature (and django-two-factor does use it), and we'd like both to be happy with this form. """ return field_name class AuthenticationTokenForm(TwoFactorAuthenticationTokenForm): """ We add this form to update the widget of otp_token. The default widget is an input element whose type is a number, which doesn't stylistically match our theme. """ otp_token = forms.IntegerField(label=_("Token"), min_value=1, max_value=int('9' * totp_digits()), widget=forms.TextInput) class MultiEmailField(forms.Field): def to_python(self, emails: str) -> List[str]: """Normalize data to a list of strings.""" if not emails: return [] return [email.strip() for email in emails.split(',')] def validate(self, emails: List[str]) -> None: """Check if value consists only of valid emails.""" super().validate(emails) for email in emails: validate_email(email) class FindMyTeamForm(forms.Form): emails = MultiEmailField( help_text=_("Add up to 10 comma-separated email addresses.")) def clean_emails(self) -> List[str]: emails = self.cleaned_data['emails'] if len(emails) > 10: raise forms.ValidationError(_("Please enter at most 10 emails.")) return emails
en
0.889647
Prevent MIT mailing lists from signing up for Zulip # Check whether the user exists and can get mail. # The required-ness of the password field gets overridden if it isn't # actually required for a realm # Since the superclass doesn't except random extra kwargs, we # remove it from the kwargs dict before initializing. # This field is only used if realm_creation Returns the email if and only if the user's email address is allowed to join the realm they are trying to join. # Otherwise, the user is trying to join a specific realm. # This form determines whether users can create a new realm. If the email address has an account in the target realm, generates a one-use only link for resetting password and sends to the user. We send a different email if an associated account does not exist in the database, or an account does exist, but not in the realm. Note: We ignore protocol and the various email template arguments (those are an artifact of using Django's password reset framework). # TODO: Ideally, we'd provide a user-facing error here # about the fact that they aren't allowed to have a # password in the Zulip server and should change it in LDAP. # type: Optional[UserProfile] # type: Dict[str, Any] # We exclude mirror dummy accounts here. They should be treated as the # user never having had an account, so we let them fall through to the # normal invalid_login case below. Disable prefix, since Zulip doesn't use this Django forms feature (and django-two-factor does use it), and we'd like both to be happy with this form. We add this form to update the widget of otp_token. The default widget is an input element whose type is a number, which doesn't stylistically match our theme. Normalize data to a list of strings. Check if value consists only of valid emails.
1.30202
1
tdda/constraints/flags.py
jjlee42/tdda
0
6625325
<gh_stars>0 # -*- coding: utf-8 -*- """ Helpers for command-line option flags for discover and verify """ from __future__ import division from __future__ import print_function from __future__ import absolute_import import argparse import sys DISCOVER_HELP = ''' Optional flags are: * -r or --rex Include regular expression generation * -R or --norex Exclude regular expression generation (the default) ''' VERIFY_HELP = ''' Optional flags are: * -a, --all Report all fields, even if there are no failures * -f, --fields Report only fields with failures * -c, --constraints Report only individual constraints that fail. Not yet implemented. * -1, --oneperline Report each constraint failure on a separate line. Not yet implemented. * -7, --ascii Report in ASCII form, without using special characters. * --epsilon E Use this value of epsilon for fuzziness in comparing numeric values ''' DETECT_HELP = ''' Optional flags are: * --epsilon E Use this value of epsilon for fuzziness in comparing numeric values * --write-all Include passing records when detecting * --per-constraint Write one column per failing constraint when detecting, as well as the n_failures total column for each row. * --output-fields FIELD1,FIELD2 Specify original columns to write out when detecting. If used with no field names, all original columns will be included. * --rownumber Include a row-number in the output file when detecting. The row number is automatically included if no output fields are specified. Rows are numbered from 0. 
''' def discover_parser(usage=''): formatter = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser(prog='tdda discover', epilog=usage + DISCOVER_HELP, formatter_class=formatter) parser.add_argument('-?', '--?', action='help', help='same as -h or --help') parser.add_argument('-r', '--rex', action='store_true', help='include regular expression generation') parser.add_argument('-R', '--norex', action='store_true', help='exclude regular expression generation') return parser def discover_flags(parser, args, params): flags, more = parser.parse_known_args(args) if len(more) > 0: print(parser.epilog, file=sys.stderr) sys.exit(1) params['inc_rex'] = flags.rex return flags def verify_parser(usage=''): formatter = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser(prog='tdda verify', epilog=usage + VERIFY_HELP, formatter_class=formatter) parser.add_argument('-?', '--?', action='help', help='same as -h or --help') parser.add_argument('-a', '--all', action='store_true', help='report all fields, even if there are ' 'no failures') parser.add_argument('-f', '--fields', action='store_true', help='report only fields with failures') parser.add_argument('-c', '--constraints', action='store_true', help='report only individual constraints that fail') parser.add_argument('-1', '--oneperline', action='store_true', help='report each constraint failure on a ' 'separate line') parser.add_argument('-7', '--ascii', action='store_true', help='report without using special characters') parser.add_argument('-type_checking', action='store_true', help='strict or sloppy') parser.add_argument('-epsilon', '--epsilon', nargs=1, help='epsilon fuzziness') return parser def detect_parser(usage=''): formatter = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser(prog='tdda detect', epilog=usage + DETECT_HELP, formatter_class=formatter) parser.add_argument('-?', '--?', action='help', help='same as -h or --help') parser.add_argument('-type_checking', 
action='store_true', help='strict or sloppy') parser.add_argument('-epsilon', '--epsilon', nargs=1, help='epsilon fuzziness') parser.add_argument('--write-all', action='store_true', help='Include passing records') parser.add_argument('--per-constraint', action='store_true', help='Write one column per failing constraint ' 'in addition to n_failures') parser.add_argument('--output-fields', nargs='*', help='Specify original columns to write out. ' 'If used with no field names, then ' 'all original columns will be included') parser.add_argument('--rownumber', action='store_true', help='Include a row-number in the output file when ' 'detecting. Rows are numbered from 0') return parser def verify_flags(parser, args, params): flags, more = parser.parse_known_args(args) if len(more) > 0: print(parser.epilog, file=sys.stderr) sys.exit(1) params.update({ 'report': 'all', 'one_per_line': False, 'ascii': False, }) if flags.all: params['report'] = 'all' elif flags.fields: params['report'] = 'fields' elif flags.constraints: params['report'] = 'constraints' if flags.oneperline: params['one_per_line'] = True if flags.ascii: params['ascii'] = True if flags.type_checking: params['type_checking'] = True if flags.epsilon: params['epsilon'] = float(flags.epsilon[0]) return flags def detect_flags(parser, args, params): flags, more = parser.parse_known_args(args) if len(more) > 0: print(parser.epilog, file=sys.stderr) sys.exit(1) if flags.type_checking: params['type_checking'] = True if flags.epsilon: params['epsilon'] = float(flags.epsilon[0]) if flags.write_all: params['write_all'] = True if flags.per_constraint: params['per_constraint'] = True if flags.rownumber: params['rownumber'] = True if flags.output_fields is not None: params['output_fields'] = flags.output_fields params['in_place'] = False # Only applicable in API case return flags
# -*- coding: utf-8 -*- """ Helpers for command-line option flags for discover and verify """ from __future__ import division from __future__ import print_function from __future__ import absolute_import import argparse import sys DISCOVER_HELP = ''' Optional flags are: * -r or --rex Include regular expression generation * -R or --norex Exclude regular expression generation (the default) ''' VERIFY_HELP = ''' Optional flags are: * -a, --all Report all fields, even if there are no failures * -f, --fields Report only fields with failures * -c, --constraints Report only individual constraints that fail. Not yet implemented. * -1, --oneperline Report each constraint failure on a separate line. Not yet implemented. * -7, --ascii Report in ASCII form, without using special characters. * --epsilon E Use this value of epsilon for fuzziness in comparing numeric values ''' DETECT_HELP = ''' Optional flags are: * --epsilon E Use this value of epsilon for fuzziness in comparing numeric values * --write-all Include passing records when detecting * --per-constraint Write one column per failing constraint when detecting, as well as the n_failures total column for each row. * --output-fields FIELD1,FIELD2 Specify original columns to write out when detecting. If used with no field names, all original columns will be included. * --rownumber Include a row-number in the output file when detecting. The row number is automatically included if no output fields are specified. Rows are numbered from 0. 
''' def discover_parser(usage=''): formatter = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser(prog='tdda discover', epilog=usage + DISCOVER_HELP, formatter_class=formatter) parser.add_argument('-?', '--?', action='help', help='same as -h or --help') parser.add_argument('-r', '--rex', action='store_true', help='include regular expression generation') parser.add_argument('-R', '--norex', action='store_true', help='exclude regular expression generation') return parser def discover_flags(parser, args, params): flags, more = parser.parse_known_args(args) if len(more) > 0: print(parser.epilog, file=sys.stderr) sys.exit(1) params['inc_rex'] = flags.rex return flags def verify_parser(usage=''): formatter = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser(prog='tdda verify', epilog=usage + VERIFY_HELP, formatter_class=formatter) parser.add_argument('-?', '--?', action='help', help='same as -h or --help') parser.add_argument('-a', '--all', action='store_true', help='report all fields, even if there are ' 'no failures') parser.add_argument('-f', '--fields', action='store_true', help='report only fields with failures') parser.add_argument('-c', '--constraints', action='store_true', help='report only individual constraints that fail') parser.add_argument('-1', '--oneperline', action='store_true', help='report each constraint failure on a ' 'separate line') parser.add_argument('-7', '--ascii', action='store_true', help='report without using special characters') parser.add_argument('-type_checking', action='store_true', help='strict or sloppy') parser.add_argument('-epsilon', '--epsilon', nargs=1, help='epsilon fuzziness') return parser def detect_parser(usage=''): formatter = argparse.RawDescriptionHelpFormatter parser = argparse.ArgumentParser(prog='tdda detect', epilog=usage + DETECT_HELP, formatter_class=formatter) parser.add_argument('-?', '--?', action='help', help='same as -h or --help') parser.add_argument('-type_checking', 
action='store_true', help='strict or sloppy') parser.add_argument('-epsilon', '--epsilon', nargs=1, help='epsilon fuzziness') parser.add_argument('--write-all', action='store_true', help='Include passing records') parser.add_argument('--per-constraint', action='store_true', help='Write one column per failing constraint ' 'in addition to n_failures') parser.add_argument('--output-fields', nargs='*', help='Specify original columns to write out. ' 'If used with no field names, then ' 'all original columns will be included') parser.add_argument('--rownumber', action='store_true', help='Include a row-number in the output file when ' 'detecting. Rows are numbered from 0') return parser def verify_flags(parser, args, params): flags, more = parser.parse_known_args(args) if len(more) > 0: print(parser.epilog, file=sys.stderr) sys.exit(1) params.update({ 'report': 'all', 'one_per_line': False, 'ascii': False, }) if flags.all: params['report'] = 'all' elif flags.fields: params['report'] = 'fields' elif flags.constraints: params['report'] = 'constraints' if flags.oneperline: params['one_per_line'] = True if flags.ascii: params['ascii'] = True if flags.type_checking: params['type_checking'] = True if flags.epsilon: params['epsilon'] = float(flags.epsilon[0]) return flags def detect_flags(parser, args, params): flags, more = parser.parse_known_args(args) if len(more) > 0: print(parser.epilog, file=sys.stderr) sys.exit(1) if flags.type_checking: params['type_checking'] = True if flags.epsilon: params['epsilon'] = float(flags.epsilon[0]) if flags.write_all: params['write_all'] = True if flags.per_constraint: params['per_constraint'] = True if flags.rownumber: params['rownumber'] = True if flags.output_fields is not None: params['output_fields'] = flags.output_fields params['in_place'] = False # Only applicable in API case return flags
en
0.678302
# -*- coding: utf-8 -*- Helpers for command-line option flags for discover and verify Optional flags are: * -r or --rex Include regular expression generation * -R or --norex Exclude regular expression generation (the default) Optional flags are: * -a, --all Report all fields, even if there are no failures * -f, --fields Report only fields with failures * -c, --constraints Report only individual constraints that fail. Not yet implemented. * -1, --oneperline Report each constraint failure on a separate line. Not yet implemented. * -7, --ascii Report in ASCII form, without using special characters. * --epsilon E Use this value of epsilon for fuzziness in comparing numeric values Optional flags are: * --epsilon E Use this value of epsilon for fuzziness in comparing numeric values * --write-all Include passing records when detecting * --per-constraint Write one column per failing constraint when detecting, as well as the n_failures total column for each row. * --output-fields FIELD1,FIELD2 Specify original columns to write out when detecting. If used with no field names, all original columns will be included. * --rownumber Include a row-number in the output file when detecting. The row number is automatically included if no output fields are specified. Rows are numbered from 0. # Only applicable in API case
2.851411
3
oscarapi/tests/unit/testcheckout.py
ski-family/django-oscar-api
311
6625326
from decimal import Decimal

from mock import patch

from django.contrib.auth import get_user_model
from django.urls import reverse
from django.test.client import RequestFactory

from oscar.core.loading import get_model
from rest_framework.response import Response

from oscarapi.tests.utils import APITest
from oscarapi.serializers.checkout import CheckoutSerializer

Basket = get_model("basket", "Basket")
User = get_user_model()
Order = get_model("order", "Order")


class CheckoutTest(APITest):
    """
    End-to-end tests of the checkout API: placing orders (authenticated,
    header-session and anonymous), payload validation, price tampering,
    and post-checkout basket state.

    FIX(review): every ``self.assertTrue(response.status_code, 200)`` has
    been changed to ``assertEqual`` — the second argument of ``assertTrue``
    is the failure *message*, so those assertions could never fail.
    """

    fixtures = [
        "product",
        "productcategory",
        "productattribute",
        "productclass",
        "productattributevalue",
        "category",
        "attributeoptiongroup",
        "attributeoption",
        "stockrecord",
        "partner",
        "orderanditemcharges",
        "country",
    ]

    def _get_common_payload(self, basket_url):
        """Return a valid checkout payload for the basket at basket_url."""
        return {
            "basket": basket_url,
            "guest_email": "<EMAIL>",
            "total": "50.00",
            "shipping_method_code": "no-shipping-required",
            "shipping_charge": {"currency": "EUR", "excl_tax": "0.00", "tax": "0.00"},
            "shipping_address": {
                "country": "http://127.0.0.1:8000/api/countries/NL/",
                "first_name": "Henk",
                "last_name": "<NAME>",
                "line1": "Roemerlaan 44",
                "line2": "",
                "line3": "",
                "line4": "Kroekingen",
                "notes": "Niet STUK MAKEN OK!!!!",
                "phone_number": "+31 26 370 4887",
                "postcode": "7777KK",
                "state": "Gerendrecht",
                "title": "Mr",
            },
        }

    def test_checkout_serializer_validation(self):
        """The CheckoutSerializer should accept a valid payload and parse total."""
        self.login(username="nobody", password="<PASSWORD>")
        # first create a basket and a checkout payload
        response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        basket = response.data
        payload = self._get_common_payload(basket["url"])

        # create a request and user for the serializer
        rf = RequestFactory()
        request = rf.post("/checkout", **payload)
        request.user = User.objects.get(username="nobody")

        serializer = CheckoutSerializer(data=payload, context={"request": request})
        self.assertTrue(serializer.is_valid())
        # see https://github.com/django-oscar/django-oscar-api/issues/188
        self.assertEqual(serializer.validated_data["total"], Decimal("50.00"))

    def test_checkout(self):
        """Test if an order can be placed as an authenticated user with session based auth."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])

        # Checkout with an empty basket should be refused
        response = self.post("api-checkout", **payload)
        self.assertEqual(response.status_code, 406)

        response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.assertEqual(response.status_code, 200)

        response = self.post("api-checkout", **payload)
        self.assertEqual(response.status_code, 200, response.data)
        self.assertEqual(
            response.data["email"],
            "<EMAIL>",
        )
        self.assertEqual(
            Basket.objects.get(pk=basket["id"]).status,
            "Frozen",
            "Basket should be frozen after placing order and before payment",
        )

    def test_checkout_header(self):
        """Prove that the user 'nobody' can checkout his cart when
        authenticating with header session."""
        self.hlogin("nobody", "nobody", session_id="nobody")
        response = self.get("api-basket", session_id="nobody", authenticated=True)
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])

        response = self.post(
            "api-checkout", session_id="nobody", authenticated=True, **payload
        )
        self.assertEqual(response.status_code, 406)

        response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
            session_id="nobody",
            authenticated=True,
        )
        self.assertEqual(response.status_code, 200)

        response = self.post(
            "api-checkout", session_id="nobody", authenticated=True, **payload
        )
        self.assertEqual(response.status_code, 200, response.data)
        self.assertEqual(
            response.data["email"],
            "<EMAIL>",
        )
        self.assertEqual(
            Basket.objects.get(pk=basket["id"]).status,
            "Frozen",
            "Basket should be frozen after placing order and before payment",
        )

    def test_checkout_implicit_shipping(self):
        """Test if an order can be placed without specifying shipping method."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        # Leave shipping to the server's default resolution
        del payload["shipping_method_code"]
        del payload["shipping_charge"]

        response = self.post("api-checkout", **payload)
        self.assertEqual(response.status_code, 406)

        response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.assertEqual(response.status_code, 200)

        response = self.post("api-checkout", **payload)
        self.assertEqual(response.status_code, 200)
        self.assertEqual(
            Basket.objects.get(pk=basket["id"]).status,
            "Frozen",
            "Basket should be frozen after placing order and before payment",
        )

    def test_checkout_billing_address(self):
        """Test if an order can be placed with a billing address."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        payload["billing_address"] = {
            "country": "http://127.0.0.1:8000/api/countries/NL/",
            "first_name": "Jos",
            "last_name": "Henken",
            "line1": "Stationstraat 4",
            "line2": "",
            "line3": "",
            "line4": "Hengelo",
            "notes": "",
            "phone_number": "+31 26 370 1111",
            "postcode": "1234AA",
            "state": "Gelderland",
            "title": "Mr",
        }

        self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        response = self.post("api-checkout", **payload)
        self.assertEqual(response.status_code, 200)

    def test_checkout_wrong_billing_address(self):
        """Prove that an order cannot be placed with invalid billing address."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        payload["billing_address"] = {"country": "This is wrong"}

        self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        response = self.post("api-checkout", **payload)
        # It should complain about the billing address
        self.assertEqual(response.status_code, 406)
        self.assertEqual(
            response.data["billing_address"]["country"][0],
            "Invalid hyperlink - No URL match.",
        )

    def test_client_cannot_falsify_total_price(self):
        """Prove that the total price variable sent along
        with a checkout request, can not be manipulated."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        payload["total"] = "150.00"  # Instead of '50.00'

        self.response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.response.assertStatusEqual(200)
        self.response = self.post("api-checkout", **payload)
        self.response.assertStatusEqual(406)
        self.response.assertValueEqual(
            "non_field_errors", ["Total incorrect 150.00 != 50.00"]
        )

    def test_client_cannot_falsify_shipping_charge(self):
        """Prove that the shipping charge variable sent along
        with a checkout request, can not be manipulated."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        payload["shipping_charge"]["excl_tax"] = "42.00"  # Instead of '0.00'

        response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.assertEqual(response.status_code, 200)
        response = self.post("api-checkout", **payload)
        self.assertEqual(response.status_code, 406, response.data)
        error_message = response.data["non_field_errors"][0]
        self.assertIn("Shipping price incorrect", error_message)

    def test_utf8_encoding(self):
        """We should accept utf-8 (non ascii) characters in the address."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        payload["shipping_address"]["line1"] = "Ї ❤ chǼractɇɌȘ"

        self.response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.response.assertStatusEqual(200)
        self.response = self.post("api-checkout", **payload)
        self.response.assertStatusEqual(200)
        self.assertEqual(
            self.response.data["shipping_address"]["line1"], u"Ї ❤ chǼractɇɌȘ"
        )

    def test_checkout_empty_basket(self):
        """When basket is empty, checkout should raise an error."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        response = self.get(basket["lines"])
        self.assertEqual(response.status_code, 200)
        lines = response.data
        self.assertEqual(lines, [])
        payload = self._get_common_payload(basket.get("url"))

        self.response = self.post("api-checkout", **payload)
        self.response.assertStatusEqual(406)
        self.response.assertValueEqual(
            "non_field_errors", ["Cannot checkout with empty basket"]
        )

    def test_total_is_optional(self):
        """Total should be an optional value."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        del payload["total"]

        self.response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.response.assertStatusEqual(200)
        self.response = self.post("api-checkout", **payload)
        self.response.assertStatusEqual(200)

    def test_can_login_with_frozen_user_basket(self):
        """When a user has an unpaid order, he should still be able to log in."""
        # Place an order, which freezes the basket
        self.test_checkout()
        self.delete("api-login")
        self.get("api-basket")
        self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.response = self.post("api-login", username="nobody", password="<PASSWORD>")
        self.response.assertStatusEqual(200)
        self.login(username="nobody", password="<PASSWORD>")

    def test_anonymous_checkout(self):
        """Test if an order can be placed as an anonymous user."""
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])
        del payload["guest_email"]

        with self.settings(OSCAR_ALLOW_ANON_CHECKOUT=True):
            response = self.post("api-checkout", **payload)
            self.assertEqual(response.status_code, 406)

            response = self.post(
                "api-basket-add-product",
                url="http://testserver/api/products/1/",
                quantity=5,
            )
            self.assertEqual(response.status_code, 200)

            # No guest email specified should say 406
            response = self.post("api-checkout", **payload)
            self.assertEqual(response.status_code, 406)

            # An empty email address should say this as well
            payload["guest_email"] = ""
            response = self.post("api-checkout", **payload)
            self.assertEqual(response.status_code, 406)

            # Add in guest_email to get a 200
            payload["guest_email"] = "<EMAIL>"
            response = self.post("api-checkout", **payload)
            self.assertEqual(response.status_code, 200)
            self.assertEqual(response.data["email"], "<EMAIL>")
            self.assertEqual(
                Basket.objects.get(pk=basket["id"]).status,
                "Frozen",
                "Basket should be frozen after placing order and before payment",
            )

    def test_checkout_creates_an_order(self):
        """After checkout has been done, a user should have gained an order object."""
        # first create an anonymous order
        self.test_anonymous_checkout()

        # and now an order for the user nobody
        self.login(username="nobody", password="<PASSWORD>")
        self.test_checkout()
        self.response = self.get("order-list")
        # the anonymous order should not be listed
        self.assertEqual(len(self.response), 1, "An order should have been created.")

        order_url = self.response.data[0]["url"]
        self.response = self.get(order_url)
        orderlines_url = self.response["lines"]

        self.response = self.get(orderlines_url)
        self.assertEqual(len(self.response), 1, "The order should have one orderline.")

        orderline_url = self.response.data[0]["url"]
        self.response = self.get(orderline_url)
        self.assertEqual(
            self.response["order"],
            order_url,
            "the order url from a line is the same as the one created",
        )

    def test_order_api_surcharges(self):
        """Surcharges should be shown in the API when they are applied"""
        # and now an order for the user nobody
        self.login(username="nobody", password="<PASSWORD>")
        self.test_checkout()
        self.response = self.get("order-list")
        self.assertEqual(len(self.response), 1, "An order should have been created.")
        order_url = self.response.data[0]["url"]

        order = Order.objects.get(number=self.response.data[0]["number"])
        order.surcharges.create(
            name="Surcharge", code="surcharge", excl_tax=10.00, incl_tax=10.00
        )

        self.response = self.get(order_url)
        self.assertEqual(
            len(self.response["surcharges"]), 1, "The order should have one surcharge."
        )
        self.assertEqual(self.response["surcharges"][0]["code"], "surcharge")
        self.assertEqual(self.response["surcharges"][0]["name"], "Surcharge")
        self.assertEqual(self.response["surcharges"][0]["excl_tax"], "10.00")
        self.assertEqual(self.response["surcharges"][0]["incl_tax"], "10.00")

    @patch("oscarapi.signals.oscarapi_post_checkout.send")
    def test_post_checkout_signal_send(self, mock):
        """The `oscarapi_post_checkout` signal should be send after checkout."""
        self.test_anonymous_checkout()
        self.assertTrue(mock.called)
        # Make sure it's a django Response instance and not the DRF module
        self.assertTrue(isinstance(mock.call_args[1]["response"], Response))

    def test_checkout_permissions(self):
        """Prove that someone cannot check out someone else's cart by mistake."""
        # First login as nobody
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")

        # Store this basket because somebody is going to checkout with this
        basket = response.data
        nobody_basket_url = basket.get("url")
        self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.client.logout()

        # Now login as somebody and fill another basket
        self.login(username="somebody", password="<PASSWORD>")
        self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )

        # So let's checkout with nobody's basket WHAHAAAHAHA!
        payload = self._get_common_payload(nobody_basket_url)

        # Oh, this is indeed not possible
        response = self.post("api-checkout", **payload)
        self.assertEqual(response.status_code, 401)
        self.assertEqual(response.data, "Unauthorized")

    def test_shipping_methods(self):
        """Test if shipping methods can be fetched for baskets."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        payload = self._get_common_payload(None)["shipping_address"]

        # An empty basket has only the "no shipping required" method
        self.response = self.post("api-basket-shipping-methods", **payload)
        self.response.assertStatusEqual(200)
        self.assertEqual(len(self.response), 1)
        self.assertDictEqual(
            self.response[0],
            {
                "code": "no-shipping-required",
                "name": "No shipping required",
                "description": "",
                "is_discounted": False,
                "discount": 0,
                "price": {
                    "currency": None,
                    "excl_tax": "0.00",
                    "incl_tax": "0.00",
                    "tax": "0.00",
                },
            },
        )

        response = self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.assertEqual(response.status_code, 200)

        # A filled basket resolves to the free-shipping method
        self.response = self.post("api-basket-shipping-methods", **payload)
        self.response.assertStatusEqual(200)
        self.assertEqual(len(self.response), 1)
        self.assertDictEqual(
            self.response[0],
            {
                "code": "free-shipping",
                "name": "Free shipping",
                "description": "",
                "is_discounted": False,
                "discount": 0,
                "price": {
                    "currency": "EUR",
                    "excl_tax": "0.00",
                    "incl_tax": "0.00",
                    "tax": "0.00",
                },
            },
        )

    def test_cart_immutable_after_checkout(self):
        """Prove that the cart can not be changed after checkout."""
        self.login(username="nobody", password="<PASSWORD>")
        response = self.get("api-basket")
        self.assertEqual(response.status_code, 200)
        basket = response.data
        payload = self._get_common_payload(basket["url"])

        self.post(
            "api-basket-add-product",
            url="http://testserver/api/products/1/",
            quantity=5,
        )
        self.post("api-checkout", **payload)
        self.assertEqual(
            Basket.objects.get(pk=basket["id"]).status,
            "Frozen",
            "Basket should be frozen after placing order and before payment",
        )

        url = reverse("basket-detail", args=(basket["id"],))
        response = self.get(url)
        self.assertEqual(response.status_code, 404)  # Frozen basket can not be accessed
from decimal import Decimal from mock import patch from django.contrib.auth import get_user_model from django.urls import reverse from django.test.client import RequestFactory from oscar.core.loading import get_model from rest_framework.response import Response from oscarapi.tests.utils import APITest from oscarapi.serializers.checkout import CheckoutSerializer Basket = get_model("basket", "Basket") User = get_user_model() Order = get_model("order", "Order") class CheckoutTest(APITest): fixtures = [ "product", "productcategory", "productattribute", "productclass", "productattributevalue", "category", "attributeoptiongroup", "attributeoption", "stockrecord", "partner", "orderanditemcharges", "country", ] def _get_common_payload(self, basket_url): return { "basket": basket_url, "guest_email": "<EMAIL>", "total": "50.00", "shipping_method_code": "no-shipping-required", "shipping_charge": {"currency": "EUR", "excl_tax": "0.00", "tax": "0.00"}, "shipping_address": { "country": "http://127.0.0.1:8000/api/countries/NL/", "first_name": "Henk", "last_name": "<NAME>", "line1": "Roemerlaan 44", "line2": "", "line3": "", "line4": "Kroekingen", "notes": "Niet STUK MAKEN OK!!!!", "phone_number": "+31 26 370 4887", "postcode": "7777KK", "state": "Gerendrecht", "title": "Mr", }, } def test_checkout_serializer_validation(self): self.login(username="nobody", password="<PASSWORD>") # first create a basket and a checkout payload response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) basket = response.data payload = self._get_common_payload(basket["url"]) # create a request and user for the serializer rf = RequestFactory() request = rf.post("/checkout", **payload) request.user = User.objects.get(username="nobody") serializer = CheckoutSerializer(data=payload, context={"request": request}) self.assertTrue(serializer.is_valid()) # see https://github.com/django-oscar/django-oscar-api/issues/188 
self.assertEqual(serializer.validated_data["total"], Decimal("50.00")) def test_checkout(self): """Test if an order can be placed as an authenticated user with session based auth.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 406) response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.assertEqual(response.status_code, 200) response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 200, response.data) self.assertEqual( response.data["email"], "<EMAIL>", ) self.assertEqual( Basket.objects.get(pk=basket["id"]).status, "Frozen", "Basket should be frozen after placing order and before payment", ) def test_checkout_header(self): """Prove that the user 'nobody' can checkout his cart when authenticating with header session.""" self.hlogin("nobody", "nobody", session_id="nobody") response = self.get("api-basket", session_id="nobody", authenticated=True) self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) response = self.post( "api-checkout", session_id="nobody", authenticated=True, **payload ) self.assertEqual(response.status_code, 406) response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, session_id="nobody", authenticated=True, ) self.assertEqual(response.status_code, 200) response = self.post( "api-checkout", session_id="nobody", authenticated=True, **payload ) self.assertEqual(response.status_code, 200, response.data) self.assertEqual( response.data["email"], "<EMAIL>", ) self.assertEqual( Basket.objects.get(pk=basket["id"]).status, "Frozen", "Basket should be frozen after placing order and before payment", ) def 
test_checkout_implicit_shipping(self): """Test if an order can be placed without specifying shipping method.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) del payload["shipping_method_code"] del payload["shipping_charge"] response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 406) response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.assertEqual(response.status_code, 200) response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 200) self.assertEqual( Basket.objects.get(pk=basket["id"]).status, "Frozen", "Basket should be frozen after placing order and before payment", ) def test_checkout_billing_address(self): """Test if an order can be placed with a billing address.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) payload["billing_address"] = { "country": "http://127.0.0.1:8000/api/countries/NL/", "first_name": "Jos", "last_name": "Henken", "line1": "Stationstraat 4", "line2": "", "line3": "", "line4": "Hengelo", "notes": "", "phone_number": "+31 26 370 1111", "postcode": "1234AA", "state": "Gelderland", "title": "Mr", } self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 200) def test_checkout_wrong_billing_address(self): """Prove that an order cannot be placed with invalid billing address.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) 
payload["billing_address"] = {"country": "This is wrong"} self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) response = self.post("api-checkout", **payload) # It should complain about the billing address self.assertEqual(response.status_code, 406) self.assertEqual( response.data["billing_address"]["country"][0], "Invalid hyperlink - No URL match.", ) def test_client_cannot_falsify_total_price(self): """Prove that the total price variable sent along with a checkout request, can not be manipulated.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) payload["total"] = "150.00" # Instead of '50.00' self.response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.response.assertStatusEqual(200) self.response = self.post("api-checkout", **payload) self.response.assertStatusEqual(406) self.response.assertValueEqual( "non_field_errors", ["Total incorrect 150.00 != 50.00"] ) def test_client_cannot_falsify_shipping_charge(self): """Prove that the shipping charge variable sent along with a checkout request, can not be manipulated.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) payload["shipping_charge"]["excl_tax"] = "42.00" # Instead of '0.00' response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.assertEqual(response.status_code, 200) response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 406, response.data) error_message = response.data["non_field_errors"][0] self.assertIn("Shipping price incorrect", error_message) def test_utf8_encoding(self): """We should accept utf-8 (non ascii) characters 
in the address.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) payload["shipping_address"]["line1"] = "Ї ❤ chǼractɇɌȘ" self.response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.response.assertStatusEqual(200) self.response = self.post("api-checkout", **payload) self.response.assertStatusEqual(200) self.assertEqual( self.response.data["shipping_address"]["line1"], u"Ї ❤ chǼractɇɌȘ" ) def test_checkout_empty_basket(self): """When basket is empty, checkout should raise an error.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data response = self.get(basket["lines"]) self.assertTrue(response.status_code, 200) lines = response.data self.assertEqual(lines, []) payload = self._get_common_payload(basket.get("url")) self.response = self.post("api-checkout", **payload) self.response.assertStatusEqual(406) self.response.assertValueEqual( "non_field_errors", ["Cannot checkout with empty basket"] ) def test_total_is_optional(self): """Total should be an optional value.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) del payload["total"] self.response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.response.assertStatusEqual(200) self.response = self.post("api-checkout", **payload) self.response.assertStatusEqual(200) def test_can_login_with_frozen_user_basket(self): """When a user has an unpaid order, he should still be able to log in.""" self.test_checkout() self.delete("api-login") self.get("api-basket") self.post( "api-basket-add-product", 
url="http://testserver/api/products/1/", quantity=5, ) self.response = self.post("api-login", username="nobody", password="<PASSWORD>") self.response.assertStatusEqual(200) self.login(username="nobody", password="<PASSWORD>") def test_anonymous_checkout(self): """Test if an order can be placed as an anonymous user.""" response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) del payload["guest_email"] with self.settings(OSCAR_ALLOW_ANON_CHECKOUT=True): response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 406) response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.assertEqual(response.status_code, 200) # No guest email specified should say 406 response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 406) # An empty email address should say this as well payload["guest_email"] = "" response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 406) # Add in guest_email to get a 200 payload["guest_email"] = "<EMAIL>" response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 200) self.assertEqual(response.data["email"], "<EMAIL>") self.assertEqual( Basket.objects.get(pk=basket["id"]).status, "Frozen", "Basket should be frozen after placing order and before payment", ) def test_checkout_creates_an_order(self): """After checkout has been done, a user should have gained an order object.""" # first create an anonymous order self.test_anonymous_checkout() # and now an order for the user nobody self.login(username="nobody", password="<PASSWORD>") self.test_checkout() self.response = self.get("order-list") # the anonymous order should not be listed self.assertEqual(len(self.response), 1, "An order should have been created.") order_url = self.response.data[0]["url"] self.response = self.get(order_url) orderlines_url = 
self.response["lines"] self.response = self.get(orderlines_url) self.assertEqual(len(self.response), 1, "The order should have one orderline.") orderline_url = self.response.data[0]["url"] self.response = self.get(orderline_url) self.assertEqual( self.response["order"], order_url, "the order url from a line is the same as the one created", ) def test_order_api_surcharges(self): """Surcharges should be shown in the API when they are applied""" # and now an order for the user nobody self.login(username="nobody", password="<PASSWORD>") self.test_checkout() self.response = self.get("order-list") self.assertEqual(len(self.response), 1, "An order should have been created.") order_url = self.response.data[0]["url"] order = Order.objects.get(number=self.response.data[0]["number"]) order.surcharges.create( name="Surcharge", code="surcharge", excl_tax=10.00, incl_tax=10.00 ) self.response = self.get(order_url) self.assertEqual( len(self.response["surcharges"]), 1, "The order should have one surcharge." 
) self.assertEqual(self.response["surcharges"][0]["code"], "surcharge") self.assertEqual(self.response["surcharges"][0]["name"], "Surcharge") self.assertEqual(self.response["surcharges"][0]["excl_tax"], "10.00") self.assertEqual(self.response["surcharges"][0]["incl_tax"], "10.00") @patch("oscarapi.signals.oscarapi_post_checkout.send") def test_post_checkout_signal_send(self, mock): """The `oscarapi_post_checkout` signal should be send after checkout.""" self.test_anonymous_checkout() self.assertTrue(mock.called) # Make sure it's a django Response instance and not the DRF module self.assertTrue(isinstance(mock.call_args[1]["response"], Response)) def test_checkout_permissions(self): """Prove that someone cannot check out someone else's cart by mistake.""" # First login as nobody self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") # Store this basket because somebody is going to checkout with this basket = response.data nobody_basket_url = basket.get("url") self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.client.logout() # Now login as somebody and fill another basket self.login(username="somebody", password="<PASSWORD>") self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) # So let's checkout with nobody's basket WHAHAAAHAHA! 
payload = self._get_common_payload(nobody_basket_url) # Oh, this is indeed not possible response = self.post("api-checkout", **payload) self.assertEqual(response.status_code, 401) self.assertEqual(response.data, "Unauthorized") def test_shipping_methods(self): """Test if shipping methods can be fetched for baskets.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) payload = self._get_common_payload(None)["shipping_address"] self.response = self.post("api-basket-shipping-methods", **payload) self.response.assertStatusEqual(200) self.assertEqual(len(self.response), 1) self.assertDictEqual( self.response[0], { "code": "no-shipping-required", "name": "No shipping required", "description": "", "is_discounted": False, "discount": 0, "price": { "currency": None, "excl_tax": "0.00", "incl_tax": "0.00", "tax": "0.00", }, }, ) response = self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.assertEqual(response.status_code, 200) self.response = self.post("api-basket-shipping-methods", **payload) self.response.assertStatusEqual(200) self.assertEqual(len(self.response), 1) self.assertDictEqual( self.response[0], { "code": "free-shipping", "name": "Free shipping", "description": "", "is_discounted": False, "discount": 0, "price": { "currency": "EUR", "excl_tax": "0.00", "incl_tax": "0.00", "tax": "0.00", }, }, ) def test_cart_immutable_after_checkout(self): """Prove that the cart can not be changed after checkout.""" self.login(username="nobody", password="<PASSWORD>") response = self.get("api-basket") self.assertTrue(response.status_code, 200) basket = response.data payload = self._get_common_payload(basket["url"]) self.post( "api-basket-add-product", url="http://testserver/api/products/1/", quantity=5, ) self.post("api-checkout", **payload) self.assertEqual( Basket.objects.get(pk=basket["id"]).status, "Frozen", "Basket should be frozen after placing 
order and before payment", ) url = reverse("basket-detail", args=(basket["id"],)) response = self.get(url) self.assertEqual(response.status_code, 404) # Frozen basket can not be accessed
en
0.897809
# first create a basket and a checkout payload # create a request and user for the serializer # see https://github.com/django-oscar/django-oscar-api/issues/188 Test if an order can be placed as an authenticated user with session based auth. Prove that the user 'nobody' can checkout his cart when authenticating with header session. Test if an order can be placed without specifying shipping method. Test if an order can be placed with a billing address. Prove that an order cannot be placed with invalid billing address. # It should complain about the billing address Prove that the total price variable sent along with a checkout request, can not be manipulated. # Instead of '50.00' Prove that the shipping charge variable sent along with a checkout request, can not be manipulated. # Instead of '0.00' We should accept utf-8 (non ascii) characters in the address. When basket is empty, checkout should raise an error. Total should be an optional value. When a user has an unpaid order, he should still be able to log in. Test if an order can be placed as an anonymous user. # No guest email specified should say 406 # An empty email address should say this as well # Add in guest_email to get a 200 After checkout has been done, a user should have gained an order object. # first create an anonymous order # and now an order for the user nobody # the anonymous order should not be listed Surcharges should be shown in the API when they are applied # and now an order for the user nobody The `oscarapi_post_checkout` signal should be send after checkout. # Make sure it's a django Response instance and not the DRF module Prove that someone cannot check out someone else's cart by mistake. # First login as nobody # Store this basket because somebody is going to checkout with this # Now login as somebody and fill another basket # So let's checkout with nobody's basket WHAHAAAHAHA! # Oh, this is indeed not possible Test if shipping methods can be fetched for baskets. 
Prove that the cart can not be changed after checkout. # Frozen basket can not be accessed
2.067074
2
phase2_stuff/swasp/swasp_fits_to_dat.py
davidjwilson/pceb
0
6625327
<reponame>davidjwilson/pceb import pyfits as fits #incase I want to use it on work desktop #from astropy.io import fits import matplotlib.pyplot as plt import numpy as np import os #turns SWASP fits files into dat files. star='UZ_Sex' #for fits_file in files: # if fits_file[-4:] == 'mxlo': hdulist = fits.open('1SWASPJ102834.89-000029.1.fits') scidata = hdulist[1].data t=scidata['TMID'] for fluxnum in ['FLUX1', 'FLUX2', 'FLUX3']: plt.errorbar(t, scidata[fluxnum], yerr=scidata[fluxnum+'_ERR'], label=fluxnum, marker='o', ls ='none', capsize=0) plt.legend() """ f = scidata['FLUX'][0] w = [scidata['WAVELENGTH'][0]+i*scidata['DELTAW'][0] for i in xrange(len(f))] #e = scidata['SIGMA'][0] #print w w, f, = np.array(w), np.array(f) w, f = w[(f>0)], f[(f>0)] hdulist.close() plt.figure(star+'_iue') plt.plot(w, f, drawstyle='steps-mid') #plt.plot(w,e) #fl=open('iue/dat_files/'+star+'_'+str(fits_file[0:2])+'_iue.dat', 'w') #for j in xrange(len(w)): # fl.write((str(w[j])+' '+str(f[j])+'\n')) """ plt.show()
import pyfits as fits #incase I want to use it on work desktop #from astropy.io import fits import matplotlib.pyplot as plt import numpy as np import os #turns SWASP fits files into dat files. star='UZ_Sex' #for fits_file in files: # if fits_file[-4:] == 'mxlo': hdulist = fits.open('1SWASPJ102834.89-000029.1.fits') scidata = hdulist[1].data t=scidata['TMID'] for fluxnum in ['FLUX1', 'FLUX2', 'FLUX3']: plt.errorbar(t, scidata[fluxnum], yerr=scidata[fluxnum+'_ERR'], label=fluxnum, marker='o', ls ='none', capsize=0) plt.legend() """ f = scidata['FLUX'][0] w = [scidata['WAVELENGTH'][0]+i*scidata['DELTAW'][0] for i in xrange(len(f))] #e = scidata['SIGMA'][0] #print w w, f, = np.array(w), np.array(f) w, f = w[(f>0)], f[(f>0)] hdulist.close() plt.figure(star+'_iue') plt.plot(w, f, drawstyle='steps-mid') #plt.plot(w,e) #fl=open('iue/dat_files/'+star+'_'+str(fits_file[0:2])+'_iue.dat', 'w') #for j in xrange(len(w)): # fl.write((str(w[j])+' '+str(f[j])+'\n')) """ plt.show()
en
0.329262
#incase I want to use it on work desktop #from astropy.io import fits #turns SWASP fits files into dat files. #for fits_file in files: # if fits_file[-4:] == 'mxlo': f = scidata['FLUX'][0] w = [scidata['WAVELENGTH'][0]+i*scidata['DELTAW'][0] for i in xrange(len(f))] #e = scidata['SIGMA'][0] #print w w, f, = np.array(w), np.array(f) w, f = w[(f>0)], f[(f>0)] hdulist.close() plt.figure(star+'_iue') plt.plot(w, f, drawstyle='steps-mid') #plt.plot(w,e) #fl=open('iue/dat_files/'+star+'_'+str(fits_file[0:2])+'_iue.dat', 'w') #for j in xrange(len(w)): # fl.write((str(w[j])+' '+str(f[j])+'\n'))
2.4834
2
habitat_baselines/rl/ppo/policy.py
erick84mm/habitat-api
1
6625328
<reponame>erick84mm/habitat-api<filename>habitat_baselines/rl/ppo/policy.py #!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import abc import numpy as np import torch import torch.nn as nn from habitat_baselines.common.utils import CategoricalNet, Flatten from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder from habitat_baselines.rl.models.simple_cnn import SimpleCNN class Policy(nn.Module): def __init__(self, net, dim_actions): super().__init__() self.net = net self.dim_actions = dim_actions self.action_distribution = CategoricalNet( self.net.output_size, self.dim_actions ) self.critic = CriticHead(self.net.output_size) def forward(self, *x): raise NotImplementedError def act( self, observations, rnn_hidden_states, prev_actions, masks, deterministic=False, ): features, rnn_hidden_states = self.net( observations, rnn_hidden_states, prev_actions, masks ) distribution = self.action_distribution(features) value = self.critic(features) if deterministic: action = distribution.mode() else: action = distribution.sample() action_log_probs = distribution.log_probs(action) return value, action, action_log_probs, rnn_hidden_states def get_value(self, observations, rnn_hidden_states, prev_actions, masks): features, _ = self.net( observations, rnn_hidden_states, prev_actions, masks ) return self.critic(features) def evaluate_actions( self, observations, rnn_hidden_states, prev_actions, masks, action ): features, rnn_hidden_states = self.net( observations, rnn_hidden_states, prev_actions, masks ) distribution = self.action_distribution(features) value = self.critic(features) action_log_probs = distribution.log_probs(action) distribution_entropy = distribution.entropy().mean() return value, action_log_probs, distribution_entropy, rnn_hidden_states class CriticHead(nn.Module): def __init__(self, input_size): 
super().__init__() self.fc = nn.Linear(input_size, 1) nn.init.orthogonal_(self.fc.weight) nn.init.constant_(self.fc.bias, 0) def forward(self, x): return self.fc(x) class PointNavBaselinePolicy(Policy): def __init__( self, observation_space, action_space, goal_sensor_uuid, hidden_size=512, ): super().__init__( PointNavBaselineNet( observation_space=observation_space, hidden_size=hidden_size, goal_sensor_uuid=goal_sensor_uuid, ), action_space.n, ) class Net(nn.Module, metaclass=abc.ABCMeta): @abc.abstractmethod def forward(self, observations, rnn_hidden_states, prev_actions, masks): pass @property @abc.abstractmethod def output_size(self): pass @property @abc.abstractmethod def num_recurrent_layers(self): pass @property @abc.abstractmethod def is_blind(self): pass class PointNavBaselineNet(Net): r"""Network which passes the input image through CNN and concatenates goal vector with CNN's output and passes that through RNN. """ def __init__(self, observation_space, hidden_size, goal_sensor_uuid): super().__init__() self.goal_sensor_uuid = goal_sensor_uuid self._n_input_goal = observation_space.spaces[ self.goal_sensor_uuid ].shape[0] self._hidden_size = hidden_size self.visual_encoder = SimpleCNN(observation_space, hidden_size) self.state_encoder = RNNStateEncoder( (0 if self.is_blind else self._hidden_size) + self._n_input_goal, self._hidden_size, ) self.train() @property def output_size(self): return self._hidden_size @property def is_blind(self): return self.visual_encoder.is_blind @property def num_recurrent_layers(self): return self.state_encoder.num_recurrent_layers def get_target_encoding(self, observations): return observations[self.goal_sensor_uuid] def forward(self, observations, rnn_hidden_states, prev_actions, masks): target_encoding = self.get_target_encoding(observations) x = [target_encoding] if not self.is_blind: perception_embed = self.visual_encoder(observations) x = [perception_embed] + x x = torch.cat(x, dim=1) x, rnn_hidden_states = 
self.state_encoder(x, rnn_hidden_states, masks) return x, rnn_hidden_states
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. import abc import numpy as np import torch import torch.nn as nn from habitat_baselines.common.utils import CategoricalNet, Flatten from habitat_baselines.rl.models.rnn_state_encoder import RNNStateEncoder from habitat_baselines.rl.models.simple_cnn import SimpleCNN class Policy(nn.Module): def __init__(self, net, dim_actions): super().__init__() self.net = net self.dim_actions = dim_actions self.action_distribution = CategoricalNet( self.net.output_size, self.dim_actions ) self.critic = CriticHead(self.net.output_size) def forward(self, *x): raise NotImplementedError def act( self, observations, rnn_hidden_states, prev_actions, masks, deterministic=False, ): features, rnn_hidden_states = self.net( observations, rnn_hidden_states, prev_actions, masks ) distribution = self.action_distribution(features) value = self.critic(features) if deterministic: action = distribution.mode() else: action = distribution.sample() action_log_probs = distribution.log_probs(action) return value, action, action_log_probs, rnn_hidden_states def get_value(self, observations, rnn_hidden_states, prev_actions, masks): features, _ = self.net( observations, rnn_hidden_states, prev_actions, masks ) return self.critic(features) def evaluate_actions( self, observations, rnn_hidden_states, prev_actions, masks, action ): features, rnn_hidden_states = self.net( observations, rnn_hidden_states, prev_actions, masks ) distribution = self.action_distribution(features) value = self.critic(features) action_log_probs = distribution.log_probs(action) distribution_entropy = distribution.entropy().mean() return value, action_log_probs, distribution_entropy, rnn_hidden_states class CriticHead(nn.Module): def __init__(self, input_size): super().__init__() self.fc = nn.Linear(input_size, 1) 
nn.init.orthogonal_(self.fc.weight) nn.init.constant_(self.fc.bias, 0) def forward(self, x): return self.fc(x) class PointNavBaselinePolicy(Policy): def __init__( self, observation_space, action_space, goal_sensor_uuid, hidden_size=512, ): super().__init__( PointNavBaselineNet( observation_space=observation_space, hidden_size=hidden_size, goal_sensor_uuid=goal_sensor_uuid, ), action_space.n, ) class Net(nn.Module, metaclass=abc.ABCMeta): @abc.abstractmethod def forward(self, observations, rnn_hidden_states, prev_actions, masks): pass @property @abc.abstractmethod def output_size(self): pass @property @abc.abstractmethod def num_recurrent_layers(self): pass @property @abc.abstractmethod def is_blind(self): pass class PointNavBaselineNet(Net): r"""Network which passes the input image through CNN and concatenates goal vector with CNN's output and passes that through RNN. """ def __init__(self, observation_space, hidden_size, goal_sensor_uuid): super().__init__() self.goal_sensor_uuid = goal_sensor_uuid self._n_input_goal = observation_space.spaces[ self.goal_sensor_uuid ].shape[0] self._hidden_size = hidden_size self.visual_encoder = SimpleCNN(observation_space, hidden_size) self.state_encoder = RNNStateEncoder( (0 if self.is_blind else self._hidden_size) + self._n_input_goal, self._hidden_size, ) self.train() @property def output_size(self): return self._hidden_size @property def is_blind(self): return self.visual_encoder.is_blind @property def num_recurrent_layers(self): return self.state_encoder.num_recurrent_layers def get_target_encoding(self, observations): return observations[self.goal_sensor_uuid] def forward(self, observations, rnn_hidden_states, prev_actions, masks): target_encoding = self.get_target_encoding(observations) x = [target_encoding] if not self.is_blind: perception_embed = self.visual_encoder(observations) x = [perception_embed] + x x = torch.cat(x, dim=1) x, rnn_hidden_states = self.state_encoder(x, rnn_hidden_states, masks) return x, 
rnn_hidden_states
en
0.92462
#!/usr/bin/env python3 # Copyright (c) Facebook, Inc. and its affiliates. # This source code is licensed under the MIT license found in the # LICENSE file in the root directory of this source tree. Network which passes the input image through CNN and concatenates goal vector with CNN's output and passes that through RNN.
2.192237
2
pkg/suggestion/v1alpha1/tests/test_algorithm_manager.py
terrytangyuan/katib
6
6625329
import os import yaml import pytest import numpy as np from box import Box from pkg.api.v1alpha1.python import api_pb2 from ..bayesianoptimization.src.algorithm_manager import AlgorithmManager TEST_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.fixture def study_config(): with open(os.path.join(TEST_DIR, "study_config.yaml"), "r") as f: contents = yaml.safe_load(f) return Box(contents) @pytest.fixture def observations(): with open(os.path.join(TEST_DIR, "parameter_values.yaml"), "r") as f: contents = yaml.safe_load(f) return Box(contents) def test_algorithm_manager(study_config, observations): study_id = "test_id" x_next = [1.0, 1, 5, "true"] manager = AlgorithmManager(study_id, study_config, observations.parameters, observations.metrics) assert manager.study_id == study_id assert manager.study_config == study_config assert manager.goal == api_pb2.MAXIMIZE assert manager.types == [api_pb2.DOUBLE, api_pb2.INT, api_pb2.DISCRETE, api_pb2.CATEGORICAL] assert manager.names == ["x", "y", "fake_discrete", "fake_categorical"] assert manager.dim == 5 assert manager.lower_bound == [-5.0, -5, 2, 0, 0] assert manager.upper_bound == [5.0, 5, 5, 1, 1] assert manager.discrete_info == [{"name": "fake_discrete", "values": [2, 3, 5]}] assert manager.categorical_info == \ [{"name": "fake_categorical", "values": ["true", "false"], "number": 2}] assert np.allclose(manager.X_train, np.array([[1.0, 1, 2, 1, 0], [1.0, 1, 3, 0, 1]])) assert np.allclose(manager.y_train, np.array([1.0, 1.0])) parsed_x_next = manager.parse_x_next(x_next) x_next_dict = manager.convert_to_dict(parsed_x_next) assert x_next_dict == \ [{"name": "x", "value": 1.0, "type": api_pb2.DOUBLE}, {"name": "y", "value": 1, "type": api_pb2.INT}, {"name": "fake_discrete", "value": 5, "type": api_pb2.DISCRETE}, {"name": "fake_categorical", "value": "true", "type": api_pb2.CATEGORICAL}]
import os import yaml import pytest import numpy as np from box import Box from pkg.api.v1alpha1.python import api_pb2 from ..bayesianoptimization.src.algorithm_manager import AlgorithmManager TEST_DIR = os.path.dirname(os.path.realpath(__file__)) @pytest.fixture def study_config(): with open(os.path.join(TEST_DIR, "study_config.yaml"), "r") as f: contents = yaml.safe_load(f) return Box(contents) @pytest.fixture def observations(): with open(os.path.join(TEST_DIR, "parameter_values.yaml"), "r") as f: contents = yaml.safe_load(f) return Box(contents) def test_algorithm_manager(study_config, observations): study_id = "test_id" x_next = [1.0, 1, 5, "true"] manager = AlgorithmManager(study_id, study_config, observations.parameters, observations.metrics) assert manager.study_id == study_id assert manager.study_config == study_config assert manager.goal == api_pb2.MAXIMIZE assert manager.types == [api_pb2.DOUBLE, api_pb2.INT, api_pb2.DISCRETE, api_pb2.CATEGORICAL] assert manager.names == ["x", "y", "fake_discrete", "fake_categorical"] assert manager.dim == 5 assert manager.lower_bound == [-5.0, -5, 2, 0, 0] assert manager.upper_bound == [5.0, 5, 5, 1, 1] assert manager.discrete_info == [{"name": "fake_discrete", "values": [2, 3, 5]}] assert manager.categorical_info == \ [{"name": "fake_categorical", "values": ["true", "false"], "number": 2}] assert np.allclose(manager.X_train, np.array([[1.0, 1, 2, 1, 0], [1.0, 1, 3, 0, 1]])) assert np.allclose(manager.y_train, np.array([1.0, 1.0])) parsed_x_next = manager.parse_x_next(x_next) x_next_dict = manager.convert_to_dict(parsed_x_next) assert x_next_dict == \ [{"name": "x", "value": 1.0, "type": api_pb2.DOUBLE}, {"name": "y", "value": 1, "type": api_pb2.INT}, {"name": "fake_discrete", "value": 5, "type": api_pb2.DISCRETE}, {"name": "fake_categorical", "value": "true", "type": api_pb2.CATEGORICAL}]
none
1
1.982436
2
statdyn/figures/interactive_config.py
malramsay64/MD-Molecules-Hoomd
1
6625330
<reponame>malramsay64/MD-Molecules-Hoomd<filename>statdyn/figures/interactive_config.py #! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2017 <NAME> <<EMAIL>> # # Distributed under terms of the MIT license. # # pylint: skip-file """Create an interactive view of a configuration.""" import functools import logging from functools import partial from pathlib import Path import gsd.hoomd from bokeh.layouts import column, row, widgetbox from bokeh.models import (Button, ColumnDataSource, RadioButtonGroup, Select, Slider, Toggle) from bokeh.plotting import curdoc, figure from tornado import gen from statdyn.analysis.order import (compute_ml_order, compute_voronoi_neighs, dt_model, knn_model, nn_model, orientational_order) from statdyn.figures.configuration import plot, plot_circles, snapshot2data from statdyn.molecules import Trimer logger = logging.getLogger(__name__) # Definition of initial state trj = None snapshot = None extra_particles = True molecule = Trimer() default_dir = '.' timestep = 0 Lx, Ly = (60, 60) source = ColumnDataSource(data={'x': [0], 'y': [0], 'radius': [1], 'colour': ['red']}) play = False doc = curdoc() def get_filename(): return str(Path.cwd() / directory.value / fname.value) def update_files(attr, old, new): fname.options = new if new: fname.value = new[0] update_trajectory(None, None, fname.value) def update_trajectory(attr, old, new): global trj trj = gsd.hoomd.open(get_filename(), 'rb') # Bokeh will cope with IndexError in file but not beginning and end # of slider being the same value. 
index.end = max(len(trj) - 1, 1) if index.value > len(trj) - 1: update_index(None, None, len(trj)-1) else: update_index(None, None, index.value) def update_index(attr, old, new): update_snapshot(attr, old, int(new)) def incr_index(): if index.value < index.end: index.value += increment_size.value def decr_index(): if index.value > index.start: index.value -= increment_size.value def update_snapshot(attr, old, new): if old != new: global snapshot try: snapshot = trj[new] except IndexError: pass update_data(None, None, None) @gen.coroutine def update_source(data): source.data = data def update_data(attr, old, new): try: p.title.text = 'Timestep: {:.5g}'.format(snapshot.configuration.step) data = snapshot2data(snapshot, molecule=molecule, extra_particles=extra_particles, ordering=order_parameters[OP_KEYS[ordered.active]], invert_colours=order_emphasis.active, ) source.data = data except AttributeError: pass def update_data_now(arg): update_data(None, None, None) def update_directory(attr, old, new): files = sorted([filename.name for filename in Path(directory.value).glob('dump*.gsd')]) if files: update_files(None, None, files) def play_pause_toggle(arg): if arg: doc.add_periodic_callback(incr_index, 100) else: doc.remove_periodic_callback(incr_index) DIR_OPTIONS = sorted([d.parts[-1] for d in Path.cwd().glob('*/') if d.is_dir() and len(list(d.glob('dump*.gsd')))]) try: directory = Select(value=DIR_OPTIONS[-1], title='Source directory', options=DIR_OPTIONS) except IndexError: directory = Select(title='Source directory', options=DIR_OPTIONS) directory.on_change('value', update_directory) fname = Select(title='File', value='', options=[]) fname.on_change('value', update_trajectory) index = Slider(title='Index', value=0, start=0, end=1, step=1) index.on_change('value', update_index) order_parameters = { 'None': None, 'Orient': orientational_order, 'Neural Net': functools.partial(compute_ml_order, nn_model()), 'Decision Tree': functools.partial(compute_ml_order, 
dt_model()), 'KNN Model': functools.partial(compute_ml_order, knn_model()), 'Num Neighs': lambda box, pos, orient: compute_voronoi_neighs(box, pos) == 6, } OP_KEYS = list(order_parameters.keys()) ordered = RadioButtonGroup( labels=OP_KEYS, active=0) ordered.on_click(update_data_now) order_emphasis = Toggle(name='emphaisis', label="Toggle Emphasis", active=True) order_emphasis.on_click(update_data_now) radius_scale = Slider(title='Particle Radius', value=1, start=0.1, end=2, step=0.05) radius_scale.on_change('value', update_data) play_pause = Toggle(name='Play/Pause', label="Play/Pause") play_pause.on_click(play_pause_toggle) nextFrame = Button(label='Next') nextFrame.on_click(incr_index) prevFrame = Button(label='Previous') prevFrame.on_click(decr_index) increment_size = Slider(title='Increment Size', value=1, start=1, end=100, step=1) media = widgetbox([prevFrame, play_pause, nextFrame, increment_size], width=300) # When using webgl as the backend the save option doesn't work for some reason. p = figure(width=920, height=800, aspect_scale=1, match_aspect=True, title='Timestep: {:.2g}'.format(timestep), output_backend='webgl', active_scroll='wheel_zoom') p.xgrid.grid_line_color = None p.ygrid.grid_line_color = None update_directory(None, None, default_dir) update_data(None, None, None) plot_circles(p, source) controls = widgetbox([directory, fname, index, ordered], width=300) layout = row(column(controls, order_emphasis, media), p) doc.add_root(layout) doc.title = "Configurations"
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2017 <NAME> <<EMAIL>> # # Distributed under terms of the MIT license. # # pylint: skip-file """Create an interactive view of a configuration.""" import functools import logging from functools import partial from pathlib import Path import gsd.hoomd from bokeh.layouts import column, row, widgetbox from bokeh.models import (Button, ColumnDataSource, RadioButtonGroup, Select, Slider, Toggle) from bokeh.plotting import curdoc, figure from tornado import gen from statdyn.analysis.order import (compute_ml_order, compute_voronoi_neighs, dt_model, knn_model, nn_model, orientational_order) from statdyn.figures.configuration import plot, plot_circles, snapshot2data from statdyn.molecules import Trimer logger = logging.getLogger(__name__) # Definition of initial state trj = None snapshot = None extra_particles = True molecule = Trimer() default_dir = '.' timestep = 0 Lx, Ly = (60, 60) source = ColumnDataSource(data={'x': [0], 'y': [0], 'radius': [1], 'colour': ['red']}) play = False doc = curdoc() def get_filename(): return str(Path.cwd() / directory.value / fname.value) def update_files(attr, old, new): fname.options = new if new: fname.value = new[0] update_trajectory(None, None, fname.value) def update_trajectory(attr, old, new): global trj trj = gsd.hoomd.open(get_filename(), 'rb') # Bokeh will cope with IndexError in file but not beginning and end # of slider being the same value. 
index.end = max(len(trj) - 1, 1) if index.value > len(trj) - 1: update_index(None, None, len(trj)-1) else: update_index(None, None, index.value) def update_index(attr, old, new): update_snapshot(attr, old, int(new)) def incr_index(): if index.value < index.end: index.value += increment_size.value def decr_index(): if index.value > index.start: index.value -= increment_size.value def update_snapshot(attr, old, new): if old != new: global snapshot try: snapshot = trj[new] except IndexError: pass update_data(None, None, None) @gen.coroutine def update_source(data): source.data = data def update_data(attr, old, new): try: p.title.text = 'Timestep: {:.5g}'.format(snapshot.configuration.step) data = snapshot2data(snapshot, molecule=molecule, extra_particles=extra_particles, ordering=order_parameters[OP_KEYS[ordered.active]], invert_colours=order_emphasis.active, ) source.data = data except AttributeError: pass def update_data_now(arg): update_data(None, None, None) def update_directory(attr, old, new): files = sorted([filename.name for filename in Path(directory.value).glob('dump*.gsd')]) if files: update_files(None, None, files) def play_pause_toggle(arg): if arg: doc.add_periodic_callback(incr_index, 100) else: doc.remove_periodic_callback(incr_index) DIR_OPTIONS = sorted([d.parts[-1] for d in Path.cwd().glob('*/') if d.is_dir() and len(list(d.glob('dump*.gsd')))]) try: directory = Select(value=DIR_OPTIONS[-1], title='Source directory', options=DIR_OPTIONS) except IndexError: directory = Select(title='Source directory', options=DIR_OPTIONS) directory.on_change('value', update_directory) fname = Select(title='File', value='', options=[]) fname.on_change('value', update_trajectory) index = Slider(title='Index', value=0, start=0, end=1, step=1) index.on_change('value', update_index) order_parameters = { 'None': None, 'Orient': orientational_order, 'Neural Net': functools.partial(compute_ml_order, nn_model()), 'Decision Tree': functools.partial(compute_ml_order, 
dt_model()), 'KNN Model': functools.partial(compute_ml_order, knn_model()), 'Num Neighs': lambda box, pos, orient: compute_voronoi_neighs(box, pos) == 6, } OP_KEYS = list(order_parameters.keys()) ordered = RadioButtonGroup( labels=OP_KEYS, active=0) ordered.on_click(update_data_now) order_emphasis = Toggle(name='emphaisis', label="Toggle Emphasis", active=True) order_emphasis.on_click(update_data_now) radius_scale = Slider(title='Particle Radius', value=1, start=0.1, end=2, step=0.05) radius_scale.on_change('value', update_data) play_pause = Toggle(name='Play/Pause', label="Play/Pause") play_pause.on_click(play_pause_toggle) nextFrame = Button(label='Next') nextFrame.on_click(incr_index) prevFrame = Button(label='Previous') prevFrame.on_click(decr_index) increment_size = Slider(title='Increment Size', value=1, start=1, end=100, step=1) media = widgetbox([prevFrame, play_pause, nextFrame, increment_size], width=300) # When using webgl as the backend the save option doesn't work for some reason. p = figure(width=920, height=800, aspect_scale=1, match_aspect=True, title='Timestep: {:.2g}'.format(timestep), output_backend='webgl', active_scroll='wheel_zoom') p.xgrid.grid_line_color = None p.ygrid.grid_line_color = None update_directory(None, None, default_dir) update_data(None, None, None) plot_circles(p, source) controls = widgetbox([directory, fname, index, ordered], width=300) layout = row(column(controls, order_emphasis, media), p) doc.add_root(layout) doc.title = "Configurations"
en
0.798493
#! /usr/bin/env python # -*- coding: utf-8 -*- # vim:fenc=utf-8 # # Copyright © 2017 <NAME> <<EMAIL>> # # Distributed under terms of the MIT license. # # pylint: skip-file Create an interactive view of a configuration. # Definition of initial state # Bokeh will cope with IndexError in file but not beginning and end # of slider being the same value. # When using webgl as the backend the save option doesn't work for some reason.
1.972119
2
PointMatcher/utils/qt.py
daisatojp/PointMatcher
2
6625331
import os import os.path as osp import re import sys from PyQt5 import QtGui from PyQt5 import QtCore from PyQt5 import QtWidgets from PointMatcher.utils.filesystem import icon_path def newButton(text, icon=None, slot=None): b = QtWidgets.QPushButton(text) if icon is not None: b.setIcon(QtGui.QIcon(icon_path(icon))) if slot is not None: b.clicked.connect(slot) return b def newAction( parent, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, enabled=True): """Create a new action and assign callbacks, shortcuts, etc.""" a = QtWidgets.QAction(text, parent) if icon is not None: a.setIcon(QtGui.QIcon(icon_path(icon))) if shortcut is not None: if isinstance(shortcut, (list, tuple)): a.setShortcuts(shortcut) else: a.setShortcut(shortcut) if tip is not None: a.setToolTip(tip) a.setStatusTip(tip) if slot is not None: a.triggered.connect(slot) if checkable: a.setCheckable(True) a.setEnabled(enabled) return a def addActions(widget, actions): for action in actions: if action is None: widget.addSeparator() elif isinstance(action, QtWidgets.QMenu): widget.addMenu(action) else: widget.addAction(action)
import os import os.path as osp import re import sys from PyQt5 import QtGui from PyQt5 import QtCore from PyQt5 import QtWidgets from PointMatcher.utils.filesystem import icon_path def newButton(text, icon=None, slot=None): b = QtWidgets.QPushButton(text) if icon is not None: b.setIcon(QtGui.QIcon(icon_path(icon))) if slot is not None: b.clicked.connect(slot) return b def newAction( parent, text, slot=None, shortcut=None, icon=None, tip=None, checkable=False, enabled=True): """Create a new action and assign callbacks, shortcuts, etc.""" a = QtWidgets.QAction(text, parent) if icon is not None: a.setIcon(QtGui.QIcon(icon_path(icon))) if shortcut is not None: if isinstance(shortcut, (list, tuple)): a.setShortcuts(shortcut) else: a.setShortcut(shortcut) if tip is not None: a.setToolTip(tip) a.setStatusTip(tip) if slot is not None: a.triggered.connect(slot) if checkable: a.setCheckable(True) a.setEnabled(enabled) return a def addActions(widget, actions): for action in actions: if action is None: widget.addSeparator() elif isinstance(action, QtWidgets.QMenu): widget.addMenu(action) else: widget.addAction(action)
en
0.63101
Create a new action and assign callbacks, shortcuts, etc.
2.434392
2
chapterfour/magicians.py
cmotek/python_crashcourse
0
6625332
magicians = ['alice', 'david', 'carolina'] for magician in magicians: print(f"{magician.title()}, that was a great trick!") print(f"I can't wait to see your next trick, {magician.title()}.\n") print("Thank you, everyone. That was a great magic show!")
magicians = ['alice', 'david', 'carolina'] for magician in magicians: print(f"{magician.title()}, that was a great trick!") print(f"I can't wait to see your next trick, {magician.title()}.\n") print("Thank you, everyone. That was a great magic show!")
none
1
3.149302
3
src/webapi/migrations/0052_image_video.py
kumagallium/labmine-api
0
6625333
# Generated by Django 2.2.1 on 2021-02-17 04:27 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('webapi', '0051_explanation'), ] operations = [ migrations.CreateModel( name='Video', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('video_name', models.CharField(max_length=255)), ('video_url', models.TextField()), ('cluster', models.IntegerField(default=2)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('editor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image_name', models.CharField(max_length=255)), ('image_url', models.TextField()), ('cluster', models.IntegerField(default=2)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('editor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), ], ), ]
# Generated by Django 2.2.1 on 2021-02-17 04:27 from django.conf import settings from django.db import migrations, models import django.db.models.deletion class Migration(migrations.Migration): dependencies = [ migrations.swappable_dependency(settings.AUTH_USER_MODEL), ('webapi', '0051_explanation'), ] operations = [ migrations.CreateModel( name='Video', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('video_name', models.CharField(max_length=255)), ('video_url', models.TextField()), ('cluster', models.IntegerField(default=2)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('editor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), ], ), migrations.CreateModel( name='Image', fields=[ ('id', models.AutoField(auto_created=True, primary_key=True, serialize=False, verbose_name='ID')), ('image_name', models.CharField(max_length=255)), ('image_url', models.TextField()), ('cluster', models.IntegerField(default=2)), ('created_at', models.DateTimeField(auto_now_add=True)), ('updated_at', models.DateTimeField(auto_now=True)), ('editor', models.ForeignKey(on_delete=django.db.models.deletion.PROTECT, to=settings.AUTH_USER_MODEL)), ], ), ]
en
0.819112
# Generated by Django 2.2.1 on 2021-02-17 04:27
1.746673
2
h/storage.py
y3g0r/h
0
6625334
# -*- coding: utf-8 -*- """ Annotation storage API. This module provides the core API with access to basic persistence functions for storing and retrieving annotations. Data passed to these functions is assumed to be validated. """ # FIXME: This module was originally written to be a single point of # indirection through which the storage backend could be swapped out on # the fly. This helped us to migrate from Elasticsearch-based # persistence to PostgreSQL persistence. # # The purpose of this module is now primarily to serve as a place to # wrap up the business logic of creating and retrieving annotations. As # such, it probably makes more sense for this to be split up into a # couple of different services at some point. from datetime import datetime from pyramid import i18n from h import models, schemas from h.db import types from h.util.group_scope import url_in_scope from h.models.document import update_document_metadata _ = i18n.TranslationStringFactory(__package__) def fetch_annotation(session, id_): """ Fetch the annotation with the given id. :param session: the database session :type session: sqlalchemy.orm.session.Session :param id_: the annotation ID :type id_: str :returns: the annotation, if found, or None. :rtype: h.models.Annotation, NoneType """ try: return session.query(models.Annotation).get(id_) except types.InvalidUUID: return None def fetch_ordered_annotations(session, ids, query_processor=None): """ Fetch all annotations with the given ids and order them based on the list of ids. The optional `query_processor` parameter allows for passing in a function that can change the query before it is run, especially useful for eager-loading certain data. The function will get the query as an argument and has to return a query object again. 
:param session: the database session :type session: sqlalchemy.orm.session.Session :param ids: the list of annotation ids :type ids: list :param query_processor: an optional function that takes the query and returns an updated query :type query_processor: callable :returns: the annotation, if found, or None. :rtype: h.models.Annotation, NoneType """ if not ids: return [] ordering = {x: i for i, x in enumerate(ids)} query = session.query(models.Annotation).filter(models.Annotation.id.in_(ids)) if query_processor: query = query_processor(query) anns = sorted(query, key=lambda a: ordering.get(a.id)) return anns def create_annotation(request, data, group_service): """ Create an annotation from already-validated data. :param request: the request object :type request: pyramid.request.Request :param data: an annotation data dict that has already been validated by :py:class:`h.schemas.annotation.CreateAnnotationSchema` :type data: dict :param group_service: a service object that implements :py:class:`h.interfaces.IGroupService` :type group_service: :py:class:`h.interfaces.IGroupService` :returns: the created and flushed annotation :rtype: :py:class:`h.models.Annotation` """ created = updated = datetime.utcnow() document_uri_dicts = data["document"]["document_uri_dicts"] document_meta_dicts = data["document"]["document_meta_dicts"] del data["document"] # Replies must have the same group as their parent. if data["references"]: top_level_annotation_id = data["references"][0] top_level_annotation = fetch_annotation(request.db, top_level_annotation_id) if top_level_annotation: data["groupid"] = top_level_annotation.groupid else: raise schemas.ValidationError( "references.0: " + _("Annotation {id} does not exist").format(id=top_level_annotation_id) ) # The user must have permission to create an annotation in the group # they've asked to create one in. If the application didn't configure # a groupfinder we will allow writing this annotation without any # further checks. 
group = group_service.find(data["groupid"]) if group is None or not request.has_permission("write", context=group): raise schemas.ValidationError( "group: " + _("You may not create annotations " "in the specified group!") ) _validate_group_scope(group, data["target_uri"]) annotation = models.Annotation(**data) annotation.created = created annotation.updated = updated document = update_document_metadata( request.db, annotation.target_uri, document_meta_dicts, document_uri_dicts, created=created, updated=updated, ) annotation.document = document request.db.add(annotation) request.db.flush() return annotation def update_annotation(request, id_, data, group_service): """ Update an existing annotation and its associated document metadata. Update the annotation identified by ``id_`` with the given data. Create, delete and update document metadata as appropriate. :param request: the request object :param id_: the ID of the annotation to be updated, this is assumed to be a validated ID of an annotation that does already exist in the database :type id_: string :param data: the validated data with which to update the annotation :type data: dict :type group_service: :py:class:`h.interfaces.IGroupService` :returns: the updated annotation :rtype: h.models.Annotation """ updated = datetime.utcnow() # Remove any 'document' field first so that we don't try to save it on the # annotation object. 
document = data.pop("document", None) annotation = request.db.query(models.Annotation).get(id_) annotation.updated = updated group = group_service.find(annotation.groupid) if group is None: raise schemas.ValidationError( "group: " + _("Invalid group specified for annotation") ) if data.get("target_uri", None): _validate_group_scope(group, data["target_uri"]) annotation.extra.update(data.pop("extra", {})) for key, value in data.items(): setattr(annotation, key, value) if document: document_uri_dicts = document["document_uri_dicts"] document_meta_dicts = document["document_meta_dicts"] document = update_document_metadata( request.db, annotation.target_uri, document_meta_dicts, document_uri_dicts, updated=updated, ) annotation.document = document return annotation def expand_uri(session, uri): """ Return all URIs which refer to the same underlying document as `uri`. This function determines whether we already have "document" records for the passed URI, and if so returns the set of all URIs which we currently believe refer to the same document. :param session: the database session :type session: sqlalchemy.orm.session.Session :param uri: a URI associated with the document :type uri: str :returns: a list of equivalent URIs :rtype: list """ doc = models.Document.find_by_uris(session, [uri]).one_or_none() if doc is None: return [uri] # We check if the match was a "canonical" link. If so, all annotations # created on that page are guaranteed to have that as their target.source # field, so we don't need to expand to other URIs and risk false positives. 
docuris = doc.document_uris for docuri in docuris: if docuri.uri == uri and docuri.type == "rel-canonical": return [uri] return [docuri.uri for docuri in docuris] def _validate_group_scope(group, target_uri): # If no scopes are present, or if the group is configured to allow # annotations outside of its scope, there's nothing to do here if not group.scopes or group.enforce_scope is False: return # The target URI must match at least one # of a group's defined scopes, if the group has any group_scopes = [scope.scope for scope in group.scopes] if not url_in_scope(target_uri, group_scopes): raise schemas.ValidationError( "group scope: " + _("Annotations for this target URI " "are not allowed in this group") )
# -*- coding: utf-8 -*- """ Annotation storage API. This module provides the core API with access to basic persistence functions for storing and retrieving annotations. Data passed to these functions is assumed to be validated. """ # FIXME: This module was originally written to be a single point of # indirection through which the storage backend could be swapped out on # the fly. This helped us to migrate from Elasticsearch-based # persistence to PostgreSQL persistence. # # The purpose of this module is now primarily to serve as a place to # wrap up the business logic of creating and retrieving annotations. As # such, it probably makes more sense for this to be split up into a # couple of different services at some point. from datetime import datetime from pyramid import i18n from h import models, schemas from h.db import types from h.util.group_scope import url_in_scope from h.models.document import update_document_metadata _ = i18n.TranslationStringFactory(__package__) def fetch_annotation(session, id_): """ Fetch the annotation with the given id. :param session: the database session :type session: sqlalchemy.orm.session.Session :param id_: the annotation ID :type id_: str :returns: the annotation, if found, or None. :rtype: h.models.Annotation, NoneType """ try: return session.query(models.Annotation).get(id_) except types.InvalidUUID: return None def fetch_ordered_annotations(session, ids, query_processor=None): """ Fetch all annotations with the given ids and order them based on the list of ids. The optional `query_processor` parameter allows for passing in a function that can change the query before it is run, especially useful for eager-loading certain data. The function will get the query as an argument and has to return a query object again. 
:param session: the database session :type session: sqlalchemy.orm.session.Session :param ids: the list of annotation ids :type ids: list :param query_processor: an optional function that takes the query and returns an updated query :type query_processor: callable :returns: the annotation, if found, or None. :rtype: h.models.Annotation, NoneType """ if not ids: return [] ordering = {x: i for i, x in enumerate(ids)} query = session.query(models.Annotation).filter(models.Annotation.id.in_(ids)) if query_processor: query = query_processor(query) anns = sorted(query, key=lambda a: ordering.get(a.id)) return anns def create_annotation(request, data, group_service): """ Create an annotation from already-validated data. :param request: the request object :type request: pyramid.request.Request :param data: an annotation data dict that has already been validated by :py:class:`h.schemas.annotation.CreateAnnotationSchema` :type data: dict :param group_service: a service object that implements :py:class:`h.interfaces.IGroupService` :type group_service: :py:class:`h.interfaces.IGroupService` :returns: the created and flushed annotation :rtype: :py:class:`h.models.Annotation` """ created = updated = datetime.utcnow() document_uri_dicts = data["document"]["document_uri_dicts"] document_meta_dicts = data["document"]["document_meta_dicts"] del data["document"] # Replies must have the same group as their parent. if data["references"]: top_level_annotation_id = data["references"][0] top_level_annotation = fetch_annotation(request.db, top_level_annotation_id) if top_level_annotation: data["groupid"] = top_level_annotation.groupid else: raise schemas.ValidationError( "references.0: " + _("Annotation {id} does not exist").format(id=top_level_annotation_id) ) # The user must have permission to create an annotation in the group # they've asked to create one in. If the application didn't configure # a groupfinder we will allow writing this annotation without any # further checks. 
group = group_service.find(data["groupid"]) if group is None or not request.has_permission("write", context=group): raise schemas.ValidationError( "group: " + _("You may not create annotations " "in the specified group!") ) _validate_group_scope(group, data["target_uri"]) annotation = models.Annotation(**data) annotation.created = created annotation.updated = updated document = update_document_metadata( request.db, annotation.target_uri, document_meta_dicts, document_uri_dicts, created=created, updated=updated, ) annotation.document = document request.db.add(annotation) request.db.flush() return annotation def update_annotation(request, id_, data, group_service): """ Update an existing annotation and its associated document metadata. Update the annotation identified by ``id_`` with the given data. Create, delete and update document metadata as appropriate. :param request: the request object :param id_: the ID of the annotation to be updated, this is assumed to be a validated ID of an annotation that does already exist in the database :type id_: string :param data: the validated data with which to update the annotation :type data: dict :type group_service: :py:class:`h.interfaces.IGroupService` :returns: the updated annotation :rtype: h.models.Annotation """ updated = datetime.utcnow() # Remove any 'document' field first so that we don't try to save it on the # annotation object. 
document = data.pop("document", None) annotation = request.db.query(models.Annotation).get(id_) annotation.updated = updated group = group_service.find(annotation.groupid) if group is None: raise schemas.ValidationError( "group: " + _("Invalid group specified for annotation") ) if data.get("target_uri", None): _validate_group_scope(group, data["target_uri"]) annotation.extra.update(data.pop("extra", {})) for key, value in data.items(): setattr(annotation, key, value) if document: document_uri_dicts = document["document_uri_dicts"] document_meta_dicts = document["document_meta_dicts"] document = update_document_metadata( request.db, annotation.target_uri, document_meta_dicts, document_uri_dicts, updated=updated, ) annotation.document = document return annotation def expand_uri(session, uri): """ Return all URIs which refer to the same underlying document as `uri`. This function determines whether we already have "document" records for the passed URI, and if so returns the set of all URIs which we currently believe refer to the same document. :param session: the database session :type session: sqlalchemy.orm.session.Session :param uri: a URI associated with the document :type uri: str :returns: a list of equivalent URIs :rtype: list """ doc = models.Document.find_by_uris(session, [uri]).one_or_none() if doc is None: return [uri] # We check if the match was a "canonical" link. If so, all annotations # created on that page are guaranteed to have that as their target.source # field, so we don't need to expand to other URIs and risk false positives. 
docuris = doc.document_uris for docuri in docuris: if docuri.uri == uri and docuri.type == "rel-canonical": return [uri] return [docuri.uri for docuri in docuris] def _validate_group_scope(group, target_uri): # If no scopes are present, or if the group is configured to allow # annotations outside of its scope, there's nothing to do here if not group.scopes or group.enforce_scope is False: return # The target URI must match at least one # of a group's defined scopes, if the group has any group_scopes = [scope.scope for scope in group.scopes] if not url_in_scope(target_uri, group_scopes): raise schemas.ValidationError( "group scope: " + _("Annotations for this target URI " "are not allowed in this group") )
en
0.837322
# -*- coding: utf-8 -*- Annotation storage API. This module provides the core API with access to basic persistence functions for storing and retrieving annotations. Data passed to these functions is assumed to be validated. # FIXME: This module was originally written to be a single point of # indirection through which the storage backend could be swapped out on # the fly. This helped us to migrate from Elasticsearch-based # persistence to PostgreSQL persistence. # # The purpose of this module is now primarily to serve as a place to # wrap up the business logic of creating and retrieving annotations. As # such, it probably makes more sense for this to be split up into a # couple of different services at some point. Fetch the annotation with the given id. :param session: the database session :type session: sqlalchemy.orm.session.Session :param id_: the annotation ID :type id_: str :returns: the annotation, if found, or None. :rtype: h.models.Annotation, NoneType Fetch all annotations with the given ids and order them based on the list of ids. The optional `query_processor` parameter allows for passing in a function that can change the query before it is run, especially useful for eager-loading certain data. The function will get the query as an argument and has to return a query object again. :param session: the database session :type session: sqlalchemy.orm.session.Session :param ids: the list of annotation ids :type ids: list :param query_processor: an optional function that takes the query and returns an updated query :type query_processor: callable :returns: the annotation, if found, or None. :rtype: h.models.Annotation, NoneType Create an annotation from already-validated data. 
:param request: the request object :type request: pyramid.request.Request :param data: an annotation data dict that has already been validated by :py:class:`h.schemas.annotation.CreateAnnotationSchema` :type data: dict :param group_service: a service object that implements :py:class:`h.interfaces.IGroupService` :type group_service: :py:class:`h.interfaces.IGroupService` :returns: the created and flushed annotation :rtype: :py:class:`h.models.Annotation` # Replies must have the same group as their parent. # The user must have permission to create an annotation in the group # they've asked to create one in. If the application didn't configure # a groupfinder we will allow writing this annotation without any # further checks. Update an existing annotation and its associated document metadata. Update the annotation identified by ``id_`` with the given data. Create, delete and update document metadata as appropriate. :param request: the request object :param id_: the ID of the annotation to be updated, this is assumed to be a validated ID of an annotation that does already exist in the database :type id_: string :param data: the validated data with which to update the annotation :type data: dict :type group_service: :py:class:`h.interfaces.IGroupService` :returns: the updated annotation :rtype: h.models.Annotation # Remove any 'document' field first so that we don't try to save it on the # annotation object. Return all URIs which refer to the same underlying document as `uri`. This function determines whether we already have "document" records for the passed URI, and if so returns the set of all URIs which we currently believe refer to the same document. :param session: the database session :type session: sqlalchemy.orm.session.Session :param uri: a URI associated with the document :type uri: str :returns: a list of equivalent URIs :rtype: list # We check if the match was a "canonical" link. 
If so, all annotations # created on that page are guaranteed to have that as their target.source # field, so we don't need to expand to other URIs and risk false positives. # If no scopes are present, or if the group is configured to allow # annotations outside of its scope, there's nothing to do here # The target URI must match at least one # of a group's defined scopes, if the group has any
2.604407
3
elasticapm/traces.py
HonzaKral/apm-agent-python
0
6625335
<reponame>HonzaKral/apm-agent-python<gh_stars>0 # BSD 3-Clause License # # Copyright (c) 2019, Elasticsearch BV # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import functools import random import re import threading import time import timeit import warnings from collections import defaultdict from elasticapm.conf import constants from elasticapm.conf.constants import LABEL_RE, SPAN, TRANSACTION from elasticapm.context import init_execution_context from elasticapm.metrics.base_metrics import Timer from elasticapm.utils import compat, encoding, get_name_from_func from elasticapm.utils.deprecation import deprecated from elasticapm.utils.disttracing import TraceParent, TracingOptions from elasticapm.utils.logging import get_logger __all__ = ("capture_span", "tag", "label", "set_transaction_name", "set_custom_context", "set_user_context") error_logger = get_logger("elasticapm.errors") logger = get_logger("elasticapm.traces") _time_func = timeit.default_timer execution_context = init_execution_context() class ChildDuration(object): __slots__ = ("obj", "_nesting_level", "_start", "_duration", "_lock") def __init__(self, obj): self.obj = obj self._nesting_level = 0 self._start = None self._duration = 0 self._lock = threading.Lock() def start(self, timestamp): with self._lock: self._nesting_level += 1 if self._nesting_level == 1: self._start = timestamp def stop(self, timestamp): with self._lock: self._nesting_level -= 1 if self._nesting_level == 0: self._duration += timestamp - self._start @property def duration(self): return self._duration class BaseSpan(object): def __init__(self, labels=None): self._child_durations = ChildDuration(self) self.labels = {} self.outcome = None if labels: self.label(**labels) def child_started(self, timestamp): self._child_durations.start(timestamp) def child_ended(self, timestamp): self._child_durations.stop(timestamp) def end(self, skip_frames=0, duration=None): raise NotImplementedError() def label(self, **labels): """ Label this span with one or multiple key/value labels. 
Keys should be strings, values can be strings, booleans, or numerical values (int, float, Decimal) span_obj.label(key1="value1", key2=True, key3=42) Note that keys will be dedotted, replacing dot (.), star (*) and double quote (") with an underscore (_) :param labels: key/value pairs of labels :return: None """ labels = encoding.enforce_label_format(labels) self.labels.update(labels) @deprecated("transaction/span.label()") def tag(self, **tags): """ This method is deprecated, please use "label()" instead. Tag this span with one or multiple key/value tags. Both the values should be strings span_obj.tag(key1="value1", key2="value2") Note that keys will be dedotted, replacing dot (.), star (*) and double quote (") with an underscore (_) :param tags: key/value pairs of tags :return: None """ for key in tags.keys(): self.labels[LABEL_RE.sub("_", compat.text_type(key))] = encoding.keyword_field(compat.text_type(tags[key])) def set_success(self): self.outcome = "success" def set_failure(self): self.outcome = "failure" class Transaction(BaseSpan): def __init__( self, tracer, transaction_type="custom", trace_parent=None, is_sampled=True, start=None, sample_rate=None ): self.id = "%016x" % random.getrandbits(64) self.trace_parent = trace_parent if start: self.timestamp = self.start_time = start else: self.timestamp, self.start_time = time.time(), _time_func() self.name = None self.duration = None self.result = None self.transaction_type = transaction_type self.tracer = tracer self.dropped_spans = 0 self.context = {} self._is_sampled = is_sampled self.sample_rate = sample_rate self._span_counter = 0 self._span_timers = defaultdict(Timer) self._span_timers_lock = threading.Lock() try: self._breakdown = self.tracer._agent._metrics.get_metricset( "elasticapm.metrics.sets.breakdown.BreakdownMetricSet" ) except (LookupError, AttributeError): self._breakdown = None try: self._transaction_metrics = self.tracer._agent._metrics.get_metricset( 
"elasticapm.metrics.sets.transactions.TransactionsMetricSet" ) except (LookupError, AttributeError): self._transaction_metrics = None super(Transaction, self).__init__() def end(self, skip_frames=0, duration=None): self.duration = duration if duration is not None else (_time_func() - self.start_time) if self._transaction_metrics: self._transaction_metrics.timer( "transaction.duration", reset_on_collect=True, **{"transaction.name": self.name, "transaction.type": self.transaction_type} ).update(self.duration) if self._breakdown: for (span_type, span_subtype), timer in compat.iteritems(self._span_timers): labels = { "span.type": span_type, "transaction.name": self.name, "transaction.type": self.transaction_type, } if span_subtype: labels["span.subtype"] = span_subtype self._breakdown.timer("span.self_time", reset_on_collect=True, **labels).update(*timer.val) labels = {"transaction.name": self.name, "transaction.type": self.transaction_type} if self.is_sampled: self._breakdown.counter("transaction.breakdown.count", reset_on_collect=True, **labels).inc() self._breakdown.timer( "span.self_time", reset_on_collect=True, **{"span.type": "app", "transaction.name": self.name, "transaction.type": self.transaction_type} ).update(self.duration - self._child_durations.duration) def _begin_span( self, name, span_type, context=None, leaf=False, labels=None, parent_span_id=None, span_subtype=None, span_action=None, sync=None, start=None, ): parent_span = execution_context.get_span() tracer = self.tracer if parent_span and parent_span.leaf: span = DroppedSpan(parent_span, leaf=True) elif tracer.config.transaction_max_spans and self._span_counter > tracer.config.transaction_max_spans - 1: self.dropped_spans += 1 span = DroppedSpan(parent_span) self._span_counter += 1 else: span = Span( transaction=self, name=name, span_type=span_type or "code.custom", context=context, leaf=leaf, labels=labels, parent=parent_span, parent_span_id=parent_span_id, span_subtype=span_subtype, 
span_action=span_action, sync=sync, start=start, ) span.frames = tracer.frames_collector_func() self._span_counter += 1 execution_context.set_span(span) return span def begin_span( self, name, span_type, context=None, leaf=False, labels=None, span_subtype=None, span_action=None, sync=None, start=None, ): """ Begin a new span :param name: name of the span :param span_type: type of the span :param context: a context dict :param leaf: True if this is a leaf span :param labels: a flat string/string dict of labels :param span_subtype: sub type of the span, e.g. "postgresql" :param span_action: action of the span , e.g. "query" :param sync: indicate if the span is synchronous or not. In most cases, `None` should be used :param start: timestamp, mostly useful for testing :return: the Span object """ return self._begin_span( name, span_type, context=context, leaf=leaf, labels=labels, parent_span_id=None, span_subtype=span_subtype, span_action=span_action, sync=sync, start=start, ) def end_span(self, skip_frames=0, duration=None, outcome="unknown"): """ End the currently active span :param skip_frames: numbers of frames to skip in the stack trace :param duration: override duration, mostly useful for testing :param outcome: outcome of the span, either success, failure or unknown :return: the ended span """ span = execution_context.get_span() if span is None: raise LookupError() # only overwrite span outcome if it is still unknown if not span.outcome or span.outcome == "unknown": span.outcome = outcome span.end(skip_frames=skip_frames, duration=duration) return span def ensure_parent_id(self): """If current trace_parent has no span_id, generate one, then return it This is used to generate a span ID which the RUM agent will use to correlate the RUM transaction with the backend transaction. 
""" if self.trace_parent.span_id == self.id: self.trace_parent.span_id = "%016x" % random.getrandbits(64) logger.debug("Set parent id to generated %s", self.trace_parent.span_id) return self.trace_parent.span_id def to_dict(self): self.context["tags"] = self.labels result = { "id": self.id, "trace_id": self.trace_parent.trace_id, "name": encoding.keyword_field(self.name or ""), "type": encoding.keyword_field(self.transaction_type), "duration": self.duration * 1000, # milliseconds "result": encoding.keyword_field(str(self.result)), "timestamp": int(self.timestamp * 1000000), # microseconds "outcome": self.outcome, "sampled": self.is_sampled, "span_count": {"started": self._span_counter - self.dropped_spans, "dropped": self.dropped_spans}, } if self.sample_rate is not None: result["sample_rate"] = float(self.sample_rate) if self.trace_parent: result["trace_id"] = self.trace_parent.trace_id # only set parent_id if this transaction isn't the root if self.trace_parent.span_id and self.trace_parent.span_id != self.id: result["parent_id"] = self.trace_parent.span_id if self.is_sampled: result["context"] = self.context return result def track_span_duration(self, span_type, span_subtype, self_duration): # TODO: once asynchronous spans are supported, we should check if the transaction is already finished # TODO: and, if it has, exit without tracking. with self._span_timers_lock: self._span_timers[(span_type, span_subtype)].update(self_duration) @property def is_sampled(self): return self._is_sampled @is_sampled.setter def is_sampled(self, is_sampled): """ This should never be called in normal operation, but often is used for testing. We just want to make sure our sample_rate comes out correctly in tracestate if we set is_sampled to False. 
""" self._is_sampled = is_sampled if not is_sampled: if self.sample_rate: self.sample_rate = "0" self.trace_parent.add_tracestate(constants.TRACESTATE.SAMPLE_RATE, self.sample_rate) class Span(BaseSpan): __slots__ = ( "id", "transaction", "name", "type", "subtype", "action", "context", "leaf", "timestamp", "start_time", "duration", "parent", "parent_span_id", "frames", "labels", "sync", "outcome", "_child_durations", ) def __init__( self, transaction, name, span_type, context=None, leaf=False, labels=None, parent=None, parent_span_id=None, span_subtype=None, span_action=None, sync=None, start=None, ): """ Create a new Span :param transaction: transaction object that this span relates to :param name: Generic name of the span :param span_type: type of the span, e.g. db :param context: context dictionary :param leaf: is this span a leaf span? :param labels: a dict of labels :param parent_span_id: override of the span ID :param span_subtype: sub type of the span, e.g. mysql :param span_action: sub type of the span, e.g. query :param sync: indicate if the span was executed synchronously or asynchronously :param start: timestamp, mostly useful for testing """ self.start_time = start or _time_func() self.id = "%016x" % random.getrandbits(64) self.transaction = transaction self.name = name self.context = context if context is not None else {} self.leaf = leaf # timestamp is bit of a mix of monotonic and non-monotonic time sources. # we take the (non-monotonic) transaction timestamp, and add the (monotonic) difference of span # start time and transaction start time. In this respect, the span timestamp is guaranteed to grow # monotonically with respect to the transaction timestamp self.timestamp = transaction.timestamp + (self.start_time - transaction.start_time) self.duration = None self.parent = parent self.parent_span_id = parent_span_id self.frames = None self.sync = sync if span_subtype is None and "." 
in span_type: # old style dottet type, let's split it up type_bits = span_type.split(".") if len(type_bits) == 2: span_type, span_subtype = type_bits[:2] else: span_type, span_subtype, span_action = type_bits[:3] self.type = span_type self.subtype = span_subtype self.action = span_action if self.transaction._breakdown: p = self.parent if self.parent else self.transaction p.child_started(self.start_time) super(Span, self).__init__(labels=labels) def to_dict(self): result = { "id": self.id, "transaction_id": self.transaction.id, "trace_id": self.transaction.trace_parent.trace_id, # use either the explicitly set parent_span_id, or the id of the parent, or finally the transaction id "parent_id": self.parent_span_id or (self.parent.id if self.parent else self.transaction.id), "name": encoding.keyword_field(self.name), "type": encoding.keyword_field(self.type), "subtype": encoding.keyword_field(self.subtype), "action": encoding.keyword_field(self.action), "timestamp": int(self.timestamp * 1000000), # microseconds "duration": self.duration * 1000, # milliseconds "outcome": self.outcome, } if self.transaction.sample_rate is not None: result["sample_rate"] = float(self.transaction.sample_rate) if self.sync is not None: result["sync"] = self.sync if self.labels: if self.context is None: self.context = {} self.context["tags"] = self.labels if self.context: result["context"] = self.context if self.frames: result["stacktrace"] = self.frames return result def end(self, skip_frames=0, duration=None): """ End this span and queue it for sending. 
:param skip_frames: amount of frames to skip from the beginning of the stack trace :param duration: override duration, mostly useful for testing :return: None """ tracer = self.transaction.tracer timestamp = _time_func() self.duration = duration if duration is not None else (timestamp - self.start_time) if not tracer.span_frames_min_duration or self.duration >= tracer.span_frames_min_duration: self.frames = tracer.frames_processing_func(self.frames)[skip_frames:] else: self.frames = None execution_context.set_span(self.parent) tracer.queue_func(SPAN, self.to_dict()) if self.transaction._breakdown: p = self.parent if self.parent else self.transaction p.child_ended(self.start_time + self.duration) self.transaction.track_span_duration( self.type, self.subtype, self.duration - self._child_durations.duration ) def update_context(self, key, data): """ Update the context data for given key :param key: the key, e.g. "db" :param data: a dictionary :return: None """ current = self.context.get(key, {}) current.update(data) self.context[key] = current def __str__(self): return u"{}/{}/{}".format(self.name, self.type, self.subtype) class DroppedSpan(BaseSpan): __slots__ = ("leaf", "parent", "id") def __init__(self, parent, leaf=False): self.parent = parent self.leaf = leaf self.id = None super(DroppedSpan, self).__init__() def end(self, skip_frames=0, duration=None): execution_context.set_span(self.parent) def child_started(self, timestamp): pass def child_ended(self, timestamp): pass def update_context(self, key, data): pass @property def type(self): return None @property def subtype(self): return None @property def action(self): return None @property def context(self): return None @property def outcome(self): return "unknown" @outcome.setter def outcome(self, value): return class Tracer(object): def __init__(self, frames_collector_func, frames_processing_func, queue_func, config, agent): self.config = config self.queue_func = queue_func self.frames_processing_func = 
frames_processing_func self.frames_collector_func = frames_collector_func self._agent = agent self._ignore_patterns = [re.compile(p) for p in config.transactions_ignore_patterns or []] @property def span_frames_min_duration(self): if self.config.span_frames_min_duration in (-1, None): return None else: return self.config.span_frames_min_duration / 1000.0 def begin_transaction(self, transaction_type, trace_parent=None, start=None): """ Start a new transactions and bind it in a thread-local variable :param transaction_type: type of the transaction, e.g. "request" :param trace_parent: an optional TraceParent object :param start: override the start timestamp, mostly useful for testing :returns the Transaction object """ if trace_parent: is_sampled = bool(trace_parent.trace_options.recorded) sample_rate = trace_parent.tracestate_dict.get(constants.TRACESTATE.SAMPLE_RATE) else: is_sampled = ( self.config.transaction_sample_rate == 1.0 or self.config.transaction_sample_rate > random.random() ) if not is_sampled: sample_rate = "0" else: sample_rate = str(self.config.transaction_sample_rate) transaction = Transaction( self, transaction_type, trace_parent=trace_parent, is_sampled=is_sampled, start=start, sample_rate=sample_rate, ) if trace_parent is None: transaction.trace_parent = TraceParent( constants.TRACE_CONTEXT_VERSION, "%032x" % random.getrandbits(128), transaction.id, TracingOptions(recorded=is_sampled), ) transaction.trace_parent.add_tracestate(constants.TRACESTATE.SAMPLE_RATE, sample_rate) execution_context.set_transaction(transaction) return transaction def end_transaction(self, result=None, transaction_name=None, duration=None): """ End the current transaction and queue it for sending :param result: result of the transaction, e.g. 
"OK" or 200 :param transaction_name: name of the transaction :param duration: override duration, mostly useful for testing :return: """ transaction = execution_context.get_transaction(clear=True) if transaction: if transaction.name is None: transaction.name = transaction_name if transaction_name is not None else "" transaction.end(duration=duration) if self._should_ignore(transaction.name): return if transaction.result is None: transaction.result = result self.queue_func(TRANSACTION, transaction.to_dict()) return transaction def _should_ignore(self, transaction_name): for pattern in self._ignore_patterns: if pattern.search(transaction_name): return True return False class capture_span(object): __slots__ = ( "name", "type", "subtype", "action", "extra", "skip_frames", "leaf", "labels", "duration", "start", "sync", ) def __init__( self, name=None, span_type="code.custom", extra=None, skip_frames=0, leaf=False, tags=None, labels=None, span_subtype=None, span_action=None, start=None, duration=None, sync=None, ): self.name = name self.type = span_type self.subtype = span_subtype self.action = span_action self.extra = extra self.skip_frames = skip_frames self.leaf = leaf if tags and not labels: warnings.warn( 'The tags argument to capture_span is deprecated, use "labels" instead', category=DeprecationWarning, stacklevel=2, ) labels = tags self.labels = labels self.start = start self.duration = duration self.sync = sync def __call__(self, func): self.name = self.name or get_name_from_func(func) @functools.wraps(func) def decorated(*args, **kwds): with self: return func(*args, **kwds) return decorated def __enter__(self): transaction = execution_context.get_transaction() if transaction and transaction.is_sampled: return transaction.begin_span( self.name, self.type, context=self.extra, leaf=self.leaf, labels=self.labels, span_subtype=self.subtype, span_action=self.action, start=self.start, sync=self.sync, ) def __exit__(self, exc_type, exc_val, exc_tb): transaction = 
execution_context.get_transaction() if transaction and transaction.is_sampled: try: outcome = "failure" if exc_val else "success" span = transaction.end_span(self.skip_frames, duration=self.duration, outcome=outcome) if exc_val and not isinstance(span, DroppedSpan): try: exc_val._elastic_apm_span_id = span.id except AttributeError: # could happen if the exception has __slots__ pass except LookupError: logger.debug("ended non-existing span %s of type %s", self.name, self.type) def label(**labels): """ Labels current transaction. Keys should be strings, values can be strings, booleans, or numerical values (int, float, Decimal) :param labels: key/value map of labels """ transaction = execution_context.get_transaction() if not transaction: error_logger.warning("Ignored labels %s. No transaction currently active.", ", ".join(labels.keys())) else: transaction.label(**labels) @deprecated("elasticapm.label") def tag(**tags): """ Tags current transaction. Both key and value of the label should be strings. """ transaction = execution_context.get_transaction() if not transaction: error_logger.warning("Ignored tags %s. No transaction currently active.", ", ".join(tags.keys())) else: transaction.tag(**tags) def set_transaction_name(name, override=True): """ Sets the name of the transaction :param name: the name of the transaction :param override: if set to False, the name is only set if no name has been set before :return: None """ transaction = execution_context.get_transaction() if not transaction: return if transaction.name is None or override: transaction.name = name def set_transaction_result(result, override=True): """ Sets the result of the transaction. The result could be e.g. the HTTP status class (e.g "HTTP 5xx") for HTTP requests, or "success"/"fail" for background tasks. 
:param name: the name of the transaction :param override: if set to False, the name is only set if no name has been set before :return: None """ transaction = execution_context.get_transaction() if not transaction: return if transaction.result is None or override: transaction.result = result def set_transaction_outcome(outcome=None, http_status_code=None, override=True): """ Set the outcome of the transaction. This should only be done at the end of a transaction after the outcome is determined. If an invalid outcome is provided, an INFO level log message will be issued. :param outcome: the outcome of the transaction. Allowed values are "success", "failure", "unknown". None is allowed if a http_status_code is provided. :param http_status_code: An integer value of the HTTP status code. If provided, the outcome will be determined based on the status code: Success if the status is lower than 500, failure otherwise. If both a valid outcome and an http_status_code is provided, the former is used :param override: If set to False, the outcome will only be updated if its current value is None :return: None """ transaction = execution_context.get_transaction() if not transaction: return if http_status_code and outcome not in constants.OUTCOME: try: http_status_code = int(http_status_code) outcome = constants.OUTCOME.SUCCESS if http_status_code < 500 else constants.OUTCOME.FAILURE except ValueError: logger.info('Invalid HTTP status %r provided, outcome set to "unknown"', http_status_code) outcome = constants.OUTCOME.UNKNOWN elif outcome not in constants.OUTCOME: logger.info('Invalid outcome %r provided, outcome set to "unknown"', outcome) outcome = constants.OUTCOME.UNKNOWN if outcome and (transaction.outcome is None or override): transaction.outcome = outcome def get_transaction_id(): """ Returns the current transaction ID """ transaction = execution_context.get_transaction() if not transaction: return return transaction.id def get_trace_parent_header(): """ Return the trace 
parent header for the current transaction. """ transaction = execution_context.get_transaction() if not transaction or not transaction.trace_parent: return return transaction.trace_parent.to_string() def get_trace_id(): """ Returns the current trace ID """ transaction = execution_context.get_transaction() if not transaction: return return transaction.trace_parent.trace_id if transaction.trace_parent else None def get_span_id(): """ Returns the current span ID """ span = execution_context.get_span() if not span: return return span.id def set_context(data, key="custom"): """ Attach contextual data to the current transaction and errors that happen during the current transaction. If the transaction is not sampled, this function becomes a no-op. :param data: a dictionary, or a callable that returns a dictionary :param key: the namespace for this data """ transaction = execution_context.get_transaction() if not (transaction and transaction.is_sampled): return if callable(data): data = data() # remove invalid characters from key names for k in list(data.keys()): if LABEL_RE.search(k): data[LABEL_RE.sub("_", k)] = data.pop(k) if key in transaction.context: transaction.context[key].update(data) else: transaction.context[key] = data set_custom_context = functools.partial(set_context, key="custom") def set_user_context(username=None, email=None, user_id=None): data = {} if username is not None: data["username"] = encoding.keyword_field(username) if email is not None: data["email"] = encoding.keyword_field(email) if user_id is not None: data["id"] = encoding.keyword_field(user_id) set_context(data, "user")
# BSD 3-Clause License # # Copyright (c) 2019, Elasticsearch BV # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. 
import functools import random import re import threading import time import timeit import warnings from collections import defaultdict from elasticapm.conf import constants from elasticapm.conf.constants import LABEL_RE, SPAN, TRANSACTION from elasticapm.context import init_execution_context from elasticapm.metrics.base_metrics import Timer from elasticapm.utils import compat, encoding, get_name_from_func from elasticapm.utils.deprecation import deprecated from elasticapm.utils.disttracing import TraceParent, TracingOptions from elasticapm.utils.logging import get_logger __all__ = ("capture_span", "tag", "label", "set_transaction_name", "set_custom_context", "set_user_context") error_logger = get_logger("elasticapm.errors") logger = get_logger("elasticapm.traces") _time_func = timeit.default_timer execution_context = init_execution_context() class ChildDuration(object): __slots__ = ("obj", "_nesting_level", "_start", "_duration", "_lock") def __init__(self, obj): self.obj = obj self._nesting_level = 0 self._start = None self._duration = 0 self._lock = threading.Lock() def start(self, timestamp): with self._lock: self._nesting_level += 1 if self._nesting_level == 1: self._start = timestamp def stop(self, timestamp): with self._lock: self._nesting_level -= 1 if self._nesting_level == 0: self._duration += timestamp - self._start @property def duration(self): return self._duration class BaseSpan(object): def __init__(self, labels=None): self._child_durations = ChildDuration(self) self.labels = {} self.outcome = None if labels: self.label(**labels) def child_started(self, timestamp): self._child_durations.start(timestamp) def child_ended(self, timestamp): self._child_durations.stop(timestamp) def end(self, skip_frames=0, duration=None): raise NotImplementedError() def label(self, **labels): """ Label this span with one or multiple key/value labels. 
Keys should be strings, values can be strings, booleans, or numerical values (int, float, Decimal) span_obj.label(key1="value1", key2=True, key3=42) Note that keys will be dedotted, replacing dot (.), star (*) and double quote (") with an underscore (_) :param labels: key/value pairs of labels :return: None """ labels = encoding.enforce_label_format(labels) self.labels.update(labels) @deprecated("transaction/span.label()") def tag(self, **tags): """ This method is deprecated, please use "label()" instead. Tag this span with one or multiple key/value tags. Both the values should be strings span_obj.tag(key1="value1", key2="value2") Note that keys will be dedotted, replacing dot (.), star (*) and double quote (") with an underscore (_) :param tags: key/value pairs of tags :return: None """ for key in tags.keys(): self.labels[LABEL_RE.sub("_", compat.text_type(key))] = encoding.keyword_field(compat.text_type(tags[key])) def set_success(self): self.outcome = "success" def set_failure(self): self.outcome = "failure" class Transaction(BaseSpan): def __init__( self, tracer, transaction_type="custom", trace_parent=None, is_sampled=True, start=None, sample_rate=None ): self.id = "%016x" % random.getrandbits(64) self.trace_parent = trace_parent if start: self.timestamp = self.start_time = start else: self.timestamp, self.start_time = time.time(), _time_func() self.name = None self.duration = None self.result = None self.transaction_type = transaction_type self.tracer = tracer self.dropped_spans = 0 self.context = {} self._is_sampled = is_sampled self.sample_rate = sample_rate self._span_counter = 0 self._span_timers = defaultdict(Timer) self._span_timers_lock = threading.Lock() try: self._breakdown = self.tracer._agent._metrics.get_metricset( "elasticapm.metrics.sets.breakdown.BreakdownMetricSet" ) except (LookupError, AttributeError): self._breakdown = None try: self._transaction_metrics = self.tracer._agent._metrics.get_metricset( 
"elasticapm.metrics.sets.transactions.TransactionsMetricSet" ) except (LookupError, AttributeError): self._transaction_metrics = None super(Transaction, self).__init__() def end(self, skip_frames=0, duration=None): self.duration = duration if duration is not None else (_time_func() - self.start_time) if self._transaction_metrics: self._transaction_metrics.timer( "transaction.duration", reset_on_collect=True, **{"transaction.name": self.name, "transaction.type": self.transaction_type} ).update(self.duration) if self._breakdown: for (span_type, span_subtype), timer in compat.iteritems(self._span_timers): labels = { "span.type": span_type, "transaction.name": self.name, "transaction.type": self.transaction_type, } if span_subtype: labels["span.subtype"] = span_subtype self._breakdown.timer("span.self_time", reset_on_collect=True, **labels).update(*timer.val) labels = {"transaction.name": self.name, "transaction.type": self.transaction_type} if self.is_sampled: self._breakdown.counter("transaction.breakdown.count", reset_on_collect=True, **labels).inc() self._breakdown.timer( "span.self_time", reset_on_collect=True, **{"span.type": "app", "transaction.name": self.name, "transaction.type": self.transaction_type} ).update(self.duration - self._child_durations.duration) def _begin_span( self, name, span_type, context=None, leaf=False, labels=None, parent_span_id=None, span_subtype=None, span_action=None, sync=None, start=None, ): parent_span = execution_context.get_span() tracer = self.tracer if parent_span and parent_span.leaf: span = DroppedSpan(parent_span, leaf=True) elif tracer.config.transaction_max_spans and self._span_counter > tracer.config.transaction_max_spans - 1: self.dropped_spans += 1 span = DroppedSpan(parent_span) self._span_counter += 1 else: span = Span( transaction=self, name=name, span_type=span_type or "code.custom", context=context, leaf=leaf, labels=labels, parent=parent_span, parent_span_id=parent_span_id, span_subtype=span_subtype, 
span_action=span_action, sync=sync, start=start, ) span.frames = tracer.frames_collector_func() self._span_counter += 1 execution_context.set_span(span) return span def begin_span( self, name, span_type, context=None, leaf=False, labels=None, span_subtype=None, span_action=None, sync=None, start=None, ): """ Begin a new span :param name: name of the span :param span_type: type of the span :param context: a context dict :param leaf: True if this is a leaf span :param labels: a flat string/string dict of labels :param span_subtype: sub type of the span, e.g. "postgresql" :param span_action: action of the span , e.g. "query" :param sync: indicate if the span is synchronous or not. In most cases, `None` should be used :param start: timestamp, mostly useful for testing :return: the Span object """ return self._begin_span( name, span_type, context=context, leaf=leaf, labels=labels, parent_span_id=None, span_subtype=span_subtype, span_action=span_action, sync=sync, start=start, ) def end_span(self, skip_frames=0, duration=None, outcome="unknown"): """ End the currently active span :param skip_frames: numbers of frames to skip in the stack trace :param duration: override duration, mostly useful for testing :param outcome: outcome of the span, either success, failure or unknown :return: the ended span """ span = execution_context.get_span() if span is None: raise LookupError() # only overwrite span outcome if it is still unknown if not span.outcome or span.outcome == "unknown": span.outcome = outcome span.end(skip_frames=skip_frames, duration=duration) return span def ensure_parent_id(self): """If current trace_parent has no span_id, generate one, then return it This is used to generate a span ID which the RUM agent will use to correlate the RUM transaction with the backend transaction. 
""" if self.trace_parent.span_id == self.id: self.trace_parent.span_id = "%016x" % random.getrandbits(64) logger.debug("Set parent id to generated %s", self.trace_parent.span_id) return self.trace_parent.span_id def to_dict(self): self.context["tags"] = self.labels result = { "id": self.id, "trace_id": self.trace_parent.trace_id, "name": encoding.keyword_field(self.name or ""), "type": encoding.keyword_field(self.transaction_type), "duration": self.duration * 1000, # milliseconds "result": encoding.keyword_field(str(self.result)), "timestamp": int(self.timestamp * 1000000), # microseconds "outcome": self.outcome, "sampled": self.is_sampled, "span_count": {"started": self._span_counter - self.dropped_spans, "dropped": self.dropped_spans}, } if self.sample_rate is not None: result["sample_rate"] = float(self.sample_rate) if self.trace_parent: result["trace_id"] = self.trace_parent.trace_id # only set parent_id if this transaction isn't the root if self.trace_parent.span_id and self.trace_parent.span_id != self.id: result["parent_id"] = self.trace_parent.span_id if self.is_sampled: result["context"] = self.context return result def track_span_duration(self, span_type, span_subtype, self_duration): # TODO: once asynchronous spans are supported, we should check if the transaction is already finished # TODO: and, if it has, exit without tracking. with self._span_timers_lock: self._span_timers[(span_type, span_subtype)].update(self_duration) @property def is_sampled(self): return self._is_sampled @is_sampled.setter def is_sampled(self, is_sampled): """ This should never be called in normal operation, but often is used for testing. We just want to make sure our sample_rate comes out correctly in tracestate if we set is_sampled to False. 
""" self._is_sampled = is_sampled if not is_sampled: if self.sample_rate: self.sample_rate = "0" self.trace_parent.add_tracestate(constants.TRACESTATE.SAMPLE_RATE, self.sample_rate) class Span(BaseSpan): __slots__ = ( "id", "transaction", "name", "type", "subtype", "action", "context", "leaf", "timestamp", "start_time", "duration", "parent", "parent_span_id", "frames", "labels", "sync", "outcome", "_child_durations", ) def __init__( self, transaction, name, span_type, context=None, leaf=False, labels=None, parent=None, parent_span_id=None, span_subtype=None, span_action=None, sync=None, start=None, ): """ Create a new Span :param transaction: transaction object that this span relates to :param name: Generic name of the span :param span_type: type of the span, e.g. db :param context: context dictionary :param leaf: is this span a leaf span? :param labels: a dict of labels :param parent_span_id: override of the span ID :param span_subtype: sub type of the span, e.g. mysql :param span_action: sub type of the span, e.g. query :param sync: indicate if the span was executed synchronously or asynchronously :param start: timestamp, mostly useful for testing """ self.start_time = start or _time_func() self.id = "%016x" % random.getrandbits(64) self.transaction = transaction self.name = name self.context = context if context is not None else {} self.leaf = leaf # timestamp is bit of a mix of monotonic and non-monotonic time sources. # we take the (non-monotonic) transaction timestamp, and add the (monotonic) difference of span # start time and transaction start time. In this respect, the span timestamp is guaranteed to grow # monotonically with respect to the transaction timestamp self.timestamp = transaction.timestamp + (self.start_time - transaction.start_time) self.duration = None self.parent = parent self.parent_span_id = parent_span_id self.frames = None self.sync = sync if span_subtype is None and "." 
in span_type: # old style dottet type, let's split it up type_bits = span_type.split(".") if len(type_bits) == 2: span_type, span_subtype = type_bits[:2] else: span_type, span_subtype, span_action = type_bits[:3] self.type = span_type self.subtype = span_subtype self.action = span_action if self.transaction._breakdown: p = self.parent if self.parent else self.transaction p.child_started(self.start_time) super(Span, self).__init__(labels=labels) def to_dict(self): result = { "id": self.id, "transaction_id": self.transaction.id, "trace_id": self.transaction.trace_parent.trace_id, # use either the explicitly set parent_span_id, or the id of the parent, or finally the transaction id "parent_id": self.parent_span_id or (self.parent.id if self.parent else self.transaction.id), "name": encoding.keyword_field(self.name), "type": encoding.keyword_field(self.type), "subtype": encoding.keyword_field(self.subtype), "action": encoding.keyword_field(self.action), "timestamp": int(self.timestamp * 1000000), # microseconds "duration": self.duration * 1000, # milliseconds "outcome": self.outcome, } if self.transaction.sample_rate is not None: result["sample_rate"] = float(self.transaction.sample_rate) if self.sync is not None: result["sync"] = self.sync if self.labels: if self.context is None: self.context = {} self.context["tags"] = self.labels if self.context: result["context"] = self.context if self.frames: result["stacktrace"] = self.frames return result def end(self, skip_frames=0, duration=None): """ End this span and queue it for sending. 
:param skip_frames: amount of frames to skip from the beginning of the stack trace :param duration: override duration, mostly useful for testing :return: None """ tracer = self.transaction.tracer timestamp = _time_func() self.duration = duration if duration is not None else (timestamp - self.start_time) if not tracer.span_frames_min_duration or self.duration >= tracer.span_frames_min_duration: self.frames = tracer.frames_processing_func(self.frames)[skip_frames:] else: self.frames = None execution_context.set_span(self.parent) tracer.queue_func(SPAN, self.to_dict()) if self.transaction._breakdown: p = self.parent if self.parent else self.transaction p.child_ended(self.start_time + self.duration) self.transaction.track_span_duration( self.type, self.subtype, self.duration - self._child_durations.duration ) def update_context(self, key, data): """ Update the context data for given key :param key: the key, e.g. "db" :param data: a dictionary :return: None """ current = self.context.get(key, {}) current.update(data) self.context[key] = current def __str__(self): return u"{}/{}/{}".format(self.name, self.type, self.subtype) class DroppedSpan(BaseSpan): __slots__ = ("leaf", "parent", "id") def __init__(self, parent, leaf=False): self.parent = parent self.leaf = leaf self.id = None super(DroppedSpan, self).__init__() def end(self, skip_frames=0, duration=None): execution_context.set_span(self.parent) def child_started(self, timestamp): pass def child_ended(self, timestamp): pass def update_context(self, key, data): pass @property def type(self): return None @property def subtype(self): return None @property def action(self): return None @property def context(self): return None @property def outcome(self): return "unknown" @outcome.setter def outcome(self, value): return class Tracer(object): def __init__(self, frames_collector_func, frames_processing_func, queue_func, config, agent): self.config = config self.queue_func = queue_func self.frames_processing_func = 
frames_processing_func self.frames_collector_func = frames_collector_func self._agent = agent self._ignore_patterns = [re.compile(p) for p in config.transactions_ignore_patterns or []] @property def span_frames_min_duration(self): if self.config.span_frames_min_duration in (-1, None): return None else: return self.config.span_frames_min_duration / 1000.0 def begin_transaction(self, transaction_type, trace_parent=None, start=None): """ Start a new transactions and bind it in a thread-local variable :param transaction_type: type of the transaction, e.g. "request" :param trace_parent: an optional TraceParent object :param start: override the start timestamp, mostly useful for testing :returns the Transaction object """ if trace_parent: is_sampled = bool(trace_parent.trace_options.recorded) sample_rate = trace_parent.tracestate_dict.get(constants.TRACESTATE.SAMPLE_RATE) else: is_sampled = ( self.config.transaction_sample_rate == 1.0 or self.config.transaction_sample_rate > random.random() ) if not is_sampled: sample_rate = "0" else: sample_rate = str(self.config.transaction_sample_rate) transaction = Transaction( self, transaction_type, trace_parent=trace_parent, is_sampled=is_sampled, start=start, sample_rate=sample_rate, ) if trace_parent is None: transaction.trace_parent = TraceParent( constants.TRACE_CONTEXT_VERSION, "%032x" % random.getrandbits(128), transaction.id, TracingOptions(recorded=is_sampled), ) transaction.trace_parent.add_tracestate(constants.TRACESTATE.SAMPLE_RATE, sample_rate) execution_context.set_transaction(transaction) return transaction def end_transaction(self, result=None, transaction_name=None, duration=None): """ End the current transaction and queue it for sending :param result: result of the transaction, e.g. 
"OK" or 200 :param transaction_name: name of the transaction :param duration: override duration, mostly useful for testing :return: """ transaction = execution_context.get_transaction(clear=True) if transaction: if transaction.name is None: transaction.name = transaction_name if transaction_name is not None else "" transaction.end(duration=duration) if self._should_ignore(transaction.name): return if transaction.result is None: transaction.result = result self.queue_func(TRANSACTION, transaction.to_dict()) return transaction def _should_ignore(self, transaction_name): for pattern in self._ignore_patterns: if pattern.search(transaction_name): return True return False class capture_span(object): __slots__ = ( "name", "type", "subtype", "action", "extra", "skip_frames", "leaf", "labels", "duration", "start", "sync", ) def __init__( self, name=None, span_type="code.custom", extra=None, skip_frames=0, leaf=False, tags=None, labels=None, span_subtype=None, span_action=None, start=None, duration=None, sync=None, ): self.name = name self.type = span_type self.subtype = span_subtype self.action = span_action self.extra = extra self.skip_frames = skip_frames self.leaf = leaf if tags and not labels: warnings.warn( 'The tags argument to capture_span is deprecated, use "labels" instead', category=DeprecationWarning, stacklevel=2, ) labels = tags self.labels = labels self.start = start self.duration = duration self.sync = sync def __call__(self, func): self.name = self.name or get_name_from_func(func) @functools.wraps(func) def decorated(*args, **kwds): with self: return func(*args, **kwds) return decorated def __enter__(self): transaction = execution_context.get_transaction() if transaction and transaction.is_sampled: return transaction.begin_span( self.name, self.type, context=self.extra, leaf=self.leaf, labels=self.labels, span_subtype=self.subtype, span_action=self.action, start=self.start, sync=self.sync, ) def __exit__(self, exc_type, exc_val, exc_tb): transaction = 
execution_context.get_transaction() if transaction and transaction.is_sampled: try: outcome = "failure" if exc_val else "success" span = transaction.end_span(self.skip_frames, duration=self.duration, outcome=outcome) if exc_val and not isinstance(span, DroppedSpan): try: exc_val._elastic_apm_span_id = span.id except AttributeError: # could happen if the exception has __slots__ pass except LookupError: logger.debug("ended non-existing span %s of type %s", self.name, self.type) def label(**labels): """ Labels current transaction. Keys should be strings, values can be strings, booleans, or numerical values (int, float, Decimal) :param labels: key/value map of labels """ transaction = execution_context.get_transaction() if not transaction: error_logger.warning("Ignored labels %s. No transaction currently active.", ", ".join(labels.keys())) else: transaction.label(**labels) @deprecated("elasticapm.label") def tag(**tags): """ Tags current transaction. Both key and value of the label should be strings. """ transaction = execution_context.get_transaction() if not transaction: error_logger.warning("Ignored tags %s. No transaction currently active.", ", ".join(tags.keys())) else: transaction.tag(**tags) def set_transaction_name(name, override=True): """ Sets the name of the transaction :param name: the name of the transaction :param override: if set to False, the name is only set if no name has been set before :return: None """ transaction = execution_context.get_transaction() if not transaction: return if transaction.name is None or override: transaction.name = name def set_transaction_result(result, override=True): """ Sets the result of the transaction. The result could be e.g. the HTTP status class (e.g "HTTP 5xx") for HTTP requests, or "success"/"fail" for background tasks. 
:param name: the name of the transaction :param override: if set to False, the name is only set if no name has been set before :return: None """ transaction = execution_context.get_transaction() if not transaction: return if transaction.result is None or override: transaction.result = result def set_transaction_outcome(outcome=None, http_status_code=None, override=True): """ Set the outcome of the transaction. This should only be done at the end of a transaction after the outcome is determined. If an invalid outcome is provided, an INFO level log message will be issued. :param outcome: the outcome of the transaction. Allowed values are "success", "failure", "unknown". None is allowed if a http_status_code is provided. :param http_status_code: An integer value of the HTTP status code. If provided, the outcome will be determined based on the status code: Success if the status is lower than 500, failure otherwise. If both a valid outcome and an http_status_code is provided, the former is used :param override: If set to False, the outcome will only be updated if its current value is None :return: None """ transaction = execution_context.get_transaction() if not transaction: return if http_status_code and outcome not in constants.OUTCOME: try: http_status_code = int(http_status_code) outcome = constants.OUTCOME.SUCCESS if http_status_code < 500 else constants.OUTCOME.FAILURE except ValueError: logger.info('Invalid HTTP status %r provided, outcome set to "unknown"', http_status_code) outcome = constants.OUTCOME.UNKNOWN elif outcome not in constants.OUTCOME: logger.info('Invalid outcome %r provided, outcome set to "unknown"', outcome) outcome = constants.OUTCOME.UNKNOWN if outcome and (transaction.outcome is None or override): transaction.outcome = outcome def get_transaction_id(): """ Returns the current transaction ID """ transaction = execution_context.get_transaction() if not transaction: return return transaction.id def get_trace_parent_header(): """ Return the trace 
parent header for the current transaction. """ transaction = execution_context.get_transaction() if not transaction or not transaction.trace_parent: return return transaction.trace_parent.to_string() def get_trace_id(): """ Returns the current trace ID """ transaction = execution_context.get_transaction() if not transaction: return return transaction.trace_parent.trace_id if transaction.trace_parent else None def get_span_id(): """ Returns the current span ID """ span = execution_context.get_span() if not span: return return span.id def set_context(data, key="custom"): """ Attach contextual data to the current transaction and errors that happen during the current transaction. If the transaction is not sampled, this function becomes a no-op. :param data: a dictionary, or a callable that returns a dictionary :param key: the namespace for this data """ transaction = execution_context.get_transaction() if not (transaction and transaction.is_sampled): return if callable(data): data = data() # remove invalid characters from key names for k in list(data.keys()): if LABEL_RE.search(k): data[LABEL_RE.sub("_", k)] = data.pop(k) if key in transaction.context: transaction.context[key].update(data) else: transaction.context[key] = data set_custom_context = functools.partial(set_context, key="custom") def set_user_context(username=None, email=None, user_id=None): data = {} if username is not None: data["username"] = encoding.keyword_field(username) if email is not None: data["email"] = encoding.keyword_field(email) if user_id is not None: data["id"] = encoding.keyword_field(user_id) set_context(data, "user")
en
0.756522
# BSD 3-Clause License # # Copyright (c) 2019, Elasticsearch BV # All rights reserved. # # Redistribution and use in source and binary forms, with or without # modification, are permitted provided that the following conditions are met: # # * Redistributions of source code must retain the above copyright notice, this # list of conditions and the following disclaimer. # # * Redistributions in binary form must reproduce the above copyright notice, # this list of conditions and the following disclaimer in the documentation # and/or other materials provided with the distribution. # # * Neither the name of the copyright holder nor the names of its # contributors may be used to endorse or promote products derived from # this software without specific prior written permission. # # THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" # AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE # IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE # DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE # FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL # DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR # SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER # CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, # OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE # OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE. Label this span with one or multiple key/value labels. Keys should be strings, values can be strings, booleans, or numerical values (int, float, Decimal) span_obj.label(key1="value1", key2=True, key3=42) Note that keys will be dedotted, replacing dot (.), star (*) and double quote (") with an underscore (_) :param labels: key/value pairs of labels :return: None This method is deprecated, please use "label()" instead. 
Tag this span with one or multiple key/value tags. Both the values should be strings span_obj.tag(key1="value1", key2="value2") Note that keys will be dedotted, replacing dot (.), star (*) and double quote (") with an underscore (_) :param tags: key/value pairs of tags :return: None Begin a new span :param name: name of the span :param span_type: type of the span :param context: a context dict :param leaf: True if this is a leaf span :param labels: a flat string/string dict of labels :param span_subtype: sub type of the span, e.g. "postgresql" :param span_action: action of the span , e.g. "query" :param sync: indicate if the span is synchronous or not. In most cases, `None` should be used :param start: timestamp, mostly useful for testing :return: the Span object End the currently active span :param skip_frames: numbers of frames to skip in the stack trace :param duration: override duration, mostly useful for testing :param outcome: outcome of the span, either success, failure or unknown :return: the ended span # only overwrite span outcome if it is still unknown If current trace_parent has no span_id, generate one, then return it This is used to generate a span ID which the RUM agent will use to correlate the RUM transaction with the backend transaction. # milliseconds # microseconds # only set parent_id if this transaction isn't the root # TODO: once asynchronous spans are supported, we should check if the transaction is already finished # TODO: and, if it has, exit without tracking. This should never be called in normal operation, but often is used for testing. We just want to make sure our sample_rate comes out correctly in tracestate if we set is_sampled to False. Create a new Span :param transaction: transaction object that this span relates to :param name: Generic name of the span :param span_type: type of the span, e.g. db :param context: context dictionary :param leaf: is this span a leaf span? 
:param labels: a dict of labels :param parent_span_id: override of the span ID :param span_subtype: sub type of the span, e.g. mysql :param span_action: sub type of the span, e.g. query :param sync: indicate if the span was executed synchronously or asynchronously :param start: timestamp, mostly useful for testing # timestamp is bit of a mix of monotonic and non-monotonic time sources. # we take the (non-monotonic) transaction timestamp, and add the (monotonic) difference of span # start time and transaction start time. In this respect, the span timestamp is guaranteed to grow # monotonically with respect to the transaction timestamp # old style dottet type, let's split it up # use either the explicitly set parent_span_id, or the id of the parent, or finally the transaction id # microseconds # milliseconds End this span and queue it for sending. :param skip_frames: amount of frames to skip from the beginning of the stack trace :param duration: override duration, mostly useful for testing :return: None Update the context data for given key :param key: the key, e.g. "db" :param data: a dictionary :return: None Start a new transactions and bind it in a thread-local variable :param transaction_type: type of the transaction, e.g. "request" :param trace_parent: an optional TraceParent object :param start: override the start timestamp, mostly useful for testing :returns the Transaction object End the current transaction and queue it for sending :param result: result of the transaction, e.g. "OK" or 200 :param transaction_name: name of the transaction :param duration: override duration, mostly useful for testing :return: # could happen if the exception has __slots__ Labels current transaction. Keys should be strings, values can be strings, booleans, or numerical values (int, float, Decimal) :param labels: key/value map of labels Tags current transaction. Both key and value of the label should be strings. 
Sets the name of the transaction :param name: the name of the transaction :param override: if set to False, the name is only set if no name has been set before :return: None Sets the result of the transaction. The result could be e.g. the HTTP status class (e.g "HTTP 5xx") for HTTP requests, or "success"/"fail" for background tasks. :param name: the name of the transaction :param override: if set to False, the name is only set if no name has been set before :return: None Set the outcome of the transaction. This should only be done at the end of a transaction after the outcome is determined. If an invalid outcome is provided, an INFO level log message will be issued. :param outcome: the outcome of the transaction. Allowed values are "success", "failure", "unknown". None is allowed if a http_status_code is provided. :param http_status_code: An integer value of the HTTP status code. If provided, the outcome will be determined based on the status code: Success if the status is lower than 500, failure otherwise. If both a valid outcome and an http_status_code is provided, the former is used :param override: If set to False, the outcome will only be updated if its current value is None :return: None Returns the current transaction ID Return the trace parent header for the current transaction. Returns the current trace ID Returns the current span ID Attach contextual data to the current transaction and errors that happen during the current transaction. If the transaction is not sampled, this function becomes a no-op. :param data: a dictionary, or a callable that returns a dictionary :param key: the namespace for this data # remove invalid characters from key names
0.958052
1
Leetcode/65. Valid Number/solution2.py
asanoviskhak/Outtalent
51
6625336
<gh_stars>10-100 import re P0 = re.compile('^[-+]?(\d+\.\d*|\d*\.\d+|\d+)$') P1 = re.compile('^[-+]?\d+\s*$') class Solution: def isNumber(self, s: str) -> bool: s = s.strip() if 'e' in s: a = s.split('e') if len(a) > 2: return False return P0.match(a[0]) and P1.match(a[1]) else: return P0.match(s)
import re

# Mantissa: optional sign, then "12.", ".5", "3.14", or "42".
P0 = re.compile(r'^[-+]?(\d+\.\d*|\d*\.\d+|\d+)$')
# Exponent: optional sign followed by at least one digit (integers only).
P1 = re.compile(r'^[-+]?\d+$')


class Solution:
    def isNumber(self, s: str) -> bool:
        """Return True if *s* (after stripping blanks) is a valid number.

        A valid number is a decimal mantissa optionally followed by an
        integer exponent introduced by 'e' or 'E'.
        """
        s = s.strip()
        # Normalize the exponent marker so 'E' is accepted as well as 'e'.
        s = s.replace('E', 'e')
        if 'e' in s:
            mantissa, _, exponent = s.partition('e')
            # A second 'e' would remain inside `exponent`; the integer
            # exponent pattern rejects it, so no explicit count check needed.
            return bool(P0.match(mantissa) and P1.match(exponent))
        # bool() so the annotated return type holds: re.match returns a
        # Match object or None, not a bool.
        return bool(P0.match(s))
none
1
3.383423
3
buildAssetBundle/buildAssetBundle.py
iGameDesign/iLittleTools
0
6625337
# -*- coding: UTF-8 -*- ''' Copyright(c) Funova FileName : buildAssetBundle.py Creator : pengpeng Date : 2014-12-25 11:11 Comment : ModifyHistory : ''' __version__ = '1.0.0.0' __author__ = 'pengpeng' import os, sys import string import re import argparse import md5, hashlib from ftplib import FTP ''' 1. 编码转换 2. 生成meta文件 ''' g_TestSuite = r"buildAssetBundle" g_RootPath = r"E:\workspace\gunsoul_mobile\game\project\game-xx" g_resourcePath = r"\Assets\resourcex" g_streamPath = r"\Assets\streamingassets" g_OutPath = r"E:\workspace\gunsoul_mobile\game\project\game-xx\Assets\streamingassets" g_TabFile = r"buildAssetBundle.tab" g_LogFile = r"logs\buildAssetBundle.log" g_metastring = '''fileFormatVersion: 2 guid: %s DefaultImporter: userData: ''' g_updatefilename = "update.ini" g_resourcemetatable = "resources.metatable" g_platform = "android" g_version = "0.1.%s" g_ResourceType = 1 g_smbAddress = r"\\gscdn.funova.com\update" def FindFiles(dir, out, filter): if not os.path.exists(dir): print "path not exists." 
return listdir = os.listdir(dir) for file in listdir: filename = os.path.join(dir, file) if os.path.isfile(filename): ext = os.path.splitext(filename)[1] # print ext if ext.lower() in filter or ext == '': # print filename out.append(filename) elif os.path.isdir(filename): if file == ".svn": continue out = FindFiles(filename, out, filter) return out def convert( inputfile, outputfile, in_enc = "gbk", out_enc="UTF8" ): try: #print "convert " + inputfile, content = open(inputfile).read() new_content = content.decode(in_enc).encode(out_enc) open(outputfile, 'w').write(new_content) except Exception,e: print "convert error:", inputfile print e def ftp_upload(filename): ftp = FTP() ftp.set_debuglevel(2)#打开调试级别2,显示详细信息;0为关闭调试信息 ftp.connect('192.168.0.1','21')#连接 ftp.login('admin','admin')#登录,如果匿名登录则用空串代替即可 #print ftp.getwelcome()#显示ftp服务器欢迎信息 #ftp.cwd('xxx/xxx/') #选择操作目录 bufsize = 1024#设置缓冲块大小 file_handler = open(filename,'rb')#以读模式在本地打开文件 ftp.storbinary('STOR %s' % os.path.basename(filename),file_handler,bufsize)#上传文件 ftp.set_debuglevel(0) file_handler.close() ftp.quit() print "ftp up OK" def smb_upload(filename, smb_address): path, name = os.path.split(filename) open(smb_address + "\\" + name, 'w').write(open(filename, 'r').read()) print "smb upload OK" #''' if __name__ == "__main__": import time print("begin at: %s" % (time.strftime('%Y-%m-%d -- %H:%M:%S',time.localtime(time.time())))) startTime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) parser = argparse.ArgumentParser() parser.add_argument('-d') # tab file dir parser.add_argument('-o') # output dir parser.add_argument('-p') # platform parser.add_argument('-t') # resource type parser.print_help() args = parser.parse_known_args()[0] if args.d: g_RootPath = args.d g_platform = args.p g_ResourceType = args.t print g_RootPath g_resourcePath = g_RootPath + g_resourcePath g_streamPath = g_RootPath + g_streamPath postfix1 = ".assetbundle" postfix2 = ".meta" # upfiles = [] tabfiles = 
open(g_TabFile).readlines() if len(tabfiles) > 0: for file in tabfiles: # print file file = file.replace("\n", "").replace("\r", "") if file == "": break # 生成bundle文件名 newfilename = g_streamPath + "\\" + file[len(g_resourcePath) + 1:].replace("\\", "_") + postfix1 # metefilename = newfilename + postfix2 # print newfilename # print metefilename # 生成bundle文件,其实就是转换编码 convert(file, newfilename) # # 生成bundle文件的md5码 # f = open(newfilename, 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # # md5 = "92ad09d9d394a844f8036165460bd27c" # # print md5 # # 生成meta文件 # open(metefilename, 'w').write(g_metastring % md5) upfiles.append(newfilename) pass # 生成resources.metatable # ios;tables_gold_seller_gold.tab|1|C07A2E2678C25930474B79B2ABA68C67|2575|519,520,521,522,523,524; # platform, filename, resource type, md5, file size, dependances metadatas = [] # files = [] # files = FindFiles(g_streamPath, files, [postfix1]) for file in upfiles: print file tfilename = os.path.split(file)[1][:-len(postfix1)] print tfilename f = open(file, 'rb') m = hashlib.md5() m.update(f.read()) f.close() md5 = m.hexdigest() fsize = os.path.getsize(file) metadata = "%s|%s|%s|%s|" % (tfilename, g_ResourceType, md5.upper(), fsize) # print metadata metadatas.append(metadata) outstring = g_platform + ";" + ";".join(metadatas) # print outstring open(g_streamPath + "\\" + g_resourcemetatable, "w").write(outstring) upfiles.append(g_streamPath + "\\" + g_resourcemetatable) # 获取子版本号 path = g_smbAddress + "\\" + g_platform + "\\" + g_updatefilename f = open(path, 'r') content = f.read() print content f.close() regx = re.compile(r"index=(\d+)") results = re.findall(regx, content) print results[0] # 修改子版本号 regx2 = re.compile(r"upVer=(.+)") subversion = (int(results[0]) + 1) g_version = g_version % subversion content = re.sub(regx2, r"upVer=%s" % g_version, content) content = re.sub(regx, r"index=%s" % subversion, content) print content f = open(path, 'w') f.write(content) f.close() # 上传ftp # files 
= [] # files = FindFiles(g_streamPath, files, [".assetbundle", ".metatable"]) for file in upfiles: # ftp_upload(file) path = g_smbAddress + "\\" + g_platform + "\\assets\\" + g_version if not os.path.exists(path): os.mkdir(path) pass smb_upload(file, g_smbAddress + "\\" + g_platform + "\\assets\\" + g_version) else: print "path is None." print("end at: %s" % (time.strftime('%Y-%m-%d -- %H:%M:%S', time.localtime(time.time())))) #''' # f = open(r"E:\workspace\gunsoul_mobile\game\project\game-xx\Assets\streamingassets\tables_weather_weather.tab.assetbundle", 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # # md5 = "92ad09d9d394a844f8036165460bd27c" # print md5 # metadatas = [] # files = [] # files = FindFiles(g_streamPath, files, [".assetbundle"]) # for file in files: # print file # tfilename = os.path.split(file)[1][:-len(".assetbundle")] # print tfilename # f = open(file, 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # fsize = os.path.getsize(file) # metadata = "%s|%s|%s|%s|" % (tfilename, g_ResourceType, md5, fsize) # print metadata # metadatas.append(metadata) # outstring = g_platform + ";" + ";".join(metadatas) # print outstring # open(g_streamPath + "\\" + g_resourcemetatable, "w").write(outstring) # # 获取子版本号 # path = g_smbAddress + "\\" + g_platform + "\\" + g_updatefilename # f = open(path, 'r') # content = f.read() # print content # f.close() # regx = re.compile(r"index=(\d+)") # results = re.findall(regx, content) # print results[0] # # 修改子版本号 # regx2 = re.compile(r"upVer=(.+)") # subversion = (int(results[0]) + 1) # g_version = g_version % subversion # content = re.sub(regx2, r"upVer=%s" % g_version, content) # content = re.sub(regx, r"index=%s" % subversion, content) # print content # f = open(path, 'w') # f.write(content) # f.close()
# -*- coding: UTF-8 -*- ''' Copyright(c) Funova FileName : buildAssetBundle.py Creator : pengpeng Date : 2014-12-25 11:11 Comment : ModifyHistory : ''' __version__ = '1.0.0.0' __author__ = 'pengpeng' import os, sys import string import re import argparse import md5, hashlib from ftplib import FTP ''' 1. 编码转换 2. 生成meta文件 ''' g_TestSuite = r"buildAssetBundle" g_RootPath = r"E:\workspace\gunsoul_mobile\game\project\game-xx" g_resourcePath = r"\Assets\resourcex" g_streamPath = r"\Assets\streamingassets" g_OutPath = r"E:\workspace\gunsoul_mobile\game\project\game-xx\Assets\streamingassets" g_TabFile = r"buildAssetBundle.tab" g_LogFile = r"logs\buildAssetBundle.log" g_metastring = '''fileFormatVersion: 2 guid: %s DefaultImporter: userData: ''' g_updatefilename = "update.ini" g_resourcemetatable = "resources.metatable" g_platform = "android" g_version = "0.1.%s" g_ResourceType = 1 g_smbAddress = r"\\gscdn.funova.com\update" def FindFiles(dir, out, filter): if not os.path.exists(dir): print "path not exists." 
return listdir = os.listdir(dir) for file in listdir: filename = os.path.join(dir, file) if os.path.isfile(filename): ext = os.path.splitext(filename)[1] # print ext if ext.lower() in filter or ext == '': # print filename out.append(filename) elif os.path.isdir(filename): if file == ".svn": continue out = FindFiles(filename, out, filter) return out def convert( inputfile, outputfile, in_enc = "gbk", out_enc="UTF8" ): try: #print "convert " + inputfile, content = open(inputfile).read() new_content = content.decode(in_enc).encode(out_enc) open(outputfile, 'w').write(new_content) except Exception,e: print "convert error:", inputfile print e def ftp_upload(filename): ftp = FTP() ftp.set_debuglevel(2)#打开调试级别2,显示详细信息;0为关闭调试信息 ftp.connect('192.168.0.1','21')#连接 ftp.login('admin','admin')#登录,如果匿名登录则用空串代替即可 #print ftp.getwelcome()#显示ftp服务器欢迎信息 #ftp.cwd('xxx/xxx/') #选择操作目录 bufsize = 1024#设置缓冲块大小 file_handler = open(filename,'rb')#以读模式在本地打开文件 ftp.storbinary('STOR %s' % os.path.basename(filename),file_handler,bufsize)#上传文件 ftp.set_debuglevel(0) file_handler.close() ftp.quit() print "ftp up OK" def smb_upload(filename, smb_address): path, name = os.path.split(filename) open(smb_address + "\\" + name, 'w').write(open(filename, 'r').read()) print "smb upload OK" #''' if __name__ == "__main__": import time print("begin at: %s" % (time.strftime('%Y-%m-%d -- %H:%M:%S',time.localtime(time.time())))) startTime = time.strftime('%Y-%m-%d %H:%M:%S',time.localtime(time.time())) parser = argparse.ArgumentParser() parser.add_argument('-d') # tab file dir parser.add_argument('-o') # output dir parser.add_argument('-p') # platform parser.add_argument('-t') # resource type parser.print_help() args = parser.parse_known_args()[0] if args.d: g_RootPath = args.d g_platform = args.p g_ResourceType = args.t print g_RootPath g_resourcePath = g_RootPath + g_resourcePath g_streamPath = g_RootPath + g_streamPath postfix1 = ".assetbundle" postfix2 = ".meta" # upfiles = [] tabfiles = 
open(g_TabFile).readlines() if len(tabfiles) > 0: for file in tabfiles: # print file file = file.replace("\n", "").replace("\r", "") if file == "": break # 生成bundle文件名 newfilename = g_streamPath + "\\" + file[len(g_resourcePath) + 1:].replace("\\", "_") + postfix1 # metefilename = newfilename + postfix2 # print newfilename # print metefilename # 生成bundle文件,其实就是转换编码 convert(file, newfilename) # # 生成bundle文件的md5码 # f = open(newfilename, 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # # md5 = "92ad09d9d394a844f8036165460bd27c" # # print md5 # # 生成meta文件 # open(metefilename, 'w').write(g_metastring % md5) upfiles.append(newfilename) pass # 生成resources.metatable # ios;tables_gold_seller_gold.tab|1|C07A2E2678C25930474B79B2ABA68C67|2575|519,520,521,522,523,524; # platform, filename, resource type, md5, file size, dependances metadatas = [] # files = [] # files = FindFiles(g_streamPath, files, [postfix1]) for file in upfiles: print file tfilename = os.path.split(file)[1][:-len(postfix1)] print tfilename f = open(file, 'rb') m = hashlib.md5() m.update(f.read()) f.close() md5 = m.hexdigest() fsize = os.path.getsize(file) metadata = "%s|%s|%s|%s|" % (tfilename, g_ResourceType, md5.upper(), fsize) # print metadata metadatas.append(metadata) outstring = g_platform + ";" + ";".join(metadatas) # print outstring open(g_streamPath + "\\" + g_resourcemetatable, "w").write(outstring) upfiles.append(g_streamPath + "\\" + g_resourcemetatable) # 获取子版本号 path = g_smbAddress + "\\" + g_platform + "\\" + g_updatefilename f = open(path, 'r') content = f.read() print content f.close() regx = re.compile(r"index=(\d+)") results = re.findall(regx, content) print results[0] # 修改子版本号 regx2 = re.compile(r"upVer=(.+)") subversion = (int(results[0]) + 1) g_version = g_version % subversion content = re.sub(regx2, r"upVer=%s" % g_version, content) content = re.sub(regx, r"index=%s" % subversion, content) print content f = open(path, 'w') f.write(content) f.close() # 上传ftp # files 
= [] # files = FindFiles(g_streamPath, files, [".assetbundle", ".metatable"]) for file in upfiles: # ftp_upload(file) path = g_smbAddress + "\\" + g_platform + "\\assets\\" + g_version if not os.path.exists(path): os.mkdir(path) pass smb_upload(file, g_smbAddress + "\\" + g_platform + "\\assets\\" + g_version) else: print "path is None." print("end at: %s" % (time.strftime('%Y-%m-%d -- %H:%M:%S', time.localtime(time.time())))) #''' # f = open(r"E:\workspace\gunsoul_mobile\game\project\game-xx\Assets\streamingassets\tables_weather_weather.tab.assetbundle", 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # # md5 = "92ad09d9d394a844f8036165460bd27c" # print md5 # metadatas = [] # files = [] # files = FindFiles(g_streamPath, files, [".assetbundle"]) # for file in files: # print file # tfilename = os.path.split(file)[1][:-len(".assetbundle")] # print tfilename # f = open(file, 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # fsize = os.path.getsize(file) # metadata = "%s|%s|%s|%s|" % (tfilename, g_ResourceType, md5, fsize) # print metadata # metadatas.append(metadata) # outstring = g_platform + ";" + ";".join(metadatas) # print outstring # open(g_streamPath + "\\" + g_resourcemetatable, "w").write(outstring) # # 获取子版本号 # path = g_smbAddress + "\\" + g_platform + "\\" + g_updatefilename # f = open(path, 'r') # content = f.read() # print content # f.close() # regx = re.compile(r"index=(\d+)") # results = re.findall(regx, content) # print results[0] # # 修改子版本号 # regx2 = re.compile(r"upVer=(.+)") # subversion = (int(results[0]) + 1) # g_version = g_version % subversion # content = re.sub(regx2, r"upVer=%s" % g_version, content) # content = re.sub(regx, r"index=%s" % subversion, content) # print content # f = open(path, 'w') # f.write(content) # f.close()
en
0.260837
# -*- coding: UTF-8 -*- Copyright(c) Funova FileName : buildAssetBundle.py Creator : pengpeng Date : 2014-12-25 11:11 Comment : ModifyHistory : 1. 编码转换 2. 生成meta文件 fileFormatVersion: 2 guid: %s DefaultImporter: userData: # print ext # print filename #print "convert " + inputfile, #打开调试级别2,显示详细信息;0为关闭调试信息 #连接 #登录,如果匿名登录则用空串代替即可 #print ftp.getwelcome()#显示ftp服务器欢迎信息 #ftp.cwd('xxx/xxx/') #选择操作目录 #设置缓冲块大小 #以读模式在本地打开文件 #上传文件 #''' # tab file dir # output dir # platform # resource type # # print file # 生成bundle文件名 # metefilename = newfilename + postfix2 # print newfilename # print metefilename # 生成bundle文件,其实就是转换编码 # # 生成bundle文件的md5码 # f = open(newfilename, 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # # md5 = "92ad09d9d394a844f8036165460bd27c" # # print md5 # # 生成meta文件 # open(metefilename, 'w').write(g_metastring % md5) # 生成resources.metatable # ios;tables_gold_seller_gold.tab|1|C07A2E2678C25930474B79B2ABA68C67|2575|519,520,521,522,523,524; # platform, filename, resource type, md5, file size, dependances # files = [] # files = FindFiles(g_streamPath, files, [postfix1]) # print metadata # print outstring # 获取子版本号 # 修改子版本号 # 上传ftp # files = [] # files = FindFiles(g_streamPath, files, [".assetbundle", ".metatable"]) # ftp_upload(file) #''' # f = open(r"E:\workspace\gunsoul_mobile\game\project\game-xx\Assets\streamingassets\tables_weather_weather.tab.assetbundle", 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # # md5 = "92ad09d9d394a844f8036165460bd27c" # print md5 # metadatas = [] # files = [] # files = FindFiles(g_streamPath, files, [".assetbundle"]) # for file in files: # print file # tfilename = os.path.split(file)[1][:-len(".assetbundle")] # print tfilename # f = open(file, 'rb') # m = hashlib.md5() # m.update(f.read()) # md5 = m.hexdigest() # fsize = os.path.getsize(file) # metadata = "%s|%s|%s|%s|" % (tfilename, g_ResourceType, md5, fsize) # print metadata # metadatas.append(metadata) # outstring = g_platform + ";" + 
";".join(metadatas) # print outstring # open(g_streamPath + "\\" + g_resourcemetatable, "w").write(outstring) # # 获取子版本号 # path = g_smbAddress + "\\" + g_platform + "\\" + g_updatefilename # f = open(path, 'r') # content = f.read() # print content # f.close() # regx = re.compile(r"index=(\d+)") # results = re.findall(regx, content) # print results[0] # # 修改子版本号 # regx2 = re.compile(r"upVer=(.+)") # subversion = (int(results[0]) + 1) # g_version = g_version % subversion # content = re.sub(regx2, r"upVer=%s" % g_version, content) # content = re.sub(regx, r"index=%s" % subversion, content) # print content # f = open(path, 'w') # f.write(content) # f.close()
2.109826
2
python-package/lightgbm/engine.py
rocknOdairi/LightGBM
0
6625338
<gh_stars>0 # coding: utf-8 """Library with training routines of LightGBM.""" import collections import copy from operator import attrgetter from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np from . import callback from .basic import Booster, Dataset, LightGBMError, _ArrayLike, _ConfigAliases, _InnerPredictor, _log_warning from .compat import SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold _LGBM_CustomObjectiveFunction = Callable[ [np.ndarray, Dataset], Tuple[_ArrayLike, _ArrayLike] ] _LGBM_CustomMetricFunction = Callable[ [np.ndarray, Dataset], Tuple[str, float, bool] ] def train( params: Dict[str, Any], train_set: Dataset, num_boost_round: int = 100, valid_sets: Optional[List[Dataset]] = None, valid_names: Optional[List[str]] = None, fobj: Optional[_LGBM_CustomObjectiveFunction] = None, feval: Optional[Union[_LGBM_CustomMetricFunction, List[_LGBM_CustomMetricFunction]]] = None, init_model: Optional[Union[str, Path, Booster]] = None, feature_name: Union[List[str], str] = 'auto', categorical_feature: Union[List[str], List[int], str] = 'auto', early_stopping_rounds: Optional[int] = None, evals_result: Optional[Dict[str, Any]] = None, keep_training_booster: bool = False, callbacks: Optional[List[Callable]] = None ) -> Booster: """Perform the training with given parameters. Parameters ---------- params : dict Parameters for training. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. valid_sets : list of Dataset, or None, optional (default=None) List of data to be evaluated on during training. valid_names : list of str, or None, optional (default=None) Names of ``valid_sets``. fobj : callable or None, optional (default=None) Customized objective function. Should accept two parameters: preds, train_data, and return (grad, hess). preds : numpy 1-D array The predicted values. 
Predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task. train_data : Dataset The training dataset. grad : list, numpy 1-D array or pandas Series The value of the first order derivative (gradient) of the loss with respect to the elements of preds for each sample point. hess : list, numpy 1-D array or pandas Series The value of the second order derivative (Hessian) of the loss with respect to the elements of preds for each sample point. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. feval : callable, list of callable, or None, optional (default=None) Customized evaluation function. Each evaluation function should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. train_data : Dataset The training dataset. eval_name : str The name of evaluation function (without whitespaces). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set the ``metric`` parameter to the string ``"None"`` in ``params``. init_model : str, pathlib.Path, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used for continue training. 
feature_name : list of str, or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data columns names are used. categorical_feature : list of str or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of str, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. The output cannot be monotonically constrained with respect to a categorical feature. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation data and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``. The index of iteration that has the best performance will be saved in the ``best_iteration`` field if early stopping logic is enabled by setting ``early_stopping_rounds``. evals_result : dict or None, optional (default=None) Dictionary used to store all evaluation results of all the items in ``valid_sets``. This should be initialized outside of your call to ``train()`` and should be empty. Any initial contents of the dictionary will be deleted. .. 
rubric:: Example With a ``valid_sets`` = [valid_set, train_set], ``valid_names`` = ['eval', 'train'] and a ``params`` = {'metric': 'logloss'} returns {'train': {'logloss': ['0.48253', '0.35953', ...]}, 'eval': {'logloss': ['0.480385', '0.357756', ...]}}. keep_training_booster : bool, optional (default=False) Whether the returned Booster will be used to keep training. If False, the returned value will be converted into _InnerPredictor before returning. This means you won't be able to use ``eval``, ``eval_train`` or ``eval_valid`` methods of the returned Booster. When your model is very large and cause the memory error, you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``. You can still use _InnerPredictor as ``init_model`` for future continue training. callbacks : list of callable, or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. Returns ------- booster : Booster The trained Booster model. """ # create predictor first params = copy.deepcopy(params) if fobj is not None: for obj_alias in _ConfigAliases.get("objective"): params.pop(obj_alias, None) params['objective'] = 'none' for alias in _ConfigAliases.get("num_iterations"): if alias in params: num_boost_round = params.pop(alias) _log_warning(f"Found `{alias}` in params. Will use it instead of argument") params["num_iterations"] = num_boost_round # show deprecation warning only for early stop argument, setting early stop via global params should still be possible if early_stopping_rounds is not None and early_stopping_rounds > 0: _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. 
" "Pass 'early_stopping()' callback via 'callbacks' argument instead.") for alias in _ConfigAliases.get("early_stopping_round"): if alias in params: early_stopping_rounds = params.pop(alias) params["early_stopping_round"] = early_stopping_rounds first_metric_only = params.get('first_metric_only', False) if num_boost_round <= 0: raise ValueError("num_boost_round should be greater than zero.") predictor: Optional[_InnerPredictor] = None if isinstance(init_model, (str, Path)): predictor = _InnerPredictor(model_file=init_model, pred_parameter=params) elif isinstance(init_model, Booster): predictor = init_model._to_predictor(dict(init_model.params, **params)) init_iteration = predictor.num_total_iteration if predictor is not None else 0 # check dataset if not isinstance(train_set, Dataset): raise TypeError("Training only accepts Dataset object") train_set._update_params(params) \ ._set_predictor(predictor) \ .set_feature_name(feature_name) \ .set_categorical_feature(categorical_feature) is_valid_contain_train = False train_data_name = "training" reduced_valid_sets = [] name_valid_sets = [] if valid_sets is not None: if isinstance(valid_sets, Dataset): valid_sets = [valid_sets] if isinstance(valid_names, str): valid_names = [valid_names] for i, valid_data in enumerate(valid_sets): # reduce cost for prediction training data if valid_data is train_set: is_valid_contain_train = True if valid_names is not None: train_data_name = valid_names[i] continue if not isinstance(valid_data, Dataset): raise TypeError("Training only accepts Dataset object") reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set)) if valid_names is not None and len(valid_names) > i: name_valid_sets.append(valid_names[i]) else: name_valid_sets.append(f'valid_{i}') # process callbacks if callbacks is None: callbacks_set = set() else: for i, cb in enumerate(callbacks): cb.__dict__.setdefault('order', i - len(callbacks)) callbacks_set = set(callbacks) # Most of legacy advanced 
options becomes callbacks if early_stopping_rounds is not None and early_stopping_rounds > 0: callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only)) if evals_result is not None: _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. " "Pass 'record_evaluation()' callback via 'callbacks' argument instead.") callbacks_set.add(callback.record_evaluation(evals_result)) callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, 'before_iteration', False)} callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter('order')) callbacks_after_iter = sorted(callbacks_after_iter_set, key=attrgetter('order')) # construct booster try: booster = Booster(params=params, train_set=train_set) if is_valid_contain_train: booster.set_train_data_name(train_data_name) for valid_set, name_valid_set in zip(reduced_valid_sets, name_valid_sets): booster.add_valid(valid_set, name_valid_set) finally: train_set._reverse_update_params() for valid_set in reduced_valid_sets: valid_set._reverse_update_params() booster.best_iteration = 0 # start training for i in range(init_iteration, init_iteration + num_boost_round): for cb in callbacks_before_iter: cb(callback.CallbackEnv(model=booster, params=params, iteration=i, begin_iteration=init_iteration, end_iteration=init_iteration + num_boost_round, evaluation_result_list=None)) booster.update(fobj=fobj) evaluation_result_list = [] # check evaluation result. 
if valid_sets is not None: if is_valid_contain_train: evaluation_result_list.extend(booster.eval_train(feval)) evaluation_result_list.extend(booster.eval_valid(feval)) try: for cb in callbacks_after_iter: cb(callback.CallbackEnv(model=booster, params=params, iteration=i, begin_iteration=init_iteration, end_iteration=init_iteration + num_boost_round, evaluation_result_list=evaluation_result_list)) except callback.EarlyStopException as earlyStopException: booster.best_iteration = earlyStopException.best_iteration + 1 evaluation_result_list = earlyStopException.best_score break booster.best_score = collections.defaultdict(collections.OrderedDict) for dataset_name, eval_name, score, _ in evaluation_result_list: booster.best_score[dataset_name][eval_name] = score if not keep_training_booster: booster.model_from_string(booster.model_to_string()).free_dataset() return booster class CVBooster: """CVBooster in LightGBM. Auxiliary data structure to hold and redirect all boosters of ``cv`` function. This class has the same methods as Booster class. All method calls are actually performed for underlying Boosters and then all returned results are returned in a list. Attributes ---------- boosters : list of Booster The list of underlying fitted models. best_iteration : int The best iteration of fitted model. """ def __init__(self): """Initialize the CVBooster. Generally, no need to instantiate manually. 
""" self.boosters = [] self.best_iteration = -1 def _append(self, booster): """Add a booster to CVBooster.""" self.boosters.append(booster) def __getattr__(self, name): """Redirect methods call of CVBooster.""" def handler_function(*args, **kwargs): """Call methods with each booster, and concatenate their results.""" ret = [] for booster in self.boosters: ret.append(getattr(booster, name)(*args, **kwargs)) return ret return handler_function def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True, eval_train_metric=False): """Make a n-fold list of Booster from random indices.""" full_data = full_data.construct() num_data = full_data.num_data() if folds is not None: if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'): raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples " "or scikit-learn splitter object with split method") if hasattr(folds, 'split'): group_info = full_data.get_group() if group_info is not None: group_info = np.array(group_info, dtype=np.int32, copy=False) flatted_group = np.repeat(range(len(group_info)), repeats=group_info) else: flatted_group = np.zeros(num_data, dtype=np.int32) folds = folds.split(X=np.empty(num_data), y=full_data.get_label(), groups=flatted_group) else: if any(params.get(obj_alias, "") in {"lambdarank", "rank_xendcg", "xendcg", "xe_ndcg", "xe_ndcg_mart", "xendcg_mart"} for obj_alias in _ConfigAliases.get("objective")): if not SKLEARN_INSTALLED: raise LightGBMError('scikit-learn is required for ranking cv') # ranking task, split according to groups group_info = np.array(full_data.get_group(), dtype=np.int32, copy=False) flatted_group = np.repeat(range(len(group_info)), repeats=group_info) group_kfold = _LGBMGroupKFold(n_splits=nfold) folds = group_kfold.split(X=np.empty(num_data), groups=flatted_group) elif stratified: if not SKLEARN_INSTALLED: raise LightGBMError('scikit-learn is required for stratified cv') skf = 
_LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed) folds = skf.split(X=np.empty(num_data), y=full_data.get_label()) else: if shuffle: randidx = np.random.RandomState(seed).permutation(num_data) else: randidx = np.arange(num_data) kstep = int(num_data / nfold) test_id = [randidx[i: i + kstep] for i in range(0, num_data, kstep)] train_id = [np.concatenate([test_id[i] for i in range(nfold) if k != i]) for k in range(nfold)] folds = zip(train_id, test_id) ret = CVBooster() for train_idx, test_idx in folds: train_set = full_data.subset(sorted(train_idx)) valid_set = full_data.subset(sorted(test_idx)) # run preprocessing on the data set if needed if fpreproc is not None: train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy()) else: tparam = params cvbooster = Booster(tparam, train_set) if eval_train_metric: cvbooster.add_valid(train_set, 'train') cvbooster.add_valid(valid_set, 'valid') ret._append(cvbooster) return ret def _agg_cv_result(raw_results, eval_train_metric=False): """Aggregate cross-validation results.""" cvmap = collections.OrderedDict() metric_type = {} for one_result in raw_results: for one_line in one_result: if eval_train_metric: key = f"{one_line[0]} {one_line[1]}" else: key = one_line[1] metric_type[key] = one_line[3] cvmap.setdefault(key, []) cvmap[key].append(one_line[2]) return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()] def cv(params, train_set, num_boost_round=100, folds=None, nfold=5, stratified=True, shuffle=True, metrics=None, fobj=None, feval=None, init_model=None, feature_name='auto', categorical_feature='auto', early_stopping_rounds=None, fpreproc=None, seed=0, callbacks=None, eval_train_metric=False, return_cvbooster=False): """Perform the cross-validation with given parameters. Parameters ---------- params : dict Parameters for Booster. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. 
folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None) If generator or iterator, it should yield the train and test indices for each fold. If object, it should be one of the scikit-learn splitter classes (https://scikit-learn.org/stable/modules/classes.html#splitter-classes) and have ``split`` method. This argument has highest priority over other data split arguments. nfold : int, optional (default=5) Number of folds in CV. stratified : bool, optional (default=True) Whether to perform stratified sampling. shuffle : bool, optional (default=True) Whether to shuffle before splitting data. metrics : str, list of str, or None, optional (default=None) Evaluation metrics to be monitored while CV. If not None, the metric in ``params`` will be overridden. fobj : callable or None, optional (default=None) Customized objective function. Should accept two parameters: preds, train_data, and return (grad, hess). preds : numpy 1-D array The predicted values. Predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task. train_data : Dataset The training dataset. grad : list, numpy 1-D array or pandas Series The value of the first order derivative (gradient) of the loss with respect to the elements of preds for each sample point. hess : list, numpy 1-D array or pandas Series The value of the second order derivative (Hessian) of the loss with respect to the elements of preds for each sample point. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. feval : callable, list of callable, or None, optional (default=None) Customized evaluation function. 
Each evaluation function should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. train_data : Dataset The training dataset. eval_name : str The name of evaluation function (without whitespace). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set ``metrics`` to the string ``"None"``. init_model : str, pathlib.Path, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used for continue training. feature_name : list of str, or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data columns names are used. categorical_feature : list of str or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of str, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. The output cannot be monotonically constrained with respect to a categorical feature. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. 
CV score needs to improve at least every ``early_stopping_rounds`` round(s) to continue. Requires at least one metric. If there's more than one, will check all of them. To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``. Last entry in evaluation history is the one from the best iteration. fpreproc : callable or None, optional (default=None) Preprocessing function that takes (dtrain, dtest, params) and returns transformed versions of those. seed : int, optional (default=0) Seed used to generate the folds (passed to numpy.random.seed). callbacks : list of callable, or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. eval_train_metric : bool, optional (default=False) Whether to display the train metric in progress. The score of the metric is calculated again after each training step, so there is some impact on performance. return_cvbooster : bool, optional (default=False) Whether to return Booster models trained on each fold through ``CVBooster``. Returns ------- eval_hist : dict Evaluation history. The dictionary has the following format: {'metric1-mean': [values], 'metric1-stdv': [values], 'metric2-mean': [values], 'metric2-stdv': [values], ...}. If ``return_cvbooster=True``, also returns trained boosters via ``cvbooster`` key. """ if not isinstance(train_set, Dataset): raise TypeError("Training only accepts Dataset object") params = copy.deepcopy(params) if fobj is not None: for obj_alias in _ConfigAliases.get("objective"): params.pop(obj_alias, None) params['objective'] = 'none' for alias in _ConfigAliases.get("num_iterations"): if alias in params: _log_warning(f"Found '{alias}' in params. 
Will use it instead of 'num_boost_round' argument") num_boost_round = params.pop(alias) params["num_iterations"] = num_boost_round if early_stopping_rounds is not None and early_stopping_rounds > 0: _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. " "Pass 'early_stopping()' callback via 'callbacks' argument instead.") for alias in _ConfigAliases.get("early_stopping_round"): if alias in params: early_stopping_rounds = params.pop(alias) params["early_stopping_round"] = early_stopping_rounds first_metric_only = params.get('first_metric_only', False) if num_boost_round <= 0: raise ValueError("num_boost_round should be greater than zero.") if isinstance(init_model, (str, Path)): predictor = _InnerPredictor(model_file=init_model, pred_parameter=params) elif isinstance(init_model, Booster): predictor = init_model._to_predictor(dict(init_model.params, **params)) else: predictor = None if metrics is not None: for metric_alias in _ConfigAliases.get("metric"): params.pop(metric_alias, None) params['metric'] = metrics train_set._update_params(params) \ ._set_predictor(predictor) \ .set_feature_name(feature_name) \ .set_categorical_feature(categorical_feature) results = collections.defaultdict(list) cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold, params=params, seed=seed, fpreproc=fpreproc, stratified=stratified, shuffle=shuffle, eval_train_metric=eval_train_metric) # setup callbacks if callbacks is None: callbacks = set() else: for i, cb in enumerate(callbacks): cb.__dict__.setdefault('order', i - len(callbacks)) callbacks = set(callbacks) if early_stopping_rounds is not None and early_stopping_rounds > 0: callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=False)) callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)} callbacks_after_iter = callbacks - callbacks_before_iter callbacks_before_iter = sorted(callbacks_before_iter, 
key=attrgetter('order')) callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order')) for i in range(num_boost_round): for cb in callbacks_before_iter: cb(callback.CallbackEnv(model=cvfolds, params=params, iteration=i, begin_iteration=0, end_iteration=num_boost_round, evaluation_result_list=None)) cvfolds.update(fobj=fobj) res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric) for _, key, mean, _, std in res: results[f'{key}-mean'].append(mean) results[f'{key}-stdv'].append(std) try: for cb in callbacks_after_iter: cb(callback.CallbackEnv(model=cvfolds, params=params, iteration=i, begin_iteration=0, end_iteration=num_boost_round, evaluation_result_list=res)) except callback.EarlyStopException as earlyStopException: cvfolds.best_iteration = earlyStopException.best_iteration + 1 for k in results: results[k] = results[k][:cvfolds.best_iteration] break if return_cvbooster: results['cvbooster'] = cvfolds return dict(results)
# coding: utf-8 """Library with training routines of LightGBM.""" import collections import copy from operator import attrgetter from pathlib import Path from typing import Any, Callable, Dict, List, Optional, Tuple, Union import numpy as np from . import callback from .basic import Booster, Dataset, LightGBMError, _ArrayLike, _ConfigAliases, _InnerPredictor, _log_warning from .compat import SKLEARN_INSTALLED, _LGBMGroupKFold, _LGBMStratifiedKFold _LGBM_CustomObjectiveFunction = Callable[ [np.ndarray, Dataset], Tuple[_ArrayLike, _ArrayLike] ] _LGBM_CustomMetricFunction = Callable[ [np.ndarray, Dataset], Tuple[str, float, bool] ] def train( params: Dict[str, Any], train_set: Dataset, num_boost_round: int = 100, valid_sets: Optional[List[Dataset]] = None, valid_names: Optional[List[str]] = None, fobj: Optional[_LGBM_CustomObjectiveFunction] = None, feval: Optional[Union[_LGBM_CustomMetricFunction, List[_LGBM_CustomMetricFunction]]] = None, init_model: Optional[Union[str, Path, Booster]] = None, feature_name: Union[List[str], str] = 'auto', categorical_feature: Union[List[str], List[int], str] = 'auto', early_stopping_rounds: Optional[int] = None, evals_result: Optional[Dict[str, Any]] = None, keep_training_booster: bool = False, callbacks: Optional[List[Callable]] = None ) -> Booster: """Perform the training with given parameters. Parameters ---------- params : dict Parameters for training. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. valid_sets : list of Dataset, or None, optional (default=None) List of data to be evaluated on during training. valid_names : list of str, or None, optional (default=None) Names of ``valid_sets``. fobj : callable or None, optional (default=None) Customized objective function. Should accept two parameters: preds, train_data, and return (grad, hess). preds : numpy 1-D array The predicted values. Predicted values are returned before any transformation, e.g. 
they are raw margin instead of probability of positive class for binary task. train_data : Dataset The training dataset. grad : list, numpy 1-D array or pandas Series The value of the first order derivative (gradient) of the loss with respect to the elements of preds for each sample point. hess : list, numpy 1-D array or pandas Series The value of the second order derivative (Hessian) of the loss with respect to the elements of preds for each sample point. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. feval : callable, list of callable, or None, optional (default=None) Customized evaluation function. Each evaluation function should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. train_data : Dataset The training dataset. eval_name : str The name of evaluation function (without whitespaces). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set the ``metric`` parameter to the string ``"None"`` in ``params``. init_model : str, pathlib.Path, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used for continue training. feature_name : list of str, or 'auto', optional (default="auto") Feature names. 
If 'auto' and data is pandas DataFrame, data columns names are used. categorical_feature : list of str or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of str, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. The output cannot be monotonically constrained with respect to a categorical feature. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation data and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``. The index of iteration that has the best performance will be saved in the ``best_iteration`` field if early stopping logic is enabled by setting ``early_stopping_rounds``. evals_result : dict or None, optional (default=None) Dictionary used to store all evaluation results of all the items in ``valid_sets``. This should be initialized outside of your call to ``train()`` and should be empty. Any initial contents of the dictionary will be deleted. .. rubric:: Example With a ``valid_sets`` = [valid_set, train_set], ``valid_names`` = ['eval', 'train'] and a ``params`` = {'metric': 'logloss'} returns {'train': {'logloss': ['0.48253', '0.35953', ...]}, 'eval': {'logloss': ['0.480385', '0.357756', ...]}}. 
keep_training_booster : bool, optional (default=False) Whether the returned Booster will be used to keep training. If False, the returned value will be converted into _InnerPredictor before returning. This means you won't be able to use ``eval``, ``eval_train`` or ``eval_valid`` methods of the returned Booster. When your model is very large and cause the memory error, you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``. You can still use _InnerPredictor as ``init_model`` for future continue training. callbacks : list of callable, or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. Returns ------- booster : Booster The trained Booster model. """ # create predictor first params = copy.deepcopy(params) if fobj is not None: for obj_alias in _ConfigAliases.get("objective"): params.pop(obj_alias, None) params['objective'] = 'none' for alias in _ConfigAliases.get("num_iterations"): if alias in params: num_boost_round = params.pop(alias) _log_warning(f"Found `{alias}` in params. Will use it instead of argument") params["num_iterations"] = num_boost_round # show deprecation warning only for early stop argument, setting early stop via global params should still be possible if early_stopping_rounds is not None and early_stopping_rounds > 0: _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. 
" "Pass 'early_stopping()' callback via 'callbacks' argument instead.") for alias in _ConfigAliases.get("early_stopping_round"): if alias in params: early_stopping_rounds = params.pop(alias) params["early_stopping_round"] = early_stopping_rounds first_metric_only = params.get('first_metric_only', False) if num_boost_round <= 0: raise ValueError("num_boost_round should be greater than zero.") predictor: Optional[_InnerPredictor] = None if isinstance(init_model, (str, Path)): predictor = _InnerPredictor(model_file=init_model, pred_parameter=params) elif isinstance(init_model, Booster): predictor = init_model._to_predictor(dict(init_model.params, **params)) init_iteration = predictor.num_total_iteration if predictor is not None else 0 # check dataset if not isinstance(train_set, Dataset): raise TypeError("Training only accepts Dataset object") train_set._update_params(params) \ ._set_predictor(predictor) \ .set_feature_name(feature_name) \ .set_categorical_feature(categorical_feature) is_valid_contain_train = False train_data_name = "training" reduced_valid_sets = [] name_valid_sets = [] if valid_sets is not None: if isinstance(valid_sets, Dataset): valid_sets = [valid_sets] if isinstance(valid_names, str): valid_names = [valid_names] for i, valid_data in enumerate(valid_sets): # reduce cost for prediction training data if valid_data is train_set: is_valid_contain_train = True if valid_names is not None: train_data_name = valid_names[i] continue if not isinstance(valid_data, Dataset): raise TypeError("Training only accepts Dataset object") reduced_valid_sets.append(valid_data._update_params(params).set_reference(train_set)) if valid_names is not None and len(valid_names) > i: name_valid_sets.append(valid_names[i]) else: name_valid_sets.append(f'valid_{i}') # process callbacks if callbacks is None: callbacks_set = set() else: for i, cb in enumerate(callbacks): cb.__dict__.setdefault('order', i - len(callbacks)) callbacks_set = set(callbacks) # Most of legacy advanced 
options becomes callbacks if early_stopping_rounds is not None and early_stopping_rounds > 0: callbacks_set.add(callback.early_stopping(early_stopping_rounds, first_metric_only)) if evals_result is not None: _log_warning("'evals_result' argument is deprecated and will be removed in a future release of LightGBM. " "Pass 'record_evaluation()' callback via 'callbacks' argument instead.") callbacks_set.add(callback.record_evaluation(evals_result)) callbacks_before_iter_set = {cb for cb in callbacks_set if getattr(cb, 'before_iteration', False)} callbacks_after_iter_set = callbacks_set - callbacks_before_iter_set callbacks_before_iter = sorted(callbacks_before_iter_set, key=attrgetter('order')) callbacks_after_iter = sorted(callbacks_after_iter_set, key=attrgetter('order')) # construct booster try: booster = Booster(params=params, train_set=train_set) if is_valid_contain_train: booster.set_train_data_name(train_data_name) for valid_set, name_valid_set in zip(reduced_valid_sets, name_valid_sets): booster.add_valid(valid_set, name_valid_set) finally: train_set._reverse_update_params() for valid_set in reduced_valid_sets: valid_set._reverse_update_params() booster.best_iteration = 0 # start training for i in range(init_iteration, init_iteration + num_boost_round): for cb in callbacks_before_iter: cb(callback.CallbackEnv(model=booster, params=params, iteration=i, begin_iteration=init_iteration, end_iteration=init_iteration + num_boost_round, evaluation_result_list=None)) booster.update(fobj=fobj) evaluation_result_list = [] # check evaluation result. 
if valid_sets is not None: if is_valid_contain_train: evaluation_result_list.extend(booster.eval_train(feval)) evaluation_result_list.extend(booster.eval_valid(feval)) try: for cb in callbacks_after_iter: cb(callback.CallbackEnv(model=booster, params=params, iteration=i, begin_iteration=init_iteration, end_iteration=init_iteration + num_boost_round, evaluation_result_list=evaluation_result_list)) except callback.EarlyStopException as earlyStopException: booster.best_iteration = earlyStopException.best_iteration + 1 evaluation_result_list = earlyStopException.best_score break booster.best_score = collections.defaultdict(collections.OrderedDict) for dataset_name, eval_name, score, _ in evaluation_result_list: booster.best_score[dataset_name][eval_name] = score if not keep_training_booster: booster.model_from_string(booster.model_to_string()).free_dataset() return booster class CVBooster: """CVBooster in LightGBM. Auxiliary data structure to hold and redirect all boosters of ``cv`` function. This class has the same methods as Booster class. All method calls are actually performed for underlying Boosters and then all returned results are returned in a list. Attributes ---------- boosters : list of Booster The list of underlying fitted models. best_iteration : int The best iteration of fitted model. """ def __init__(self): """Initialize the CVBooster. Generally, no need to instantiate manually. 
""" self.boosters = [] self.best_iteration = -1 def _append(self, booster): """Add a booster to CVBooster.""" self.boosters.append(booster) def __getattr__(self, name): """Redirect methods call of CVBooster.""" def handler_function(*args, **kwargs): """Call methods with each booster, and concatenate their results.""" ret = [] for booster in self.boosters: ret.append(getattr(booster, name)(*args, **kwargs)) return ret return handler_function def _make_n_folds(full_data, folds, nfold, params, seed, fpreproc=None, stratified=True, shuffle=True, eval_train_metric=False): """Make a n-fold list of Booster from random indices.""" full_data = full_data.construct() num_data = full_data.num_data() if folds is not None: if not hasattr(folds, '__iter__') and not hasattr(folds, 'split'): raise AttributeError("folds should be a generator or iterator of (train_idx, test_idx) tuples " "or scikit-learn splitter object with split method") if hasattr(folds, 'split'): group_info = full_data.get_group() if group_info is not None: group_info = np.array(group_info, dtype=np.int32, copy=False) flatted_group = np.repeat(range(len(group_info)), repeats=group_info) else: flatted_group = np.zeros(num_data, dtype=np.int32) folds = folds.split(X=np.empty(num_data), y=full_data.get_label(), groups=flatted_group) else: if any(params.get(obj_alias, "") in {"lambdarank", "rank_xendcg", "xendcg", "xe_ndcg", "xe_ndcg_mart", "xendcg_mart"} for obj_alias in _ConfigAliases.get("objective")): if not SKLEARN_INSTALLED: raise LightGBMError('scikit-learn is required for ranking cv') # ranking task, split according to groups group_info = np.array(full_data.get_group(), dtype=np.int32, copy=False) flatted_group = np.repeat(range(len(group_info)), repeats=group_info) group_kfold = _LGBMGroupKFold(n_splits=nfold) folds = group_kfold.split(X=np.empty(num_data), groups=flatted_group) elif stratified: if not SKLEARN_INSTALLED: raise LightGBMError('scikit-learn is required for stratified cv') skf = 
_LGBMStratifiedKFold(n_splits=nfold, shuffle=shuffle, random_state=seed) folds = skf.split(X=np.empty(num_data), y=full_data.get_label()) else: if shuffle: randidx = np.random.RandomState(seed).permutation(num_data) else: randidx = np.arange(num_data) kstep = int(num_data / nfold) test_id = [randidx[i: i + kstep] for i in range(0, num_data, kstep)] train_id = [np.concatenate([test_id[i] for i in range(nfold) if k != i]) for k in range(nfold)] folds = zip(train_id, test_id) ret = CVBooster() for train_idx, test_idx in folds: train_set = full_data.subset(sorted(train_idx)) valid_set = full_data.subset(sorted(test_idx)) # run preprocessing on the data set if needed if fpreproc is not None: train_set, valid_set, tparam = fpreproc(train_set, valid_set, params.copy()) else: tparam = params cvbooster = Booster(tparam, train_set) if eval_train_metric: cvbooster.add_valid(train_set, 'train') cvbooster.add_valid(valid_set, 'valid') ret._append(cvbooster) return ret def _agg_cv_result(raw_results, eval_train_metric=False): """Aggregate cross-validation results.""" cvmap = collections.OrderedDict() metric_type = {} for one_result in raw_results: for one_line in one_result: if eval_train_metric: key = f"{one_line[0]} {one_line[1]}" else: key = one_line[1] metric_type[key] = one_line[3] cvmap.setdefault(key, []) cvmap[key].append(one_line[2]) return [('cv_agg', k, np.mean(v), metric_type[k], np.std(v)) for k, v in cvmap.items()] def cv(params, train_set, num_boost_round=100, folds=None, nfold=5, stratified=True, shuffle=True, metrics=None, fobj=None, feval=None, init_model=None, feature_name='auto', categorical_feature='auto', early_stopping_rounds=None, fpreproc=None, seed=0, callbacks=None, eval_train_metric=False, return_cvbooster=False): """Perform the cross-validation with given parameters. Parameters ---------- params : dict Parameters for Booster. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. 
folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None) If generator or iterator, it should yield the train and test indices for each fold. If object, it should be one of the scikit-learn splitter classes (https://scikit-learn.org/stable/modules/classes.html#splitter-classes) and have ``split`` method. This argument has highest priority over other data split arguments. nfold : int, optional (default=5) Number of folds in CV. stratified : bool, optional (default=True) Whether to perform stratified sampling. shuffle : bool, optional (default=True) Whether to shuffle before splitting data. metrics : str, list of str, or None, optional (default=None) Evaluation metrics to be monitored while CV. If not None, the metric in ``params`` will be overridden. fobj : callable or None, optional (default=None) Customized objective function. Should accept two parameters: preds, train_data, and return (grad, hess). preds : numpy 1-D array The predicted values. Predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task. train_data : Dataset The training dataset. grad : list, numpy 1-D array or pandas Series The value of the first order derivative (gradient) of the loss with respect to the elements of preds for each sample point. hess : list, numpy 1-D array or pandas Series The value of the second order derivative (Hessian) of the loss with respect to the elements of preds for each sample point. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. feval : callable, list of callable, or None, optional (default=None) Customized evaluation function. 
Each evaluation function should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. train_data : Dataset The training dataset. eval_name : str The name of evaluation function (without whitespace). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set ``metrics`` to the string ``"None"``. init_model : str, pathlib.Path, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used for continue training. feature_name : list of str, or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data columns names are used. categorical_feature : list of str or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of str, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. The output cannot be monotonically constrained with respect to a categorical feature. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. 
CV score needs to improve at least every ``early_stopping_rounds`` round(s) to continue. Requires at least one metric. If there's more than one, will check all of them. To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``. Last entry in evaluation history is the one from the best iteration. fpreproc : callable or None, optional (default=None) Preprocessing function that takes (dtrain, dtest, params) and returns transformed versions of those. seed : int, optional (default=0) Seed used to generate the folds (passed to numpy.random.seed). callbacks : list of callable, or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. eval_train_metric : bool, optional (default=False) Whether to display the train metric in progress. The score of the metric is calculated again after each training step, so there is some impact on performance. return_cvbooster : bool, optional (default=False) Whether to return Booster models trained on each fold through ``CVBooster``. Returns ------- eval_hist : dict Evaluation history. The dictionary has the following format: {'metric1-mean': [values], 'metric1-stdv': [values], 'metric2-mean': [values], 'metric2-stdv': [values], ...}. If ``return_cvbooster=True``, also returns trained boosters via ``cvbooster`` key. """ if not isinstance(train_set, Dataset): raise TypeError("Training only accepts Dataset object") params = copy.deepcopy(params) if fobj is not None: for obj_alias in _ConfigAliases.get("objective"): params.pop(obj_alias, None) params['objective'] = 'none' for alias in _ConfigAliases.get("num_iterations"): if alias in params: _log_warning(f"Found '{alias}' in params. 
Will use it instead of 'num_boost_round' argument") num_boost_round = params.pop(alias) params["num_iterations"] = num_boost_round if early_stopping_rounds is not None and early_stopping_rounds > 0: _log_warning("'early_stopping_rounds' argument is deprecated and will be removed in a future release of LightGBM. " "Pass 'early_stopping()' callback via 'callbacks' argument instead.") for alias in _ConfigAliases.get("early_stopping_round"): if alias in params: early_stopping_rounds = params.pop(alias) params["early_stopping_round"] = early_stopping_rounds first_metric_only = params.get('first_metric_only', False) if num_boost_round <= 0: raise ValueError("num_boost_round should be greater than zero.") if isinstance(init_model, (str, Path)): predictor = _InnerPredictor(model_file=init_model, pred_parameter=params) elif isinstance(init_model, Booster): predictor = init_model._to_predictor(dict(init_model.params, **params)) else: predictor = None if metrics is not None: for metric_alias in _ConfigAliases.get("metric"): params.pop(metric_alias, None) params['metric'] = metrics train_set._update_params(params) \ ._set_predictor(predictor) \ .set_feature_name(feature_name) \ .set_categorical_feature(categorical_feature) results = collections.defaultdict(list) cvfolds = _make_n_folds(train_set, folds=folds, nfold=nfold, params=params, seed=seed, fpreproc=fpreproc, stratified=stratified, shuffle=shuffle, eval_train_metric=eval_train_metric) # setup callbacks if callbacks is None: callbacks = set() else: for i, cb in enumerate(callbacks): cb.__dict__.setdefault('order', i - len(callbacks)) callbacks = set(callbacks) if early_stopping_rounds is not None and early_stopping_rounds > 0: callbacks.add(callback.early_stopping(early_stopping_rounds, first_metric_only, verbose=False)) callbacks_before_iter = {cb for cb in callbacks if getattr(cb, 'before_iteration', False)} callbacks_after_iter = callbacks - callbacks_before_iter callbacks_before_iter = sorted(callbacks_before_iter, 
key=attrgetter('order')) callbacks_after_iter = sorted(callbacks_after_iter, key=attrgetter('order')) for i in range(num_boost_round): for cb in callbacks_before_iter: cb(callback.CallbackEnv(model=cvfolds, params=params, iteration=i, begin_iteration=0, end_iteration=num_boost_round, evaluation_result_list=None)) cvfolds.update(fobj=fobj) res = _agg_cv_result(cvfolds.eval_valid(feval), eval_train_metric) for _, key, mean, _, std in res: results[f'{key}-mean'].append(mean) results[f'{key}-stdv'].append(std) try: for cb in callbacks_after_iter: cb(callback.CallbackEnv(model=cvfolds, params=params, iteration=i, begin_iteration=0, end_iteration=num_boost_round, evaluation_result_list=res)) except callback.EarlyStopException as earlyStopException: cvfolds.best_iteration = earlyStopException.best_iteration + 1 for k in results: results[k] = results[k][:cvfolds.best_iteration] break if return_cvbooster: results['cvbooster'] = cvfolds return dict(results)
en
0.70688
# coding: utf-8 Library with training routines of LightGBM. Perform the training with given parameters. Parameters ---------- params : dict Parameters for training. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. valid_sets : list of Dataset, or None, optional (default=None) List of data to be evaluated on during training. valid_names : list of str, or None, optional (default=None) Names of ``valid_sets``. fobj : callable or None, optional (default=None) Customized objective function. Should accept two parameters: preds, train_data, and return (grad, hess). preds : numpy 1-D array The predicted values. Predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task. train_data : Dataset The training dataset. grad : list, numpy 1-D array or pandas Series The value of the first order derivative (gradient) of the loss with respect to the elements of preds for each sample point. hess : list, numpy 1-D array or pandas Series The value of the second order derivative (Hessian) of the loss with respect to the elements of preds for each sample point. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. feval : callable, list of callable, or None, optional (default=None) Customized evaluation function. Each evaluation function should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. train_data : Dataset The training dataset. 
eval_name : str The name of the evaluation function (without whitespace). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set the ``metric`` parameter to the string ``"None"`` in ``params``. init_model : str, pathlib.Path, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used for continue training. feature_name : list of str, or 'auto', optional (default="auto") Feature names. If 'auto' and data is pandas DataFrame, data columns names are used. categorical_feature : list of str or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of str, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. The output cannot be monotonically constrained with respect to a categorical feature. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. The model will train until the validation score stops improving. Validation score needs to improve at least every ``early_stopping_rounds`` round(s) to continue training. Requires at least one validation data and one metric. If there's more than one, will check all of them. But the training data is ignored anyway. To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``. 
The index of iteration that has the best performance will be saved in the ``best_iteration`` field if early stopping logic is enabled by setting ``early_stopping_rounds``. evals_result : dict or None, optional (default=None) Dictionary used to store all evaluation results of all the items in ``valid_sets``. This should be initialized outside of your call to ``train()`` and should be empty. Any initial contents of the dictionary will be deleted. .. rubric:: Example With a ``valid_sets`` = [valid_set, train_set], ``valid_names`` = ['eval', 'train'] and a ``params`` = {'metric': 'logloss'} returns {'train': {'logloss': ['0.48253', '0.35953', ...]}, 'eval': {'logloss': ['0.480385', '0.357756', ...]}}. keep_training_booster : bool, optional (default=False) Whether the returned Booster will be used to keep training. If False, the returned value will be converted into _InnerPredictor before returning. This means you won't be able to use ``eval``, ``eval_train`` or ``eval_valid`` methods of the returned Booster. When your model is very large and causes a memory error, you can try to set this param to ``True`` to avoid the model conversion performed during the internal call of ``model_to_string``. You can still use _InnerPredictor as ``init_model`` for future continue training. callbacks : list of callable, or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. Returns ------- booster : Booster The trained Booster model. # create predictor first # show deprecation warning only for early stop argument, setting early stop via global params should still be possible # check dataset # reduce cost for prediction training data # process callbacks # Most of legacy advanced options becomes callbacks # construct booster # start training # check evaluation result. CVBooster in LightGBM. Auxiliary data structure to hold and redirect all boosters of ``cv`` function. 
This class has the same methods as Booster class. All method calls are actually performed for underlying Boosters and then all returned results are returned in a list. Attributes ---------- boosters : list of Booster The list of underlying fitted models. best_iteration : int The best iteration of fitted model. Initialize the CVBooster. Generally, no need to instantiate manually. Add a booster to CVBooster. Redirect methods call of CVBooster. Call methods with each booster, and concatenate their results. Make a n-fold list of Booster from random indices. # ranking task, split according to groups # run preprocessing on the data set if needed Aggregate cross-validation results. Perform the cross-validation with given parameters. Parameters ---------- params : dict Parameters for Booster. train_set : Dataset Data to be trained on. num_boost_round : int, optional (default=100) Number of boosting iterations. folds : generator or iterator of (train_idx, test_idx) tuples, scikit-learn splitter object or None, optional (default=None) If generator or iterator, it should yield the train and test indices for each fold. If object, it should be one of the scikit-learn splitter classes (https://scikit-learn.org/stable/modules/classes.html#splitter-classes) and have ``split`` method. This argument has highest priority over other data split arguments. nfold : int, optional (default=5) Number of folds in CV. stratified : bool, optional (default=True) Whether to perform stratified sampling. shuffle : bool, optional (default=True) Whether to shuffle before splitting data. metrics : str, list of str, or None, optional (default=None) Evaluation metrics to be monitored while CV. If not None, the metric in ``params`` will be overridden. fobj : callable or None, optional (default=None) Customized objective function. Should accept two parameters: preds, train_data, and return (grad, hess). preds : numpy 1-D array The predicted values. 
Predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task. train_data : Dataset The training dataset. grad : list, numpy 1-D array or pandas Series The value of the first order derivative (gradient) of the loss with respect to the elements of preds for each sample point. hess : list, numpy 1-D array or pandas Series The value of the second order derivative (Hessian) of the loss with respect to the elements of preds for each sample point. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is score[j * num_data + i] and you should group grad and hess in this way as well. feval : callable, list of callable, or None, optional (default=None) Customized evaluation function. Each evaluation function should accept two parameters: preds, train_data, and return (eval_name, eval_result, is_higher_better) or list of such tuples. preds : numpy 1-D array The predicted values. If ``fobj`` is specified, predicted values are returned before any transformation, e.g. they are raw margin instead of probability of positive class for binary task in this case. train_data : Dataset The training dataset. eval_name : str The name of evaluation function (without whitespace). eval_result : float The eval result. is_higher_better : bool Is eval result higher better, e.g. AUC is ``is_higher_better``. For multi-class task, the preds is group by class_id first, then group by row_id. If you want to get i-th row preds in j-th class, the access way is preds[j * num_data + i]. To ignore the default metric corresponding to the used objective, set ``metrics`` to the string ``"None"``. init_model : str, pathlib.Path, Booster or None, optional (default=None) Filename of LightGBM model or Booster instance used for continue training. feature_name : list of str, or 'auto', optional (default="auto") Feature names. 
If 'auto' and data is pandas DataFrame, data columns names are used. categorical_feature : list of str or int, or 'auto', optional (default="auto") Categorical features. If list of int, interpreted as indices. If list of str, interpreted as feature names (need to specify ``feature_name`` as well). If 'auto' and data is pandas DataFrame, pandas unordered categorical columns are used. All values in categorical features should be less than int32 max value (2147483647). Large values could be memory consuming. Consider using consecutive integers starting from zero. All negative values in categorical features will be treated as missing values. The output cannot be monotonically constrained with respect to a categorical feature. early_stopping_rounds : int or None, optional (default=None) Activates early stopping. CV score needs to improve at least every ``early_stopping_rounds`` round(s) to continue. Requires at least one metric. If there's more than one, will check all of them. To check only the first metric, set the ``first_metric_only`` parameter to ``True`` in ``params``. Last entry in evaluation history is the one from the best iteration. fpreproc : callable or None, optional (default=None) Preprocessing function that takes (dtrain, dtest, params) and returns transformed versions of those. seed : int, optional (default=0) Seed used to generate the folds (passed to numpy.random.seed). callbacks : list of callable, or None, optional (default=None) List of callback functions that are applied at each iteration. See Callbacks in Python API for more information. eval_train_metric : bool, optional (default=False) Whether to display the train metric in progress. The score of the metric is calculated again after each training step, so there is some impact on performance. return_cvbooster : bool, optional (default=False) Whether to return Booster models trained on each fold through ``CVBooster``. Returns ------- eval_hist : dict Evaluation history. 
The dictionary has the following format: {'metric1-mean': [values], 'metric1-stdv': [values], 'metric2-mean': [values], 'metric2-stdv': [values], ...}. If ``return_cvbooster=True``, also returns trained boosters via ``cvbooster`` key. # setup callbacks
2.511728
3
bin/bitfinex.py
yuhan-wang/whalewatch
1
6625339
import collections

import kafka
from bfxapi import Client

from order_book import OrderBook
from order_book import kafka_send

# Stream Bitfinex order-book changes into the Kafka topic 'all'.
bfx = Client()
exchange = "Bitfinex"
host = ['localhost:9092']
producer = kafka.KafkaProducer(bootstrap_servers=host)
# Ensure the destination topic exists before the first send.
kafka.KafkaClient(bootstrap_servers=host).add_topic('all')

# One symbol per line; Bitfinex trading-pair symbols carry a 't' prefix.
# splitlines() drops the trailing newline (and any '\r') in one pass,
# replacing the old readlines()/replace('\n', '') + map() combination.
with open('./trading_pairs/bitfinex.pair', 'r') as f:
    pairs = ['t' + line for line in f.read().splitlines()]

# Per-symbol local order books, created lazily on first access.
local_book = collections.defaultdict(OrderBook)


@bfx.ws.on('error')
def log_error(err):
    """Print websocket errors as they arrive."""
    print("Error: {}".format(err))


@bfx.ws.on('order_book_update')
def log_update(data):
    """Apply an incremental book update; forward any resulting change to Kafka."""
    ob = local_book[data['symbol']]
    order_change = ob.update_order(data['data'])
    if order_change:
        kafka_send(producer, 'all', exchange, data['symbol'], order_change)


@bfx.ws.on('order_book_snapshot')
def log_snapshot(data):
    """Replace the local book for a symbol with a fresh snapshot."""
    ob = local_book[data['symbol']] = OrderBook()
    ob.initialize_book('bitfinex',
                       bfx.ws.orderBooks[data['symbol']].bids,
                       bfx.ws.orderBooks[data['symbol']].asks)


async def start():
    """Subscribe to the order book of every configured pair."""
    # The index from the original enumerate() was never used, so iterate directly.
    for pair in pairs:
        await bfx.ws.subscribe('book', pair, prec='P0', len='100')


bfx.ws.on('connected', start)
bfx.ws.run()
import collections import kafka from bfxapi import Client from order_book import OrderBook from order_book import kafka_send bfx = Client() exchange = "Bitfinex" host = ['localhost:9092'] producer = kafka.KafkaProducer(bootstrap_servers=host) kafka.KafkaClient(bootstrap_servers=host).add_topic('all') with open('./trading_pairs/bitfinex.pair', 'r') as f: pairs = [e.replace('\n', '') for e in f.readlines()] pairs = list(map(lambda x: 't' + x, pairs)) local_book = collections.defaultdict(OrderBook) @bfx.ws.on('error') def log_error(err): print("Error: {}".format(err)) @bfx.ws.on('order_book_update') def log_update(data): ob = local_book[data['symbol']] order_change = ob.update_order(data['data']) if order_change: kafka_send(producer, 'all', exchange, data['symbol'], order_change) @bfx.ws.on('order_book_snapshot') def log_snapshot(data): ob = local_book[data['symbol']] = OrderBook() ob.initialize_book('bitfinex', bfx.ws.orderBooks[data['symbol']].bids, bfx.ws.orderBooks[data['symbol']].asks) async def start(): for i, pair in enumerate(pairs): await bfx.ws.subscribe('book', pair, prec='P0', len='100') bfx.ws.on('connected', start) bfx.ws.run()
none
1
2.376256
2
indi_mr/fromindi.py
bernie-skipole/indi-mr
0
6625340
<reponame>bernie-skipole/indi-mr<gh_stars>0 ################### # # fromindi.py # ################### """Reads indi xml strings, parses them and places values into redis, ready for reading by the web server.""" import xml.etree.ElementTree as ET import os, math, json, pathlib from datetime import datetime from base64 import standard_b64decode, standard_b64encode from . import tools # All xml data received should be contained in one of the following tags TAGS = (b'defTextVector', b'defNumberVector', b'defSwitchVector', b'defLightVector', b'defBLOBVector', b'message', b'delProperty', b'setTextVector', b'setNumberVector', b'setSwitchVector', b'setLightVector', b'setBLOBVector' ) ########## redis keys and channels _KEYPREFIX = "" _TO_INDI_CHANNEL = "" _FROM_INDI_CHANNEL = "" # redis keys and data # # one key : set # 'devices' - set of device names ('devices' is a literal string) # multiple keys : sets # 'properties:<devicename>' - set of property names for the device ('properties' is a literal string # <devicename> is an actual device name) # multiple keys : hash tables ( python dictionaries ) # 'attributes:<propertyname>:<devicename>' - dictionary of attributes for the property ('attributes' is a literal string # <propertyname> is an actual property name # <devicename> is an actual device name # one key : list # 'messages' - list of "Timestamp space message" # multiple keys : lists # 'devicemessages:<devicename>' - list of "Timestamp space message" # multiple keys : sets # 'elements:<propertyname>:<devicename>' - set of element names for the device property # ('elements' is a literal string # <propertyname> is an actual property name # <devicename> is an actual device name) # multiple keys : hash tables ( python dictionaries ) # 'elementattributes:<elementname>:<propertyname>:<devicename>' - dictionary of attributes for the element # ('elementattributes' is a literal string # <elementname> is an actual element name # <propertyname> is an actual property name # 
<devicename> is an actual device name) _LOGLENGTHS = { 'devices' : 5, 'properties' : 5, 'attributes' : 5, 'elements': 5, 'messages': 5, 'textvector': 5, 'numbervector':50, 'switchvector':5, 'lightvector':5, 'blobvector':5 } _BLOBFOLDER = "" def receive_from_indiserver(data, root, rconn): """receives xml data, parses it and stores in redis. Publishes the data received on _FROM_INDI_CHANNEL, returns device name if given, or None""" global _FROM_INDI_CHANNEL if rconn is None: return # this timestamp is the time at which the data is received timestamp = datetime.utcnow().isoformat(sep='T') devicename = None if root.tag == "defTextVector": text_vector = TextVector() # create a TextVector object text_vector.setup_from_def(rconn, root) # store the received data in a TextVector object text_vector.write(rconn) # call the write method to store data in redis text_vector.log(rconn, timestamp) devicename = text_vector.device elif root.tag == "defNumberVector": number_vector = NumberVector() number_vector.setup_from_def(rconn, root) number_vector.write(rconn) number_vector.log(rconn, timestamp) devicename = number_vector.device elif root.tag == "defSwitchVector": switch_vector = SwitchVector() switch_vector.setup_from_def(rconn, root) switch_vector.write(rconn) switch_vector.log(rconn, timestamp) devicename = switch_vector.device elif root.tag == "defLightVector": light_vector = LightVector() light_vector.setup_from_def(rconn, root) light_vector.write(rconn) light_vector.log(rconn, timestamp) devicename = light_vector.device elif root.tag == "defBLOBVector": blob_vector = BLOBVector() blob_vector.setup_from_def(rconn, root) blob_vector.write(rconn) blob_vector.log(rconn, timestamp) devicename = blob_vector.device elif root.tag == "message": message = Message(root) message.write(rconn) message.log(rconn, timestamp) elif root.tag == "delProperty": delprop = delProperty(root) delprop.write(rconn) delprop.log(rconn, timestamp) elif root.tag == "setTextVector": text_vector = 
TextVector.update_from_setvector(rconn, root) if text_vector is not None: text_vector.log(rconn, timestamp) elif root.tag == "setNumberVector": number_vector = NumberVector.update_from_setvector(rconn, root) if number_vector is not None: number_vector.log(rconn, timestamp) elif root.tag == "setSwitchVector": switch_vector = SwitchVector.update_from_setvector(rconn, root) if switch_vector is not None: switch_vector.log(rconn, timestamp) elif root.tag == "setLightVector": light_vector = LightVector.update_from_setvector(rconn, root) if light_vector is not None: light_vector.log(rconn, timestamp) elif root.tag == "setBLOBVector": blob_vector = BLOBVector.update_from_setvector(rconn, root) if blob_vector is not None: blob_vector.log(rconn, timestamp) # and publishes the data received rconn.publish(_FROM_INDI_CHANNEL, data) return devicename def setup_redis(key_prefix, to_indi_channel, from_indi_channel, log_lengths, blob_folder): "Sets the redis key prefix and pubsub channels" global _KEYPREFIX, _TO_INDI_CHANNEL, _FROM_INDI_CHANNEL, _LOGLENGTHS, _BLOBFOLDER if key_prefix: _KEYPREFIX = key_prefix else: _KEYPREFIX = "" if to_indi_channel: _TO_INDI_CHANNEL = to_indi_channel else: _TO_INDI_CHANNEL = "" if from_indi_channel: _FROM_INDI_CHANNEL = from_indi_channel else: _FROM_INDI_CHANNEL = "" if log_lengths: # ensure no item in log_lengths has a value less than 1 new_log_lengths = {} for key,value in log_lengths.items(): if value<1: new_log_lengths[key]=1 else: new_log_lengths[key]=value _LOGLENGTHS.update(new_log_lengths) if blob_folder: _BLOBFOLDER = blob_folder else: _BLOBFOLDER = "" def get_to_indi_channel(): return _TO_INDI_CHANNEL def get_from_indi_channel(): return _FROM_INDI_CHANNEL def key(*keys): "Add the prefix to keys, delimit keys with :" # example - if keys are 'device', 'property' this will result in a key of # 'keyprefixdevice:property' return _KEYPREFIX + ":".join(keys) ############# Define properties class ParentProperty(): "Parent to Text, Number, Switch, 
Lights, Blob vectors" def __init__(self): "Parent Item" # add the class name so it is saved with attributes to redis, so the type of vector can be read self.vector = self.__class__.__name__ # self.elements is a dictionary which will hold the elements within this vector, keys are element names self.elements = {} def setup_from_def(self, rconn, vector): "Set up the object from def... element" self.device = vector.get("device") # name of Device self.name = vector.get("name") # name of Property # state case may be incorrect (some confusion in white paper over the case of 'Ok') state = vector.get("state").lower() # current state of Property should be one of Idle, Ok, Busy or Alert if state == "idle": self.state = "Idle" elif state == "ok": self.state = "Ok" elif state == "busy": self.state = "Busy" else: self.state = "Alert" # implied properties self.label = vector.get("label", self.name) # GUI label, use name by default self.group = vector.get("group", "DEFAULT GROUP") # Property group membership, blank by default self.timestamp = vector.get("timestamp", datetime.utcnow().isoformat()) # moment when these data were valid self.timeout = vector.get("timeout", 0) # worse-case time to affect, 0 default, N/A for ro self.message = vector.get("message", "") def setup_from_redis(self, rconn, device, name): "Set up the object from set... 
element" self.device = device # name of Device self.name = name # name of Property self._status = False # read status, will be set to True if items read from redis are ok # read attributes from redis self._strattribs = self.get_attributes(rconn) # this should be a dictionary of attributes, if not found, it will be an empty dictionary if not self._strattribs: # returns with an empty self._strattribs return self.state = self._strattribs["state"] self.label = self._strattribs["label"] self.group = self._strattribs["group"] self.timestamp = self._strattribs["timestamp"] self.timeout = self._strattribs["timeout"] self.message = self._strattribs["message"] def _set_permission(self, permission): "Sets the possible permissions, Read-Only, Write-Only or Read-Write" if permission in ('ro', 'wo', 'rw'): self.perm = permission else: self.perm = 'ro' @staticmethod def get_devices(rconn): "Return a set of device names as saved in redis" deviceset = rconn.smembers(key('devices')) if not deviceset: return set() return set(d.decode("utf-8") for d in deviceset) def get_properties(self, rconn): "Returns a set of property names for this device as saved in redis" propertyset = rconn.smembers(key('properties', self.device)) if not propertyset: return set() return set(p.decode("utf-8") for p in propertyset) def get_attributes(self, rconn): "Returns a dictionary of attributes for this property and device as saved in redis" attdict = rconn.hgetall(key('attributes',self.name,self.device)) if not attdict: return {} return {k.decode("utf-8"):v.decode("utf-8") for k,v in attdict.items()} def get_elements(self, rconn): "Returns a set of element names for this device as saved in redis" elementset = rconn.smembers(key('elements',self.name,self.device)) if not elementset: return set() return set(e.decode("utf-8") for e in elementset) def get_elements_dict(self, rconn, elementname): "Returns a dictionary of element attributes for the given element name, as saved in redis" elkey = 
key("elementattributes", elementname, self.name, self.device) eldict = rconn.hgetall(elkey) if not eldict: return {} return {k.decode("utf-8"):v.decode("utf-8") for k,v in eldict.items()} def write(self, rconn): "Saves this device, and property to redis connection rconn" # add the device to redis set 'devices' rconn.sadd(key('devices'), self.device) # add device to 'devices' rconn.sadd(key('properties', self.device), self.name) # add property name to 'properties:<devicename>' # Saves the instance attributes to redis, apart from self.elements mapping = {key:value for key,value in self.__dict__.items() if (key != "elements") and (not key.startswith("_"))} rconn.hmset(key('attributes',self.name,self.device), mapping) # save updated elements for element in self.elements.values(): element.write(rconn, self.device, self.name) # save list of element names # get list of element names sorted by label elementlist = list(self.elements.keys()) elementlist.sort(key=lambda x: self.elements[x].label) for elementname in elementlist: rconn.sadd(key('elements', self.name, self.device), elementname) # add element name to 'elements:<propertyname>:<devicename>' def log(self, rconn, timestamp): "Reads last log entry in redis for this object, and, if changed, logs change with the given timestamp" global _LOGLENGTHS # log changes in devices to logdata:devices deviceset = self.get_devices(rconn) logkey = key("logdata", "devices") logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logdevices = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_devices_list" logdeviceset = set(json.loads(logdevices)) if logdeviceset != deviceset: # there has been a change in the devices newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, 
_LOGLENGTHS['devices']) # log changes in property names to logdata:properties:<devicename> propertyset = self.get_properties(rconn) logkey = key("logdata", 'properties', self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logproperties = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_properties_list" logpropertyset = set(json.loads(logproperties)) if logpropertyset != propertyset: # there has been a change in the properties newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['properties']) # log changes in attributes to logdata:attributes:<propertyname>:<devicename> attdict = self.get_attributes(rconn) logkey = key("logdata", 'attributes',self.name,self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(attdict) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logattributes = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_attributes_dict" logattdict = json.loads(logattributes) if logattdict != attdict: # there has been a change in the attributes newstring = timestamp + " " + json.dumps(attdict) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['attributes']) # log changes in element names to logdata:elements:<propertyname>:<devicename> elementset = self.get_elements(rconn) logkey = key("logdata", 'elements',self.name,self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(elementset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logelements = logentry.decode("utf-8").split(" ", maxsplit=1) # decode 
b"timestamp json_string_of_elements_list" logelementset = set(json.loads(logelements)) if logelementset != elementset: # there has been a change in the elements newstring = timestamp + " " + json.dumps(list(elementset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['elements']) # log changes in element attributes for element in self.elements.values(): # log changes in attributes to logdata:elementattributes:<elementname>:<propertyname>:<devicename> elattdict = self.get_elements_dict(rconn, element.name) logkey = key("logdata", 'elementattributes',element.name, self.name, self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(elattdict) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logelattributes = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_element_attributes_dict" logelattdict = json.loads(logelattributes) if logelattdict != elattdict: # there has been a change in the element attributes newstring = timestamp + " " + json.dumps(elattdict) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS[self.vector.lower()]) @classmethod def read(cls, rconn, device, name): """Reads redis and returns an instance of this class""" # If device is not in the 'devices' set, return None if not rconn.sismember(key('devices'), device): return # If the property name is not recognised as a property of the device, return None if not rconn.sismember(key('properties', device), name): return # create an object of this class obj = cls() obj.setup_from_redis(rconn, device, name) if not obj._status: return return obj def update(self, rconn, vector): "Update the object attributes to redis" self.timestamp = vector.get("timestamp", datetime.utcnow().isoformat()) # moment when these data were valid self.timeout = vector.get("timeout", 0) for child in vector: element = 
self.elements[child.get("name")] element.update(rconn, self.device, self.name, child, self.timestamp, self.timeout) state = vector.get("state") # set state of Property; Idle, OK, Busy or Alert, no change if absent if state: self.state = state self.message = vector.get("message", "") # Saves the instance attributes to redis, apart from self.elements mapping = {key:value for key,value in self.__dict__.items() if (key != "elements") and (not key.startswith("_"))} rconn.hmset(key('attributes',self.name,self.device), mapping) @classmethod def update_from_setvector(cls, rconn, setvector): """Gets an instance of this class from redis, and updates it according to the instructions from the setvector Returns an updated instance of this class or None if unable to read the property""" device = setvector.get("device") if device is None: return name = setvector.get("name") if name is None: return # Create an instance of the class, by reading the property from redis currentvector = cls.read(rconn, device, name) if currentvector is None: # device or property is unknown return # call the update method of the property, this writes changes to redis currentvector.update(rconn, setvector) return currentvector def element_names(self): "Returns a list of element names" return list(self.elements.keys()) def __getitem__(self, key): "key is an element name, returns an element object" return self.elements[key] def __setitem__(self, key, value): "key is an element name, value is an element" if key != value.name: raise ValueError("The key should be equal to the name set in the element") self.elements[key] = value def __contains__(self, name): "Check if an element with this name is in the vector" return name in self.elements def __iter__(self): "Iterating over the property gives the elements" for element in self.elements.values(): yield element def __str__(self): "Creates a string of label:states" if not self.elements: return "" result = "" for element in self.elements.values(): result += 
element.label + " : " + str(element)+"\n" return result class ParentElement(): "Parent to Text, Number, Switch, Lights, Blob elements" def __init__(self, timestamp, timeout=0): "Adds timestamp and timeout to self, gets them from Vector parent" self.timestamp = timestamp self.timeout = timeout def setup_from_def(self, child, **kwargs): self.name = child.get("name") # name of the element, required value self.label = child.get("label", self.name) # GUI label, use name by default def setup_from_redis(self, rconn, device, name, element_name): self.name = element_name.decode("utf-8") self._status = False # read status, will be set to True if items read from redis are ok self._strattribs = self.get_attributes(rconn, device, name) if not self._strattribs: return self.label = self._strattribs["label"] # timestamp and timeout should already be set by the vector when instance created # but read them from redis anyway. Could be used as a form of checking to ensure # vector and elements are synchronised if "timestamp" in self._strattribs: self.timestamp = self._strattribs["timestamp"] if "timeout" in self._strattribs: self.timeout = self._strattribs["timeout"] def get_attributes(self, rconn, device, name): "Returns a dictionary of attributes for this element, given property name and device as saved in redis" attdict = rconn.hgetall(key('elementattributes', self.name, name, device)) if not attdict: return {} return {k.decode("utf-8"):v.decode("utf-8") for k,v in attdict.items()} def write(self, rconn, device, name): "Writes element attributes to redis" # create dictionary of non-private attributes attribs = {key:val for key,val in self.__dict__.items() if not key.startswith("_")} if attribs: rconn.hmset(key('elementattributes',self.name, name, device), attribs) def update(self, rconn, device, name, child, timestamp, timeout, **kwargs): "update the element, from a vector child, and write to redis" self.timestamp = timestamp self.timeout = timeout self.set_value(child) # change 
value to that given by the xml child self.write(rconn, device, name) def set_value(self, child): if (child is None) or (not child.text): self.value = "" else: self.value = child.text.strip() # remove any newlines around the xml text ################ Text ###################### class TextVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability for child in vector: element = TextElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = TextElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful class TextElement(ParentElement): "text elements contained in a TextVector" def setup_from_def(self, child, **kwargs): self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.value = self._strattribs["value"] self._status = True def __str__(self): return self.value ################ Number ###################### class NumberVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... 
element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability for child in vector: element = NumberElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = NumberElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful class NumberElement(ParentElement): "number elements contained in a NumberVector" def setup_from_def(self, child, **kwargs): # required number attributes self.format = child.get("format") # printf-style format for GUI display self.min = child.get("min") # minimal value self.max = child.get("max") # maximum value, ignore if min == max self.step = child.get("step") # allowed increments, ignore if 0 # get the raw self.value self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.format = self._strattribs["format"] self.min = self._strattribs["min"] self.max = self._strattribs["max"] self.step = self._strattribs["step"] self.value = self._strattribs["value"] self._status = True def write(self, rconn, device, name): "Writes element attributes to redis" # create dictionary of non-private attributes 
attribs = {key:val for key,val in self.__dict__.items() if not key.startswith("_")} attribs["formatted_number"] = self.formatted_number() attribs["float_number"] = self.float_number() attribs["float_min"] = self.float_min() attribs["float_max"] = self.float_max() attribs["float_step"] = self.float_step() rconn.hmset(key('elementattributes',self.name, name, device), attribs) def formatted_number(self): """Returns the string of the number using the format value""" floatvalue = self.float_number() return tools.format_number(floatvalue, self.format) def float_number(self): """Returns the float of the number value""" return tools.number_to_float(self.value) def float_min(self): "Returns the float of the min value" return tools.number_to_float(self.min) def float_max(self): "Returns the float of the max value" return tools.number_to_float(self.max) def float_step(self): "Returns the float of the step value" return tools.number_to_float(self.step) def __str__(self): "Returns the formatted number, equivalent to self.formatted_number()" return self.formatted_number() ################ Switch ###################### class SwitchVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability self.rule = vector.get("rule") # hint for GUI presentation (OneOfMany|AtMostOne|AnyOfMany) for child in vector: element = SwitchElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... 
element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] self.rule = self._strattribs["rule"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = SwitchElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful def _set_permission(self, permission): "Sets the possible permissions, Read-Only or Read-Write" if permission in ('ro', 'rw'): self.perm = permission else: self.perm = 'ro' class SwitchElement(ParentElement): "switch elements contained in a SwitchVector" def setup_from_def(self, child, **kwargs): "value should be Off or On" self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.value = self._strattribs["value"] self._status = True def __str__(self): return self.value ################ Lights ###################### class LightVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... element" super().setup_from_def(rconn, vector) self.perm = 'ro' # permission always Read-Only for child in vector: element = LightElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... 
element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = 'ro' # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = LightElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful class LightElement(ParentElement): "light elements contained in a LightVector" def setup_from_def(self, child, **kwargs): self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.value = self._strattribs["value"] self._status = True def __str__(self): return self.value ################ BLOB ###################### class BLOBVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... 
element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability # as default blobs are disabled, check if this device is already known # in redis and if blobs were previously enabled attribs = self.get_attributes(rconn) if attribs and attribs['blobs'] == "Enabled": self.blobs = "Enabled" else: self.blobs = "Disabled" for child in vector: element = BLOBElement(self.timestamp, self.timeout) element.setup_from_def(child) # A defBLOB only has name and label, contents are empty, however if blobs are enabled # and this BLOB element has been previously defined, and a filepath saved in redis, # then get element pathname etc from redis if self.blobs == "Enabled": element.set_file(rconn, self.device, self.name, child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] self.blobs = self._strattribs["blobs"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = BLOBElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful def update(self, rconn, vector): "Update the object attributes and changed elements to redis" # as this is only called when a setBLOBVector is received, it must mean that blobs are enabled self.blobs = "Enabled" super().update(rconn, vector) def __str__(self): "Creates a string of labels" if not self.elements: return "" result = "" for element in self.elements.values(): result += element.label + "\n" return result class 
BLOBElement(ParentElement): "BLOB elements contained in a BLOBVector" def setup_from_def(self, child, **kwargs): "Set up element from xml" # A defBLOB only has name and label, contents are empty # name and label are set in super super().setup_from_def(child, **kwargs) # initialise data self.size = "" # number of bytes in decoded and uncompressed BLOB self.format = "" # format as a file suffix, eg: .z, .fits, .fits.z self.filepath = "" def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.size = self._strattribs["size"] self.format = self._strattribs["format"] self.filepath = self._strattribs["filepath"] self._status = True def update(self, rconn, device, name, child, timestamp, timeout, **kwargs): "update the element, from a vector child, and write to redis" self.timestamp = timestamp self.timeout = timeout self.size = child.get("size") # number of bytes in decoded and uncompressed BLOB self.format = child.get("format") # format as a file suffix, eg: .z, .fits, .fits.z # If child.text, save standard_b64decode(child.text) to a file # and set the new filepath attribute of the element self.set_file(rconn, device, name, child) self.write(rconn, device, name) def set_value(self, child): "value is not used for a Blob" return def set_file(self, rconn, devicename, propertyname, child): """If child.text is blob data, this saves the file, and sets a filepath attribute If no text, checks if redis contains a previous filepath and uses that""" if not _BLOBFOLDER: return # check if the _BLOBFOLDER exists if not _BLOBFOLDER.exists(): # if not, create it _BLOBFOLDER.mkdir(parents=True) if child.text is None: # no new file # Check if a filepath exists in redis attribs = self.get_attributes(rconn, devicename, propertyname) if not attribs: # no new file is given in child.text, nor any file currently exists return # 
################  Message ####################

class Message():
    "a message associated with a device or entire system"

    def __init__(self, child):
        "Creates the message from a <message> xml child element"
        self.device = child.get("device", "")  # considered to be site-wide if absent
        # moment when this message was generated
        self.timestamp = child.get("timestamp", datetime.utcnow().isoformat())
        self.message = child.get("message", "")  # Received message

    @classmethod
    def get_message(cls, rconn, device=""):
        """Return the last message as list of [timestamp, message] or [] if not available
           If device not given, return the last system message
           If device given, the last message from this device is returned"""
        if device:
            mkey = key("devicemessages", device)
        else:
            mkey = key("messages")
        message = rconn.get(mkey)
        if message is None:
            return []
        return message.decode("utf-8").split(" ", maxsplit=1)  # decode b"timestamp message"

    def write(self, rconn):
        "Saves this message as a string, 'timestamp message'"
        if not self.message:
            # nothing to save
            return
        time_and_message = self.timestamp + " " + self.message
        if self.device:
            # device-scoped message
            rconn.set(key('devicemessages', self.device), time_and_message)
        else:
            # system-wide message
            rconn.set(key('messages'), time_and_message)

    def log(self, rconn, timestamp):
        "Reads last log entry in redis for this object, and, if changed, logs change with the given timestamp"
        global _LOGLENGTHS
        # log changes in messages to logdata:messages or to logdata:devicemessages:<devicename>
        messagelist = self.get_message(rconn, device=self.device)
        if not messagelist:
            # no stored message to log
            return
        if self.device:
            logkey = key("logdata", "devicemessages", self.device)
        else:
            logkey = key("logdata", "messages")
        logentry = rconn.lindex(logkey, 0)  # gets last log entry
        if logentry is None:
            # first log entry for this key
            newstring = timestamp + " " + json.dumps(messagelist)
            rconn.lpush(logkey, newstring)
        else:
            # Get the last log
            logtime, logmessage = logentry.decode("utf-8").split(" ", maxsplit=1)
            # decode b"timestamp json_string_of_[timestamp message]"
            logmessagelist = json.loads(logmessage)
            if logmessagelist != messagelist:
                # there has been a change in the message
                newstring = timestamp + " " + json.dumps(messagelist)
                rconn.lpush(logkey, newstring)
                # and limit number of logs
                rconn.ltrim(logkey, 0, _LOGLENGTHS['messages'])

    def __str__(self):
        return self.message


##################  Deleting #####################

class delProperty():

# A Device may tell a Client a given Property is no longer available by sending delProperty. If the command specifies only a
# Device without a Property, the Client must assume all the Properties for that Device, and indeed the Device itself, are no
# longer available.
def __init__(self, child): "Delete the given property, or device if property name is None" self.device = child.get("device") self.name = child.get("name", "") self.timestamp = child.get("timestamp", datetime.utcnow().isoformat()) # moment when this message was generated self.message = child.get("message", "") # Received message def write(self, rconn): "Deletes the property or device from redis" global _LOGLENGTHS if self.name: # delete the property and add the message to the device message if self.message: time_and_message = f"{self.timestamp} {self.message}" else: time_and_message = f"{self.timestamp} Property {self.name} deleted from device {self.device}" rconn.set(key('messages', self.device), time_and_message) # delete all elements associated with the property elements = rconn.smembers(key('elements', self.name, self.device)) # delete the set of elements for this property rconn.delete(key('elements', self.name, self.device)) element_names = list(en.decode("utf-8") for en in elements) for name in element_names: # delete the element attributes rconn.delete(key('elementattributes', name, self.name, self.device)) # and delete the property rconn.srem(key('properties', self.device), self.name) rconn.delete(key('attributes', self.name, self.device)) else: # delete the device and add the message to the system message if self.message: time_and_message = f"{self.timestamp} {self.message}" else: time_and_message = f"{self.timestamp} {self.device} deleted" rconn.set(key('messages'), time_and_message) # and delete all keys associated with the device properties = rconn.smembers(key('properties', self.device)) # delete the set of properties rconn.delete(key('properties', self.device)) property_names = list(pn.decode("utf-8") for pn in properties) for name in property_names: # delete all elements associated with the property elements = rconn.smembers(key('elements', name, self.device)) # delete the set of elements for this property rconn.delete(key('elements', name, 
self.device)) element_names = list(en.decode("utf-8") for en in elements) for ename in element_names: # delete the element attributes rconn.delete(key('elementattributes', ename, name, self.device)) # delete the properties attributes rconn.delete(key('attributes', name, self.device)) # delete messages associated with the device rconn.delete(key('messages', self.device)) # delete the device from the 'devices' set rconn.srem(key('devices'), self.device) def log(self, rconn, timestamp): "Reads last log entry in redis for this object, and, if changed, logs change with the given timestamp" global _LOGLENGTHS if self.name: # a property has been deleted, log changes in property names to logdata:properties:<devicename> propertysetfromredis = rconn.smembers(key('properties', self.device)) if not propertysetfromredis: propertyset = set(["--None--"]) else: propertyset = set(p.decode("utf-8") for p in propertysetfromredis) logkey = key("logdata", 'properties', self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logproperties = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_properties_list" logpropertyset = set(json.loads(logproperties)) if logpropertyset != propertyset: # there has been a change in the properties newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['properties']) # log changes in messages to logdata:devicemessages:<devicename> messagelist = Message.get_message(rconn, device=self.device) if not messagelist: return logkey = key("logdata", "devicemessages", self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) else: # Get the last log logtime, 
logmessage = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_[timestamp message]" logmessagelist = json.loads(logmessage) if logmessagelist != messagelist: # there has been a change in the message newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['messages']) else: # no property name, so an entire device has been wiped # log changes in devices to logdata:devices deviceset = ParentProperty.get_devices(rconn) logkey = key("logdata", "devices") logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logdevices = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_devices_list" logdeviceset = set(json.loads(logdevices)) if logdeviceset != deviceset: # there has been a change in the devices newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['devices']) # log changes in messages to logdata:messages messagelist = Message.get_message(rconn) if not messagelist: return logkey = key("logdata", "messages") logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logmessage = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_[timestamp message]" logmessagelist = json.loads(logmessage) if logmessagelist != messagelist: # there has been a change in the message newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['messages'])
################### # # fromindi.py # ################### """Reads indi xml strings, parses them and places values into redis, ready for reading by the web server.""" import xml.etree.ElementTree as ET import os, math, json, pathlib from datetime import datetime from base64 import standard_b64decode, standard_b64encode from . import tools # All xml data received should be contained in one of the following tags TAGS = (b'defTextVector', b'defNumberVector', b'defSwitchVector', b'defLightVector', b'defBLOBVector', b'message', b'delProperty', b'setTextVector', b'setNumberVector', b'setSwitchVector', b'setLightVector', b'setBLOBVector' ) ########## redis keys and channels _KEYPREFIX = "" _TO_INDI_CHANNEL = "" _FROM_INDI_CHANNEL = "" # redis keys and data # # one key : set # 'devices' - set of device names ('devices' is a literal string) # multiple keys : sets # 'properties:<devicename>' - set of property names for the device ('properties' is a literal string # <devicename> is an actual device name) # multiple keys : hash tables ( python dictionaries ) # 'attributes:<propertyname>:<devicename>' - dictionary of attributes for the property ('attributes' is a literal string # <propertyname> is an actual property name # <devicename> is an actual device name # one key : list # 'messages' - list of "Timestamp space message" # multiple keys : lists # 'devicemessages:<devicename>' - list of "Timestamp space message" # multiple keys : sets # 'elements:<propertyname>:<devicename>' - set of element names for the device property # ('elements' is a literal string # <propertyname> is an actual property name # <devicename> is an actual device name) # multiple keys : hash tables ( python dictionaries ) # 'elementattributes:<elementname>:<propertyname>:<devicename>' - dictionary of attributes for the element # ('elementattributes' is a literal string # <elementname> is an actual element name # <propertyname> is an actual property name # <devicename> is an actual device name) _LOGLENGTHS 
= { 'devices' : 5, 'properties' : 5, 'attributes' : 5, 'elements': 5, 'messages': 5, 'textvector': 5, 'numbervector':50, 'switchvector':5, 'lightvector':5, 'blobvector':5 } _BLOBFOLDER = "" def receive_from_indiserver(data, root, rconn): """receives xml data, parses it and stores in redis. Publishes the data received on _FROM_INDI_CHANNEL, returns device name if given, or None""" global _FROM_INDI_CHANNEL if rconn is None: return # this timestamp is the time at which the data is received timestamp = datetime.utcnow().isoformat(sep='T') devicename = None if root.tag == "defTextVector": text_vector = TextVector() # create a TextVector object text_vector.setup_from_def(rconn, root) # store the received data in a TextVector object text_vector.write(rconn) # call the write method to store data in redis text_vector.log(rconn, timestamp) devicename = text_vector.device elif root.tag == "defNumberVector": number_vector = NumberVector() number_vector.setup_from_def(rconn, root) number_vector.write(rconn) number_vector.log(rconn, timestamp) devicename = number_vector.device elif root.tag == "defSwitchVector": switch_vector = SwitchVector() switch_vector.setup_from_def(rconn, root) switch_vector.write(rconn) switch_vector.log(rconn, timestamp) devicename = switch_vector.device elif root.tag == "defLightVector": light_vector = LightVector() light_vector.setup_from_def(rconn, root) light_vector.write(rconn) light_vector.log(rconn, timestamp) devicename = light_vector.device elif root.tag == "defBLOBVector": blob_vector = BLOBVector() blob_vector.setup_from_def(rconn, root) blob_vector.write(rconn) blob_vector.log(rconn, timestamp) devicename = blob_vector.device elif root.tag == "message": message = Message(root) message.write(rconn) message.log(rconn, timestamp) elif root.tag == "delProperty": delprop = delProperty(root) delprop.write(rconn) delprop.log(rconn, timestamp) elif root.tag == "setTextVector": text_vector = TextVector.update_from_setvector(rconn, root) if 
text_vector is not None: text_vector.log(rconn, timestamp) elif root.tag == "setNumberVector": number_vector = NumberVector.update_from_setvector(rconn, root) if number_vector is not None: number_vector.log(rconn, timestamp) elif root.tag == "setSwitchVector": switch_vector = SwitchVector.update_from_setvector(rconn, root) if switch_vector is not None: switch_vector.log(rconn, timestamp) elif root.tag == "setLightVector": light_vector = LightVector.update_from_setvector(rconn, root) if light_vector is not None: light_vector.log(rconn, timestamp) elif root.tag == "setBLOBVector": blob_vector = BLOBVector.update_from_setvector(rconn, root) if blob_vector is not None: blob_vector.log(rconn, timestamp) # and publishes the data received rconn.publish(_FROM_INDI_CHANNEL, data) return devicename def setup_redis(key_prefix, to_indi_channel, from_indi_channel, log_lengths, blob_folder): "Sets the redis key prefix and pubsub channels" global _KEYPREFIX, _TO_INDI_CHANNEL, _FROM_INDI_CHANNEL, _LOGLENGTHS, _BLOBFOLDER if key_prefix: _KEYPREFIX = key_prefix else: _KEYPREFIX = "" if to_indi_channel: _TO_INDI_CHANNEL = to_indi_channel else: _TO_INDI_CHANNEL = "" if from_indi_channel: _FROM_INDI_CHANNEL = from_indi_channel else: _FROM_INDI_CHANNEL = "" if log_lengths: # ensure no item in log_lengths has a value less than 1 new_log_lengths = {} for key,value in log_lengths.items(): if value<1: new_log_lengths[key]=1 else: new_log_lengths[key]=value _LOGLENGTHS.update(new_log_lengths) if blob_folder: _BLOBFOLDER = blob_folder else: _BLOBFOLDER = "" def get_to_indi_channel(): return _TO_INDI_CHANNEL def get_from_indi_channel(): return _FROM_INDI_CHANNEL def key(*keys): "Add the prefix to keys, delimit keys with :" # example - if keys are 'device', 'property' this will result in a key of # 'keyprefixdevice:property' return _KEYPREFIX + ":".join(keys) ############# Define properties class ParentProperty(): "Parent to Text, Number, Switch, Lights, Blob vectors" def __init__(self): 
"Parent Item" # add the class name so it is saved with attributes to redis, so the type of vector can be read self.vector = self.__class__.__name__ # self.elements is a dictionary which will hold the elements within this vector, keys are element names self.elements = {} def setup_from_def(self, rconn, vector): "Set up the object from def... element" self.device = vector.get("device") # name of Device self.name = vector.get("name") # name of Property # state case may be incorrect (some confusion in white paper over the case of 'Ok') state = vector.get("state").lower() # current state of Property should be one of Idle, Ok, Busy or Alert if state == "idle": self.state = "Idle" elif state == "ok": self.state = "Ok" elif state == "busy": self.state = "Busy" else: self.state = "Alert" # implied properties self.label = vector.get("label", self.name) # GUI label, use name by default self.group = vector.get("group", "DEFAULT GROUP") # Property group membership, blank by default self.timestamp = vector.get("timestamp", datetime.utcnow().isoformat()) # moment when these data were valid self.timeout = vector.get("timeout", 0) # worse-case time to affect, 0 default, N/A for ro self.message = vector.get("message", "") def setup_from_redis(self, rconn, device, name): "Set up the object from set... 
element" self.device = device # name of Device self.name = name # name of Property self._status = False # read status, will be set to True if items read from redis are ok # read attributes from redis self._strattribs = self.get_attributes(rconn) # this should be a dictionary of attributes, if not found, it will be an empty dictionary if not self._strattribs: # returns with an empty self._strattribs return self.state = self._strattribs["state"] self.label = self._strattribs["label"] self.group = self._strattribs["group"] self.timestamp = self._strattribs["timestamp"] self.timeout = self._strattribs["timeout"] self.message = self._strattribs["message"] def _set_permission(self, permission): "Sets the possible permissions, Read-Only, Write-Only or Read-Write" if permission in ('ro', 'wo', 'rw'): self.perm = permission else: self.perm = 'ro' @staticmethod def get_devices(rconn): "Return a set of device names as saved in redis" deviceset = rconn.smembers(key('devices')) if not deviceset: return set() return set(d.decode("utf-8") for d in deviceset) def get_properties(self, rconn): "Returns a set of property names for this device as saved in redis" propertyset = rconn.smembers(key('properties', self.device)) if not propertyset: return set() return set(p.decode("utf-8") for p in propertyset) def get_attributes(self, rconn): "Returns a dictionary of attributes for this property and device as saved in redis" attdict = rconn.hgetall(key('attributes',self.name,self.device)) if not attdict: return {} return {k.decode("utf-8"):v.decode("utf-8") for k,v in attdict.items()} def get_elements(self, rconn): "Returns a set of element names for this device as saved in redis" elementset = rconn.smembers(key('elements',self.name,self.device)) if not elementset: return set() return set(e.decode("utf-8") for e in elementset) def get_elements_dict(self, rconn, elementname): "Returns a dictionary of element attributes for the given element name, as saved in redis" elkey = 
key("elementattributes", elementname, self.name, self.device) eldict = rconn.hgetall(elkey) if not eldict: return {} return {k.decode("utf-8"):v.decode("utf-8") for k,v in eldict.items()} def write(self, rconn): "Saves this device, and property to redis connection rconn" # add the device to redis set 'devices' rconn.sadd(key('devices'), self.device) # add device to 'devices' rconn.sadd(key('properties', self.device), self.name) # add property name to 'properties:<devicename>' # Saves the instance attributes to redis, apart from self.elements mapping = {key:value for key,value in self.__dict__.items() if (key != "elements") and (not key.startswith("_"))} rconn.hmset(key('attributes',self.name,self.device), mapping) # save updated elements for element in self.elements.values(): element.write(rconn, self.device, self.name) # save list of element names # get list of element names sorted by label elementlist = list(self.elements.keys()) elementlist.sort(key=lambda x: self.elements[x].label) for elementname in elementlist: rconn.sadd(key('elements', self.name, self.device), elementname) # add element name to 'elements:<propertyname>:<devicename>' def log(self, rconn, timestamp): "Reads last log entry in redis for this object, and, if changed, logs change with the given timestamp" global _LOGLENGTHS # log changes in devices to logdata:devices deviceset = self.get_devices(rconn) logkey = key("logdata", "devices") logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logdevices = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_devices_list" logdeviceset = set(json.loads(logdevices)) if logdeviceset != deviceset: # there has been a change in the devices newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, 
_LOGLENGTHS['devices']) # log changes in property names to logdata:properties:<devicename> propertyset = self.get_properties(rconn) logkey = key("logdata", 'properties', self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logproperties = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_properties_list" logpropertyset = set(json.loads(logproperties)) if logpropertyset != propertyset: # there has been a change in the properties newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['properties']) # log changes in attributes to logdata:attributes:<propertyname>:<devicename> attdict = self.get_attributes(rconn) logkey = key("logdata", 'attributes',self.name,self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(attdict) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logattributes = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_attributes_dict" logattdict = json.loads(logattributes) if logattdict != attdict: # there has been a change in the attributes newstring = timestamp + " " + json.dumps(attdict) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['attributes']) # log changes in element names to logdata:elements:<propertyname>:<devicename> elementset = self.get_elements(rconn) logkey = key("logdata", 'elements',self.name,self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(elementset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logelements = logentry.decode("utf-8").split(" ", maxsplit=1) # decode 
b"timestamp json_string_of_elements_list" logelementset = set(json.loads(logelements)) if logelementset != elementset: # there has been a change in the elements newstring = timestamp + " " + json.dumps(list(elementset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['elements']) # log changes in element attributes for element in self.elements.values(): # log changes in attributes to logdata:elementattributes:<elementname>:<propertyname>:<devicename> elattdict = self.get_elements_dict(rconn, element.name) logkey = key("logdata", 'elementattributes',element.name, self.name, self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(elattdict) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logelattributes = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_element_attributes_dict" logelattdict = json.loads(logelattributes) if logelattdict != elattdict: # there has been a change in the element attributes newstring = timestamp + " " + json.dumps(elattdict) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS[self.vector.lower()]) @classmethod def read(cls, rconn, device, name): """Reads redis and returns an instance of this class""" # If device is not in the 'devices' set, return None if not rconn.sismember(key('devices'), device): return # If the property name is not recognised as a property of the device, return None if not rconn.sismember(key('properties', device), name): return # create an object of this class obj = cls() obj.setup_from_redis(rconn, device, name) if not obj._status: return return obj def update(self, rconn, vector): "Update the object attributes to redis" self.timestamp = vector.get("timestamp", datetime.utcnow().isoformat()) # moment when these data were valid self.timeout = vector.get("timeout", 0) for child in vector: element = 
self.elements[child.get("name")] element.update(rconn, self.device, self.name, child, self.timestamp, self.timeout) state = vector.get("state") # set state of Property; Idle, OK, Busy or Alert, no change if absent if state: self.state = state self.message = vector.get("message", "") # Saves the instance attributes to redis, apart from self.elements mapping = {key:value for key,value in self.__dict__.items() if (key != "elements") and (not key.startswith("_"))} rconn.hmset(key('attributes',self.name,self.device), mapping) @classmethod def update_from_setvector(cls, rconn, setvector): """Gets an instance of this class from redis, and updates it according to the instructions from the setvector Returns an updated instance of this class or None if unable to read the property""" device = setvector.get("device") if device is None: return name = setvector.get("name") if name is None: return # Create an instance of the class, by reading the property from redis currentvector = cls.read(rconn, device, name) if currentvector is None: # device or property is unknown return # call the update method of the property, this writes changes to redis currentvector.update(rconn, setvector) return currentvector def element_names(self): "Returns a list of element names" return list(self.elements.keys()) def __getitem__(self, key): "key is an element name, returns an element object" return self.elements[key] def __setitem__(self, key, value): "key is an element name, value is an element" if key != value.name: raise ValueError("The key should be equal to the name set in the element") self.elements[key] = value def __contains__(self, name): "Check if an element with this name is in the vector" return name in self.elements def __iter__(self): "Iterating over the property gives the elements" for element in self.elements.values(): yield element def __str__(self): "Creates a string of label:states" if not self.elements: return "" result = "" for element in self.elements.values(): result += 
element.label + " : " + str(element)+"\n" return result class ParentElement(): "Parent to Text, Number, Switch, Lights, Blob elements" def __init__(self, timestamp, timeout=0): "Adds timestamp and timeout to self, gets them from Vector parent" self.timestamp = timestamp self.timeout = timeout def setup_from_def(self, child, **kwargs): self.name = child.get("name") # name of the element, required value self.label = child.get("label", self.name) # GUI label, use name by default def setup_from_redis(self, rconn, device, name, element_name): self.name = element_name.decode("utf-8") self._status = False # read status, will be set to True if items read from redis are ok self._strattribs = self.get_attributes(rconn, device, name) if not self._strattribs: return self.label = self._strattribs["label"] # timestamp and timeout should already be set by the vector when instance created # but read them from redis anyway. Could be used as a form of checking to ensure # vector and elements are synchronised if "timestamp" in self._strattribs: self.timestamp = self._strattribs["timestamp"] if "timeout" in self._strattribs: self.timeout = self._strattribs["timeout"] def get_attributes(self, rconn, device, name): "Returns a dictionary of attributes for this element, given property name and device as saved in redis" attdict = rconn.hgetall(key('elementattributes', self.name, name, device)) if not attdict: return {} return {k.decode("utf-8"):v.decode("utf-8") for k,v in attdict.items()} def write(self, rconn, device, name): "Writes element attributes to redis" # create dictionary of non-private attributes attribs = {key:val for key,val in self.__dict__.items() if not key.startswith("_")} if attribs: rconn.hmset(key('elementattributes',self.name, name, device), attribs) def update(self, rconn, device, name, child, timestamp, timeout, **kwargs): "update the element, from a vector child, and write to redis" self.timestamp = timestamp self.timeout = timeout self.set_value(child) # change 
value to that given by the xml child self.write(rconn, device, name) def set_value(self, child): if (child is None) or (not child.text): self.value = "" else: self.value = child.text.strip() # remove any newlines around the xml text ################ Text ###################### class TextVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability for child in vector: element = TextElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = TextElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful class TextElement(ParentElement): "text elements contained in a TextVector" def setup_from_def(self, child, **kwargs): self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.value = self._strattribs["value"] self._status = True def __str__(self): return self.value ################ Number ###################### class NumberVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... 
element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability for child in vector: element = NumberElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = NumberElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful class NumberElement(ParentElement): "number elements contained in a NumberVector" def setup_from_def(self, child, **kwargs): # required number attributes self.format = child.get("format") # printf-style format for GUI display self.min = child.get("min") # minimal value self.max = child.get("max") # maximum value, ignore if min == max self.step = child.get("step") # allowed increments, ignore if 0 # get the raw self.value self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.format = self._strattribs["format"] self.min = self._strattribs["min"] self.max = self._strattribs["max"] self.step = self._strattribs["step"] self.value = self._strattribs["value"] self._status = True def write(self, rconn, device, name): "Writes element attributes to redis" # create dictionary of non-private attributes 
attribs = {key:val for key,val in self.__dict__.items() if not key.startswith("_")} attribs["formatted_number"] = self.formatted_number() attribs["float_number"] = self.float_number() attribs["float_min"] = self.float_min() attribs["float_max"] = self.float_max() attribs["float_step"] = self.float_step() rconn.hmset(key('elementattributes',self.name, name, device), attribs) def formatted_number(self): """Returns the string of the number using the format value""" floatvalue = self.float_number() return tools.format_number(floatvalue, self.format) def float_number(self): """Returns the float of the number value""" return tools.number_to_float(self.value) def float_min(self): "Returns the float of the min value" return tools.number_to_float(self.min) def float_max(self): "Returns the float of the max value" return tools.number_to_float(self.max) def float_step(self): "Returns the float of the step value" return tools.number_to_float(self.step) def __str__(self): "Returns the formatted number, equivalent to self.formatted_number()" return self.formatted_number() ################ Switch ###################### class SwitchVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability self.rule = vector.get("rule") # hint for GUI presentation (OneOfMany|AtMostOne|AnyOfMany) for child in vector: element = SwitchElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... 
element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] self.rule = self._strattribs["rule"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = SwitchElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful def _set_permission(self, permission): "Sets the possible permissions, Read-Only or Read-Write" if permission in ('ro', 'rw'): self.perm = permission else: self.perm = 'ro' class SwitchElement(ParentElement): "switch elements contained in a SwitchVector" def setup_from_def(self, child, **kwargs): "value should be Off or On" self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.value = self._strattribs["value"] self._status = True def __str__(self): return self.value ################ Lights ###################### class LightVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... element" super().setup_from_def(rconn, vector) self.perm = 'ro' # permission always Read-Only for child in vector: element = LightElement(self.timestamp, self.timeout) element.setup_from_def(child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... 
element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = 'ro' # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = LightElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful class LightElement(ParentElement): "light elements contained in a LightVector" def setup_from_def(self, child, **kwargs): self.set_value(child) super().setup_from_def(child, **kwargs) def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.value = self._strattribs["value"] self._status = True def __str__(self): return self.value ################ BLOB ###################### class BLOBVector(ParentProperty): def setup_from_def(self, rconn, vector): "Set up the object from def... 
element" super().setup_from_def(rconn, vector) perm = vector.get("perm") self._set_permission(perm) # ostensible Client controlability # as default blobs are disabled, check if this device is already known # in redis and if blobs were previously enabled attribs = self.get_attributes(rconn) if attribs and attribs['blobs'] == "Enabled": self.blobs = "Enabled" else: self.blobs = "Disabled" for child in vector: element = BLOBElement(self.timestamp, self.timeout) element.setup_from_def(child) # A defBLOB only has name and label, contents are empty, however if blobs are enabled # and this BLOB element has been previously defined, and a filepath saved in redis, # then get element pathname etc from redis if self.blobs == "Enabled": element.set_file(rconn, self.device, self.name, child) self.elements[element.name] = element def setup_from_redis(self, rconn, device, name): "Set up the object from set... element" super().setup_from_redis(rconn, device, name) if not self._strattribs: # failed to read attributes return # the super call has set self._strattribs self.perm = self._strattribs["perm"] self.blobs = self._strattribs["blobs"] # read the elements elements = rconn.smembers(key('elements', name, device)) if not elements: return for element_name in elements: element = BLOBElement(self.timestamp, self.timeout) element.setup_from_redis(rconn, device, name, element_name) if not element._status: # failure to read the element return self.elements[element.name] = element self._status = True # read status set to True, redis read successful def update(self, rconn, vector): "Update the object attributes and changed elements to redis" # as this is only called when a setBLOBVector is received, it must mean that blobs are enabled self.blobs = "Enabled" super().update(rconn, vector) def __str__(self): "Creates a string of labels" if not self.elements: return "" result = "" for element in self.elements.values(): result += element.label + "\n" return result class 
BLOBElement(ParentElement): "BLOB elements contained in a BLOBVector" def setup_from_def(self, child, **kwargs): "Set up element from xml" # A defBLOB only has name and label, contents are empty # name and label are set in super super().setup_from_def(child, **kwargs) # initialise data self.size = "" # number of bytes in decoded and uncompressed BLOB self.format = "" # format as a file suffix, eg: .z, .fits, .fits.z self.filepath = "" def setup_from_redis(self, rconn, device, name, element_name): "Sets up element by reading redis" super().setup_from_redis(rconn, device, name, element_name) if not self._strattribs: # failed to read attributes return self.size = self._strattribs["size"] self.format = self._strattribs["format"] self.filepath = self._strattribs["filepath"] self._status = True def update(self, rconn, device, name, child, timestamp, timeout, **kwargs): "update the element, from a vector child, and write to redis" self.timestamp = timestamp self.timeout = timeout self.size = child.get("size") # number of bytes in decoded and uncompressed BLOB self.format = child.get("format") # format as a file suffix, eg: .z, .fits, .fits.z # If child.text, save standard_b64decode(child.text) to a file # and set the new filepath attribute of the element self.set_file(rconn, device, name, child) self.write(rconn, device, name) def set_value(self, child): "value is not used for a Blob" return def set_file(self, rconn, devicename, propertyname, child): """If child.text is blob data, this saves the file, and sets a filepath attribute If no text, checks if redis contains a previous filepath and uses that""" if not _BLOBFOLDER: return # check if the _BLOBFOLDER exists if not _BLOBFOLDER.exists(): # if not, create it _BLOBFOLDER.mkdir(parents=True) if child.text is None: # no new file # Check if a filepath exists in redis attribs = self.get_attributes(rconn, devicename, propertyname) if not attribs: # no new file is given in child.text, nor any file currently exists return # 
read from attributes, may not exist, so use the empty defaults self.filepath = attribs.get("filepath", "") if self.filepath: self.size = attribs.get("size","") self.format = attribs.get("format","") self.timestamp = attribs.get("timestamp", self.timestamp) return # a new file exists in child.text # make filename from timestamp, and change colon in the timestamp to _ for safer name filename = self.timestamp.replace(":", "_") + self.format counter = 0 while True: filepath = _BLOBFOLDER / filename if filepath.exists(): # append a digit to the filename counter += 1 filename = self.timestamp.replace(":", "_") + "_" + str(counter) + self.format else: # filepath does not exist, so a new file with this filepath can be created break filepath.write_bytes(standard_b64decode(child.text)) self.filepath = str(filepath) # size and format are specified in the child vector def __str__(self): return "" ################ Message #################### class Message(): "a message associated with a device or entire system" def __init__(self, child): self.device = child.get("device", "") # considered to be site-wide if absent self.timestamp = child.get("timestamp", datetime.utcnow().isoformat()) # moment when this message was generated self.message = child.get("message", "") # Received message @classmethod def get_message(cls, rconn, device=""): """Return the last message as list of [timestamp, message] or [] if not available If device not given, return the last system message If device given, the last message from this device is returned""" if device: mkey = key("devicemessages", device) else: mkey = key("messages") message = rconn.get(mkey) if message is None: return [] return message.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp message" def write(self, rconn): "Saves this message as a string, 'timestamp message'" if not self.message: return time_and_message = self.timestamp + " " + self.message if self.device: rconn.set(key('devicemessages', self.device), 
time_and_message) else: rconn.set(key('messages'), time_and_message) def log(self, rconn, timestamp): "Reads last log entry in redis for this object, and, if changed, logs change with the given timestamp" global _LOGLENGTHS # log changes in messages to logdata:messages or to logdata:devicemessages:<devicename> messagelist = self.get_message(rconn, device=self.device) if not messagelist: return if self.device: logkey = key("logdata", "devicemessages", self.device) else: logkey = key("logdata", "messages") logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logmessage = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_[timestamp message]" logmessagelist = json.loads(logmessage) if logmessagelist != messagelist: # there has been a change in the message newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['messages']) def __str__(self): return self.message ################## Deleting ##################### class delProperty(): # A Device may tell a Client a given Property is no longer available by sending delProperty. If the command specifies only a # Device without a Property, the Client must assume all the Properties for that Device, and indeed the Device itself, are no # longer available. 
def __init__(self, child): "Delete the given property, or device if property name is None" self.device = child.get("device") self.name = child.get("name", "") self.timestamp = child.get("timestamp", datetime.utcnow().isoformat()) # moment when this message was generated self.message = child.get("message", "") # Received message def write(self, rconn): "Deletes the property or device from redis" global _LOGLENGTHS if self.name: # delete the property and add the message to the device message if self.message: time_and_message = f"{self.timestamp} {self.message}" else: time_and_message = f"{self.timestamp} Property {self.name} deleted from device {self.device}" rconn.set(key('messages', self.device), time_and_message) # delete all elements associated with the property elements = rconn.smembers(key('elements', self.name, self.device)) # delete the set of elements for this property rconn.delete(key('elements', self.name, self.device)) element_names = list(en.decode("utf-8") for en in elements) for name in element_names: # delete the element attributes rconn.delete(key('elementattributes', name, self.name, self.device)) # and delete the property rconn.srem(key('properties', self.device), self.name) rconn.delete(key('attributes', self.name, self.device)) else: # delete the device and add the message to the system message if self.message: time_and_message = f"{self.timestamp} {self.message}" else: time_and_message = f"{self.timestamp} {self.device} deleted" rconn.set(key('messages'), time_and_message) # and delete all keys associated with the device properties = rconn.smembers(key('properties', self.device)) # delete the set of properties rconn.delete(key('properties', self.device)) property_names = list(pn.decode("utf-8") for pn in properties) for name in property_names: # delete all elements associated with the property elements = rconn.smembers(key('elements', name, self.device)) # delete the set of elements for this property rconn.delete(key('elements', name, 
self.device)) element_names = list(en.decode("utf-8") for en in elements) for ename in element_names: # delete the element attributes rconn.delete(key('elementattributes', ename, name, self.device)) # delete the properties attributes rconn.delete(key('attributes', name, self.device)) # delete messages associated with the device rconn.delete(key('messages', self.device)) # delete the device from the 'devices' set rconn.srem(key('devices'), self.device) def log(self, rconn, timestamp): "Reads last log entry in redis for this object, and, if changed, logs change with the given timestamp" global _LOGLENGTHS if self.name: # a property has been deleted, log changes in property names to logdata:properties:<devicename> propertysetfromredis = rconn.smembers(key('properties', self.device)) if not propertysetfromredis: propertyset = set(["--None--"]) else: propertyset = set(p.decode("utf-8") for p in propertysetfromredis) logkey = key("logdata", 'properties', self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logproperties = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_properties_list" logpropertyset = set(json.loads(logproperties)) if logpropertyset != propertyset: # there has been a change in the properties newstring = timestamp + " " + json.dumps(list(propertyset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['properties']) # log changes in messages to logdata:devicemessages:<devicename> messagelist = Message.get_message(rconn, device=self.device) if not messagelist: return logkey = key("logdata", "devicemessages", self.device) logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) else: # Get the last log logtime, 
logmessage = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_[timestamp message]" logmessagelist = json.loads(logmessage) if logmessagelist != messagelist: # there has been a change in the message newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['messages']) else: # no property name, so an entire device has been wiped # log changes in devices to logdata:devices deviceset = ParentProperty.get_devices(rconn) logkey = key("logdata", "devices") logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logdevices = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_devices_list" logdeviceset = set(json.loads(logdevices)) if logdeviceset != deviceset: # there has been a change in the devices newstring = timestamp + " " + json.dumps(list(deviceset)) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['devices']) # log changes in messages to logdata:messages messagelist = Message.get_message(rconn) if not messagelist: return logkey = key("logdata", "messages") logentry = rconn.lindex(logkey, 0) # gets last log entry if logentry is None: newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) else: # Get the last log logtime, logmessage = logentry.decode("utf-8").split(" ", maxsplit=1) # decode b"timestamp json_string_of_[timestamp message]" logmessagelist = json.loads(logmessage) if logmessagelist != messagelist: # there has been a change in the message newstring = timestamp + " " + json.dumps(messagelist) rconn.lpush(logkey, newstring) # and limit number of logs rconn.ltrim(logkey, 0, _LOGLENGTHS['messages'])
en
0.789693
################### # # fromindi.py # ################### Reads indi xml strings, parses them and places values into redis, ready for reading by the web server. # All xml data received should be contained in one of the following tags ########## redis keys and channels # redis keys and data # # one key : set # 'devices' - set of device names ('devices' is a literal string) # multiple keys : sets # 'properties:<devicename>' - set of property names for the device ('properties' is a literal string # <devicename> is an actual device name) # multiple keys : hash tables ( python dictionaries ) # 'attributes:<propertyname>:<devicename>' - dictionary of attributes for the property ('attributes' is a literal string # <propertyname> is an actual property name # <devicename> is an actual device name # one key : list # 'messages' - list of "Timestamp space message" # multiple keys : lists # 'devicemessages:<devicename>' - list of "Timestamp space message" # multiple keys : sets # 'elements:<propertyname>:<devicename>' - set of element names for the device property # ('elements' is a literal string # <propertyname> is an actual property name # <devicename> is an actual device name) # multiple keys : hash tables ( python dictionaries ) # 'elementattributes:<elementname>:<propertyname>:<devicename>' - dictionary of attributes for the element # ('elementattributes' is a literal string # <elementname> is an actual element name # <propertyname> is an actual property name # <devicename> is an actual device name) receives xml data, parses it and stores in redis. 
Publishes the data received on _FROM_INDI_CHANNEL, returns device name if given, or None # this timestamp is the time at which the data is received # create a TextVector object # store the received data in a TextVector object # call the write method to store data in redis # and publishes the data received # ensure no item in log_lengths has a value less than 1 # example - if keys are 'device', 'property' this will result in a key of # 'keyprefixdevice:property' ############# Define properties # add the class name so it is saved with attributes to redis, so the type of vector can be read # self.elements is a dictionary which will hold the elements within this vector, keys are element names # name of Device # name of Property # state case may be incorrect (some confusion in white paper over the case of 'Ok') # current state of Property should be one of Idle, Ok, Busy or Alert # implied properties # GUI label, use name by default # Property group membership, blank by default # moment when these data were valid # worse-case time to affect, 0 default, N/A for ro # name of Device # name of Property # read status, will be set to True if items read from redis are ok # read attributes from redis # this should be a dictionary of attributes, if not found, it will be an empty dictionary # returns with an empty self._strattribs # add the device to redis set 'devices' # add device to 'devices' # add property name to 'properties:<devicename>' # Saves the instance attributes to redis, apart from self.elements # save updated elements # save list of element names # get list of element names sorted by label # add element name to 'elements:<propertyname>:<devicename>' # log changes in devices to logdata:devices # gets last log entry # Get the last log # decode b"timestamp json_string_of_devices_list" # there has been a change in the devices # and limit number of logs # log changes in property names to logdata:properties:<devicename> # gets last log entry # Get the last log # decode 
b"timestamp json_string_of_properties_list" # there has been a change in the properties # and limit number of logs # log changes in attributes to logdata:attributes:<propertyname>:<devicename> # gets last log entry # Get the last log # decode b"timestamp json_string_of_attributes_dict" # there has been a change in the attributes # and limit number of logs # log changes in element names to logdata:elements:<propertyname>:<devicename> # gets last log entry # Get the last log # decode b"timestamp json_string_of_elements_list" # there has been a change in the elements # and limit number of logs # log changes in element attributes # log changes in attributes to logdata:elementattributes:<elementname>:<propertyname>:<devicename> # gets last log entry # Get the last log # decode b"timestamp json_string_of_element_attributes_dict" # there has been a change in the element attributes # and limit number of logs Reads redis and returns an instance of this class # If device is not in the 'devices' set, return None # If the property name is not recognised as a property of the device, return None # create an object of this class # moment when these data were valid # set state of Property; Idle, OK, Busy or Alert, no change if absent # Saves the instance attributes to redis, apart from self.elements Gets an instance of this class from redis, and updates it according to the instructions from the setvector Returns an updated instance of this class or None if unable to read the property # Create an instance of the class, by reading the property from redis # device or property is unknown # call the update method of the property, this writes changes to redis # name of the element, required value # GUI label, use name by default # read status, will be set to True if items read from redis are ok # timestamp and timeout should already be set by the vector when instance created # but read them from redis anyway. 
Could be used as a form of checking to ensure # vector and elements are synchronised # create dictionary of non-private attributes # change value to that given by the xml child # remove any newlines around the xml text ################ Text ###################### # ostensible Client controlability # failed to read attributes # the super call has set self._strattribs # read the elements # failure to read the element # read status set to True, redis read successful # failed to read attributes ################ Number ###################### # ostensible Client controlability # failed to read attributes # the super call has set self._strattribs # read the elements # failure to read the element # read status set to True, redis read successful # required number attributes # printf-style format for GUI display # minimal value # maximum value, ignore if min == max # allowed increments, ignore if 0 # get the raw self.value # failed to read attributes # create dictionary of non-private attributes Returns the string of the number using the format value Returns the float of the number value ################ Switch ###################### # ostensible Client controlability # hint for GUI presentation (OneOfMany|AtMostOne|AnyOfMany) # failed to read attributes # the super call has set self._strattribs # read the elements # failure to read the element # read status set to True, redis read successful # failed to read attributes ################ Lights ###################### # permission always Read-Only # failed to read attributes # the super call has set self._strattribs # read the elements # failure to read the element # read status set to True, redis read successful # failed to read attributes ################ BLOB ###################### # ostensible Client controlability # as default blobs are disabled, check if this device is already known # in redis and if blobs were previously enabled # A defBLOB only has name and label, contents are empty, however if blobs are enabled # and 
this BLOB element has been previously defined, and a filepath saved in redis, # then get element pathname etc from redis # failed to read attributes # the super call has set self._strattribs # read the elements # failure to read the element # read status set to True, redis read successful # as this is only called when a setBLOBVector is received, it must mean that blobs are enabled # A defBLOB only has name and label, contents are empty # name and label are set in super # initialise data # number of bytes in decoded and uncompressed BLOB # format as a file suffix, eg: .z, .fits, .fits.z # failed to read attributes # number of bytes in decoded and uncompressed BLOB # format as a file suffix, eg: .z, .fits, .fits.z # If child.text, save standard_b64decode(child.text) to a file # and set the new filepath attribute of the element If child.text is blob data, this saves the file, and sets a filepath attribute If no text, checks if redis contains a previous filepath and uses that # check if the _BLOBFOLDER exists # if not, create it # no new file # Check if a filepath exists in redis # no new file is given in child.text, nor any file currently exists # read from attributes, may not exist, so use the empty defaults # a new file exists in child.text # make filename from timestamp, and change colon in the timestamp to _ for safer name # append a digit to the filename # filepath does not exist, so a new file with this filepath can be created # size and format are specified in the child vector ################ Message #################### # considered to be site-wide if absent # moment when this message was generated # Received message Return the last message as list of [timestamp, message] or [] if not available If device not given, return the last system message If device given, the last message from this device is returned # decode b"timestamp message" # log changes in messages to logdata:messages or to logdata:devicemessages:<devicename> # gets last log entry # Get the 
last log # decode b"timestamp json_string_of_[timestamp message]" # there has been a change in the message # and limit number of logs ################## Deleting ##################### # A Device may tell a Client a given Property is no longer available by sending delProperty. If the command specifies only a # Device without a Property, the Client must assume all the Properties for that Device, and indeed the Device itself, are no # longer available. # moment when this message was generated # Received message # delete the property and add the message to the device message # delete all elements associated with the property # delete the set of elements for this property # delete the element attributes # and delete the property # delete the device and add the message to the system message # and delete all keys associated with the device # delete the set of properties # delete all elements associated with the property # delete the set of elements for this property # delete the element attributes # delete the properties attributes # delete messages associated with the device # delete the device from the 'devices' set # a property has been deleted, log changes in property names to logdata:properties:<devicename> # gets last log entry # Get the last log # decode b"timestamp json_string_of_properties_list" # there has been a change in the properties # and limit number of logs # log changes in messages to logdata:devicemessages:<devicename> # gets last log entry # Get the last log # decode b"timestamp json_string_of_[timestamp message]" # there has been a change in the message # and limit number of logs # no property name, so an entire device has been wiped # log changes in devices to logdata:devices # gets last log entry # Get the last log # decode b"timestamp json_string_of_devices_list" # there has been a change in the devices # and limit number of logs # log changes in messages to logdata:messages # gets last log entry # Get the last log # decode b"timestamp 
json_string_of_[timestamp message]" # there has been a change in the message # and limit number of logs
2.457281
2
examples/leduc_holdem_cfr.py
drunkpig/rlcard
0
6625341
''' An example of solve Leduc Hold'em with CFR ''' import numpy as np import rlcard from rlcard.agents.cfr_agent import CFRAgent from rlcard import models from rlcard.utils.utils import set_global_seed, tournament from rlcard.utils.logger import Logger # Make environment and enable human mode env = rlcard.make('leduc-holdem', config={'allow_step_back':True}) eval_env = rlcard.make('leduc-holdem') # Set the iterations numbers and how frequently we evaluate the performance and save model evaluate_every = 100 save_plot_every = 1000 evaluate_num = 10000 episode_num = 10000 # The paths for saving the logs and learning curves log_dir = './experiments/leduc_holdem_cfr_result/' # Set a global seed set_global_seed(0) # Initilize CFR Agent agent = CFRAgent(env) agent.load() # If we have saved model, we first load the model # Evaluate CFR against pre-trained NFSP eval_env.set_agents([agent, models.load('leduc-holdem-nfsp').agents[0]]) # Init a Logger to plot the learning curve logger = Logger(log_dir) for episode in range(episode_num): agent.train() print('\rIteration {}'.format(episode), end='') # Evaluate the performance. Play with NFSP agents. if episode % evaluate_every == 0: agent.save() # Save model logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0]) # Close files in the logger logger.close_files() # Plot the learning curve logger.plot('CFR')
''' An example of solve Leduc Hold'em with CFR ''' import numpy as np import rlcard from rlcard.agents.cfr_agent import CFRAgent from rlcard import models from rlcard.utils.utils import set_global_seed, tournament from rlcard.utils.logger import Logger # Make environment and enable human mode env = rlcard.make('leduc-holdem', config={'allow_step_back':True}) eval_env = rlcard.make('leduc-holdem') # Set the iterations numbers and how frequently we evaluate the performance and save model evaluate_every = 100 save_plot_every = 1000 evaluate_num = 10000 episode_num = 10000 # The paths for saving the logs and learning curves log_dir = './experiments/leduc_holdem_cfr_result/' # Set a global seed set_global_seed(0) # Initilize CFR Agent agent = CFRAgent(env) agent.load() # If we have saved model, we first load the model # Evaluate CFR against pre-trained NFSP eval_env.set_agents([agent, models.load('leduc-holdem-nfsp').agents[0]]) # Init a Logger to plot the learning curve logger = Logger(log_dir) for episode in range(episode_num): agent.train() print('\rIteration {}'.format(episode), end='') # Evaluate the performance. Play with NFSP agents. if episode % evaluate_every == 0: agent.save() # Save model logger.log_performance(env.timestep, tournament(eval_env, evaluate_num)[0]) # Close files in the logger logger.close_files() # Plot the learning curve logger.plot('CFR')
en
0.860778
An example of solve Leduc Hold'em with CFR # Make environment and enable human mode # Set the iterations numbers and how frequently we evaluate the performance and save model # The paths for saving the logs and learning curves # Set a global seed # Initilize CFR Agent # If we have saved model, we first load the model # Evaluate CFR against pre-trained NFSP # Init a Logger to plot the learning curve # Evaluate the performance. Play with NFSP agents. # Save model # Close files in the logger # Plot the learning curve
2.684956
3
mil_reptile.py
ryanbrand/mil
0
6625342
<reponame>ryanbrand/mil """ This file defines Meta Imitation Learning (MIL). """ from __future__ import division import numpy as np import random import tensorflow as tf from tensorflow.python.platform import flags from tf_utils import * from utils import Timer from natsort import natsorted FLAGS = flags.FLAGS class MIL(object): """ Initialize MIL. Need to call init_network to contruct the architecture after init. """ def __init__(self, dU, state_idx=None, img_idx=None, network_config=None): # MIL hyperparams self.num_updates = FLAGS.num_updates self.update_batch_size = FLAGS.update_batch_size self.meta_batch_size = FLAGS.meta_batch_size self.meta_lr = FLAGS.meta_lr self.activation_fn = tf.nn.relu # by default, we use relu self.T = FLAGS.T self.network_params = network_config self.norm_type = FLAGS.norm # List of indices for state (vector) data and image (tensor) data in observation. self.state_idx, self.img_idx = state_idx, img_idx # Dimension of input and output of the model self._dO = len(img_idx) + len(state_idx) self._dU = dU def init_network(self, graph, input_tensors=None, restore_iter=0, prefix='Training_', algo='reptile'): """Helper method to initialize the tf networks used; takes in tf graph; initializes networks; calls construct_model; sets params based on training/validation/test mode contained in prefix var """ with graph.as_default(): with Timer('building TF network'): # map inputs to outputs result = self.construct_model(input_tensors=input_tensors, prefix=prefix, dim_input=self._dO, dim_output=self._dU, network_config=self.network_params) outputas, outputbs, test_output, lossesa, lossesb, final_eept_lossesb, flat_img_inputb, gradients, fast_weights = result # added code for reptile if algo == 'reptile': # get actual weights # maintain weights for each task # where W_i is the pre-update fine-tune of parameters on task i # w' = w - \eps 1/k sum_i^n (W_i - w) # self.weights set in construct_model to pre update weights weights = self.weights weight_keys 
= self.sorted_weight_keys #= natsorted(self.weights.keys()) new_weights = average_vars(fast_weights) # the import variables expects that weights is # self._model_state.import_variables(interpolate_vars(self.weights, new_weights, meta_step_size)) else: if 'Testing' in prefix: self.obs_tensor = self.obsa self.state_tensor = self.statea self.test_act_op = test_output self.image_op = flat_img_inputb trainable_vars = tf.trainable_variables() # pre-update losses total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(self.meta_batch_size) # post-update losses total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(self.meta_batch_size) for j in range(self.num_updates)] total_final_eept_losses2 = [tf.reduce_sum(final_eept_lossesb[j]) / tf.to_float(self.meta_batch_size) for j in range(self.num_updates)] if 'Training' in prefix: self.total_loss1 = total_loss1 self.total_losses2 = total_losses2 self.total_final_eept_losses2 = total_final_eept_losses2 elif 'Validation' in prefix: self.val_total_loss1 = total_loss1 self.val_total_losses2 = total_losses2 self.val_total_final_eept_losses2 = total_final_eept_losses2 # TODO: add reptile in here if 'Training' in prefix: # TODO: figure out why we are using total_losses2[self.num_updates - 1], it is becuase you only update on loss of last fine-tune step self.train_op = tf.train.AdamOptimizer(self.meta_lr).minimize(self.total_losses2[self.num_updates - 1]) # Add summaries summ = [tf.summary.scalar(prefix + 'Pre-update_loss', self.total_loss1)] for j in xrange(self.num_updates): summ.append(tf.summary.scalar(prefix + 'Post-update_loss_step_%d' % j, self.total_losses2[j])) summ.append(tf.summary.scalar(prefix + 'Post-update_final_eept_loss_step_%d' % j, self.total_final_eept_losses2[j])) for k in xrange(len(self.sorted_weight_keys)): summ.append(tf.summary.histogram('Gradient_of_%s_step_%d' % (self.sorted_weight_keys[k], j), gradients[j][k])) self.train_summ_op = tf.summary.merge(summ) elif 'Validation' in prefix: # Add summaries summ = 
[tf.summary.scalar(prefix + 'Pre-update_loss', self.val_total_loss1)] for j in xrange(self.num_updates): summ.append(tf.summary.scalar(prefix + 'Post-update_loss_step_%d' % j, self.val_total_losses2[j])) summ.append(tf.summary.scalar(prefix + 'Post-update_final_eept_loss_step_%d' % j, self.val_total_final_eept_losses2[j])) self.val_summ_op = tf.summary.merge(summ) def construct_image_input(self, nn_input, state_idx, img_idx, network_config=None): """Preprocess images; takes in state_idx (list of indices for state data in observation), img_idx (list of indices for image data in observation), and nn_input; pretty much just preprocesses input; returns preprocessed image input, flattened image input, and the state input """ state_input = nn_input[:, 0:state_idx[-1]+1] flat_image_input = nn_input[:, state_idx[-1]+1:img_idx[-1]+1] # image goes through 3 convnet layers num_filters = network_config['num_filters'] im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] image_input = tf.reshape(flat_image_input, [-1, num_channels, im_width, im_height]) image_input = tf.transpose(image_input, perm=[0,3,2,1]) if FLAGS.pretrain_weight_path != 'N/A': image_input = image_input * 255.0 - tf.convert_to_tensor(np.array([103.939, 116.779, 123.68], np.float32)) # 'RGB'->'BGR' image_input = image_input[:, :, :, ::-1] return image_input, flat_image_input, state_input def construct_weights(self, dim_input=27, dim_output=7, network_config=None): """ Construct weights for the network; takes in input dim and final output dim and just builds conv and fc weights, including augmented bias thing (and 2-headed architecture if FLAGS.two_head is true); returns weights """ weights = {} num_filters = network_config['num_filters'] strides = network_config.get('strides', [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1]]) filter_sizes = network_config.get('filter_size', [3]*len(strides)) # used to be 2 if type(filter_sizes) is not 
list: filter_sizes = len(strides)*[filter_sizes] im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] is_dilated = network_config.get('is_dilated', False) use_fp = FLAGS.fp pretrain = FLAGS.pretrain_weight_path != 'N/A' train_pretrain_conv1 = FLAGS.train_pretrain_conv1 initialization = network_config.get('initialization', 'random') if pretrain: num_filters[0] = 64 pretrain_weight_path = FLAGS.pretrain_weight_path n_conv_layers = len(num_filters) downsample_factor = 1 for stride in strides: downsample_factor *= stride[1] if use_fp: self.conv_out_size = int(num_filters[-1]*2) else: self.conv_out_size = int(np.ceil(im_width/(downsample_factor)))*int(np.ceil(im_height/(downsample_factor)))*num_filters[-1] # conv weights fan_in = num_channels if FLAGS.conv_bt: fan_in += num_channels if FLAGS.conv_bt: weights['img_context'] = safe_get('img_context', initializer=tf.zeros([im_height, im_width, num_channels], dtype=tf.float32)) weights['img_context'] = tf.clip_by_value(weights['img_context'], 0., 1.) 
for i in xrange(n_conv_layers): if not pretrain or i != 0: if self.norm_type == 'selu': weights['wc%d' % (i+1)] = init_conv_weights_snn([filter_sizes[i], filter_sizes[i], fan_in, num_filters[i]], name='wc%d' % (i+1)) # 5x5 conv, 1 input, 32 outputs elif initialization == 'xavier': weights['wc%d' % (i+1)] = init_conv_weights_xavier([filter_sizes[i], filter_sizes[i], fan_in, num_filters[i]], name='wc%d' % (i+1)) # 5x5 conv, 1 input, 32 outputs elif initialization == 'random': weights['wc%d' % (i+1)] = init_weights([filter_sizes[i], filter_sizes[i], fan_in, num_filters[i]], name='wc%d' % (i+1)) # 5x5 conv, 1 input, 32 outputs else: raise NotImplementedError weights['bc%d' % (i+1)] = init_bias([num_filters[i]], name='bc%d' % (i+1)) fan_in = num_filters[i] else: import h5py assert num_filters[i] == 64 vgg_filter_size = 3 weights['wc%d' % (i+1)] = safe_get('wc%d' % (i+1), [vgg_filter_size, vgg_filter_size, fan_in, num_filters[i]], dtype=tf.float32, trainable=train_pretrain_conv1) weights['bc%d' % (i+1)] = safe_get('bc%d' % (i+1), [num_filters[i]], dtype=tf.float32, trainable=train_pretrain_conv1) pretrain_weight = h5py.File(pretrain_weight_path, 'r') conv_weight = pretrain_weight['block1_conv%d' % (i+1)]['block1_conv%d_W_1:0' % (i+1)][...] conv_bias = pretrain_weight['block1_conv%d' % (i+1)]['block1_conv%d_b_1:0' % (i+1)][...] 
weights['wc%d' % (i+1)].assign(conv_weight) weights['bc%d' % (i+1)].assign(conv_bias) fan_in = conv_weight.shape[-1] # fc weights in_shape = self.conv_out_size if not FLAGS.no_state: in_shape += len(self.state_idx) if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, FLAGS.final_eept_max) final_eept_in_shape = self.conv_out_size if FLAGS.fc_bt: weights['context_final_eept'] = safe_get('context_final_eept', initializer=tf.zeros([FLAGS.bt_dim], dtype=tf.float32)) final_eept_in_shape += FLAGS.bt_dim weights['w_ee'] = init_weights([final_eept_in_shape, len(final_eept_range)], name='w_ee') weights['b_ee'] = init_bias([len(final_eept_range)], name='b_ee') if FLAGS.two_head and FLAGS.no_final_eept: weights['w_ee_two_heads'] = init_weights([final_eept_in_shape, len(final_eept_range)], name='w_ee_two_heads') weights['b_ee_two_heads'] = init_bias([len(final_eept_range)], name='b_ee_two_heads') in_shape += (len(final_eept_range)) if FLAGS.fc_bt: in_shape += FLAGS.bt_dim if FLAGS.fc_bt: weights['context'] = safe_get('context', initializer=tf.zeros([FLAGS.bt_dim], dtype=tf.float32)) fc_weights = self.construct_fc_weights(in_shape, dim_output, network_config=network_config) self.conv_out_size_final = in_shape weights.update(fc_weights) return weights def construct_fc_weights(self, dim_input=27, dim_output=7, network_config=None): """ same as above just for only fc weights """ n_layers = network_config.get('n_layers', 4) dim_hidden = network_config.get('layer_size', [100]*(n_layers-1)) if type(dim_hidden) is not list: dim_hidden = (n_layers - 1)*[dim_hidden] dim_hidden.append(dim_output) weights = {} in_shape = dim_input for i in xrange(n_layers): if FLAGS.two_arms and i == 0: if self.norm_type == 'selu': weights['w_%d_img' % i] = init_fc_weights_snn([in_shape-len(self.state_idx), dim_hidden[i]], name='w_%d_img' % i) weights['w_%d_state' % i] = init_fc_weights_snn([len(self.state_idx), dim_hidden[i]], name='w_%d_state' % i) else: weights['w_%d_img' % i] = 
init_weights([in_shape-len(self.state_idx), dim_hidden[i]], name='w_%d_img' % i) weights['w_%d_state' % i] = init_weights([len(self.state_idx), dim_hidden[i]], name='w_%d_state' % i) weights['b_%d_state_two_arms' % i] = init_bias([dim_hidden[i]], name='b_%d_state_two_arms' % i) weights['b_%d_img' % i] = init_bias([dim_hidden[i]], name='b_%d_img' % i) weights['b_%d_state' % i] = init_bias([dim_hidden[i]], name='b_%d_state' % i) in_shape = dim_hidden[i] continue if i > 0 and FLAGS.all_fc_bt: in_shape += FLAGS.bt_dim weights['context_%d' % i] = init_bias([FLAGS.bt_dim], name='context_%d' % i) if self.norm_type == 'selu': weights['w_%d' % i] = init_fc_weights_snn([in_shape, dim_hidden[i]], name='w_%d' % i) else: weights['w_%d' % i] = init_weights([in_shape, dim_hidden[i]], name='w_%d' % i) weights['b_%d' % i] = init_bias([dim_hidden[i]], name='b_%d' % i) if (i == n_layers - 1 or (i == 0 and FLAGS.zero_state and not FLAGS.two_arms)) and FLAGS.two_head: weights['w_%d_two_heads' % i] = init_weights([in_shape, dim_hidden[i]], name='w_%d_two_heads' % i) weights['b_%d_two_heads' % i] = init_bias([dim_hidden[i]], name='b_%d_two_heads' % i) in_shape = dim_hidden[i] return weights def forward(self, image_input, state_input, weights, meta_testing=False, is_training=True, testing=False, network_config=None): """ Perform the forward pass; given image input, state input, and weight dict, perform standard forward pass in net, except only through conv layers really, then call fc_forward and output final result through that """ if FLAGS.fc_bt: im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] flatten_image = tf.reshape(image_input, [-1, im_height*im_width*num_channels]) context = tf.transpose(tf.gather(tf.transpose(tf.zeros_like(flatten_image)), range(FLAGS.bt_dim))) context += weights['context'] if FLAGS.learn_final_eept: context_final_eept = 
tf.transpose(tf.gather(tf.transpose(tf.zeros_like(flatten_image)), range(FLAGS.bt_dim))) context_final_eept += weights['context_final_eept'] norm_type = self.norm_type decay = network_config.get('decay', 0.9) strides = network_config.get('strides', [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1]]) downsample_factor = strides[0][1] n_strides = len(strides) n_conv_layers = len(strides) use_dropout = FLAGS.dropout prob = FLAGS.keep_prob is_dilated = network_config.get('is_dilated', False) im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] conv_layer = image_input if FLAGS.conv_bt: img_context = tf.zeros_like(conv_layer) img_context += weights['img_context'] conv_layer = tf.concat(axis=3, values=[conv_layer, img_context]) for i in xrange(n_conv_layers): if not use_dropout: conv_layer = norm(conv2d(img=conv_layer, w=weights['wc%d' % (i+1)], b=weights['bc%d' % (i+1)], strides=strides[i], is_dilated=is_dilated), \ norm_type=norm_type, decay=decay, id=i, is_training=is_training, activation_fn=self.activation_fn) else: conv_layer = dropout(norm(conv2d(img=conv_layer, w=weights['wc%d' % (i+1)], b=weights['bc%d' % (i+1)], strides=strides[i], is_dilated=is_dilated), \ norm_type=norm_type, decay=decay, id=i, is_training=is_training, activation_fn=self.activation_fn), keep_prob=prob, is_training=is_training, name='dropout_%d' % (i+1)) if FLAGS.fp: _, num_rows, num_cols, num_fp = conv_layer.get_shape() if is_dilated: num_rows = int(np.ceil(im_width/(downsample_factor**n_strides))) num_cols = int(np.ceil(im_height/(downsample_factor**n_strides))) num_rows, num_cols, num_fp = [int(x) for x in [num_rows, num_cols, num_fp]] x_map = np.empty([num_rows, num_cols], np.float32) y_map = np.empty([num_rows, num_cols], np.float32) for i in range(num_rows): for j in range(num_cols): x_map[i, j] = (i - num_rows / 2.0) / num_rows y_map[i, j] = (j - num_cols / 2.0) / num_cols x_map = tf.convert_to_tensor(x_map) y_map = 
tf.convert_to_tensor(y_map) x_map = tf.reshape(x_map, [num_rows * num_cols]) y_map = tf.reshape(y_map, [num_rows * num_cols]) # rearrange features to be [batch_size, num_fp, num_rows, num_cols] features = tf.reshape(tf.transpose(conv_layer, [0,3,1,2]), [-1, num_rows*num_cols]) softmax = tf.nn.softmax(features) fp_x = tf.reduce_sum(tf.multiply(x_map, softmax), [1], keep_dims=True) fp_y = tf.reduce_sum(tf.multiply(y_map, softmax), [1], keep_dims=True) conv_out_flat = tf.reshape(tf.concat(axis=1, values=[fp_x, fp_y]), [-1, num_fp*2]) else: conv_out_flat = tf.reshape(conv_layer, [-1, self.conv_out_size]) fc_input = tf.add(conv_out_flat, 0) if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, FLAGS.final_eept_max) if testing: T = 1 else: T = self.T conv_out_flat = tf.reshape(conv_out_flat, [-1, T, self.conv_out_size]) conv_size = self.conv_out_size if FLAGS.fc_bt: context_dim = FLAGS.bt_dim conv_out_flat = tf.concat(axis=2, values=[conv_out_flat, tf.reshape(context_final_eept, [-1, T, context_dim])]) conv_size += context_dim # only predict the final eept using the initial image final_ee_inp = tf.reshape(conv_out_flat, [-1, conv_size]) # use video for preupdate only if no_final_eept if (not FLAGS.learn_final_eept_whole_traj) or meta_testing: final_ee_inp = conv_out_flat[:, 0, :] if FLAGS.two_head and not meta_testing and FLAGS.no_final_eept: final_eept_pred = tf.matmul(final_ee_inp, weights['w_ee_two_heads']) + weights['b_ee_two_heads'] else: final_eept_pred = tf.matmul(final_ee_inp, weights['w_ee']) + weights['b_ee'] if (not FLAGS.learn_final_eept_whole_traj) or meta_testing: final_eept_pred = tf.reshape(tf.tile(tf.reshape(final_eept_pred, [-1]), [T]), [-1, len(final_eept_range)]) final_eept_concat = tf.identity(final_eept_pred) else: # Assume tbs == 1 # Only provide the FC layers with final_eept_pred at first time step final_eept_concat = final_eept_pred[0] final_eept_concat = tf.reshape(tf.tile(tf.reshape(final_eept_concat, [-1]), [T]), [-1, 
len(final_eept_range)]) fc_input = tf.concat(axis=1, values=[fc_input, final_eept_concat]) else: final_eept_pred = None if FLAGS.fc_bt: fc_input = tf.concat(axis=1, values=[fc_input, context]) return self.fc_forward(fc_input, weights, state_input=state_input, meta_testing=meta_testing, is_training=is_training, testing=testing, network_config=network_config), final_eept_pred def fc_forward(self, fc_input, weights, state_input=None, meta_testing=False, is_training=True, testing=False, network_config=None): ''' fc_forward: completes forward pass for conv net; performs forward pass given special fc input (flexible for multiple architectures, including 2-headed, etc) ''' n_layers = network_config.get('n_layers', 4) use_dropout = FLAGS.dropout prob = FLAGS.keep_prob fc_output = tf.add(fc_input, 0) use_selu = self.norm_type == 'selu' norm_type = self.norm_type if state_input is not None and not FLAGS.two_arms: fc_output = tf.concat(axis=1, values=[fc_output, state_input]) for i in xrange(n_layers): if i > 0 and FLAGS.all_fc_bt: context = tf.transpose(tf.gather(tf.transpose(tf.zeros_like(fc_output)), range(FLAGS.bt_dim))) context += weights['context_%d' % i] fc_output = tf.concat(axis=1, values=[fc_output, context]) if (i == n_layers - 1 or (i == 0 and FLAGS.zero_state and not FLAGS.two_arms)) and FLAGS.two_head and not meta_testing: fc_output = tf.matmul(fc_output, weights['w_%d_two_heads' % i]) + weights['b_%d_two_heads' % i] elif i == 0 and FLAGS.two_arms: assert state_input is not None if FLAGS.two_arms: state_part = weights['b_%d_state_two_arms' % i] else: state_part = tf.matmul(state_input, weights['w_%d_state' % i]) + weights['b_%d_state' % i] if not meta_testing: fc_output = tf.matmul(fc_output, weights['w_%d_img' % i]) + weights['b_%d_img' % i] + state_part else: fc_output = tf.matmul(fc_output, weights['w_%d_img' % i]) + weights['b_%d_img' % i] + \ tf.matmul(state_input, weights['w_%d_state' % i]) + weights['b_%d_state' % i] else: fc_output = tf.matmul(fc_output, 
weights['w_%d' % i]) + weights['b_%d' % i] if i != n_layers - 1: if use_selu: fc_output = selu(fc_output) else: fc_output = self.activation_fn(fc_output) # only use dropout for post-update if use_dropout: fc_output = dropout(fc_output, keep_prob=prob, is_training=is_training, name='dropout_fc_%d' % i, selu=use_selu) return fc_output def construct_model(self, input_tensors=None, prefix='Training_', dim_input=27, dim_output=7, network_config=None): """ Construct the meta-learning graph. Args: input_tensors: tensors of input videos, if available prefix: indicate whether we are building training, validation or testing graph. dim_input: Dimensionality of input. dim_output: Dimensionality of the output. network_config: dictionary of network structure parameters Returns: a tuple of output tensors. """ # create placeholders for observations, states, and actions if input_tensors is None: self.obsa = obsa = tf.placeholder(tf.float32, name='obsa') # meta_batch_size x update_batch_size x dim_input self.obsb = obsb = tf.placeholder(tf.float32, name='obsb') else: self.obsa = obsa = input_tensors['inputa'] # meta_batch_size x update_batch_size x dim_input self.obsb = obsb = input_tensors['inputb'] if not hasattr(self, 'statea'): self.statea = statea = tf.placeholder(tf.float32, name='statea') self.stateb = stateb = tf.placeholder(tf.float32, name='stateb') self.actiona = actiona = tf.placeholder(tf.float32, name='actiona') self.actionb = actionb = tf.placeholder(tf.float32, name='actionb') else: statea = self.statea stateb = self.stateb actiona = self.actiona actionb = self.actionb # feed states and observations in as input to model; this provides more info that just obs inputa = tf.concat(axis=2, values=[statea, obsa]) inputb = tf.concat(axis=2, values=[stateb, obsb]) with tf.variable_scope('model', reuse=None) as training_scope: # Construct layers weight & bias if 'weights' not in dir(self): if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, 
FLAGS.final_eept_max) self.weights = weights = self.construct_weights(dim_input, dim_output-len(final_eept_range), network_config=network_config) else: self.weights = weights = self.construct_weights(dim_input, dim_output, network_config=network_config) self.sorted_weight_keys = natsorted(self.weights.keys()) else: training_scope.reuse_variables() weights = self.weights # set hyperparameters self.step_size = FLAGS.train_update_lr loss_multiplier = FLAGS.loss_multiplier final_eept_loss_eps = FLAGS.final_eept_loss_eps act_loss_eps = FLAGS.act_loss_eps use_whole_traj = FLAGS.learn_final_eept_whole_traj # record losses for fine-tune and after meta update? num_updates = self.num_updates lossesa, outputsa = [], [] lossesb = [[] for _ in xrange(num_updates)] outputsb = [[] for _ in xrange(num_updates)] def batch_metalearn(inp): # input has two examples: action/obs a is for training on task, # action/obs b is for meta-training update # this is because you need to train on the task to get the loss L_i to take grad w.r.t. 
initial params inputa, inputb, actiona, actionb = inp inputa = tf.reshape(inputa, [-1, dim_input]) inputb = tf.reshape(inputb, [-1, dim_input]) actiona = tf.reshape(actiona, [-1, dim_output]) actionb = tf.reshape(actionb, [-1, dim_output]) gradients_summ = [] testing = 'Testing' in prefix # for learning end effector pose final_eepta, final_eeptb = None, None if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, FLAGS.final_eept_max) final_eepta = actiona[:, final_eept_range[0]:final_eept_range[-1]+1] final_eeptb = actionb[:, final_eept_range[0]:final_eept_range[-1]+1] actiona = actiona[:, :final_eept_range[0]] actionb = actionb[:, :final_eept_range[0]] if FLAGS.no_final_eept: final_eepta = tf.zeros_like(final_eepta) if FLAGS.no_action: actiona = tf.zeros_like(actiona) local_outputbs, local_lossesb, final_eept_lossesb = [], [], [] # Assume fixed data for each update # by update they mean the number of gradient steps on loss L_i before taking derivative w.r.t inital params actionas = [actiona]*num_updates # Convert to image dims inputa, _, state_inputa = self.construct_image_input(inputa, self.state_idx, self.img_idx, network_config=network_config) inputb, flat_img_inputb, state_inputb = self.construct_image_input(inputb, self.state_idx, self.img_idx, network_config=network_config) inputas = [inputa]*num_updates inputbs = [inputb]*num_updates if FLAGS.zero_state: state_inputa = tf.zeros_like(state_inputa) state_inputas = [state_inputa]*num_updates if FLAGS.no_state: state_inputa = None if FLAGS.learn_final_eept: final_eeptas = [final_eepta]*num_updates # euclidean loss layer = (action - mlp_out)'*precision*(action-mlp_out) = (u-uhat)'*A*(u-uhat) # Pre-update # aka update on task, single step of GD if 'Training' in prefix: local_outputa, final_eept_preda = self.forward(inputa, state_inputa, weights, network_config=network_config) else: local_outputa, final_eept_preda = self.forward(inputa, state_inputa, weights, is_training=False, 
network_config=network_config) if FLAGS.learn_final_eept: final_eept_lossa = euclidean_loss_layer(final_eept_preda, final_eepta, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossa = tf.constant(0.0) local_lossa = act_loss_eps * euclidean_loss_layer(local_outputa, actiona, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: local_lossa += final_eept_loss_eps * final_eept_lossa # Compute fast gradients - take GD step # Do normal updates on the local_lossa grads = tf.gradients(local_lossa, weights.values()) gradients = dict(zip(weights.keys(), grads)) # make fast gradient zero for weights with gradient None for key in gradients.keys(): if gradients[key] is None: gradients[key] = tf.zeros_like(weights[key]) if FLAGS.stop_grad: gradients = {key:tf.stop_gradient(gradients[key]) for key in gradients.keys()} if FLAGS.clip: clip_min = FLAGS.clip_min clip_max = FLAGS.clip_max for key in gradients.keys(): gradients[key] = tf.clip_by_value(gradients[key], clip_min, clip_max) if FLAGS.pretrain_weight_path != 'N/A': gradients['wc1'] = tf.zeros_like(gradients['wc1']) gradients['bc1'] = tf.zeros_like(gradients['bc1']) # add gradient for each key in sorted weight keys gradients_summ.append([gradients[key] for key in self.sorted_weight_keys]) # weird way to take GD step--but this is the update; w = w - lr*gradient; update weights for current task # fast_weights are the pre-update weights fast_weights = dict(zip(weights.keys(), [weights[key] - self.step_size*gradients[key] for key in weights.keys()])) # Post-update - aka meta update on demonstration b (note meta_testing=True) # Compute new loss after gradient update on weights w.r.t L_i if FLAGS.no_state: state_inputb = None if 'Training' in prefix: outputb, final_eept_predb = self.forward(inputb, state_inputb, fast_weights, meta_testing=True, network_config=network_config) else: outputb, final_eept_predb = self.forward(inputb, state_inputb, fast_weights, meta_testing=True, 
is_training=False, testing=testing, network_config=network_config) local_outputbs.append(outputb) if FLAGS.learn_final_eept: final_eept_lossb = euclidean_loss_layer(final_eept_predb, final_eeptb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossb = tf.constant(0.0) local_lossb = act_loss_eps * euclidean_loss_layer(outputb, actionb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: local_lossb += final_eept_loss_eps * final_eept_lossb if use_whole_traj: # assume tbs == 1 final_eept_lossb = euclidean_loss_layer(final_eept_predb[0], final_eeptb[0], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) final_eept_lossesb.append(final_eept_lossb) local_lossesb.append(local_lossb) # take more gradient steps using the fast_weights computed above i.e. finetune on the task for num_updates for j in range(num_updates - 1): # more input-observation pairs; num_updates = num steps of SGD to take # Pre-update state_inputa_new = state_inputas[j+1] if FLAGS.no_state: state_inputa_new = None if 'Training' in prefix: outputa, final_eept_preda = self.forward(inputas[j+1], state_inputa_new, fast_weights, network_config=network_config) else: outputa, final_eept_preda = self.forward(inputas[j+1], state_inputa_new, fast_weights, is_training=False, testing=testing, network_config=network_config) if FLAGS.learn_final_eept: final_eept_lossa = euclidean_loss_layer(final_eept_preda, final_eeptas[j+1], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossa = tf.constant(0.0) loss = act_loss_eps * euclidean_loss_layer(outputa, actionas[j+1], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: loss += final_eept_loss_eps * final_eept_lossa # Compute fast gradients grads = tf.gradients(loss, fast_weights.values()) gradients = dict(zip(fast_weights.keys(), grads)) # make fast gradient zero for weights with gradient None for key in gradients.keys(): if gradients[key] is None: 
gradients[key] = tf.zeros_like(fast_weights[key]) if FLAGS.stop_grad: gradients = {key:tf.stop_gradient(gradients[key]) for key in gradients.keys()} if FLAGS.clip: clip_min = FLAGS.clip_min clip_max = FLAGS.clip_max for key in gradients.keys(): gradients[key] = tf.clip_by_value(gradients[key], clip_min, clip_max) if FLAGS.pretrain_weight_path != 'N/A': gradients['wc1'] = tf.zeros_like(gradients['wc1']) gradients['bc1'] = tf.zeros_like(gradients['bc1']) gradients_summ.append([gradients[key] for key in self.sorted_weight_keys]) fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - self.step_size*gradients[key] for key in fast_weights.keys()])) # Post-update if FLAGS.no_state: state_inputb = None if 'Training' in prefix: output, final_eept_predb = self.forward(inputbs[j+1], state_inputb, fast_weights, meta_testing=True, network_config=network_config) else: output, final_eept_predb = self.forward(inputbs[j+1], state_inputb, fast_weights, meta_testing=True, is_training=False, testing=testing, network_config=network_config) local_outputbs.append(output) if FLAGS.learn_final_eept: final_eept_lossb = euclidean_loss_layer(final_eept_predb, final_eeptb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossb = tf.constant(0.0) lossb = act_loss_eps * euclidean_loss_layer(output, actionb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: lossb += final_eept_loss_eps * final_eept_lossb if use_whole_traj: # assume tbs == 1 final_eept_lossb = euclidean_loss_layer(final_eept_predb[0], final_eeptb[0], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) final_eept_lossesb.append(final_eept_lossb) local_lossesb.append(lossb) # assuming this is all of the loss / gradient information needed to make take a MIL step local_fn_output = [local_outputa, local_outputbs, local_outputbs[-1], local_lossa, \ local_lossesb, final_eept_lossesb, flat_img_inputb, gradients_summ, fast_weights] return local_fn_output if 
self.norm_type: # initialize batch norm vars. unused = batch_metalearn((inputa[0], inputb[0], actiona[0], actionb[0])) out_dtype = [tf.float32, [tf.float32]*num_updates, tf.float32, tf.float32, [tf.float32]*num_updates, [tf.float32]*num_updates, \ tf.float32, [[tf.float32]*len(self.weights.keys())]*num_updates, [tf.float]*len(self.weights.keys())] # creates a list of loss, gradient info to take MIL step # we are mapping over tasks in elems so each index is a task result = tf.map_fn(batch_metalearn, elems=(inputa, inputb, actiona, actionb), dtype=out_dtype) print 'Done with map.' return result
""" This file defines Meta Imitation Learning (MIL). """ from __future__ import division import numpy as np import random import tensorflow as tf from tensorflow.python.platform import flags from tf_utils import * from utils import Timer from natsort import natsorted FLAGS = flags.FLAGS class MIL(object): """ Initialize MIL. Need to call init_network to contruct the architecture after init. """ def __init__(self, dU, state_idx=None, img_idx=None, network_config=None): # MIL hyperparams self.num_updates = FLAGS.num_updates self.update_batch_size = FLAGS.update_batch_size self.meta_batch_size = FLAGS.meta_batch_size self.meta_lr = FLAGS.meta_lr self.activation_fn = tf.nn.relu # by default, we use relu self.T = FLAGS.T self.network_params = network_config self.norm_type = FLAGS.norm # List of indices for state (vector) data and image (tensor) data in observation. self.state_idx, self.img_idx = state_idx, img_idx # Dimension of input and output of the model self._dO = len(img_idx) + len(state_idx) self._dU = dU def init_network(self, graph, input_tensors=None, restore_iter=0, prefix='Training_', algo='reptile'): """Helper method to initialize the tf networks used; takes in tf graph; initializes networks; calls construct_model; sets params based on training/validation/test mode contained in prefix var """ with graph.as_default(): with Timer('building TF network'): # map inputs to outputs result = self.construct_model(input_tensors=input_tensors, prefix=prefix, dim_input=self._dO, dim_output=self._dU, network_config=self.network_params) outputas, outputbs, test_output, lossesa, lossesb, final_eept_lossesb, flat_img_inputb, gradients, fast_weights = result # added code for reptile if algo == 'reptile': # get actual weights # maintain weights for each task # where W_i is the pre-update fine-tune of parameters on task i # w' = w - \eps 1/k sum_i^n (W_i - w) # self.weights set in construct_model to pre update weights weights = self.weights weight_keys = 
self.sorted_weight_keys #= natsorted(self.weights.keys()) new_weights = average_vars(fast_weights) # the import variables expects that weights is # self._model_state.import_variables(interpolate_vars(self.weights, new_weights, meta_step_size)) else: if 'Testing' in prefix: self.obs_tensor = self.obsa self.state_tensor = self.statea self.test_act_op = test_output self.image_op = flat_img_inputb trainable_vars = tf.trainable_variables() # pre-update losses total_loss1 = tf.reduce_sum(lossesa) / tf.to_float(self.meta_batch_size) # post-update losses total_losses2 = [tf.reduce_sum(lossesb[j]) / tf.to_float(self.meta_batch_size) for j in range(self.num_updates)] total_final_eept_losses2 = [tf.reduce_sum(final_eept_lossesb[j]) / tf.to_float(self.meta_batch_size) for j in range(self.num_updates)] if 'Training' in prefix: self.total_loss1 = total_loss1 self.total_losses2 = total_losses2 self.total_final_eept_losses2 = total_final_eept_losses2 elif 'Validation' in prefix: self.val_total_loss1 = total_loss1 self.val_total_losses2 = total_losses2 self.val_total_final_eept_losses2 = total_final_eept_losses2 # TODO: add reptile in here if 'Training' in prefix: # TODO: figure out why we are using total_losses2[self.num_updates - 1], it is becuase you only update on loss of last fine-tune step self.train_op = tf.train.AdamOptimizer(self.meta_lr).minimize(self.total_losses2[self.num_updates - 1]) # Add summaries summ = [tf.summary.scalar(prefix + 'Pre-update_loss', self.total_loss1)] for j in xrange(self.num_updates): summ.append(tf.summary.scalar(prefix + 'Post-update_loss_step_%d' % j, self.total_losses2[j])) summ.append(tf.summary.scalar(prefix + 'Post-update_final_eept_loss_step_%d' % j, self.total_final_eept_losses2[j])) for k in xrange(len(self.sorted_weight_keys)): summ.append(tf.summary.histogram('Gradient_of_%s_step_%d' % (self.sorted_weight_keys[k], j), gradients[j][k])) self.train_summ_op = tf.summary.merge(summ) elif 'Validation' in prefix: # Add summaries summ = 
[tf.summary.scalar(prefix + 'Pre-update_loss', self.val_total_loss1)] for j in xrange(self.num_updates): summ.append(tf.summary.scalar(prefix + 'Post-update_loss_step_%d' % j, self.val_total_losses2[j])) summ.append(tf.summary.scalar(prefix + 'Post-update_final_eept_loss_step_%d' % j, self.val_total_final_eept_losses2[j])) self.val_summ_op = tf.summary.merge(summ) def construct_image_input(self, nn_input, state_idx, img_idx, network_config=None): """Preprocess images; takes in state_idx (list of indices for state data in observation), img_idx (list of indices for image data in observation), and nn_input; pretty much just preprocesses input; returns preprocessed image input, flattened image input, and the state input """ state_input = nn_input[:, 0:state_idx[-1]+1] flat_image_input = nn_input[:, state_idx[-1]+1:img_idx[-1]+1] # image goes through 3 convnet layers num_filters = network_config['num_filters'] im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] image_input = tf.reshape(flat_image_input, [-1, num_channels, im_width, im_height]) image_input = tf.transpose(image_input, perm=[0,3,2,1]) if FLAGS.pretrain_weight_path != 'N/A': image_input = image_input * 255.0 - tf.convert_to_tensor(np.array([103.939, 116.779, 123.68], np.float32)) # 'RGB'->'BGR' image_input = image_input[:, :, :, ::-1] return image_input, flat_image_input, state_input def construct_weights(self, dim_input=27, dim_output=7, network_config=None): """ Construct weights for the network; takes in input dim and final output dim and just builds conv and fc weights, including augmented bias thing (and 2-headed architecture if FLAGS.two_head is true); returns weights """ weights = {} num_filters = network_config['num_filters'] strides = network_config.get('strides', [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1]]) filter_sizes = network_config.get('filter_size', [3]*len(strides)) # used to be 2 if type(filter_sizes) is not 
list: filter_sizes = len(strides)*[filter_sizes] im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] is_dilated = network_config.get('is_dilated', False) use_fp = FLAGS.fp pretrain = FLAGS.pretrain_weight_path != 'N/A' train_pretrain_conv1 = FLAGS.train_pretrain_conv1 initialization = network_config.get('initialization', 'random') if pretrain: num_filters[0] = 64 pretrain_weight_path = FLAGS.pretrain_weight_path n_conv_layers = len(num_filters) downsample_factor = 1 for stride in strides: downsample_factor *= stride[1] if use_fp: self.conv_out_size = int(num_filters[-1]*2) else: self.conv_out_size = int(np.ceil(im_width/(downsample_factor)))*int(np.ceil(im_height/(downsample_factor)))*num_filters[-1] # conv weights fan_in = num_channels if FLAGS.conv_bt: fan_in += num_channels if FLAGS.conv_bt: weights['img_context'] = safe_get('img_context', initializer=tf.zeros([im_height, im_width, num_channels], dtype=tf.float32)) weights['img_context'] = tf.clip_by_value(weights['img_context'], 0., 1.) 
for i in xrange(n_conv_layers): if not pretrain or i != 0: if self.norm_type == 'selu': weights['wc%d' % (i+1)] = init_conv_weights_snn([filter_sizes[i], filter_sizes[i], fan_in, num_filters[i]], name='wc%d' % (i+1)) # 5x5 conv, 1 input, 32 outputs elif initialization == 'xavier': weights['wc%d' % (i+1)] = init_conv_weights_xavier([filter_sizes[i], filter_sizes[i], fan_in, num_filters[i]], name='wc%d' % (i+1)) # 5x5 conv, 1 input, 32 outputs elif initialization == 'random': weights['wc%d' % (i+1)] = init_weights([filter_sizes[i], filter_sizes[i], fan_in, num_filters[i]], name='wc%d' % (i+1)) # 5x5 conv, 1 input, 32 outputs else: raise NotImplementedError weights['bc%d' % (i+1)] = init_bias([num_filters[i]], name='bc%d' % (i+1)) fan_in = num_filters[i] else: import h5py assert num_filters[i] == 64 vgg_filter_size = 3 weights['wc%d' % (i+1)] = safe_get('wc%d' % (i+1), [vgg_filter_size, vgg_filter_size, fan_in, num_filters[i]], dtype=tf.float32, trainable=train_pretrain_conv1) weights['bc%d' % (i+1)] = safe_get('bc%d' % (i+1), [num_filters[i]], dtype=tf.float32, trainable=train_pretrain_conv1) pretrain_weight = h5py.File(pretrain_weight_path, 'r') conv_weight = pretrain_weight['block1_conv%d' % (i+1)]['block1_conv%d_W_1:0' % (i+1)][...] conv_bias = pretrain_weight['block1_conv%d' % (i+1)]['block1_conv%d_b_1:0' % (i+1)][...] 
weights['wc%d' % (i+1)].assign(conv_weight) weights['bc%d' % (i+1)].assign(conv_bias) fan_in = conv_weight.shape[-1] # fc weights in_shape = self.conv_out_size if not FLAGS.no_state: in_shape += len(self.state_idx) if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, FLAGS.final_eept_max) final_eept_in_shape = self.conv_out_size if FLAGS.fc_bt: weights['context_final_eept'] = safe_get('context_final_eept', initializer=tf.zeros([FLAGS.bt_dim], dtype=tf.float32)) final_eept_in_shape += FLAGS.bt_dim weights['w_ee'] = init_weights([final_eept_in_shape, len(final_eept_range)], name='w_ee') weights['b_ee'] = init_bias([len(final_eept_range)], name='b_ee') if FLAGS.two_head and FLAGS.no_final_eept: weights['w_ee_two_heads'] = init_weights([final_eept_in_shape, len(final_eept_range)], name='w_ee_two_heads') weights['b_ee_two_heads'] = init_bias([len(final_eept_range)], name='b_ee_two_heads') in_shape += (len(final_eept_range)) if FLAGS.fc_bt: in_shape += FLAGS.bt_dim if FLAGS.fc_bt: weights['context'] = safe_get('context', initializer=tf.zeros([FLAGS.bt_dim], dtype=tf.float32)) fc_weights = self.construct_fc_weights(in_shape, dim_output, network_config=network_config) self.conv_out_size_final = in_shape weights.update(fc_weights) return weights def construct_fc_weights(self, dim_input=27, dim_output=7, network_config=None): """ same as above just for only fc weights """ n_layers = network_config.get('n_layers', 4) dim_hidden = network_config.get('layer_size', [100]*(n_layers-1)) if type(dim_hidden) is not list: dim_hidden = (n_layers - 1)*[dim_hidden] dim_hidden.append(dim_output) weights = {} in_shape = dim_input for i in xrange(n_layers): if FLAGS.two_arms and i == 0: if self.norm_type == 'selu': weights['w_%d_img' % i] = init_fc_weights_snn([in_shape-len(self.state_idx), dim_hidden[i]], name='w_%d_img' % i) weights['w_%d_state' % i] = init_fc_weights_snn([len(self.state_idx), dim_hidden[i]], name='w_%d_state' % i) else: weights['w_%d_img' % i] = 
init_weights([in_shape-len(self.state_idx), dim_hidden[i]], name='w_%d_img' % i) weights['w_%d_state' % i] = init_weights([len(self.state_idx), dim_hidden[i]], name='w_%d_state' % i) weights['b_%d_state_two_arms' % i] = init_bias([dim_hidden[i]], name='b_%d_state_two_arms' % i) weights['b_%d_img' % i] = init_bias([dim_hidden[i]], name='b_%d_img' % i) weights['b_%d_state' % i] = init_bias([dim_hidden[i]], name='b_%d_state' % i) in_shape = dim_hidden[i] continue if i > 0 and FLAGS.all_fc_bt: in_shape += FLAGS.bt_dim weights['context_%d' % i] = init_bias([FLAGS.bt_dim], name='context_%d' % i) if self.norm_type == 'selu': weights['w_%d' % i] = init_fc_weights_snn([in_shape, dim_hidden[i]], name='w_%d' % i) else: weights['w_%d' % i] = init_weights([in_shape, dim_hidden[i]], name='w_%d' % i) weights['b_%d' % i] = init_bias([dim_hidden[i]], name='b_%d' % i) if (i == n_layers - 1 or (i == 0 and FLAGS.zero_state and not FLAGS.two_arms)) and FLAGS.two_head: weights['w_%d_two_heads' % i] = init_weights([in_shape, dim_hidden[i]], name='w_%d_two_heads' % i) weights['b_%d_two_heads' % i] = init_bias([dim_hidden[i]], name='b_%d_two_heads' % i) in_shape = dim_hidden[i] return weights def forward(self, image_input, state_input, weights, meta_testing=False, is_training=True, testing=False, network_config=None): """ Perform the forward pass; given image input, state input, and weight dict, perform standard forward pass in net, except only through conv layers really, then call fc_forward and output final result through that """ if FLAGS.fc_bt: im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] flatten_image = tf.reshape(image_input, [-1, im_height*im_width*num_channels]) context = tf.transpose(tf.gather(tf.transpose(tf.zeros_like(flatten_image)), range(FLAGS.bt_dim))) context += weights['context'] if FLAGS.learn_final_eept: context_final_eept = 
tf.transpose(tf.gather(tf.transpose(tf.zeros_like(flatten_image)), range(FLAGS.bt_dim))) context_final_eept += weights['context_final_eept'] norm_type = self.norm_type decay = network_config.get('decay', 0.9) strides = network_config.get('strides', [[1, 2, 2, 1], [1, 2, 2, 1], [1, 2, 2, 1]]) downsample_factor = strides[0][1] n_strides = len(strides) n_conv_layers = len(strides) use_dropout = FLAGS.dropout prob = FLAGS.keep_prob is_dilated = network_config.get('is_dilated', False) im_height = network_config['image_height'] im_width = network_config['image_width'] num_channels = network_config['image_channels'] conv_layer = image_input if FLAGS.conv_bt: img_context = tf.zeros_like(conv_layer) img_context += weights['img_context'] conv_layer = tf.concat(axis=3, values=[conv_layer, img_context]) for i in xrange(n_conv_layers): if not use_dropout: conv_layer = norm(conv2d(img=conv_layer, w=weights['wc%d' % (i+1)], b=weights['bc%d' % (i+1)], strides=strides[i], is_dilated=is_dilated), \ norm_type=norm_type, decay=decay, id=i, is_training=is_training, activation_fn=self.activation_fn) else: conv_layer = dropout(norm(conv2d(img=conv_layer, w=weights['wc%d' % (i+1)], b=weights['bc%d' % (i+1)], strides=strides[i], is_dilated=is_dilated), \ norm_type=norm_type, decay=decay, id=i, is_training=is_training, activation_fn=self.activation_fn), keep_prob=prob, is_training=is_training, name='dropout_%d' % (i+1)) if FLAGS.fp: _, num_rows, num_cols, num_fp = conv_layer.get_shape() if is_dilated: num_rows = int(np.ceil(im_width/(downsample_factor**n_strides))) num_cols = int(np.ceil(im_height/(downsample_factor**n_strides))) num_rows, num_cols, num_fp = [int(x) for x in [num_rows, num_cols, num_fp]] x_map = np.empty([num_rows, num_cols], np.float32) y_map = np.empty([num_rows, num_cols], np.float32) for i in range(num_rows): for j in range(num_cols): x_map[i, j] = (i - num_rows / 2.0) / num_rows y_map[i, j] = (j - num_cols / 2.0) / num_cols x_map = tf.convert_to_tensor(x_map) y_map = 
tf.convert_to_tensor(y_map) x_map = tf.reshape(x_map, [num_rows * num_cols]) y_map = tf.reshape(y_map, [num_rows * num_cols]) # rearrange features to be [batch_size, num_fp, num_rows, num_cols] features = tf.reshape(tf.transpose(conv_layer, [0,3,1,2]), [-1, num_rows*num_cols]) softmax = tf.nn.softmax(features) fp_x = tf.reduce_sum(tf.multiply(x_map, softmax), [1], keep_dims=True) fp_y = tf.reduce_sum(tf.multiply(y_map, softmax), [1], keep_dims=True) conv_out_flat = tf.reshape(tf.concat(axis=1, values=[fp_x, fp_y]), [-1, num_fp*2]) else: conv_out_flat = tf.reshape(conv_layer, [-1, self.conv_out_size]) fc_input = tf.add(conv_out_flat, 0) if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, FLAGS.final_eept_max) if testing: T = 1 else: T = self.T conv_out_flat = tf.reshape(conv_out_flat, [-1, T, self.conv_out_size]) conv_size = self.conv_out_size if FLAGS.fc_bt: context_dim = FLAGS.bt_dim conv_out_flat = tf.concat(axis=2, values=[conv_out_flat, tf.reshape(context_final_eept, [-1, T, context_dim])]) conv_size += context_dim # only predict the final eept using the initial image final_ee_inp = tf.reshape(conv_out_flat, [-1, conv_size]) # use video for preupdate only if no_final_eept if (not FLAGS.learn_final_eept_whole_traj) or meta_testing: final_ee_inp = conv_out_flat[:, 0, :] if FLAGS.two_head and not meta_testing and FLAGS.no_final_eept: final_eept_pred = tf.matmul(final_ee_inp, weights['w_ee_two_heads']) + weights['b_ee_two_heads'] else: final_eept_pred = tf.matmul(final_ee_inp, weights['w_ee']) + weights['b_ee'] if (not FLAGS.learn_final_eept_whole_traj) or meta_testing: final_eept_pred = tf.reshape(tf.tile(tf.reshape(final_eept_pred, [-1]), [T]), [-1, len(final_eept_range)]) final_eept_concat = tf.identity(final_eept_pred) else: # Assume tbs == 1 # Only provide the FC layers with final_eept_pred at first time step final_eept_concat = final_eept_pred[0] final_eept_concat = tf.reshape(tf.tile(tf.reshape(final_eept_concat, [-1]), [T]), [-1, 
len(final_eept_range)]) fc_input = tf.concat(axis=1, values=[fc_input, final_eept_concat]) else: final_eept_pred = None if FLAGS.fc_bt: fc_input = tf.concat(axis=1, values=[fc_input, context]) return self.fc_forward(fc_input, weights, state_input=state_input, meta_testing=meta_testing, is_training=is_training, testing=testing, network_config=network_config), final_eept_pred def fc_forward(self, fc_input, weights, state_input=None, meta_testing=False, is_training=True, testing=False, network_config=None): ''' fc_forward: completes forward pass for conv net; performs forward pass given special fc input (flexible for multiple architectures, including 2-headed, etc) ''' n_layers = network_config.get('n_layers', 4) use_dropout = FLAGS.dropout prob = FLAGS.keep_prob fc_output = tf.add(fc_input, 0) use_selu = self.norm_type == 'selu' norm_type = self.norm_type if state_input is not None and not FLAGS.two_arms: fc_output = tf.concat(axis=1, values=[fc_output, state_input]) for i in xrange(n_layers): if i > 0 and FLAGS.all_fc_bt: context = tf.transpose(tf.gather(tf.transpose(tf.zeros_like(fc_output)), range(FLAGS.bt_dim))) context += weights['context_%d' % i] fc_output = tf.concat(axis=1, values=[fc_output, context]) if (i == n_layers - 1 or (i == 0 and FLAGS.zero_state and not FLAGS.two_arms)) and FLAGS.two_head and not meta_testing: fc_output = tf.matmul(fc_output, weights['w_%d_two_heads' % i]) + weights['b_%d_two_heads' % i] elif i == 0 and FLAGS.two_arms: assert state_input is not None if FLAGS.two_arms: state_part = weights['b_%d_state_two_arms' % i] else: state_part = tf.matmul(state_input, weights['w_%d_state' % i]) + weights['b_%d_state' % i] if not meta_testing: fc_output = tf.matmul(fc_output, weights['w_%d_img' % i]) + weights['b_%d_img' % i] + state_part else: fc_output = tf.matmul(fc_output, weights['w_%d_img' % i]) + weights['b_%d_img' % i] + \ tf.matmul(state_input, weights['w_%d_state' % i]) + weights['b_%d_state' % i] else: fc_output = tf.matmul(fc_output, 
weights['w_%d' % i]) + weights['b_%d' % i] if i != n_layers - 1: if use_selu: fc_output = selu(fc_output) else: fc_output = self.activation_fn(fc_output) # only use dropout for post-update if use_dropout: fc_output = dropout(fc_output, keep_prob=prob, is_training=is_training, name='dropout_fc_%d' % i, selu=use_selu) return fc_output def construct_model(self, input_tensors=None, prefix='Training_', dim_input=27, dim_output=7, network_config=None): """ Construct the meta-learning graph. Args: input_tensors: tensors of input videos, if available prefix: indicate whether we are building training, validation or testing graph. dim_input: Dimensionality of input. dim_output: Dimensionality of the output. network_config: dictionary of network structure parameters Returns: a tuple of output tensors. """ # create placeholders for observations, states, and actions if input_tensors is None: self.obsa = obsa = tf.placeholder(tf.float32, name='obsa') # meta_batch_size x update_batch_size x dim_input self.obsb = obsb = tf.placeholder(tf.float32, name='obsb') else: self.obsa = obsa = input_tensors['inputa'] # meta_batch_size x update_batch_size x dim_input self.obsb = obsb = input_tensors['inputb'] if not hasattr(self, 'statea'): self.statea = statea = tf.placeholder(tf.float32, name='statea') self.stateb = stateb = tf.placeholder(tf.float32, name='stateb') self.actiona = actiona = tf.placeholder(tf.float32, name='actiona') self.actionb = actionb = tf.placeholder(tf.float32, name='actionb') else: statea = self.statea stateb = self.stateb actiona = self.actiona actionb = self.actionb # feed states and observations in as input to model; this provides more info that just obs inputa = tf.concat(axis=2, values=[statea, obsa]) inputb = tf.concat(axis=2, values=[stateb, obsb]) with tf.variable_scope('model', reuse=None) as training_scope: # Construct layers weight & bias if 'weights' not in dir(self): if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, 
FLAGS.final_eept_max) self.weights = weights = self.construct_weights(dim_input, dim_output-len(final_eept_range), network_config=network_config) else: self.weights = weights = self.construct_weights(dim_input, dim_output, network_config=network_config) self.sorted_weight_keys = natsorted(self.weights.keys()) else: training_scope.reuse_variables() weights = self.weights # set hyperparameters self.step_size = FLAGS.train_update_lr loss_multiplier = FLAGS.loss_multiplier final_eept_loss_eps = FLAGS.final_eept_loss_eps act_loss_eps = FLAGS.act_loss_eps use_whole_traj = FLAGS.learn_final_eept_whole_traj # record losses for fine-tune and after meta update? num_updates = self.num_updates lossesa, outputsa = [], [] lossesb = [[] for _ in xrange(num_updates)] outputsb = [[] for _ in xrange(num_updates)] def batch_metalearn(inp): # input has two examples: action/obs a is for training on task, # action/obs b is for meta-training update # this is because you need to train on the task to get the loss L_i to take grad w.r.t. 
initial params inputa, inputb, actiona, actionb = inp inputa = tf.reshape(inputa, [-1, dim_input]) inputb = tf.reshape(inputb, [-1, dim_input]) actiona = tf.reshape(actiona, [-1, dim_output]) actionb = tf.reshape(actionb, [-1, dim_output]) gradients_summ = [] testing = 'Testing' in prefix # for learning end effector pose final_eepta, final_eeptb = None, None if FLAGS.learn_final_eept: final_eept_range = range(FLAGS.final_eept_min, FLAGS.final_eept_max) final_eepta = actiona[:, final_eept_range[0]:final_eept_range[-1]+1] final_eeptb = actionb[:, final_eept_range[0]:final_eept_range[-1]+1] actiona = actiona[:, :final_eept_range[0]] actionb = actionb[:, :final_eept_range[0]] if FLAGS.no_final_eept: final_eepta = tf.zeros_like(final_eepta) if FLAGS.no_action: actiona = tf.zeros_like(actiona) local_outputbs, local_lossesb, final_eept_lossesb = [], [], [] # Assume fixed data for each update # by update they mean the number of gradient steps on loss L_i before taking derivative w.r.t inital params actionas = [actiona]*num_updates # Convert to image dims inputa, _, state_inputa = self.construct_image_input(inputa, self.state_idx, self.img_idx, network_config=network_config) inputb, flat_img_inputb, state_inputb = self.construct_image_input(inputb, self.state_idx, self.img_idx, network_config=network_config) inputas = [inputa]*num_updates inputbs = [inputb]*num_updates if FLAGS.zero_state: state_inputa = tf.zeros_like(state_inputa) state_inputas = [state_inputa]*num_updates if FLAGS.no_state: state_inputa = None if FLAGS.learn_final_eept: final_eeptas = [final_eepta]*num_updates # euclidean loss layer = (action - mlp_out)'*precision*(action-mlp_out) = (u-uhat)'*A*(u-uhat) # Pre-update # aka update on task, single step of GD if 'Training' in prefix: local_outputa, final_eept_preda = self.forward(inputa, state_inputa, weights, network_config=network_config) else: local_outputa, final_eept_preda = self.forward(inputa, state_inputa, weights, is_training=False, 
network_config=network_config) if FLAGS.learn_final_eept: final_eept_lossa = euclidean_loss_layer(final_eept_preda, final_eepta, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossa = tf.constant(0.0) local_lossa = act_loss_eps * euclidean_loss_layer(local_outputa, actiona, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: local_lossa += final_eept_loss_eps * final_eept_lossa # Compute fast gradients - take GD step # Do normal updates on the local_lossa grads = tf.gradients(local_lossa, weights.values()) gradients = dict(zip(weights.keys(), grads)) # make fast gradient zero for weights with gradient None for key in gradients.keys(): if gradients[key] is None: gradients[key] = tf.zeros_like(weights[key]) if FLAGS.stop_grad: gradients = {key:tf.stop_gradient(gradients[key]) for key in gradients.keys()} if FLAGS.clip: clip_min = FLAGS.clip_min clip_max = FLAGS.clip_max for key in gradients.keys(): gradients[key] = tf.clip_by_value(gradients[key], clip_min, clip_max) if FLAGS.pretrain_weight_path != 'N/A': gradients['wc1'] = tf.zeros_like(gradients['wc1']) gradients['bc1'] = tf.zeros_like(gradients['bc1']) # add gradient for each key in sorted weight keys gradients_summ.append([gradients[key] for key in self.sorted_weight_keys]) # weird way to take GD step--but this is the update; w = w - lr*gradient; update weights for current task # fast_weights are the pre-update weights fast_weights = dict(zip(weights.keys(), [weights[key] - self.step_size*gradients[key] for key in weights.keys()])) # Post-update - aka meta update on demonstration b (note meta_testing=True) # Compute new loss after gradient update on weights w.r.t L_i if FLAGS.no_state: state_inputb = None if 'Training' in prefix: outputb, final_eept_predb = self.forward(inputb, state_inputb, fast_weights, meta_testing=True, network_config=network_config) else: outputb, final_eept_predb = self.forward(inputb, state_inputb, fast_weights, meta_testing=True, 
is_training=False, testing=testing, network_config=network_config) local_outputbs.append(outputb) if FLAGS.learn_final_eept: final_eept_lossb = euclidean_loss_layer(final_eept_predb, final_eeptb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossb = tf.constant(0.0) local_lossb = act_loss_eps * euclidean_loss_layer(outputb, actionb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: local_lossb += final_eept_loss_eps * final_eept_lossb if use_whole_traj: # assume tbs == 1 final_eept_lossb = euclidean_loss_layer(final_eept_predb[0], final_eeptb[0], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) final_eept_lossesb.append(final_eept_lossb) local_lossesb.append(local_lossb) # take more gradient steps using the fast_weights computed above i.e. finetune on the task for num_updates for j in range(num_updates - 1): # more input-observation pairs; num_updates = num steps of SGD to take # Pre-update state_inputa_new = state_inputas[j+1] if FLAGS.no_state: state_inputa_new = None if 'Training' in prefix: outputa, final_eept_preda = self.forward(inputas[j+1], state_inputa_new, fast_weights, network_config=network_config) else: outputa, final_eept_preda = self.forward(inputas[j+1], state_inputa_new, fast_weights, is_training=False, testing=testing, network_config=network_config) if FLAGS.learn_final_eept: final_eept_lossa = euclidean_loss_layer(final_eept_preda, final_eeptas[j+1], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossa = tf.constant(0.0) loss = act_loss_eps * euclidean_loss_layer(outputa, actionas[j+1], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: loss += final_eept_loss_eps * final_eept_lossa # Compute fast gradients grads = tf.gradients(loss, fast_weights.values()) gradients = dict(zip(fast_weights.keys(), grads)) # make fast gradient zero for weights with gradient None for key in gradients.keys(): if gradients[key] is None: 
gradients[key] = tf.zeros_like(fast_weights[key]) if FLAGS.stop_grad: gradients = {key:tf.stop_gradient(gradients[key]) for key in gradients.keys()} if FLAGS.clip: clip_min = FLAGS.clip_min clip_max = FLAGS.clip_max for key in gradients.keys(): gradients[key] = tf.clip_by_value(gradients[key], clip_min, clip_max) if FLAGS.pretrain_weight_path != 'N/A': gradients['wc1'] = tf.zeros_like(gradients['wc1']) gradients['bc1'] = tf.zeros_like(gradients['bc1']) gradients_summ.append([gradients[key] for key in self.sorted_weight_keys]) fast_weights = dict(zip(fast_weights.keys(), [fast_weights[key] - self.step_size*gradients[key] for key in fast_weights.keys()])) # Post-update if FLAGS.no_state: state_inputb = None if 'Training' in prefix: output, final_eept_predb = self.forward(inputbs[j+1], state_inputb, fast_weights, meta_testing=True, network_config=network_config) else: output, final_eept_predb = self.forward(inputbs[j+1], state_inputb, fast_weights, meta_testing=True, is_training=False, testing=testing, network_config=network_config) local_outputbs.append(output) if FLAGS.learn_final_eept: final_eept_lossb = euclidean_loss_layer(final_eept_predb, final_eeptb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) else: final_eept_lossb = tf.constant(0.0) lossb = act_loss_eps * euclidean_loss_layer(output, actionb, multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) if FLAGS.learn_final_eept: lossb += final_eept_loss_eps * final_eept_lossb if use_whole_traj: # assume tbs == 1 final_eept_lossb = euclidean_loss_layer(final_eept_predb[0], final_eeptb[0], multiplier=loss_multiplier, use_l1=FLAGS.use_l1_l2_loss) final_eept_lossesb.append(final_eept_lossb) local_lossesb.append(lossb) # assuming this is all of the loss / gradient information needed to make take a MIL step local_fn_output = [local_outputa, local_outputbs, local_outputbs[-1], local_lossa, \ local_lossesb, final_eept_lossesb, flat_img_inputb, gradients_summ, fast_weights] return local_fn_output if 
self.norm_type: # initialize batch norm vars. unused = batch_metalearn((inputa[0], inputb[0], actiona[0], actionb[0])) out_dtype = [tf.float32, [tf.float32]*num_updates, tf.float32, tf.float32, [tf.float32]*num_updates, [tf.float32]*num_updates, \ tf.float32, [[tf.float32]*len(self.weights.keys())]*num_updates, [tf.float]*len(self.weights.keys())] # creates a list of loss, gradient info to take MIL step # we are mapping over tasks in elems so each index is a task result = tf.map_fn(batch_metalearn, elems=(inputa, inputb, actiona, actionb), dtype=out_dtype) print 'Done with map.' return result
en
0.770851
This file defines Meta Imitation Learning (MIL). Initialize MIL. Need to call init_network to contruct the architecture after init. # MIL hyperparams # by default, we use relu # List of indices for state (vector) data and image (tensor) data in observation. # Dimension of input and output of the model Helper method to initialize the tf networks used; takes in tf graph; initializes networks; calls construct_model; sets params based on training/validation/test mode contained in prefix var # map inputs to outputs # added code for reptile # get actual weights # maintain weights for each task # where W_i is the pre-update fine-tune of parameters on task i # w' = w - \eps 1/k sum_i^n (W_i - w) # self.weights set in construct_model to pre update weights #= natsorted(self.weights.keys()) # the import variables expects that weights is # # pre-update losses # post-update losses # TODO: add reptile in here # TODO: figure out why we are using total_losses2[self.num_updates - 1], it is becuase you only update on loss of last fine-tune step # Add summaries # Add summaries Preprocess images; takes in state_idx (list of indices for state data in observation), img_idx (list of indices for image data in observation), and nn_input; pretty much just preprocesses input; returns preprocessed image input, flattened image input, and the state input # image goes through 3 convnet layers # 'RGB'->'BGR' Construct weights for the network; takes in input dim and final output dim and just builds conv and fc weights, including augmented bias thing (and 2-headed architecture if FLAGS.two_head is true); returns weights # used to be 2 # conv weights # 5x5 conv, 1 input, 32 outputs # 5x5 conv, 1 input, 32 outputs # 5x5 conv, 1 input, 32 outputs # fc weights same as above just for only fc weights Perform the forward pass; given image input, state input, and weight dict, perform standard forward pass in net, except only through conv layers really, then call fc_forward and output final result through 
that # rearrange features to be [batch_size, num_fp, num_rows, num_cols] # only predict the final eept using the initial image # use video for preupdate only if no_final_eept # Assume tbs == 1 # Only provide the FC layers with final_eept_pred at first time step fc_forward: completes forward pass for conv net; performs forward pass given special fc input (flexible for multiple architectures, including 2-headed, etc) # only use dropout for post-update Construct the meta-learning graph. Args: input_tensors: tensors of input videos, if available prefix: indicate whether we are building training, validation or testing graph. dim_input: Dimensionality of input. dim_output: Dimensionality of the output. network_config: dictionary of network structure parameters Returns: a tuple of output tensors. # create placeholders for observations, states, and actions # meta_batch_size x update_batch_size x dim_input # meta_batch_size x update_batch_size x dim_input # feed states and observations in as input to model; this provides more info that just obs # Construct layers weight & bias # set hyperparameters # record losses for fine-tune and after meta update? # input has two examples: action/obs a is for training on task, # action/obs b is for meta-training update # this is because you need to train on the task to get the loss L_i to take grad w.r.t. 
initial params # for learning end effector pose # Assume fixed data for each update # by update they mean the number of gradient steps on loss L_i before taking derivative w.r.t inital params # Convert to image dims # euclidean loss layer = (action - mlp_out)'*precision*(action-mlp_out) = (u-uhat)'*A*(u-uhat) # Pre-update # aka update on task, single step of GD # Compute fast gradients - take GD step # Do normal updates on the local_lossa # make fast gradient zero for weights with gradient None # add gradient for each key in sorted weight keys # weird way to take GD step--but this is the update; w = w - lr*gradient; update weights for current task # fast_weights are the pre-update weights # Post-update - aka meta update on demonstration b (note meta_testing=True) # Compute new loss after gradient update on weights w.r.t L_i # assume tbs == 1 # take more gradient steps using the fast_weights computed above i.e. finetune on the task for num_updates # more input-observation pairs; num_updates = num steps of SGD to take # Pre-update # Compute fast gradients # make fast gradient zero for weights with gradient None # Post-update # assume tbs == 1 # assuming this is all of the loss / gradient information needed to make take a MIL step # initialize batch norm vars. # creates a list of loss, gradient info to take MIL step # we are mapping over tasks in elems so each index is a task
2.675035
3
molgym/buffer.py
polyzer/molgym
1
6625343
# The content of this file is based on: OpenAI Spinning Up https://spinningup.openai.com/. from typing import Optional, List import numpy as np from molgym.spaces import ObservationType from molgym.tools import util from molgym.tools.mpi import mpi_mean_std class PPOBuffer: """ A buffer for storing trajectories experienced by a PPO agent interacting with the environment, and using Generalized Advantage Estimation (GAE-Lambda) for calculating the advantages of state-action pairs. """ def __init__(self, int_act_dim: int, size: int, gamma=0.99, lam=0.95) -> None: self.obs_buf: List[Optional[ObservationType]] = [None] * size self.act_buf = np.empty((size, int_act_dim), dtype=np.float32) self.rew_buf = np.zeros(size, dtype=np.float32) self.next_obs_buf: List[Optional[ObservationType]] = [None] * size self.term_buf = np.zeros(size, dtype=np.bool) self.val_buf = np.zeros(size, dtype=np.float32) self.logp_buf = np.zeros(size, dtype=np.float32) # Filled when path is finished self.adv_buf = np.zeros(size, dtype=np.float32) self.ret_buf = np.zeros(size, dtype=np.float32) self.gamma = gamma self.lam = lam self.ptr = 0 self.path_start_idx = 0 self.max_size = size def store(self, obs: ObservationType, act: np.ndarray, reward: float, next_obs: ObservationType, terminal: bool, value: float, logp: float): """Append one time step of agent-environment interaction to the buffer.""" assert self.ptr < self.max_size # buffer has to have room so you can store self.obs_buf[self.ptr] = obs self.act_buf[self.ptr] = act self.rew_buf[self.ptr] = reward self.next_obs_buf[self.ptr] = next_obs self.term_buf[self.ptr] = terminal self.val_buf[self.ptr] = value self.logp_buf[self.ptr] = logp self.ptr += 1 def finish_path(self, last_val: float) -> float: """ Call this at the end of a trajectory, or when one gets cut off by an epoch ending. 
This looks back in the buffer to where the trajectory started, and uses rewards and value estimates from the whole trajectory to compute advantage estimates with GAE-Lambda, as well as compute the rewards-to-go for each state, to use as the targets for the value function. The "last_val" argument should be 0 if the trajectory ended because the agent reached a terminal state (died), and otherwise should be V(s_T), the value function estimated for the last state. This allows us to bootstrap the reward-to-go calculation to account for timesteps beyond the arbitrary episode horizon (or epoch cutoff). """ path_slice = slice(self.path_start_idx, self.ptr) rews = np.append(self.rew_buf[path_slice], last_val) vals = np.append(self.val_buf[path_slice], last_val) # the next two lines implement GAE-Lambda advantage calculation deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1] self.adv_buf[path_slice] = util.discount_cumsum(deltas, self.gamma * self.lam) # the next line computes rewards-to-go, to be targets for the value function self.ret_buf[path_slice] = util.discount_cumsum(rews, self.gamma)[:-1] episodic_return = self.ret_buf[self.path_start_idx] self.path_start_idx = self.ptr return episodic_return def get(self): """ Call this at the end of an epoch to get all of the data from the buffer, with advantages appropriately normalized (shifted to have mean zero and std one). Also, resets some pointers in the buffer. """ assert self.is_full() # buffer has to be full before you can get self.ptr, self.path_start_idx = 0, 0 # the next two lines implement the advantage normalization trick adv_mean, adv_std = mpi_mean_std(self.adv_buf, axis=-1) self.adv_buf = (self.adv_buf - adv_mean) / adv_std return dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf, adv=self.adv_buf, logp=self.logp_buf) def is_full(self) -> bool: return self.ptr == self.max_size
# The content of this file is based on: OpenAI Spinning Up https://spinningup.openai.com/. from typing import Optional, List import numpy as np from molgym.spaces import ObservationType from molgym.tools import util from molgym.tools.mpi import mpi_mean_std class PPOBuffer: """ A buffer for storing trajectories experienced by a PPO agent interacting with the environment, and using Generalized Advantage Estimation (GAE-Lambda) for calculating the advantages of state-action pairs. """ def __init__(self, int_act_dim: int, size: int, gamma=0.99, lam=0.95) -> None: self.obs_buf: List[Optional[ObservationType]] = [None] * size self.act_buf = np.empty((size, int_act_dim), dtype=np.float32) self.rew_buf = np.zeros(size, dtype=np.float32) self.next_obs_buf: List[Optional[ObservationType]] = [None] * size self.term_buf = np.zeros(size, dtype=np.bool) self.val_buf = np.zeros(size, dtype=np.float32) self.logp_buf = np.zeros(size, dtype=np.float32) # Filled when path is finished self.adv_buf = np.zeros(size, dtype=np.float32) self.ret_buf = np.zeros(size, dtype=np.float32) self.gamma = gamma self.lam = lam self.ptr = 0 self.path_start_idx = 0 self.max_size = size def store(self, obs: ObservationType, act: np.ndarray, reward: float, next_obs: ObservationType, terminal: bool, value: float, logp: float): """Append one time step of agent-environment interaction to the buffer.""" assert self.ptr < self.max_size # buffer has to have room so you can store self.obs_buf[self.ptr] = obs self.act_buf[self.ptr] = act self.rew_buf[self.ptr] = reward self.next_obs_buf[self.ptr] = next_obs self.term_buf[self.ptr] = terminal self.val_buf[self.ptr] = value self.logp_buf[self.ptr] = logp self.ptr += 1 def finish_path(self, last_val: float) -> float: """ Call this at the end of a trajectory, or when one gets cut off by an epoch ending. 
This looks back in the buffer to where the trajectory started, and uses rewards and value estimates from the whole trajectory to compute advantage estimates with GAE-Lambda, as well as compute the rewards-to-go for each state, to use as the targets for the value function. The "last_val" argument should be 0 if the trajectory ended because the agent reached a terminal state (died), and otherwise should be V(s_T), the value function estimated for the last state. This allows us to bootstrap the reward-to-go calculation to account for timesteps beyond the arbitrary episode horizon (or epoch cutoff). """ path_slice = slice(self.path_start_idx, self.ptr) rews = np.append(self.rew_buf[path_slice], last_val) vals = np.append(self.val_buf[path_slice], last_val) # the next two lines implement GAE-Lambda advantage calculation deltas = rews[:-1] + self.gamma * vals[1:] - vals[:-1] self.adv_buf[path_slice] = util.discount_cumsum(deltas, self.gamma * self.lam) # the next line computes rewards-to-go, to be targets for the value function self.ret_buf[path_slice] = util.discount_cumsum(rews, self.gamma)[:-1] episodic_return = self.ret_buf[self.path_start_idx] self.path_start_idx = self.ptr return episodic_return def get(self): """ Call this at the end of an epoch to get all of the data from the buffer, with advantages appropriately normalized (shifted to have mean zero and std one). Also, resets some pointers in the buffer. """ assert self.is_full() # buffer has to be full before you can get self.ptr, self.path_start_idx = 0, 0 # the next two lines implement the advantage normalization trick adv_mean, adv_std = mpi_mean_std(self.adv_buf, axis=-1) self.adv_buf = (self.adv_buf - adv_mean) / adv_std return dict(obs=self.obs_buf, act=self.act_buf, ret=self.ret_buf, adv=self.adv_buf, logp=self.logp_buf) def is_full(self) -> bool: return self.ptr == self.max_size
en
0.911601
# The content of this file is based on: OpenAI Spinning Up https://spinningup.openai.com/. A buffer for storing trajectories experienced by a PPO agent interacting with the environment, and using Generalized Advantage Estimation (GAE-Lambda) for calculating the advantages of state-action pairs. # Filled when path is finished Append one time step of agent-environment interaction to the buffer. # buffer has to have room so you can store Call this at the end of a trajectory, or when one gets cut off by an epoch ending. This looks back in the buffer to where the trajectory started, and uses rewards and value estimates from the whole trajectory to compute advantage estimates with GAE-Lambda, as well as compute the rewards-to-go for each state, to use as the targets for the value function. The "last_val" argument should be 0 if the trajectory ended because the agent reached a terminal state (died), and otherwise should be V(s_T), the value function estimated for the last state. This allows us to bootstrap the reward-to-go calculation to account for timesteps beyond the arbitrary episode horizon (or epoch cutoff). # the next two lines implement GAE-Lambda advantage calculation # the next line computes rewards-to-go, to be targets for the value function Call this at the end of an epoch to get all of the data from the buffer, with advantages appropriately normalized (shifted to have mean zero and std one). Also, resets some pointers in the buffer. # buffer has to be full before you can get # the next two lines implement the advantage normalization trick
2.37406
2
venv/Lib/site-packages/IPython/core/tests/test_displayhook.py
ajayiagbebaku/NFL-Model
1,318
6625344
import sys from IPython.testing.tools import AssertPrints, AssertNotPrints from IPython.core.displayhook import CapturingDisplayHook from IPython.utils.capture import CapturedIO def test_output_displayed(): """Checking to make sure that output is displayed""" with AssertPrints('2'): ip.run_cell('1+1', store_history=True) with AssertPrints('2'): ip.run_cell('1+1 # comment with a semicolon;', store_history=True) with AssertPrints('2'): ip.run_cell('1+1\n#commented_out_function();', store_history=True) def test_output_quiet(): """Checking to make sure that output is quiet""" with AssertNotPrints('2'): ip.run_cell('1+1;', store_history=True) with AssertNotPrints('2'): ip.run_cell('1+1; # comment with a semicolon', store_history=True) with AssertNotPrints('2'): ip.run_cell('1+1;\n#commented_out_function()', store_history=True) def test_underscore_no_overrite_user(): ip.run_cell('_ = 42', store_history=True) ip.run_cell('1+1', store_history=True) with AssertPrints('42'): ip.run_cell('print(_)', store_history=True) ip.run_cell('del _', store_history=True) ip.run_cell('6+6', store_history=True) with AssertPrints('12'): ip.run_cell('_', store_history=True) def test_underscore_no_overrite_builtins(): ip.run_cell("import gettext ; gettext.install('foo')", store_history=True) ip.run_cell('3+3', store_history=True) with AssertPrints('gettext'): ip.run_cell('print(_)', store_history=True) ip.run_cell('_ = "userset"', store_history=True) with AssertPrints('userset'): ip.run_cell('print(_)', store_history=True) ip.run_cell('import builtins; del builtins._') def test_interactivehooks_ast_modes(): """ Test that ast nodes can be triggered with different modes """ saved_mode = ip.ast_node_interactivity ip.ast_node_interactivity = 'last_expr_or_assign' try: with AssertPrints('2'): ip.run_cell('a = 1+1', store_history=True) with AssertPrints('9'): ip.run_cell('b = 1+8 # comment with a semicolon;', store_history=False) with AssertPrints('7'): ip.run_cell('c = 
1+6\n#commented_out_function();', store_history=True) ip.run_cell('d = 11', store_history=True) with AssertPrints('12'): ip.run_cell('d += 1', store_history=True) with AssertNotPrints('42'): ip.run_cell('(u,v) = (41+1, 43-1)') finally: ip.ast_node_interactivity = saved_mode def test_interactivehooks_ast_modes_semi_suppress(): """ Test that ast nodes can be triggered with different modes and suppressed by semicolon """ saved_mode = ip.ast_node_interactivity ip.ast_node_interactivity = 'last_expr_or_assign' try: with AssertNotPrints('2'): ip.run_cell('x = 1+1;', store_history=True) with AssertNotPrints('7'): ip.run_cell('y = 1+6; # comment with a semicolon', store_history=True) with AssertNotPrints('9'): ip.run_cell('z = 1+8;\n#commented_out_function()', store_history=True) finally: ip.ast_node_interactivity = saved_mode def test_capture_display_hook_format(): """Tests that the capture display hook conforms to the CapturedIO output format""" hook = CapturingDisplayHook(ip) hook({"foo": "bar"}) captured = CapturedIO(sys.stdout, sys.stderr, hook.outputs) # Should not raise with RichOutput transformation error captured.outputs
import sys from IPython.testing.tools import AssertPrints, AssertNotPrints from IPython.core.displayhook import CapturingDisplayHook from IPython.utils.capture import CapturedIO def test_output_displayed(): """Checking to make sure that output is displayed""" with AssertPrints('2'): ip.run_cell('1+1', store_history=True) with AssertPrints('2'): ip.run_cell('1+1 # comment with a semicolon;', store_history=True) with AssertPrints('2'): ip.run_cell('1+1\n#commented_out_function();', store_history=True) def test_output_quiet(): """Checking to make sure that output is quiet""" with AssertNotPrints('2'): ip.run_cell('1+1;', store_history=True) with AssertNotPrints('2'): ip.run_cell('1+1; # comment with a semicolon', store_history=True) with AssertNotPrints('2'): ip.run_cell('1+1;\n#commented_out_function()', store_history=True) def test_underscore_no_overrite_user(): ip.run_cell('_ = 42', store_history=True) ip.run_cell('1+1', store_history=True) with AssertPrints('42'): ip.run_cell('print(_)', store_history=True) ip.run_cell('del _', store_history=True) ip.run_cell('6+6', store_history=True) with AssertPrints('12'): ip.run_cell('_', store_history=True) def test_underscore_no_overrite_builtins(): ip.run_cell("import gettext ; gettext.install('foo')", store_history=True) ip.run_cell('3+3', store_history=True) with AssertPrints('gettext'): ip.run_cell('print(_)', store_history=True) ip.run_cell('_ = "userset"', store_history=True) with AssertPrints('userset'): ip.run_cell('print(_)', store_history=True) ip.run_cell('import builtins; del builtins._') def test_interactivehooks_ast_modes(): """ Test that ast nodes can be triggered with different modes """ saved_mode = ip.ast_node_interactivity ip.ast_node_interactivity = 'last_expr_or_assign' try: with AssertPrints('2'): ip.run_cell('a = 1+1', store_history=True) with AssertPrints('9'): ip.run_cell('b = 1+8 # comment with a semicolon;', store_history=False) with AssertPrints('7'): ip.run_cell('c = 
1+6\n#commented_out_function();', store_history=True) ip.run_cell('d = 11', store_history=True) with AssertPrints('12'): ip.run_cell('d += 1', store_history=True) with AssertNotPrints('42'): ip.run_cell('(u,v) = (41+1, 43-1)') finally: ip.ast_node_interactivity = saved_mode def test_interactivehooks_ast_modes_semi_suppress(): """ Test that ast nodes can be triggered with different modes and suppressed by semicolon """ saved_mode = ip.ast_node_interactivity ip.ast_node_interactivity = 'last_expr_or_assign' try: with AssertNotPrints('2'): ip.run_cell('x = 1+1;', store_history=True) with AssertNotPrints('7'): ip.run_cell('y = 1+6; # comment with a semicolon', store_history=True) with AssertNotPrints('9'): ip.run_cell('z = 1+8;\n#commented_out_function()', store_history=True) finally: ip.ast_node_interactivity = saved_mode def test_capture_display_hook_format(): """Tests that the capture display hook conforms to the CapturedIO output format""" hook = CapturingDisplayHook(ip) hook({"foo": "bar"}) captured = CapturedIO(sys.stdout, sys.stderr, hook.outputs) # Should not raise with RichOutput transformation error captured.outputs
en
0.613298
Checking to make sure that output is displayed # comment with a semicolon;', store_history=True) #commented_out_function();', store_history=True) Checking to make sure that output is quiet # comment with a semicolon', store_history=True) #commented_out_function()', store_history=True) Test that ast nodes can be triggered with different modes # comment with a semicolon;', store_history=False) #commented_out_function();', store_history=True) Test that ast nodes can be triggered with different modes and suppressed by semicolon # comment with a semicolon', store_history=True) #commented_out_function()', store_history=True) Tests that the capture display hook conforms to the CapturedIO output format # Should not raise with RichOutput transformation error
2.474473
2
gui/qt/__init__.py
mpatc/Electron-Cash
1
6625345
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import gc, os, platform, shutil, signal, sys, traceback try: import PyQt5 except Exception: if sys.platform.startswith('win'): msg = ("\n\nError: Could not import PyQt5.\n" "If you are running the release .exe, this is a bug (please" " contact the developers in that case).\n" "If you are running from source, then you may try this from the command-line:\n\n" " python -m pip install pyqt5\n\n") elif sys.platform.startswith('darw'): msg = ("\n\nError: Could not import PyQt5.\n" "If you are running the release .app, this is a bug (please" " contact the developers in that case).\n" "If you are running from source, then you may try this from the command-line:\n\n" " python3 -m pip install --user -I pyqt5\n\n") else: msg = ("\n\nError: Could not import PyQt5.\n" "You may try:\n\n" " python3 -m pip install --user -I pyqt5\n\n" "Or, if on Linux Ubuntu, Debian, etc:\n\n" " sudo apt-get install python3-pyqt5\n\n") sys.exit(msg) from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore import * from electroncash.i18n import _ from electroncash import i18n from electroncash.plugins import run_hook from electroncash import WalletStorage from electroncash.util import (UserCancelled, PrintError, print_error, standardize_path, finalization_print_error, Weak, get_new_wallet_name, Handlers) from electroncash import version from electroncash.address import Address from .installwizard import InstallWizard, GoBack from . import icons # This needs to be imported once app-wide then the :icons/ namespace becomes available for Qt icon filenames. from .util import * # * needed for plugins from .main_window import ElectrumWindow from .network_dialog import NetworkDialog from .exception_window import Exception_Hook from .update_checker import UpdateChecker class ElectrumGui(QObject, PrintError): new_window_signal = pyqtSignal(str, object) update_available_signal = pyqtSignal(bool) cashaddr_toggled_signal = pyqtSignal() # app-wide signal for when cashaddr format is toggled. 
This used to live in each ElectrumWindow instance but it was recently refactored to here. cashaddr_status_button_hidden_signal = pyqtSignal(bool) # app-wide signal for when cashaddr toggle button is hidden from the status bar shutdown_signal = pyqtSignal() # signal for requesting an app-wide full shutdown do_in_main_thread_signal = pyqtSignal(object, object, object) instance = None def __init__(self, config, daemon, plugins): super(__class__, self).__init__() # QObject init assert __class__.instance is None, "ElectrumGui is a singleton, yet an instance appears to already exist! FIXME!" __class__.instance = self i18n.set_language(config.get('language')) self.config = config self.daemon = daemon self.plugins = plugins self.windows = [] self._setup_do_in_main_thread_handler() # Uncomment this call to verify objects are being properly # GC-ed when windows are closed #if daemon.network: # from electroncash.util import DebugMem # from electroncash.wallet import Abstract_Wallet # from electroncash.verifier import SPV # from electroncash.synchronizer import Synchronizer # daemon.network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer, # ElectrumWindow], interval=5)]) call_after_app = self._pre_and_post_app_setup() try: self.app = QApplication(sys.argv) finally: call_after_app() self._load_fonts() # this needs to be done very early, before the font engine loads fonts.. out of paranoia self._exit_if_required_pyqt_is_missing() # This may immediately exit the app if missing required PyQt5 modules, so it should also be done early. 
self.new_version_available = None self._set_icon() self.app.installEventFilter(self) self.timer = QTimer(self); self.timer.setSingleShot(False); self.timer.setInterval(500) #msec self.gc_timer = QTimer(self); self.gc_timer.setSingleShot(True); self.gc_timer.timeout.connect(ElectrumGui.gc); self.gc_timer.setInterval(500) #msec self.nd = None self._last_active_window = None # we remember the last activated ElectrumWindow as a Weak.ref Address.show_cashaddr(self.is_cashaddr()) # Dark Theme -- ideally set this before any widgets are created. self.set_dark_theme_if_needed() # / # Wallet Password Cache # wallet -> (password, QTimer) map for some plugins (like CashShuffle) # that need wallet passwords to operate, and we don't want to prompt # for pw twice right after the InstallWizard runs (see #106). # Entries in this map are deleted after 10 seconds by the QTimer (which # also deletes itself) self._wallet_password_cache = Weak.KeyDictionary() # / self.update_checker = UpdateChecker() self.update_checker_timer = QTimer(self); self.update_checker_timer.timeout.connect(self.on_auto_update_timeout); self.update_checker_timer.setSingleShot(False) self.update_checker.got_new_version.connect(self.on_new_version) # init tray self.dark_icon = self.config.get("dark_icon", False) self.tray = QSystemTrayIcon(self.tray_icon(), self) self.tray.setToolTip('Electron Cash') self.tray.activated.connect(self.tray_activated) self.build_tray_menu() self.tray.show() self.new_window_signal.connect(self.start_new_window) if self.has_auto_update_check(): self._start_auto_update_timer(first_run = True) self.app.focusChanged.connect(self.on_focus_change) # track last window the user interacted with self.shutdown_signal.connect(self.close, Qt.QueuedConnection) run_hook('init_qt', self) # We did this once already in the set_dark_theme call, but we do this # again here just in case some plugin modified the color scheme. 
ColorScheme.update_from_widget(QWidget()) self._check_and_warn_qt_version() def __del__(self): stale = True if __class__.instance is self: stale = False __class__.instance = None print_error("[{}] finalized{}".format(__class__.__name__, ' (stale instance)' if stale else '')) if hasattr(super(), '__del__'): super().__del__() def _setup_do_in_main_thread_handler(self): ''' Sets up "do_in_main_thread" handler mechanism for Qt GUI. ''' self.do_in_main_thread_signal.connect(self._do_in_main_thread_handler_slot) orig_handler = Handlers.do_in_main_thread weakSelf = Weak.ref(self) def my_do_in_main_thread_handler(func, *args, **kwargs): strongSelf = weakSelf() if strongSelf: # We are still alive, emit the signal which will be handled # in the main thread. strongSelf.do_in_main_thread_signal.emit(func, args, kwargs) else: # We died. Uninstall this handler, invoke original handler. Handlers.do_in_main_thread = orig_handler orig_handler(func, *args, **kwargs) Handlers.do_in_main_thread = my_do_in_main_thread_handler def _do_in_main_thread_handler_slot(self, func, args, kwargs): ''' Hooked in to util.Handlers.do_in_main_thread via the do_in_main_thread_signal. This ensures that there is an app-wide mechanism for posting invocations to the main thread. Currently CashFusion uses this mechanism, but other code may as well. ''' func(*args, **kwargs) def _pre_and_post_app_setup(self): ''' Call this before instantiating the QApplication object. It sets up some platform-specific miscellany that need to happen before the QApplication is constructed. A function is returned. This function *must* be called after the QApplication is constructed. ''' callables = [] def call_callables(): for func in callables: func() ret = call_callables if hasattr(QGuiApplication, 'setDesktopFileName'): QGuiApplication.setDesktopFileName('electron-cash.desktop') if self.windows_qt_use_freetype: # Use FreeType for font rendering on Windows. 
This fixes rendering # of the Schnorr sigil and allows us to load the Noto Color Emoji # font if needed. os.environ['QT_QPA_PLATFORM'] = 'windows:fontengine=freetype' QCoreApplication.setAttribute(Qt.AA_X11InitThreads) if hasattr(Qt, "AA_ShareOpenGLContexts"): QCoreApplication.setAttribute(Qt.AA_ShareOpenGLContexts) if sys.platform not in ('darwin',) and hasattr(Qt, "AA_EnableHighDpiScaling"): # The below only applies to non-macOS. On macOS this setting is # never used (because it is implicitly auto-negotiated by the OS # in a differernt way). # # qt_disable_highdpi will be set to None by default, or True if # specified on command-line. The command-line override is intended # to supporess high-dpi mode just for this run for testing. # # The more permanent setting is qt_enable_highdpi which is the GUI # preferences option, so we don't enable highdpi if it's explicitly # set to False in the GUI. # # The default on Linux, Windows, etc is to enable high dpi disable_scaling = self.config.get('qt_disable_highdpi', False) enable_scaling = self.config.get('qt_enable_highdpi', True) if not disable_scaling and enable_scaling: QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling) if hasattr(Qt, "AA_UseHighDpiPixmaps"): QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps) # macOS Mojave "font rendering looks terrible on PyQt5.11" workaround. # See: https://old.reddit.com/r/apple/comments/9leavs/fix_mojave_font_rendering_issues_on_a_perapp_basis/ # This affects PyQt 5.11 (which is what we ship in the macOS El Capitan # .dmg). We apply the workaround and also warn the user to not use # the El Capitan compatibility .dmg. if sys.platform in ('darwin',) and self.qt_version() < (5, 12): # macOS hacks. On Mojave with PyQt <5.12 the font rendering is terrible. # As a workaround we need to temporarily set this 'defaults' keys # which we immediately disable after the QApplication is started. 
try: ver = tuple(int(a) for a in platform.mac_ver()[0].split('.')) except (TypeError, ValueError): self.print_error("WARNING: Cannot parse platform.mac_ver", f"'{platform.mac_ver()[0]}'") ver = None if ver and ver >= (10, 14): from electroncash.utils import macos self.print_error("Mojave+ with PyQt<5.12 detected; applying CGFontRenderingFontSmoothingDisabled workaround...") bundle = macos.get_bundle_identifier() os.system(f'defaults write {bundle} CGFontRenderingFontSmoothingDisabled -bool NO') def undo_hack(): os.system(f'defaults delete {bundle} CGFontRenderingFontSmoothingDisabled') self.print_error("Mojave+ font rendering workaround applied.") #msg = _("Mojave or newer system detected, however you are running the " # "El Capitan compatibility release of Electron Cash. " # "Font and graphics rendering may be affected." # "\n\nPlease obtain the latest non-compatibility version " # "of Electron Cash for MacOS.") #QMessageBox.warning(None, _("Warning"), msg) # this works even if app is not exec_() yet. callables.append(undo_hack) def setup_layout_direction(): """Sets the app layout direction depending on language. To be called after self.app is created successfully. Note this *MUST* be called after set_language has been called.""" assert i18n.set_language_called > 0 lc = i18n.language.info().get('language') lc = '' if not isinstance(lc, str) else lc lc = lc.split('_')[0] layout_direction = Qt.LeftToRight blurb = "left-to-right" if lc in {'ar', 'fa', 'he', 'ps', 'ug', 'ur'}: # Right-to-left languages layout_direction = Qt.RightToLeft blurb = "right-to-left" self.print_error("Setting layout direction:", blurb) self.app.setLayoutDirection(layout_direction) # callable will be called after self.app is set-up successfully callables.append(setup_layout_direction) return ret def _exit_if_required_pyqt_is_missing(self): ''' Will check if required PyQt5 modules are present and if not, display an error message box to the user and immediately quit the app. 
This is because some Linux systems break up PyQt5 into multiple subpackages, and for instance PyQt5 QtSvg is its own package, and it may be missing. ''' try: from PyQt5 import QtSvg except ImportError: # Closes #1436 -- Some "Run from source" Linux users lack QtSvg # (partial PyQt5 install) msg = _("A required Qt module, QtSvg was not found. Please fully install all of PyQt5 5.12 or above to resolve this issue.") if sys.platform == 'linux': msg += "\n\n" + _("On Linux, you may try:\n\n python3 -m pip install --user -I pyqt5") if shutil.which('apt'): msg += "\n\n" + _("On Debian-based distros, you can run:\n\n sudo apt install python3-pyqt5.qtsvg") QMessageBox.critical(None, _("QtSvg Missing"), msg) # this works even if app is not exec_() yet. self.app.exit(1) sys.exit(msg) def is_dark_theme_available(self): if sys.platform in ('darwin',): # On OSX, qdarkstyle is kind of broken. We instead rely on Mojave # dark mode if (built in to the OS) for this facility, which the # user can set outside of this application. return False try: import qdarkstyle except: return False return True def set_dark_theme_if_needed(self): if sys.platform in ('darwin',): # On OSX, qdarkstyle is kind of broken. We instead rely on Mojave # dark mode if (built in to the OS) for this facility, which the # user can set outside of this application. use_dark_theme = False else: use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark' darkstyle_ver = None if use_dark_theme: try: import qdarkstyle self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5()) try: darkstyle_ver = version.normalize_version(qdarkstyle.__version__) except (ValueError, IndexError, TypeError, NameError, AttributeError) as e: self.print_error("Warning: Could not determine qdarkstyle version:", repr(e)) except BaseException as e: use_dark_theme = False self.print_error('Error setting dark theme: {}'.format(repr(e))) # Apply any necessary stylesheet patches. 
For now this only does anything # if the version is < 2.6.8. # 2.6.8+ seems to have fixed all the issues (for now!) from . import style_patcher style_patcher.patch(dark=use_dark_theme, darkstyle_ver=darkstyle_ver) # Even if we ourselves don't set the dark theme, # the OS/window manager/etc might set *a dark theme*. # Hence, try to choose colors accordingly: ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme) def get_cached_password(self, wallet): ''' Passwords in the cache only live for a very short while (10 seconds) after wallet window creation, and only if it's a new window. This mechanism is a convenience for plugins that need access to the wallet password and it would make for poor UX for the user to enter their password twice when opening a new window ''' entry = self._wallet_password_cache.get(wallet) if entry: return entry[0] def _expire_cached_password(self, weakWallet): ''' Timer callback, called after 10 seconds. ''' wallet = weakWallet() if isinstance(weakWallet, Weak.ref) else weakWallet if wallet: entry = self._wallet_password_cache.pop(wallet, None) if entry: timer = entry[1] timer.stop(); timer.deleteLater() def _cache_password(self, wallet, password): self._expire_cached_password(wallet) if password is None: return timer = QTimer() # NB a top-level parentless QObject will get delete by Python when its Python refct goes to 0, which is what we want here. Future programmers: Do not give this timer a parent! 
self._wallet_password_cache[wallet] = (password, timer) weakWallet = Weak.ref(wallet) weakSelf = Weak.ref(self) def timeout(): slf = weakSelf() slf and slf._expire_cached_password(weakWallet) timer.setSingleShot(True); timer.timeout.connect(timeout); timer.start(10000) # 10 sec def cache_password(self, wallet, password): self._cache_password(wallet, password) def _set_icon(self): icon = None if sys.platform == 'darwin': # on macOS, in "running from source" mode, we want to set the app # icon, otherwise we get the generic Python icon. # In non-running-from-source mode, macOS will get the icon from # the .app bundle Info.plist spec (which ends up being # electron.icns). However, in .app mode, Qt will not know about # this icon and won't be able to use it for e.g. the About dialog. # In the latter case the branch below will tell Qt to use # electron-cash.svg as the "window icon". icon = QIcon("electron.icns") if os.path.exists("electron.icns") else None if not icon: # Set this on all other platforms (and macOS built .app) as it can # only help and never harm, and is always available. icon = QIcon(":icons/electron-cash.svg") if icon: self.app.setWindowIcon(icon) @staticmethod def qt_version() -> tuple: ''' Returns a 3-tuple of the form (major, minor, revision) eg (5, 12, 4) for the current Qt version derived from the QT_VERSION global provided by Qt. ''' return ( (QT_VERSION >> 16) & 0xff, (QT_VERSION >> 8) & 0xff, QT_VERSION & 0xff ) def _load_fonts(self): ''' All apologies for the contorted nature of this platform code. Fonts on Windows & Linux are .. a sensitive situation. 
:) ''' # Only load the emoji font on Linux and Windows if sys.platform not in ('linux', 'win32', 'cygwin'): return # TODO: Check if we already have the needed emojis # TODO: Allow the user to download a full color emoji set linux_font_config_file = os.path.join(os.path.dirname(__file__), 'data', 'fonts.xml') emojis_ttf_name = 'ecsupplemental_lnx.ttf' emojis_ttf_path = os.path.join(os.path.dirname(__file__), 'data', emojis_ttf_name) did_set_custom_fontconfig = False if (sys.platform == 'linux' and self.linux_qt_use_custom_fontconfig # method-backed property, checks config settings and not os.environ.get('FONTCONFIG_FILE') and os.path.exists('/etc/fonts/fonts.conf') and os.path.exists(linux_font_config_file) and os.path.exists(emojis_ttf_path) and self.qt_version() >= (5, 12)): # doing this on Qt < 5.12 causes harm and makes the whole app render fonts badly # On Linux, we override some fontconfig rules by loading our own # font config XML file. This makes it so that our custom emojis and # other needed glyphs are guaranteed to get picked up first, # regardless of user font config. Without this some Linux systems # had black and white or missing emoji glyphs. We only do this if # the user doesn't have their own fontconfig file in env and # also as a sanity check, if they have the system # /etc/fonts/fonts.conf file in the right place. os.environ['FONTCONFIG_FILE'] = linux_font_config_file did_set_custom_fontconfig = True if sys.platform in ('win32', 'cygwin'): env_var = os.environ.get('QT_QPA_PLATFORM') if not env_var or 'windows:fontengine=freetype' not in env_var.lower(): # not set up to use freetype, so loading the .ttf would fail. # abort early. 
return del env_var # use a different .ttf file on Windows emojis_ttf_name = 'ecsupplemental_win.ttf' emojis_ttf_path = os.path.join(os.path.dirname(__file__), 'data', emojis_ttf_name) if QFontDatabase.addApplicationFont(emojis_ttf_path) < 0: self.print_error('Failed to add unicode emoji font to application fonts:', emojis_ttf_path) if did_set_custom_fontconfig: self.print_error('Deleting custom (fonts.xml) FONTCONFIG_FILE env. var') del os.environ['FONTCONFIG_FILE'] def _check_and_warn_qt_version(self): if sys.platform == 'linux' and self.qt_version() < (5, 12): msg = _("Electron Cash on Linux requires PyQt5 5.12+.\n\n" "You have version {version_string} installed.\n\n" "Please upgrade otherwise you may experience " "font rendering issues with emojis and other unicode " "characters used by Electron Cash.").format(version_string=QT_VERSION_STR) QMessageBox.warning(None, _("PyQt5 Upgrade Needed"), msg) # this works even if app is not exec_() yet. def eventFilter(self, obj, event): ''' This event filter allows us to open bitcoincash: URIs on macOS ''' if event.type() == QEvent.FileOpen: if len(self.windows) >= 1: self.windows[0].pay_to_URI(event.url().toString()) return True return False def build_tray_menu(self): ''' Rebuild the tray menu by tearing it down and building it new again ''' m_old = self.tray.contextMenu() if m_old is not None: # Tray does NOT take ownership of menu, so we are tasked with # deleting the old one. Note that we must delete the old one rather # than just clearing it because otherwise the old sub-menus stick # around in Qt. You can try calling qApp.topLevelWidgets() to # convince yourself of this. Doing it this way actually cleans-up # the menus and they do not leak. 
m_old.clear() m_old.deleteLater() # C++ object and its children will be deleted later when we return to the event loop m = QMenu() m.setObjectName("SysTray.QMenu") self.tray.setContextMenu(m) destroyed_print_error(m) for window in self.windows: submenu = m.addMenu(window.wallet.basename()) submenu.addAction(_("Show/Hide"), window.show_or_hide) submenu.addAction(_("Close"), window.close) m.addAction(_("Dark/Light"), self.toggle_tray_icon) m.addSeparator() m.addAction(_("&Check for updates..."), lambda: self.show_update_checker(None)) m.addSeparator() m.addAction(_("Exit Electron Cash"), self.close) self.tray.setContextMenu(m) def tray_icon(self): if self.dark_icon: return QIcon(':icons/electron_dark_icon.svg') else: return QIcon(':icons/electron_light_icon.svg') def toggle_tray_icon(self): self.dark_icon = not self.dark_icon self.config.set_key("dark_icon", self.dark_icon, True) self.tray.setIcon(self.tray_icon()) def tray_activated(self, reason): if reason == QSystemTrayIcon.DoubleClick: if all([w.is_hidden() for w in self.windows]): for w in self.windows: w.bring_to_top() else: for w in self.windows: w.hide() def close(self): for window in list(self.windows): window.close() def new_window(self, path, uri=None): # Use a signal as can be called from daemon thread self.new_window_signal.emit(path, uri) def show_network_dialog(self, parent, *, jumpto : str = ''): if self.warn_if_no_network(parent): return if self.nd: self.nd.on_update() run_hook("on_network_dialog", self.nd) self.nd.show() self.nd.raise_() if jumpto: self.nd.jumpto(jumpto) return self.nd = NetworkDialog(self.daemon.network, self.config) run_hook("on_network_dialog", self.nd) self.nd.show() if jumpto: self.nd.jumpto(jumpto) def create_window_for_wallet(self, wallet): w = ElectrumWindow(self, wallet) self.windows.append(w) finalization_print_error(w, "[{}] finalized".format(w.diagnostic_name())) self.build_tray_menu() run_hook('on_new_window', w) return w def get_wallet_folder(self): ''' may raise 
FileNotFoundError ''' return os.path.dirname(os.path.abspath(self.config.get_wallet_path())) def get_new_wallet_path(self): ''' may raise FileNotFoundError ''' wallet_folder = self.get_wallet_folder() filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) return full_path def on_focus_change(self, ignored, new_focus_widget): ''' Remember the last wallet window that was activated because start_new_window uses this information. We store the ElectrumWindow in a weak reference so that we don't interfere with its gc when it is closed.''' if not new_focus_widget: return if isinstance(new_focus_widget, QWidget): window = QWidget.window(new_focus_widget) # call base class because some widgets may actually override 'window' with Python attributes. if isinstance(window, ElectrumWindow): self._last_active_window = Weak.ref(window) def start_new_window(self, path, uri): '''Raises the window for the wallet if it is open. Otherwise opens the wallet and creates a new window for it. `path=None` is a special usage which will raise the last activated window or open the 'last wallet' if no windows are open.''' if not path: if not self.windows: # This branch is taken if nothing is currently open but # path == None, in which case set path=last wallet self.config.open_last_wallet() path = self.config.get_wallet_path() elif self._last_active_window: # This branch is taken if we have windows open and we have # _last_active_window defined, in which case we specify # that this window should be activated by setting path # so that the for loop below will trigger on this window. 
w = self._last_active_window() # weak ref -> strong ref if w and w in self.windows: # check ref still alive # this will cause the last active window to be used in the # for loop below path = w.wallet.storage.path # NB: path may still be None here if it came in as None from args and # if the above logic couldn't select a window to use -- in which case # we'll end up picking self.windows[0] path = path and standardize_path(path) # just make sure some plugin didn't give us a symlink for w in self.windows: if not path or w.wallet.storage.path == path: path = w.wallet.storage.path # remember path in case it was None w.bring_to_top() break else: try: if not self.windows: self.warn_if_no_secp(relaxed=True) try: wallet = self.daemon.load_wallet(path, None) except BaseException as e: self.print_error(repr(e)) if self.windows: # *Not* starting up. Propagate exception out to present # error message box to user. raise e # We're just starting up, so we are tolerant of bad wallets # and just want to proceed to the InstallWizard so the user # can either specify a different wallet or create a new one. # (See issue #1189 where before they would get stuck) path = self.get_new_wallet_path() # give up on this unknown wallet and try a new name.. note if things get really bad this will raise FileNotFoundError and the app aborts here. 
wallet = None # fall thru to wizard if not wallet: storage = WalletStorage(path, manual_upgrades=True) wizard = InstallWizard(self.config, self.app, self.plugins, storage) try: wallet, password = wizard.run_and_get_wallet() or (None, None) except UserCancelled: pass except GoBack as e: self.print_error('[start_new_window] Exception caught (GoBack)', e) finally: wizard.terminate() del wizard gc.collect() # wizard sticks around in memory sometimes, otherwise :/ if not wallet: return wallet.start_threads(self.daemon.network) self.daemon.add_wallet(wallet) self._cache_password(wallet, password) except BaseException as e: traceback.print_exc(file=sys.stdout) if '2fa' in str(e): self.warning(title=_('Error'), message = '2FA wallets for Bitcoin Cash are currently unsupported by <a href="https://api.trustedcoin.com/#/">TrustedCoin</a>. Follow <a href="https://github.com/Electron-Cash/Electron-Cash/issues/41#issuecomment-357468208">this guide</a> in order to recover your funds.') else: self.warning(title=_('Error'), message = 'Cannot load wallet:\n' + str(e), icon=QMessageBox.Critical) return w = self.create_window_for_wallet(wallet) if uri: w.pay_to_URI(uri) w.bring_to_top() w.setWindowState(w.windowState() & ~Qt.WindowMinimized | Qt.WindowActive) # this will activate the window w.activateWindow() return w def close_window(self, window): self.windows.remove(window) self.build_tray_menu() # save wallet path of last open window run_hook('on_close_window', window) # GC on ElectrumWindows takes forever to actually happen due to the # circular reference zoo they create around them (they end up stuck in # generation 2 for a long time before being collected). The below # schedules a more comprehensive GC to happen in the very near future. # This mechanism takes on the order of 40-100ms to execute (depending # on hardware) but frees megabytes of memory after closing a window # (which itslef is a relatively infrequent UI event, so it's # an acceptable tradeoff). 
self.gc_schedule() if not self.windows: self.config.save_last_wallet(window.wallet) # NB: We see if we should quit the app after the last wallet # window is closed, even if a network dialog or some other window is # open. It was bizarre behavior to keep the app open when # things like a transaction dialog or the network dialog were still # up. self._quit_after_last_window() # central point that checks if we should quit. #window.deleteLater() # <--- This has the potential to cause bugs (esp. with misbehaving plugins), so commented-out. The object gets deleted anyway when Python GC kicks in. Forcing a delete may risk python to have a dangling reference to a deleted C++ object. def gc_schedule(self): ''' Schedule garbage collection to happen in the near future. Note that rapid-fire calls to this re-start the timer each time, thus only the last call takes effect (it's rate-limited). ''' self.gc_timer.start() # start/re-start the timer to fire exactly once in timeInterval() msecs @staticmethod def gc(): ''' self.gc_timer timeout() slot ''' gc.collect() def init_network(self): # Show network dialog if config does not exist if self.daemon.network: if self.config.get('auto_connect') is None: wizard = InstallWizard(self.config, self.app, self.plugins, None) wizard.init_network(self.daemon.network) wizard.terminate() def on_new_version(self, newver): ''' Called by the auto update check mechanism to notify that a new version is available. We propagate the signal out using our own update_available_signal as well as post a message to the system tray. 
''' self.new_version_available = newver self.update_available_signal.emit(True) self.notify(_("A new version of Electron Cash is available: {}").format(newver)) def show_update_checker(self, parent, *, skip_check = False): if self.warn_if_no_network(parent): return self.update_checker.show() self.update_checker.raise_() if not skip_check: self.update_checker.do_check() def on_auto_update_timeout(self): if not self.daemon.network: # auto-update-checking never is done in offline mode self.print_error("Offline mode; update check skipped") elif not self.update_checker.did_check_recently(): # make sure auto-check doesn't happen right after a manual check. self.update_checker.do_check() if self.update_checker_timer.first_run: self._start_auto_update_timer(first_run = False) def _start_auto_update_timer(self, *, first_run = False): self.update_checker_timer.first_run = bool(first_run) if first_run: interval = 10.0*1e3 # do it very soon (in 10 seconds) else: interval = 4.0*3600.0*1e3 # once every 4 hours (in ms) self.update_checker_timer.start(interval) self.print_error("Auto update check: interval set to {} seconds".format(interval//1e3)) def _stop_auto_update_timer(self): self.update_checker_timer.stop() self.print_error("Auto update check: disabled") def warn_if_cant_import_qrreader(self, parent, show_warning=True): ''' Checks it QR reading from camera is possible. It can fail on a system lacking QtMultimedia. This can be removed in the future when we are unlikely to encounter Qt5 installations that are missing QtMultimedia ''' try: from .qrreader import QrReaderCameraDialog except ImportError as e: if show_warning: self.warning(parent=parent, title=_("QR Reader Error"), message=_("QR reader failed to load. 
This may " "happen if you are using an older version " "of PyQt5.<br><br>Detailed error: ") + str(e), rich_text=True) return True return False def warn_if_no_network(self, parent): if not self.daemon.network: self.warning(message=_('You are using Electron Cash in offline mode; restart Electron Cash if you want to get connected'), title=_('Offline'), parent=parent, rich_text=True) return True return False def warn_if_no_secp(self, parent=None, message=None, icon=QMessageBox.Warning, relaxed=False): ''' Returns True if it DID warn: ie if there's no secp and ecc operations are slow, otherwise returns False if we have secp. Pass message (rich text) to provide a custom message. Note that the URL link to the HOWTO will always be appended to the custom message.''' from electroncash import ecc_fast has_secp = ecc_fast.is_using_fast_ecc() if has_secp: return False # When relaxwarn is set return True without showing the warning from electroncash import get_config if relaxed and get_config().cmdline_options["relaxwarn"]: return True # else.. howto_url='https://github.com/Electron-Cash/Electron-Cash/blob/master/contrib/secp_HOWTO.md#libsecp256k1-0-for-electron-cash' template = ''' <html><body> <p> {message} <p> {url_blurb} </p> <p><a href="{url}">Electron Cash Secp Mini-HOWTO</a></p> </body></html> ''' msg = template.format( message = message or _("Electron Cash was unable to find the secp256k1 library on this system. 
Elliptic curve cryptography operations will be performed in slow Python-only mode."), url=howto_url, url_blurb = _("Please visit this page for instructions on how to correct the situation:") ) self.warning(parent=parent, title=_("Missing libsecp256k1"), message=msg, rich_text=True) return True def warning(self, title, message, icon = QMessageBox.Warning, parent = None, rich_text=False): if not isinstance(icon, QMessageBox.Icon): icon = QMessageBox.Warning if isinstance(parent, MessageBoxMixin): parent.msg_box(title=title, text=message, icon=icon, parent=None, rich_text=rich_text) else: parent = parent if isinstance(parent, QWidget) else None d = QMessageBoxMixin(icon, title, message, QMessageBox.Ok, parent) if not rich_text: d.setTextFormat(Qt.PlainText) d.setTextInteractionFlags(Qt.TextSelectableByMouse) else: d.setTextFormat(Qt.AutoText) d.setTextInteractionFlags(Qt.TextSelectableByMouse|Qt.LinksAccessibleByMouse) d.setWindowModality(Qt.WindowModal if parent else Qt.ApplicationModal) d.exec_() d.setParent(None) def lin_win_maybe_show_highdpi_caveat_msg(self, parent): ''' Called from main_window.py -- tells user once and only once about the high DPI mode and its caveats on Linux only. Is a no-op otherwise. ''' is_win = sys.platform[:3] in ('win', 'cyg') is_lin = sys.platform in ('linux',) if not is_win and not is_lin: return if (hasattr(Qt, "AA_EnableHighDpiScaling") and self.app.testAttribute(Qt.AA_EnableHighDpiScaling) # first run check: and self.config.get('qt_enable_highdpi', None) is None and (is_lin # we can't check pixel ratio on linux as apparently it's unreliable, so always show this message on linux # on some windows systems running in highdpi causes # glitches to the QMessageBox windows, so we need # to also warn Windows users that they can turn this off, # but only if they actually are using a high dpi display or (is_win and hasattr(QScreen, 'devicePixelRatio') and any(s.devicePixelRatio() > 1.0 # do they have any screens that are high dpi? 
for s in self.app.screens()) ))): # write to the config key to immediately suppress this warning in # the future -- it only appears on first-run if key was None self.config.set_key('qt_enable_highdpi', True) if is_lin: msg = (_("Automatic high DPI scaling has been enabled for Electron Cash, which should result in improved graphics quality.") + "\n\n" + _("However, on some esoteric Linux systems, this mode may cause disproportionately large status bar icons.") + "\n\n" + _("If that is the case for you, then you may disable automatic DPI scaling in the preferences, under 'General'.")) else: # is_win msg = (_("Automatic high DPI scaling has been enabled for Electron Cash, which should result in improved graphics quality.") + "\n\n" + _("However, on some Windows systems, bugs in Qt may result in minor graphics glitches in system 'message box' dialogs.") + "\n\n" + _("If that is the case for you, then you may disable automatic DPI scaling in the preferences, under 'General'.")) parent.show_message( title = _('Automatic High DPI'), msg = msg) def has_auto_update_check(self): return bool(self.config.get('auto_update_check', True)) def set_auto_update_check(self, b): was, b = self.has_auto_update_check(), bool(b) if was != b: self.config.set_key('auto_update_check', b, save=True) if b: self._start_auto_update_timer() else: self._stop_auto_update_timer() def _quit_after_last_window(self): if any(1 for w in self.windows if isinstance(w, ElectrumWindow) and not w.cleaned_up): # We can get here if we have some top-level ElectrumWindows that # are "minimized to tray" (hidden). "lastWindowClosed "is emitted # if there are no *visible* windows. If we actually have hidden # app windows (because the user hid them), then we want to *not* # quit the app. https://doc.qt.io/qt-5/qguiapplication.html#lastWindowClosed # This check and early return fixes issue #1727. return qApp.quit() def notify(self, message): ''' Display a message in the system tray popup notification. 
On macOS this is the GROWL thing. On Windows it's a balloon popup from the system tray. On Linux it's usually a banner in the top of the screen.''' if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electron Cash", message, QIcon(":icons/electron-cash.svg"), 20000) except TypeError: self.tray.showMessage("Electron Cash", message, QSystemTrayIcon.Information, 20000) def is_cashaddr(self): return bool(self.config.get('show_cashaddr', True)) def toggle_cashaddr(self, on = None): was = self.is_cashaddr() if on is None: on = not was else: on = bool(on) self.config.set_key('show_cashaddr', on) Address.show_cashaddr(on) if was != on: self.cashaddr_toggled_signal.emit() def is_cashaddr_status_button_hidden(self): return bool(self.config.get('hide_cashaddr_button', False)) def set_cashaddr_status_button_hidden(self, b): b = bool(b) was = self.is_cashaddr_status_button_hidden() if was != b: self.config.set_key('hide_cashaddr_button', bool(b)) self.cashaddr_status_button_hidden_signal.emit(b) @property def windows_qt_use_freetype(self): ''' Returns True iff we are windows and we are set to use freetype as the font engine. This will always return false on platforms where the question doesn't apply. This config setting defaults to True for Windows < Win10 and False otherwise. It is only relevant when using the Qt GUI, however. ''' if sys.platform not in ('win32', 'cygwin'): return False try: winver = float(platform.win32_ver()[0]) # '7', '8', '8.1', '10', etc except (AttributeError, ValueError, IndexError): # We can get here if cygwin, which has an empty win32_ver tuple # in some cases. # In that case "assume windows 10" and just proceed. Cygwin users # can always manually override this setting from GUI prefs. 
winver = 10 # setting defaults to on for Windows < Win10 return bool(self.config.get('windows_qt_use_freetype', winver < 10)) @windows_qt_use_freetype.setter def windows_qt_use_freetype(self, b): if self.config.is_modifiable('windows_qt_use_freetype') and sys.platform in ('win32', 'cygwin'): self.config.set_key('windows_qt_use_freetype', bool(b)) @property def linux_qt_use_custom_fontconfig(self): ''' Returns True iff we are Linux and we are set to use the fonts.xml fontconfig override, False otherwise. This config setting defaults to True for all Linux, but only is relevant to Qt GUI. ''' return bool(sys.platform in ('linux',) and self.config.get('linux_qt_use_custom_fontconfig', True)) @linux_qt_use_custom_fontconfig.setter def linux_qt_use_custom_fontconfig(self, b): if self.config.is_modifiable('linux_qt_use_custom_fontconfig') and sys.platform in ('linux',): self.config.set_key('linux_qt_use_custom_fontconfig', bool(b)) def main(self): try: self.init_network() except UserCancelled: return except GoBack: return except BaseException as e: traceback.print_exc(file=sys.stdout) return self.timer.start() self.config.open_last_wallet() path = self.config.get_wallet_path() if not self.start_new_window(path, self.config.get('url')): return signal.signal(signal.SIGINT, lambda signum, frame: self.shutdown_signal.emit()) self.app.setQuitOnLastWindowClosed(False) # we want to control this in our slot (since we support non-visible, backgrounded windows via the systray show/hide facility) self.app.lastWindowClosed.connect(self._quit_after_last_window) def clean_up(): # Just in case we get an exception as we exit, uninstall the Exception_Hook Exception_Hook.uninstall() # Shut down the timer cleanly self.timer.stop() self.gc_timer.stop() self._stop_auto_update_timer() # clipboard persistence. 
see http://www.mail-archive.com/<EMAIL>/msg17328.html event = QEvent(QEvent.Clipboard) self.app.sendEvent(self.app.clipboard(), event) self.tray.hide() self.app.aboutToQuit.connect(clean_up) Exception_Hook(self.config) # This wouldn't work anyway unless the app event loop is active, so we must install it once here and no earlier. # main loop self.app.exec_() # on some platforms the exec_ call may not return, so use clean_up()
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. 
import gc, os, platform, shutil, signal, sys, traceback try: import PyQt5 except Exception: if sys.platform.startswith('win'): msg = ("\n\nError: Could not import PyQt5.\n" "If you are running the release .exe, this is a bug (please" " contact the developers in that case).\n" "If you are running from source, then you may try this from the command-line:\n\n" " python -m pip install pyqt5\n\n") elif sys.platform.startswith('darw'): msg = ("\n\nError: Could not import PyQt5.\n" "If you are running the release .app, this is a bug (please" " contact the developers in that case).\n" "If you are running from source, then you may try this from the command-line:\n\n" " python3 -m pip install --user -I pyqt5\n\n") else: msg = ("\n\nError: Could not import PyQt5.\n" "You may try:\n\n" " python3 -m pip install --user -I pyqt5\n\n" "Or, if on Linux Ubuntu, Debian, etc:\n\n" " sudo apt-get install python3-pyqt5\n\n") sys.exit(msg) from PyQt5.QtGui import * from PyQt5.QtWidgets import * from PyQt5.QtCore import * from electroncash.i18n import _ from electroncash import i18n from electroncash.plugins import run_hook from electroncash import WalletStorage from electroncash.util import (UserCancelled, PrintError, print_error, standardize_path, finalization_print_error, Weak, get_new_wallet_name, Handlers) from electroncash import version from electroncash.address import Address from .installwizard import InstallWizard, GoBack from . import icons # This needs to be imported once app-wide then the :icons/ namespace becomes available for Qt icon filenames. from .util import * # * needed for plugins from .main_window import ElectrumWindow from .network_dialog import NetworkDialog from .exception_window import Exception_Hook from .update_checker import UpdateChecker class ElectrumGui(QObject, PrintError): new_window_signal = pyqtSignal(str, object) update_available_signal = pyqtSignal(bool) cashaddr_toggled_signal = pyqtSignal() # app-wide signal for when cashaddr format is toggled. 
This used to live in each ElectrumWindow instance but it was recently refactored to here. cashaddr_status_button_hidden_signal = pyqtSignal(bool) # app-wide signal for when cashaddr toggle button is hidden from the status bar shutdown_signal = pyqtSignal() # signal for requesting an app-wide full shutdown do_in_main_thread_signal = pyqtSignal(object, object, object) instance = None def __init__(self, config, daemon, plugins): super(__class__, self).__init__() # QObject init assert __class__.instance is None, "ElectrumGui is a singleton, yet an instance appears to already exist! FIXME!" __class__.instance = self i18n.set_language(config.get('language')) self.config = config self.daemon = daemon self.plugins = plugins self.windows = [] self._setup_do_in_main_thread_handler() # Uncomment this call to verify objects are being properly # GC-ed when windows are closed #if daemon.network: # from electroncash.util import DebugMem # from electroncash.wallet import Abstract_Wallet # from electroncash.verifier import SPV # from electroncash.synchronizer import Synchronizer # daemon.network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer, # ElectrumWindow], interval=5)]) call_after_app = self._pre_and_post_app_setup() try: self.app = QApplication(sys.argv) finally: call_after_app() self._load_fonts() # this needs to be done very early, before the font engine loads fonts.. out of paranoia self._exit_if_required_pyqt_is_missing() # This may immediately exit the app if missing required PyQt5 modules, so it should also be done early. 
self.new_version_available = None self._set_icon() self.app.installEventFilter(self) self.timer = QTimer(self); self.timer.setSingleShot(False); self.timer.setInterval(500) #msec self.gc_timer = QTimer(self); self.gc_timer.setSingleShot(True); self.gc_timer.timeout.connect(ElectrumGui.gc); self.gc_timer.setInterval(500) #msec self.nd = None self._last_active_window = None # we remember the last activated ElectrumWindow as a Weak.ref Address.show_cashaddr(self.is_cashaddr()) # Dark Theme -- ideally set this before any widgets are created. self.set_dark_theme_if_needed() # / # Wallet Password Cache # wallet -> (password, QTimer) map for some plugins (like CashShuffle) # that need wallet passwords to operate, and we don't want to prompt # for pw twice right after the InstallWizard runs (see #106). # Entries in this map are deleted after 10 seconds by the QTimer (which # also deletes itself) self._wallet_password_cache = Weak.KeyDictionary() # / self.update_checker = UpdateChecker() self.update_checker_timer = QTimer(self); self.update_checker_timer.timeout.connect(self.on_auto_update_timeout); self.update_checker_timer.setSingleShot(False) self.update_checker.got_new_version.connect(self.on_new_version) # init tray self.dark_icon = self.config.get("dark_icon", False) self.tray = QSystemTrayIcon(self.tray_icon(), self) self.tray.setToolTip('Electron Cash') self.tray.activated.connect(self.tray_activated) self.build_tray_menu() self.tray.show() self.new_window_signal.connect(self.start_new_window) if self.has_auto_update_check(): self._start_auto_update_timer(first_run = True) self.app.focusChanged.connect(self.on_focus_change) # track last window the user interacted with self.shutdown_signal.connect(self.close, Qt.QueuedConnection) run_hook('init_qt', self) # We did this once already in the set_dark_theme call, but we do this # again here just in case some plugin modified the color scheme. 
ColorScheme.update_from_widget(QWidget()) self._check_and_warn_qt_version() def __del__(self): stale = True if __class__.instance is self: stale = False __class__.instance = None print_error("[{}] finalized{}".format(__class__.__name__, ' (stale instance)' if stale else '')) if hasattr(super(), '__del__'): super().__del__() def _setup_do_in_main_thread_handler(self): ''' Sets up "do_in_main_thread" handler mechanism for Qt GUI. ''' self.do_in_main_thread_signal.connect(self._do_in_main_thread_handler_slot) orig_handler = Handlers.do_in_main_thread weakSelf = Weak.ref(self) def my_do_in_main_thread_handler(func, *args, **kwargs): strongSelf = weakSelf() if strongSelf: # We are still alive, emit the signal which will be handled # in the main thread. strongSelf.do_in_main_thread_signal.emit(func, args, kwargs) else: # We died. Uninstall this handler, invoke original handler. Handlers.do_in_main_thread = orig_handler orig_handler(func, *args, **kwargs) Handlers.do_in_main_thread = my_do_in_main_thread_handler def _do_in_main_thread_handler_slot(self, func, args, kwargs): ''' Hooked in to util.Handlers.do_in_main_thread via the do_in_main_thread_signal. This ensures that there is an app-wide mechanism for posting invocations to the main thread. Currently CashFusion uses this mechanism, but other code may as well. ''' func(*args, **kwargs) def _pre_and_post_app_setup(self): ''' Call this before instantiating the QApplication object. It sets up some platform-specific miscellany that need to happen before the QApplication is constructed. A function is returned. This function *must* be called after the QApplication is constructed. ''' callables = [] def call_callables(): for func in callables: func() ret = call_callables if hasattr(QGuiApplication, 'setDesktopFileName'): QGuiApplication.setDesktopFileName('electron-cash.desktop') if self.windows_qt_use_freetype: # Use FreeType for font rendering on Windows. 
This fixes rendering # of the Schnorr sigil and allows us to load the Noto Color Emoji # font if needed. os.environ['QT_QPA_PLATFORM'] = 'windows:fontengine=freetype' QCoreApplication.setAttribute(Qt.AA_X11InitThreads) if hasattr(Qt, "AA_ShareOpenGLContexts"): QCoreApplication.setAttribute(Qt.AA_ShareOpenGLContexts) if sys.platform not in ('darwin',) and hasattr(Qt, "AA_EnableHighDpiScaling"): # The below only applies to non-macOS. On macOS this setting is # never used (because it is implicitly auto-negotiated by the OS # in a differernt way). # # qt_disable_highdpi will be set to None by default, or True if # specified on command-line. The command-line override is intended # to supporess high-dpi mode just for this run for testing. # # The more permanent setting is qt_enable_highdpi which is the GUI # preferences option, so we don't enable highdpi if it's explicitly # set to False in the GUI. # # The default on Linux, Windows, etc is to enable high dpi disable_scaling = self.config.get('qt_disable_highdpi', False) enable_scaling = self.config.get('qt_enable_highdpi', True) if not disable_scaling and enable_scaling: QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling) if hasattr(Qt, "AA_UseHighDpiPixmaps"): QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps) # macOS Mojave "font rendering looks terrible on PyQt5.11" workaround. # See: https://old.reddit.com/r/apple/comments/9leavs/fix_mojave_font_rendering_issues_on_a_perapp_basis/ # This affects PyQt 5.11 (which is what we ship in the macOS El Capitan # .dmg). We apply the workaround and also warn the user to not use # the El Capitan compatibility .dmg. if sys.platform in ('darwin',) and self.qt_version() < (5, 12): # macOS hacks. On Mojave with PyQt <5.12 the font rendering is terrible. # As a workaround we need to temporarily set this 'defaults' keys # which we immediately disable after the QApplication is started. 
try: ver = tuple(int(a) for a in platform.mac_ver()[0].split('.')) except (TypeError, ValueError): self.print_error("WARNING: Cannot parse platform.mac_ver", f"'{platform.mac_ver()[0]}'") ver = None if ver and ver >= (10, 14): from electroncash.utils import macos self.print_error("Mojave+ with PyQt<5.12 detected; applying CGFontRenderingFontSmoothingDisabled workaround...") bundle = macos.get_bundle_identifier() os.system(f'defaults write {bundle} CGFontRenderingFontSmoothingDisabled -bool NO') def undo_hack(): os.system(f'defaults delete {bundle} CGFontRenderingFontSmoothingDisabled') self.print_error("Mojave+ font rendering workaround applied.") #msg = _("Mojave or newer system detected, however you are running the " # "El Capitan compatibility release of Electron Cash. " # "Font and graphics rendering may be affected." # "\n\nPlease obtain the latest non-compatibility version " # "of Electron Cash for MacOS.") #QMessageBox.warning(None, _("Warning"), msg) # this works even if app is not exec_() yet. callables.append(undo_hack) def setup_layout_direction(): """Sets the app layout direction depending on language. To be called after self.app is created successfully. Note this *MUST* be called after set_language has been called.""" assert i18n.set_language_called > 0 lc = i18n.language.info().get('language') lc = '' if not isinstance(lc, str) else lc lc = lc.split('_')[0] layout_direction = Qt.LeftToRight blurb = "left-to-right" if lc in {'ar', 'fa', 'he', 'ps', 'ug', 'ur'}: # Right-to-left languages layout_direction = Qt.RightToLeft blurb = "right-to-left" self.print_error("Setting layout direction:", blurb) self.app.setLayoutDirection(layout_direction) # callable will be called after self.app is set-up successfully callables.append(setup_layout_direction) return ret def _exit_if_required_pyqt_is_missing(self): ''' Will check if required PyQt5 modules are present and if not, display an error message box to the user and immediately quit the app. 
        This is because some Linux systems break up PyQt5 into multiple
        subpackages, and for instance PyQt5 QtSvg is its own package, and it
        may be missing. '''
        try:
            from PyQt5 import QtSvg
        except ImportError:
            # Closes #1436 -- Some "Run from source" Linux users lack QtSvg
            # (partial PyQt5 install)
            msg = _("A required Qt module, QtSvg was not found. Please fully install all of PyQt5 5.12 or above to resolve this issue.")
            if sys.platform == 'linux':
                msg += "\n\n" + _("On Linux, you may try:\n\n python3 -m pip install --user -I pyqt5")
                if shutil.which('apt'):
                    msg += "\n\n" + _("On Debian-based distros, you can run:\n\n sudo apt install python3-pyqt5.qtsvg")
            QMessageBox.critical(None, _("QtSvg Missing"), msg)  # this works even if app is not exec_() yet.
            self.app.exit(1)
            sys.exit(msg)

    def is_dark_theme_available(self):
        # Returns True iff the optional qdarkstyle package can be imported
        # (and we are not on macOS, where the OS-level dark mode is used).
        if sys.platform in ('darwin',):
            # On OSX, qdarkstyle is kind of broken. We instead rely on Mojave
            # dark mode if (built in to the OS) for this facility, which the
            # user can set outside of this application.
            return False
        try:
            import qdarkstyle
        except:
            return False
        return True

    def set_dark_theme_if_needed(self):
        # Applies the qdarkstyle stylesheet if the user's config asks for the
        # 'dark' color theme; always runs the style patcher and refreshes
        # ColorScheme afterwards so colors track whatever theme is in effect.
        if sys.platform in ('darwin',):
            # On OSX, qdarkstyle is kind of broken. We instead rely on Mojave
            # dark mode if (built in to the OS) for this facility, which the
            # user can set outside of this application.
            use_dark_theme = False
        else:
            use_dark_theme = self.config.get('qt_gui_color_theme', 'default') == 'dark'
        darkstyle_ver = None
        if use_dark_theme:
            try:
                import qdarkstyle
                self.app.setStyleSheet(qdarkstyle.load_stylesheet_pyqt5())
                try:
                    darkstyle_ver = version.normalize_version(qdarkstyle.__version__)
                except (ValueError, IndexError, TypeError, NameError, AttributeError) as e:
                    self.print_error("Warning: Could not determine qdarkstyle version:", repr(e))
            except BaseException as e:
                # Missing/broken qdarkstyle -- fall back to the default theme.
                use_dark_theme = False
                self.print_error('Error setting dark theme: {}'.format(repr(e)))
        # Apply any necessary stylesheet patches. For now this only does anything
        # if the version is < 2.6.8.
        # 2.6.8+ seems to have fixed all the issues (for now!)
        from . import style_patcher
        style_patcher.patch(dark=use_dark_theme, darkstyle_ver=darkstyle_ver)
        # Even if we ourselves don't set the dark theme,
        # the OS/window manager/etc might set *a dark theme*.
        # Hence, try to choose colors accordingly:
        ColorScheme.update_from_widget(QWidget(), force_dark=use_dark_theme)

    def get_cached_password(self, wallet):
        ''' Passwords in the cache only live for a very short while (10 seconds)
        after wallet window creation, and only if it's a new window. This
        mechanism is a convenience for plugins that need access to the
        wallet password and it would make for poor UX for the user to enter
        their password twice when opening a new window '''
        entry = self._wallet_password_cache.get(wallet)
        if entry:
            return entry[0]

    def _expire_cached_password(self, weakWallet):
        ''' Timer callback, called after 10 seconds. '''
        # Accepts either a Weak.ref to a wallet or a wallet instance directly.
        wallet = weakWallet() if isinstance(weakWallet, Weak.ref) else weakWallet
        if wallet:
            entry = self._wallet_password_cache.pop(wallet, None)
            if entry:
                timer = entry[1]
                timer.stop(); timer.deleteLater()

    def _cache_password(self, wallet, password):
        # Cache `password` for `wallet` for 10 seconds; a None password is not cached.
        self._expire_cached_password(wallet)  # cancel/replace any existing cache entry first
        if password is None:
            return
        timer = QTimer()  # NB a top-level parentless QObject will get deleted by Python when its Python refct goes to 0, which is what we want here. Future programmers: Do not give this timer a parent!
self._wallet_password_cache[wallet] = (password, timer) weakWallet = Weak.ref(wallet) weakSelf = Weak.ref(self) def timeout(): slf = weakSelf() slf and slf._expire_cached_password(weakWallet) timer.setSingleShot(True); timer.timeout.connect(timeout); timer.start(10000) # 10 sec def cache_password(self, wallet, password): self._cache_password(wallet, password) def _set_icon(self): icon = None if sys.platform == 'darwin': # on macOS, in "running from source" mode, we want to set the app # icon, otherwise we get the generic Python icon. # In non-running-from-source mode, macOS will get the icon from # the .app bundle Info.plist spec (which ends up being # electron.icns). However, in .app mode, Qt will not know about # this icon and won't be able to use it for e.g. the About dialog. # In the latter case the branch below will tell Qt to use # electron-cash.svg as the "window icon". icon = QIcon("electron.icns") if os.path.exists("electron.icns") else None if not icon: # Set this on all other platforms (and macOS built .app) as it can # only help and never harm, and is always available. icon = QIcon(":icons/electron-cash.svg") if icon: self.app.setWindowIcon(icon) @staticmethod def qt_version() -> tuple: ''' Returns a 3-tuple of the form (major, minor, revision) eg (5, 12, 4) for the current Qt version derived from the QT_VERSION global provided by Qt. ''' return ( (QT_VERSION >> 16) & 0xff, (QT_VERSION >> 8) & 0xff, QT_VERSION & 0xff ) def _load_fonts(self): ''' All apologies for the contorted nature of this platform code. Fonts on Windows & Linux are .. a sensitive situation. 
:) ''' # Only load the emoji font on Linux and Windows if sys.platform not in ('linux', 'win32', 'cygwin'): return # TODO: Check if we already have the needed emojis # TODO: Allow the user to download a full color emoji set linux_font_config_file = os.path.join(os.path.dirname(__file__), 'data', 'fonts.xml') emojis_ttf_name = 'ecsupplemental_lnx.ttf' emojis_ttf_path = os.path.join(os.path.dirname(__file__), 'data', emojis_ttf_name) did_set_custom_fontconfig = False if (sys.platform == 'linux' and self.linux_qt_use_custom_fontconfig # method-backed property, checks config settings and not os.environ.get('FONTCONFIG_FILE') and os.path.exists('/etc/fonts/fonts.conf') and os.path.exists(linux_font_config_file) and os.path.exists(emojis_ttf_path) and self.qt_version() >= (5, 12)): # doing this on Qt < 5.12 causes harm and makes the whole app render fonts badly # On Linux, we override some fontconfig rules by loading our own # font config XML file. This makes it so that our custom emojis and # other needed glyphs are guaranteed to get picked up first, # regardless of user font config. Without this some Linux systems # had black and white or missing emoji glyphs. We only do this if # the user doesn't have their own fontconfig file in env and # also as a sanity check, if they have the system # /etc/fonts/fonts.conf file in the right place. os.environ['FONTCONFIG_FILE'] = linux_font_config_file did_set_custom_fontconfig = True if sys.platform in ('win32', 'cygwin'): env_var = os.environ.get('QT_QPA_PLATFORM') if not env_var or 'windows:fontengine=freetype' not in env_var.lower(): # not set up to use freetype, so loading the .ttf would fail. # abort early. 
return del env_var # use a different .ttf file on Windows emojis_ttf_name = 'ecsupplemental_win.ttf' emojis_ttf_path = os.path.join(os.path.dirname(__file__), 'data', emojis_ttf_name) if QFontDatabase.addApplicationFont(emojis_ttf_path) < 0: self.print_error('Failed to add unicode emoji font to application fonts:', emojis_ttf_path) if did_set_custom_fontconfig: self.print_error('Deleting custom (fonts.xml) FONTCONFIG_FILE env. var') del os.environ['FONTCONFIG_FILE'] def _check_and_warn_qt_version(self): if sys.platform == 'linux' and self.qt_version() < (5, 12): msg = _("Electron Cash on Linux requires PyQt5 5.12+.\n\n" "You have version {version_string} installed.\n\n" "Please upgrade otherwise you may experience " "font rendering issues with emojis and other unicode " "characters used by Electron Cash.").format(version_string=QT_VERSION_STR) QMessageBox.warning(None, _("PyQt5 Upgrade Needed"), msg) # this works even if app is not exec_() yet. def eventFilter(self, obj, event): ''' This event filter allows us to open bitcoincash: URIs on macOS ''' if event.type() == QEvent.FileOpen: if len(self.windows) >= 1: self.windows[0].pay_to_URI(event.url().toString()) return True return False def build_tray_menu(self): ''' Rebuild the tray menu by tearing it down and building it new again ''' m_old = self.tray.contextMenu() if m_old is not None: # Tray does NOT take ownership of menu, so we are tasked with # deleting the old one. Note that we must delete the old one rather # than just clearing it because otherwise the old sub-menus stick # around in Qt. You can try calling qApp.topLevelWidgets() to # convince yourself of this. Doing it this way actually cleans-up # the menus and they do not leak. 
m_old.clear() m_old.deleteLater() # C++ object and its children will be deleted later when we return to the event loop m = QMenu() m.setObjectName("SysTray.QMenu") self.tray.setContextMenu(m) destroyed_print_error(m) for window in self.windows: submenu = m.addMenu(window.wallet.basename()) submenu.addAction(_("Show/Hide"), window.show_or_hide) submenu.addAction(_("Close"), window.close) m.addAction(_("Dark/Light"), self.toggle_tray_icon) m.addSeparator() m.addAction(_("&Check for updates..."), lambda: self.show_update_checker(None)) m.addSeparator() m.addAction(_("Exit Electron Cash"), self.close) self.tray.setContextMenu(m) def tray_icon(self): if self.dark_icon: return QIcon(':icons/electron_dark_icon.svg') else: return QIcon(':icons/electron_light_icon.svg') def toggle_tray_icon(self): self.dark_icon = not self.dark_icon self.config.set_key("dark_icon", self.dark_icon, True) self.tray.setIcon(self.tray_icon()) def tray_activated(self, reason): if reason == QSystemTrayIcon.DoubleClick: if all([w.is_hidden() for w in self.windows]): for w in self.windows: w.bring_to_top() else: for w in self.windows: w.hide() def close(self): for window in list(self.windows): window.close() def new_window(self, path, uri=None): # Use a signal as can be called from daemon thread self.new_window_signal.emit(path, uri) def show_network_dialog(self, parent, *, jumpto : str = ''): if self.warn_if_no_network(parent): return if self.nd: self.nd.on_update() run_hook("on_network_dialog", self.nd) self.nd.show() self.nd.raise_() if jumpto: self.nd.jumpto(jumpto) return self.nd = NetworkDialog(self.daemon.network, self.config) run_hook("on_network_dialog", self.nd) self.nd.show() if jumpto: self.nd.jumpto(jumpto) def create_window_for_wallet(self, wallet): w = ElectrumWindow(self, wallet) self.windows.append(w) finalization_print_error(w, "[{}] finalized".format(w.diagnostic_name())) self.build_tray_menu() run_hook('on_new_window', w) return w def get_wallet_folder(self): ''' may raise 
FileNotFoundError ''' return os.path.dirname(os.path.abspath(self.config.get_wallet_path())) def get_new_wallet_path(self): ''' may raise FileNotFoundError ''' wallet_folder = self.get_wallet_folder() filename = get_new_wallet_name(wallet_folder) full_path = os.path.join(wallet_folder, filename) return full_path def on_focus_change(self, ignored, new_focus_widget): ''' Remember the last wallet window that was activated because start_new_window uses this information. We store the ElectrumWindow in a weak reference so that we don't interfere with its gc when it is closed.''' if not new_focus_widget: return if isinstance(new_focus_widget, QWidget): window = QWidget.window(new_focus_widget) # call base class because some widgets may actually override 'window' with Python attributes. if isinstance(window, ElectrumWindow): self._last_active_window = Weak.ref(window) def start_new_window(self, path, uri): '''Raises the window for the wallet if it is open. Otherwise opens the wallet and creates a new window for it. `path=None` is a special usage which will raise the last activated window or open the 'last wallet' if no windows are open.''' if not path: if not self.windows: # This branch is taken if nothing is currently open but # path == None, in which case set path=last wallet self.config.open_last_wallet() path = self.config.get_wallet_path() elif self._last_active_window: # This branch is taken if we have windows open and we have # _last_active_window defined, in which case we specify # that this window should be activated by setting path # so that the for loop below will trigger on this window. 
w = self._last_active_window() # weak ref -> strong ref if w and w in self.windows: # check ref still alive # this will cause the last active window to be used in the # for loop below path = w.wallet.storage.path # NB: path may still be None here if it came in as None from args and # if the above logic couldn't select a window to use -- in which case # we'll end up picking self.windows[0] path = path and standardize_path(path) # just make sure some plugin didn't give us a symlink for w in self.windows: if not path or w.wallet.storage.path == path: path = w.wallet.storage.path # remember path in case it was None w.bring_to_top() break else: try: if not self.windows: self.warn_if_no_secp(relaxed=True) try: wallet = self.daemon.load_wallet(path, None) except BaseException as e: self.print_error(repr(e)) if self.windows: # *Not* starting up. Propagate exception out to present # error message box to user. raise e # We're just starting up, so we are tolerant of bad wallets # and just want to proceed to the InstallWizard so the user # can either specify a different wallet or create a new one. # (See issue #1189 where before they would get stuck) path = self.get_new_wallet_path() # give up on this unknown wallet and try a new name.. note if things get really bad this will raise FileNotFoundError and the app aborts here. 
wallet = None # fall thru to wizard if not wallet: storage = WalletStorage(path, manual_upgrades=True) wizard = InstallWizard(self.config, self.app, self.plugins, storage) try: wallet, password = wizard.run_and_get_wallet() or (None, None) except UserCancelled: pass except GoBack as e: self.print_error('[start_new_window] Exception caught (GoBack)', e) finally: wizard.terminate() del wizard gc.collect() # wizard sticks around in memory sometimes, otherwise :/ if not wallet: return wallet.start_threads(self.daemon.network) self.daemon.add_wallet(wallet) self._cache_password(wallet, password) except BaseException as e: traceback.print_exc(file=sys.stdout) if '2fa' in str(e): self.warning(title=_('Error'), message = '2FA wallets for Bitcoin Cash are currently unsupported by <a href="https://api.trustedcoin.com/#/">TrustedCoin</a>. Follow <a href="https://github.com/Electron-Cash/Electron-Cash/issues/41#issuecomment-357468208">this guide</a> in order to recover your funds.') else: self.warning(title=_('Error'), message = 'Cannot load wallet:\n' + str(e), icon=QMessageBox.Critical) return w = self.create_window_for_wallet(wallet) if uri: w.pay_to_URI(uri) w.bring_to_top() w.setWindowState(w.windowState() & ~Qt.WindowMinimized | Qt.WindowActive) # this will activate the window w.activateWindow() return w def close_window(self, window): self.windows.remove(window) self.build_tray_menu() # save wallet path of last open window run_hook('on_close_window', window) # GC on ElectrumWindows takes forever to actually happen due to the # circular reference zoo they create around them (they end up stuck in # generation 2 for a long time before being collected). The below # schedules a more comprehensive GC to happen in the very near future. # This mechanism takes on the order of 40-100ms to execute (depending # on hardware) but frees megabytes of memory after closing a window # (which itslef is a relatively infrequent UI event, so it's # an acceptable tradeoff). 
        self.gc_schedule()
        if not self.windows:
            # Remember which wallet was open last so it is reopened next launch.
            self.config.save_last_wallet(window.wallet)
            # NB: We see if we should quit the app after the last wallet
            # window is closed, even if a network dialog or some other window is
            # open.  It was bizarre behavior to keep the app open when
            # things like a transaction dialog or the network dialog were still
            # up.
            self._quit_after_last_window()  # central point that checks if we should quit.
        #window.deleteLater()  # <--- This has the potential to cause bugs (esp. with misbehaving plugins), so commented-out. The object gets deleted anyway when Python GC kicks in. Forcing a delete may risk python to have a dangling reference to a deleted C++ object.

    def gc_schedule(self):
        ''' Schedule garbage collection to happen in the near future. Note that
        rapid-fire calls to this re-start the timer each time, thus only the
        last call takes effect (it's rate-limited). '''
        self.gc_timer.start()  # start/re-start the timer to fire exactly once in timeInterval() msecs

    @staticmethod
    def gc():
        ''' self.gc_timer timeout() slot '''
        gc.collect()

    def init_network(self):
        # Show network dialog if config does not exist
        if self.daemon.network:
            if self.config.get('auto_connect') is None:
                # First run: no auto_connect preference saved yet, so run the
                # network portion of the install wizard.
                wizard = InstallWizard(self.config, self.app, self.plugins, None)
                wizard.init_network(self.daemon.network)
                wizard.terminate()

    def on_new_version(self, newver):
        ''' Called by the auto update check mechanism to notify that a new
        version is available.  We propagate the signal out using our own
        update_available_signal as well as post a message to the system tray.
''' self.new_version_available = newver self.update_available_signal.emit(True) self.notify(_("A new version of Electron Cash is available: {}").format(newver)) def show_update_checker(self, parent, *, skip_check = False): if self.warn_if_no_network(parent): return self.update_checker.show() self.update_checker.raise_() if not skip_check: self.update_checker.do_check() def on_auto_update_timeout(self): if not self.daemon.network: # auto-update-checking never is done in offline mode self.print_error("Offline mode; update check skipped") elif not self.update_checker.did_check_recently(): # make sure auto-check doesn't happen right after a manual check. self.update_checker.do_check() if self.update_checker_timer.first_run: self._start_auto_update_timer(first_run = False) def _start_auto_update_timer(self, *, first_run = False): self.update_checker_timer.first_run = bool(first_run) if first_run: interval = 10.0*1e3 # do it very soon (in 10 seconds) else: interval = 4.0*3600.0*1e3 # once every 4 hours (in ms) self.update_checker_timer.start(interval) self.print_error("Auto update check: interval set to {} seconds".format(interval//1e3)) def _stop_auto_update_timer(self): self.update_checker_timer.stop() self.print_error("Auto update check: disabled") def warn_if_cant_import_qrreader(self, parent, show_warning=True): ''' Checks it QR reading from camera is possible. It can fail on a system lacking QtMultimedia. This can be removed in the future when we are unlikely to encounter Qt5 installations that are missing QtMultimedia ''' try: from .qrreader import QrReaderCameraDialog except ImportError as e: if show_warning: self.warning(parent=parent, title=_("QR Reader Error"), message=_("QR reader failed to load. 
This may " "happen if you are using an older version " "of PyQt5.<br><br>Detailed error: ") + str(e), rich_text=True) return True return False def warn_if_no_network(self, parent): if not self.daemon.network: self.warning(message=_('You are using Electron Cash in offline mode; restart Electron Cash if you want to get connected'), title=_('Offline'), parent=parent, rich_text=True) return True return False def warn_if_no_secp(self, parent=None, message=None, icon=QMessageBox.Warning, relaxed=False): ''' Returns True if it DID warn: ie if there's no secp and ecc operations are slow, otherwise returns False if we have secp. Pass message (rich text) to provide a custom message. Note that the URL link to the HOWTO will always be appended to the custom message.''' from electroncash import ecc_fast has_secp = ecc_fast.is_using_fast_ecc() if has_secp: return False # When relaxwarn is set return True without showing the warning from electroncash import get_config if relaxed and get_config().cmdline_options["relaxwarn"]: return True # else.. howto_url='https://github.com/Electron-Cash/Electron-Cash/blob/master/contrib/secp_HOWTO.md#libsecp256k1-0-for-electron-cash' template = ''' <html><body> <p> {message} <p> {url_blurb} </p> <p><a href="{url}">Electron Cash Secp Mini-HOWTO</a></p> </body></html> ''' msg = template.format( message = message or _("Electron Cash was unable to find the secp256k1 library on this system. 
Elliptic curve cryptography operations will be performed in slow Python-only mode."), url=howto_url, url_blurb = _("Please visit this page for instructions on how to correct the situation:") ) self.warning(parent=parent, title=_("Missing libsecp256k1"), message=msg, rich_text=True) return True def warning(self, title, message, icon = QMessageBox.Warning, parent = None, rich_text=False): if not isinstance(icon, QMessageBox.Icon): icon = QMessageBox.Warning if isinstance(parent, MessageBoxMixin): parent.msg_box(title=title, text=message, icon=icon, parent=None, rich_text=rich_text) else: parent = parent if isinstance(parent, QWidget) else None d = QMessageBoxMixin(icon, title, message, QMessageBox.Ok, parent) if not rich_text: d.setTextFormat(Qt.PlainText) d.setTextInteractionFlags(Qt.TextSelectableByMouse) else: d.setTextFormat(Qt.AutoText) d.setTextInteractionFlags(Qt.TextSelectableByMouse|Qt.LinksAccessibleByMouse) d.setWindowModality(Qt.WindowModal if parent else Qt.ApplicationModal) d.exec_() d.setParent(None) def lin_win_maybe_show_highdpi_caveat_msg(self, parent): ''' Called from main_window.py -- tells user once and only once about the high DPI mode and its caveats on Linux only. Is a no-op otherwise. ''' is_win = sys.platform[:3] in ('win', 'cyg') is_lin = sys.platform in ('linux',) if not is_win and not is_lin: return if (hasattr(Qt, "AA_EnableHighDpiScaling") and self.app.testAttribute(Qt.AA_EnableHighDpiScaling) # first run check: and self.config.get('qt_enable_highdpi', None) is None and (is_lin # we can't check pixel ratio on linux as apparently it's unreliable, so always show this message on linux # on some windows systems running in highdpi causes # glitches to the QMessageBox windows, so we need # to also warn Windows users that they can turn this off, # but only if they actually are using a high dpi display or (is_win and hasattr(QScreen, 'devicePixelRatio') and any(s.devicePixelRatio() > 1.0 # do they have any screens that are high dpi? 
for s in self.app.screens()) ))): # write to the config key to immediately suppress this warning in # the future -- it only appears on first-run if key was None self.config.set_key('qt_enable_highdpi', True) if is_lin: msg = (_("Automatic high DPI scaling has been enabled for Electron Cash, which should result in improved graphics quality.") + "\n\n" + _("However, on some esoteric Linux systems, this mode may cause disproportionately large status bar icons.") + "\n\n" + _("If that is the case for you, then you may disable automatic DPI scaling in the preferences, under 'General'.")) else: # is_win msg = (_("Automatic high DPI scaling has been enabled for Electron Cash, which should result in improved graphics quality.") + "\n\n" + _("However, on some Windows systems, bugs in Qt may result in minor graphics glitches in system 'message box' dialogs.") + "\n\n" + _("If that is the case for you, then you may disable automatic DPI scaling in the preferences, under 'General'.")) parent.show_message( title = _('Automatic High DPI'), msg = msg) def has_auto_update_check(self): return bool(self.config.get('auto_update_check', True)) def set_auto_update_check(self, b): was, b = self.has_auto_update_check(), bool(b) if was != b: self.config.set_key('auto_update_check', b, save=True) if b: self._start_auto_update_timer() else: self._stop_auto_update_timer() def _quit_after_last_window(self): if any(1 for w in self.windows if isinstance(w, ElectrumWindow) and not w.cleaned_up): # We can get here if we have some top-level ElectrumWindows that # are "minimized to tray" (hidden). "lastWindowClosed "is emitted # if there are no *visible* windows. If we actually have hidden # app windows (because the user hid them), then we want to *not* # quit the app. https://doc.qt.io/qt-5/qguiapplication.html#lastWindowClosed # This check and early return fixes issue #1727. return qApp.quit() def notify(self, message): ''' Display a message in the system tray popup notification. 
On macOS this is the GROWL thing. On Windows it's a balloon popup from the system tray. On Linux it's usually a banner in the top of the screen.''' if self.tray: try: # this requires Qt 5.9 self.tray.showMessage("Electron Cash", message, QIcon(":icons/electron-cash.svg"), 20000) except TypeError: self.tray.showMessage("Electron Cash", message, QSystemTrayIcon.Information, 20000) def is_cashaddr(self): return bool(self.config.get('show_cashaddr', True)) def toggle_cashaddr(self, on = None): was = self.is_cashaddr() if on is None: on = not was else: on = bool(on) self.config.set_key('show_cashaddr', on) Address.show_cashaddr(on) if was != on: self.cashaddr_toggled_signal.emit() def is_cashaddr_status_button_hidden(self): return bool(self.config.get('hide_cashaddr_button', False)) def set_cashaddr_status_button_hidden(self, b): b = bool(b) was = self.is_cashaddr_status_button_hidden() if was != b: self.config.set_key('hide_cashaddr_button', bool(b)) self.cashaddr_status_button_hidden_signal.emit(b) @property def windows_qt_use_freetype(self): ''' Returns True iff we are windows and we are set to use freetype as the font engine. This will always return false on platforms where the question doesn't apply. This config setting defaults to True for Windows < Win10 and False otherwise. It is only relevant when using the Qt GUI, however. ''' if sys.platform not in ('win32', 'cygwin'): return False try: winver = float(platform.win32_ver()[0]) # '7', '8', '8.1', '10', etc except (AttributeError, ValueError, IndexError): # We can get here if cygwin, which has an empty win32_ver tuple # in some cases. # In that case "assume windows 10" and just proceed. Cygwin users # can always manually override this setting from GUI prefs. 
winver = 10 # setting defaults to on for Windows < Win10 return bool(self.config.get('windows_qt_use_freetype', winver < 10)) @windows_qt_use_freetype.setter def windows_qt_use_freetype(self, b): if self.config.is_modifiable('windows_qt_use_freetype') and sys.platform in ('win32', 'cygwin'): self.config.set_key('windows_qt_use_freetype', bool(b)) @property def linux_qt_use_custom_fontconfig(self): ''' Returns True iff we are Linux and we are set to use the fonts.xml fontconfig override, False otherwise. This config setting defaults to True for all Linux, but only is relevant to Qt GUI. ''' return bool(sys.platform in ('linux',) and self.config.get('linux_qt_use_custom_fontconfig', True)) @linux_qt_use_custom_fontconfig.setter def linux_qt_use_custom_fontconfig(self, b): if self.config.is_modifiable('linux_qt_use_custom_fontconfig') and sys.platform in ('linux',): self.config.set_key('linux_qt_use_custom_fontconfig', bool(b)) def main(self): try: self.init_network() except UserCancelled: return except GoBack: return except BaseException as e: traceback.print_exc(file=sys.stdout) return self.timer.start() self.config.open_last_wallet() path = self.config.get_wallet_path() if not self.start_new_window(path, self.config.get('url')): return signal.signal(signal.SIGINT, lambda signum, frame: self.shutdown_signal.emit()) self.app.setQuitOnLastWindowClosed(False) # we want to control this in our slot (since we support non-visible, backgrounded windows via the systray show/hide facility) self.app.lastWindowClosed.connect(self._quit_after_last_window) def clean_up(): # Just in case we get an exception as we exit, uninstall the Exception_Hook Exception_Hook.uninstall() # Shut down the timer cleanly self.timer.stop() self.gc_timer.stop() self._stop_auto_update_timer() # clipboard persistence. 
see http://www.mail-archive.com/<EMAIL>/msg17328.html event = QEvent(QEvent.Clipboard) self.app.sendEvent(self.app.clipboard(), event) self.tray.hide() self.app.aboutToQuit.connect(clean_up) Exception_Hook(self.config) # This wouldn't work anyway unless the app event loop is active, so we must install it once here and no earlier. # main loop self.app.exec_() # on some platforms the exec_ call may not return, so use clean_up()
en
0.877024
#!/usr/bin/env python3 # -*- coding: utf-8 -*- # # Electrum - lightweight Bitcoin client # Copyright (C) 2012 thomasv@gitorious # # Permission is hereby granted, free of charge, to any person # obtaining a copy of this software and associated documentation files # (the "Software"), to deal in the Software without restriction, # including without limitation the rights to use, copy, modify, merge, # publish, distribute, sublicense, and/or sell copies of the Software, # and to permit persons to whom the Software is furnished to do so, # subject to the following conditions: # # The above copyright notice and this permission notice shall be # included in all copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, # EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF # MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND # NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS # BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN # ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN # CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # This needs to be imported once app-wide then the :icons/ namespace becomes available for Qt icon filenames. # * needed for plugins # app-wide signal for when cashaddr format is toggled. This used to live in each ElectrumWindow instance but it was recently refactored to here. 
# app-wide signal for when cashaddr toggle button is hidden from the status bar # signal for requesting an app-wide full shutdown # QObject init # Uncomment this call to verify objects are being properly # GC-ed when windows are closed #if daemon.network: # from electroncash.util import DebugMem # from electroncash.wallet import Abstract_Wallet # from electroncash.verifier import SPV # from electroncash.synchronizer import Synchronizer # daemon.network.add_jobs([DebugMem([Abstract_Wallet, SPV, Synchronizer, # ElectrumWindow], interval=5)]) # this needs to be done very early, before the font engine loads fonts.. out of paranoia # This may immediately exit the app if missing required PyQt5 modules, so it should also be done early. #msec #msec # we remember the last activated ElectrumWindow as a Weak.ref # Dark Theme -- ideally set this before any widgets are created. # / # Wallet Password Cache # wallet -> (password, QTimer) map for some plugins (like CashShuffle) # that need wallet passwords to operate, and we don't want to prompt # for pw twice right after the InstallWizard runs (see #106). # Entries in this map are deleted after 10 seconds by the QTimer (which # also deletes itself) # / # init tray # track last window the user interacted with # We did this once already in the set_dark_theme call, but we do this # again here just in case some plugin modified the color scheme. Sets up "do_in_main_thread" handler mechanism for Qt GUI. # We are still alive, emit the signal which will be handled # in the main thread. # We died. Uninstall this handler, invoke original handler. Hooked in to util.Handlers.do_in_main_thread via the do_in_main_thread_signal. This ensures that there is an app-wide mechanism for posting invocations to the main thread. Currently CashFusion uses this mechanism, but other code may as well. Call this before instantiating the QApplication object. 
It sets up some platform-specific miscellany that need to happen before the QApplication is constructed. A function is returned. This function *must* be called after the QApplication is constructed. # Use FreeType for font rendering on Windows. This fixes rendering # of the Schnorr sigil and allows us to load the Noto Color Emoji # font if needed. # The below only applies to non-macOS. On macOS this setting is # never used (because it is implicitly auto-negotiated by the OS # in a different way). # # qt_disable_highdpi will be set to None by default, or True if # specified on command-line. The command-line override is intended # to suppress high-dpi mode just for this run for testing. # # The more permanent setting is qt_enable_highdpi which is the GUI # preferences option, so we don't enable highdpi if it's explicitly # set to False in the GUI. # # The default on Linux, Windows, etc is to enable high dpi # macOS Mojave "font rendering looks terrible on PyQt5.11" workaround. # See: https://old.reddit.com/r/apple/comments/9leavs/fix_mojave_font_rendering_issues_on_a_perapp_basis/ # This affects PyQt 5.11 (which is what we ship in the macOS El Capitan # .dmg). We apply the workaround and also warn the user to not use # the El Capitan compatibility .dmg. # macOS hacks. On Mojave with PyQt <5.12 the font rendering is terrible. # As a workaround we need to temporarily set these 'defaults' keys # which we immediately disable after the QApplication is started. #msg = _("Mojave or newer system detected, however you are running the " # "El Capitan compatibility release of Electron Cash. " # "Font and graphics rendering may be affected." # "\n\nPlease obtain the latest non-compatibility version " # "of Electron Cash for MacOS.") #QMessageBox.warning(None, _("Warning"), msg) # this works even if app is not exec_() yet. Sets the app layout direction depending on language. To be called after self.app is created successfully. 
Note this *MUST* be called after set_language has been called. # Right-to-left languages # callable will be called after self.app is set-up successfully Will check if required PyQt5 modules are present and if not, display an error message box to the user and immediately quit the app. This is because some Linux systems break up PyQt5 into multiple subpackages, and for instance PyQt5 QtSvg is its own package, and it may be missing. # Closes #1436 -- Some "Run from source" Linux users lack QtSvg # (partial PyQt5 install) # this works even if app is not exec_() yet. # On OSX, qdarkstyle is kind of broken. We instead rely on Mojave # dark mode if (built in to the OS) for this facility, which the # user can set outside of this application. # On OSX, qdarkstyle is kind of broken. We instead rely on Mojave # dark mode if (built in to the OS) for this facility, which the # user can set outside of this application. # Apply any necessary stylesheet patches. For now this only does anything # if the version is < 2.6.8. # 2.6.8+ seems to have fixed all the issues (for now!) # Even if we ourselves don't set the dark theme, # the OS/window manager/etc might set *a dark theme*. # Hence, try to choose colors accordingly: Passwords in the cache only live for a very short while (10 seconds) after wallet window creation, and only if it's a new window. This mechanism is a convenience for plugins that need access to the wallet password and it would make for poor UX for the user to enter their password twice when opening a new window Timer callback, called after 10 seconds. # NB a top-level parentless QObject will get delete by Python when its Python refct goes to 0, which is what we want here. Future programmers: Do not give this timer a parent! # 10 sec # on macOS, in "running from source" mode, we want to set the app # icon, otherwise we get the generic Python icon. 
# In non-running-from-source mode, macOS will get the icon from # the .app bundle Info.plist spec (which ends up being # electron.icns). However, in .app mode, Qt will not know about # this icon and won't be able to use it for e.g. the About dialog. # In the latter case the branch below will tell Qt to use # electron-cash.svg as the "window icon". # Set this on all other platforms (and macOS built .app) as it can # only help and never harm, and is always available. Returns a 3-tuple of the form (major, minor, revision) eg (5, 12, 4) for the current Qt version derived from the QT_VERSION global provided by Qt. All apologies for the contorted nature of this platform code. Fonts on Windows & Linux are .. a sensitive situation. :) # Only load the emoji font on Linux and Windows # TODO: Check if we already have the needed emojis # TODO: Allow the user to download a full color emoji set # method-backed property, checks config settings # doing this on Qt < 5.12 causes harm and makes the whole app render fonts badly # On Linux, we override some fontconfig rules by loading our own # font config XML file. This makes it so that our custom emojis and # other needed glyphs are guaranteed to get picked up first, # regardless of user font config. Without this some Linux systems # had black and white or missing emoji glyphs. We only do this if # the user doesn't have their own fontconfig file in env and # also as a sanity check, if they have the system # /etc/fonts/fonts.conf file in the right place. # not set up to use freetype, so loading the .ttf would fail. # abort early. # use a different .ttf file on Windows # this works even if app is not exec_() yet. This event filter allows us to open bitcoincash: URIs on macOS Rebuild the tray menu by tearing it down and building it new again # Tray does NOT take ownership of menu, so we are tasked with # deleting the old one. 
Note that we must delete the old one rather # than just clearing it because otherwise the old sub-menus stick # around in Qt. You can try calling qApp.topLevelWidgets() to # convince yourself of this. Doing it this way actually cleans-up # the menus and they do not leak. # C++ object and its children will be deleted later when we return to the event loop # Use a signal as can be called from daemon thread may raise FileNotFoundError may raise FileNotFoundError Remember the last wallet window that was activated because start_new_window uses this information. We store the ElectrumWindow in a weak reference so that we don't interfere with its gc when it is closed. # call base class because some widgets may actually override 'window' with Python attributes. Raises the window for the wallet if it is open. Otherwise opens the wallet and creates a new window for it. `path=None` is a special usage which will raise the last activated window or open the 'last wallet' if no windows are open. # This branch is taken if nothing is currently open but # path == None, in which case set path=last wallet # This branch is taken if we have windows open and we have # _last_active_window defined, in which case we specify # that this window should be activated by setting path # so that the for loop below will trigger on this window. # weak ref -> strong ref # check ref still alive # this will cause the last active window to be used in the # for loop below # NB: path may still be None here if it came in as None from args and # if the above logic couldn't select a window to use -- in which case # we'll end up picking self.windows[0] # just make sure some plugin didn't give us a symlink # remember path in case it was None # *Not* starting up. Propagate exception out to present # error message box to user. # We're just starting up, so we are tolerant of bad wallets # and just want to proceed to the InstallWizard so the user # can either specify a different wallet or create a new one. 
# (See issue #1189 where before they would get stuck) # give up on this unknown wallet and try a new name.. note if things get really bad this will raise FileNotFoundError and the app aborts here. # fall thru to wizard # wizard sticks around in memory sometimes, otherwise :/ #/">TrustedCoin</a>. Follow <a href="https://github.com/Electron-Cash/Electron-Cash/issues/41#issuecomment-357468208">this guide</a> in order to recover your funds.') # this will activate the window # save wallet path of last open window # GC on ElectrumWindows takes forever to actually happen due to the # circular reference zoo they create around them (they end up stuck in # generation 2 for a long time before being collected). The below # schedules a more comprehensive GC to happen in the very near future. # This mechanism takes on the order of 40-100ms to execute (depending # on hardware) but frees megabytes of memory after closing a window # (which itself is a relatively infrequent UI event, so it's # an acceptable tradeoff). # NB: We see if we should quit the app after the last wallet # window is closed, even if a network dialog or some other window is # open. It was bizarre behavior to keep the app open when # things like a transaction dialog or the network dialog were still # up. # central point that checks if we should quit. #window.deleteLater() # <--- This has the potential to cause bugs (esp. with misbehaving plugins), so commented-out. The object gets deleted anyway when Python GC kicks in. Forcing a delete may risk python to have a dangling reference to a deleted C++ object. Schedule garbage collection to happen in the near future. Note that rapid-fire calls to this re-start the timer each time, thus only the last call takes effect (it's rate-limited). # start/re-start the timer to fire exactly once in timeInterval() msecs self.gc_timer timeout() slot # Show network dialog if config does not exist Called by the auto update check mechanism to notify that a new version is available. 
We propagate the signal out using our own update_available_signal as well as post a message to the system tray. # auto-update-checking never is done in offline mode # make sure auto-check doesn't happen right after a manual check. # do it very soon (in 10 seconds) # once every 4 hours (in ms) Checks it QR reading from camera is possible. It can fail on a system lacking QtMultimedia. This can be removed in the future when we are unlikely to encounter Qt5 installations that are missing QtMultimedia Returns True if it DID warn: ie if there's no secp and ecc operations are slow, otherwise returns False if we have secp. Pass message (rich text) to provide a custom message. Note that the URL link to the HOWTO will always be appended to the custom message. # When relaxwarn is set return True without showing the warning # else.. #libsecp256k1-0-for-electron-cash' <html><body> <p> {message} <p> {url_blurb} </p> <p><a href="{url}">Electron Cash Secp Mini-HOWTO</a></p> </body></html> Called from main_window.py -- tells user once and only once about the high DPI mode and its caveats on Linux only. Is a no-op otherwise. # first run check: # we can't check pixel ratio on linux as apparently it's unreliable, so always show this message on linux # on some windows systems running in highdpi causes # glitches to the QMessageBox windows, so we need # to also warn Windows users that they can turn this off, # but only if they actually are using a high dpi display # do they have any screens that are high dpi? # write to the config key to immediately suppress this warning in # the future -- it only appears on first-run if key was None # is_win # We can get here if we have some top-level ElectrumWindows that # are "minimized to tray" (hidden). "lastWindowClosed "is emitted # if there are no *visible* windows. If we actually have hidden # app windows (because the user hid them), then we want to *not* # quit the app. 
https://doc.qt.io/qt-5/qguiapplication.html#lastWindowClosed # This check and early return fixes issue #1727. Display a message in the system tray popup notification. On macOS this is the GROWL thing. On Windows it's a balloon popup from the system tray. On Linux it's usually a banner in the top of the screen. # this requires Qt 5.9 Returns True iff we are windows and we are set to use freetype as the font engine. This will always return false on platforms where the question doesn't apply. This config setting defaults to True for Windows < Win10 and False otherwise. It is only relevant when using the Qt GUI, however. # '7', '8', '8.1', '10', etc # We can get here if cygwin, which has an empty win32_ver tuple # in some cases. # In that case "assume windows 10" and just proceed. Cygwin users # can always manually override this setting from GUI prefs. # setting defaults to on for Windows < Win10 Returns True iff we are Linux and we are set to use the fonts.xml fontconfig override, False otherwise. This config setting defaults to True for all Linux, but only is relevant to Qt GUI. # we want to control this in our slot (since we support non-visible, backgrounded windows via the systray show/hide facility) # Just in case we get an exception as we exit, uninstall the Exception_Hook # Shut down the timer cleanly # clipboard persistence. see http://www.mail-archive.com/<EMAIL>/msg17328.html # This wouldn't work anyway unless the app event loop is active, so we must install it once here and no earlier. # main loop # on some platforms the exec_ call may not return, so use clean_up()
2.080909
2
src/char/sorceress/blizz_sorc.py
thordin9/botty
0
6625346
import keyboard
from char.sorceress import Sorceress
from utils.custom_mouse import mouse
from logger import Logger
from utils.misc import wait, rotate_vec, unit_vector
import random
from pather import Location
import numpy as np


class BlizzSorc(Sorceress):
    """Blizzard Sorceress build: clears boss runs with Blizzard + Ice Blast.

    Each ``kill_*`` method assumes the character is already positioned at the
    start of the respective run and returns True once its attack sequence has
    finished (no kill verification is performed here).
    """

    def __init__(self, *args, **kwargs):
        Logger.info("Setting up Blizz Sorc")
        super().__init__(*args, **kwargs)

    def _ice_blast(self, cast_pos_abs: tuple[float, float], delay: tuple[float, float] = (0.16, 0.23), spray: float = 10):
        """Cast a burst of 5 Ice Blasts scattered around an abs position.

        :param cast_pos_abs: center of the casts in abs (screen-center) coords
        :param delay: (min, max) seconds the left button is held per cast
        :param spray: max random pixel offset applied to each cast
        """
        # Hold "stand still" for the whole burst so the character does not walk.
        keyboard.send(self._char_config["stand_still"], do_release=False)
        if self._skill_hotkeys["ice_blast"]:
            keyboard.send(self._skill_hotkeys["ice_blast"])
        for _ in range(5):
            x = cast_pos_abs[0] + (random.random() * 2 * spray - spray)
            y = cast_pos_abs[1] + (random.random() * 2 * spray - spray)
            cast_pos_monitor = self._screen.convert_abs_to_monitor((x, y))
            mouse.move(*cast_pos_monitor)
            mouse.press(button="left")
            wait(delay[0], delay[1])
            mouse.release(button="left")
        keyboard.send(self._char_config["stand_still"], do_press=False)

    def _blizzard(self, cast_pos_abs: tuple[float, float], spray: float = 10):
        """Cast Blizzard at a random offset around an abs position.

        :raises ValueError: if no hotkey is configured for blizzard
        """
        if not self._skill_hotkeys["blizzard"]:
            raise ValueError("You did not set a hotkey for blizzard!")
        keyboard.send(self._skill_hotkeys["blizzard"])
        x = cast_pos_abs[0] + (random.random() * 2 * spray - spray)
        y = cast_pos_abs[1] + (random.random() * 2 * spray - spray)
        cast_pos_monitor = self._screen.convert_abs_to_monitor((x, y))
        mouse.move(*cast_pos_monitor)
        # A few right-clicks to make sure the cast actually goes through.
        click_tries = random.randint(2, 4)
        for _ in range(click_tries):
            mouse.press(button="right")
            wait(0.09, 0.12)
            mouse.release(button="right")

    def kill_pindle(self) -> bool:
        """Attack sequence for Pindleskin, then move to the loot spot."""
        pindle_pos_abs = self._screen.convert_screen_to_abs(self._config.path["pindle_end"][0])
        cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9]
        for _ in range(int(self._char_config["atk_len_pindle"])):
            self._blizzard(cast_pos_abs, spray=11)
            self._ice_blast(cast_pos_abs, spray=11)
        # Move to items
        wait(self._cast_duration, self._cast_duration + 0.2)
        self._pather.traverse_nodes_fixed("pindle_end", self)
        return True

    def kill_eldritch(self) -> bool:
        """Attack sequence for Eldritch: teleport in steps, casting at each stop."""
        moves = [(0, -175), (0, 65), (0, 50)]
        for move in moves:
            pos_m = self._screen.convert_abs_to_monitor(move)
            self.pre_move()
            self.move(pos_m, force_move=True)
            self._cast_static()
            self._blizzard((10, -50), spray=40)
            # BUGFIX: was a bare attribute reference `self._cast_static`
            # (a no-op statement); the second Static Field was never cast.
            self._cast_static()
            wait(0.7)
        wait(1.5)
        self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, time_out=0.6, force_tp=True)
        self._blizzard((0, 0), spray=50)
        wait(1.5)
        return True

    def kill_shenk(self) -> bool:
        """Attack sequence for Shenk: tele-dance around the pack, then kill."""
        # Top left position
        pos_m = self._screen.convert_abs_to_monitor((100, 170))
        self.pre_move()
        self.move(pos_m, force_move=True)
        # Lower left position
        self._pather.traverse_nodes([151], self, time_out=2.5)
        self._cast_static()
        self._blizzard((-170, 70))
        self._ice_blast((60, 70), spray=30)
        # Teledance 1
        pos_m = self._screen.convert_abs_to_monitor((100, 50))
        self.pre_move()
        self.move(pos_m, force_move=True)
        # Teledance attack 1
        self._cast_static()
        self._blizzard((400, 100))
        self._cast_static()
        self._blizzard((0, -250))
        wait(0.3)
        # Teledance 2
        pos_m = self._screen.convert_abs_to_monitor((150, -240))
        self.pre_move()
        self.move(pos_m, force_move=True)
        # Teledance attack 2
        self._cast_static()
        self._blizzard((-200, 75))
        wait(0.3)
        # Shenk Kill
        self._pather.traverse_nodes([151], self, time_out=2.5)
        # Shenk attack 1
        self._cast_static()
        self._blizzard((10, -70))
        wait(0.3)
        # Shenk teledance 2
        pos_m = self._screen.convert_abs_to_monitor((90, -170))
        self.pre_move()
        self.move(pos_m, force_move=True)
        self._cast_static()
        # Move to items
        wait(self._cast_duration, self._cast_duration + 0.2)
        self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, time_out=1.4, force_tp=True)
        return True

    def kill_council(self) -> bool:
        """Attack sequence for the Travincal council members."""
        atk_len_trav = max(1, int(self._char_config["atk_len_trav"]) - 1)
        # Check out the node screenshot in assets/templates/trav/nodes to see
        # where each node is at.
        # Go inside and cast stuff in the general direction
        self._pather.traverse_nodes_fixed([(1262, 265)], self)
        self._pather.offset_node(229, [350, 100])
        self._pather.traverse_nodes([229], self, time_out=2.5, force_tp=True)
        self._pather.offset_node(229, [-350, -100])
        atk_pos_abs = self._pather.find_abs_node_pos(230, self._screen.grab())
        if atk_pos_abs is None:
            Logger.debug("Could not find node [230]. Using static attack coordinates instead.")
            atk_pos_abs = [-300, -200]
        else:
            atk_pos_abs = [atk_pos_abs[0], atk_pos_abs[1] + 70]
        cast_pos_abs = np.array([atk_pos_abs[0] * 0.9, atk_pos_abs[1] * 0.9])
        cast_pos_abs_bliz = np.array([atk_pos_abs[0] * 0.25, atk_pos_abs[1] * 0.25])
        for _ in range(atk_len_trav):
            self._blizzard(cast_pos_abs_bliz, spray=120)
            self._ice_blast(cast_pos_abs, spray=90)
            self._cast_static()
        # move a bit back
        pos_m = self._screen.convert_abs_to_monitor((110, 30))
        self.pre_move()
        self.move(pos_m, force_move=True)
        atk_pos_abs = self._pather.find_abs_node_pos(229, self._screen.grab())
        if atk_pos_abs is None:
            Logger.debug("Could not find node [229]. Using static attack coordinates instead.")
            atk_pos_abs = [-200, -80]
        # NOTE(review): the freshly found node-229 position above is only used
        # as a fallback value and the ice blast below still aims at the old
        # cast_pos_abs -- this mirrors the original behavior; confirm intent.
        self._blizzard((-70, -40), spray=50)
        self._ice_blast(cast_pos_abs, spray=60)
        # Move outside
        # Move a bit back and another round
        self._pather.traverse_nodes([226], self, time_out=2.5, force_tp=True)
        cast_pos_abs = np.array([-100, -50])
        for _ in range(atk_len_trav):
            self._blizzard(cast_pos_abs, spray=60)
            self._ice_blast(cast_pos_abs, spray=60)
        # move a bit back
        self.pre_move()
        self.move(pos_m, force_move=True)
        cast_pos_abs = np.array([-50, -100])
        for _ in range(atk_len_trav):
            self._blizzard(cast_pos_abs, spray=40)
            self._ice_blast(cast_pos_abs, spray=30)
        self._blizzard(cast_pos_abs, spray=40)
        return True

    def kill_nihlatak(self, end_nodes: list[int]) -> bool:
        """Attack sequence for Nihlathak, tele-dancing between casts.

        :param end_nodes: pather nodes leading to the loot spot; the last one
            is also used to locate Nihlathak on screen.
        """
        atk_sequences = max(2, int(self._char_config["atk_len_nihlatak"]) - 1)
        # BUGFIX: cast_pos_abs must exist before the loop; if the end node is
        # not found in the very first iteration the tele-dance below would
        # otherwise raise UnboundLocalError.
        cast_pos_abs = np.array([-100.0, -100.0])
        for i in range(atk_sequences):
            # Find nihlathak position
            nihlatak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], self._screen.grab())
            if nihlatak_pos_abs is not None:
                cast_pos_abs = np.array([nihlatak_pos_abs[0] * 0.9, nihlatak_pos_abs[1] * 0.9])
                self._blizzard(cast_pos_abs, spray=90)
                self._cast_static()
            # Do some tele "dancing" after each sequence
            if i < atk_sequences - 1:
                rot_deg = random.randint(-10, 10) if i % 2 == 0 else random.randint(170, 190)
                tele_pos_abs = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 100
                pos_m = self._screen.convert_abs_to_monitor(tele_pos_abs)
                self.pre_move()
                self.move(pos_m)
        # Move to items
        self._pather.traverse_nodes(end_nodes, self, time_out=0.8)
        self._blizzard((0, 0), spray=10)
        return True


if __name__ == "__main__":
    # Manual test harness: press f11 to run a council kill, f12 to force-exit.
    import os
    import keyboard
    from screen import Screen
    from template_finder import TemplateFinder
    from pather import Pather
    keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1))
    keyboard.wait("f11")
    from config import Config
    from ui import UiManager
    config = Config()
    screen = Screen(config.general["monitor"])
    t_finder = TemplateFinder(screen)
    pather = Pather(screen, t_finder)
    ui_manager = UiManager(screen, t_finder)
    char = BlizzSorc(config.blizz_sorc, config.char, screen, t_finder, ui_manager, pather)
    char.kill_council()
import keyboard from char.sorceress import Sorceress from utils.custom_mouse import mouse from logger import Logger from utils.misc import wait, rotate_vec, unit_vector import random from pather import Location import numpy as np class BlizzSorc(Sorceress): def __init__(self, *args, **kwargs): Logger.info("Setting up Blizz Sorc") super().__init__(*args, **kwargs) def _ice_blast(self, cast_pos_abs: tuple[float, float], delay: tuple[float, float] = (0.16, 0.23), spray: float = 10): keyboard.send(self._char_config["stand_still"], do_release=False) if self._skill_hotkeys["ice_blast"]: keyboard.send(self._skill_hotkeys["ice_blast"]) for _ in range(5): x = cast_pos_abs[0] + (random.random() * 2*spray - spray) y = cast_pos_abs[1] + (random.random() * 2*spray - spray) cast_pos_monitor = self._screen.convert_abs_to_monitor((x, y)) mouse.move(*cast_pos_monitor) mouse.press(button="left") wait(delay[0], delay[1]) mouse.release(button="left") keyboard.send(self._char_config["stand_still"], do_press=False) def _blizzard(self, cast_pos_abs: tuple[float, float], spray: float = 10): if not self._skill_hotkeys["blizzard"]: raise ValueError("You did not set a hotkey for blizzard!") keyboard.send(self._skill_hotkeys["blizzard"]) x = cast_pos_abs[0] + (random.random() * 2 * spray - spray) y = cast_pos_abs[1] + (random.random() * 2 * spray - spray) cast_pos_monitor = self._screen.convert_abs_to_monitor((x, y)) mouse.move(*cast_pos_monitor) click_tries = random.randint(2, 4) for _ in range(click_tries): mouse.press(button="right") wait(0.09, 0.12) mouse.release(button="right") def kill_pindle(self) -> bool: pindle_pos_abs = self._screen.convert_screen_to_abs(self._config.path["pindle_end"][0]) cast_pos_abs = [pindle_pos_abs[0] * 0.9, pindle_pos_abs[1] * 0.9] for _ in range(int(self._char_config["atk_len_pindle"])): self._blizzard(cast_pos_abs, spray=11) self._ice_blast(cast_pos_abs, spray=11) # Move to items wait(self._cast_duration, self._cast_duration + 0.2) 
self._pather.traverse_nodes_fixed("pindle_end", self) return True def kill_eldritch(self) -> bool: moves = [(0, -175), (0, 65), (0, 50)] for move in moves: pos_m = self._screen.convert_abs_to_monitor(move) self.pre_move() self.move(pos_m, force_move=True) self._cast_static() self._blizzard((10, -50), spray=40) self._cast_static wait(0.7) wait(1.5) self._pather.traverse_nodes((Location.A5_ELDRITCH_SAFE_DIST, Location.A5_ELDRITCH_END), self, time_out=0.6, force_tp=True) self._blizzard((0, 0), spray=50) wait(1.5) return True def kill_shenk(self) -> bool: # Top left position pos_m = self._screen.convert_abs_to_monitor((100, 170)) self.pre_move() self.move(pos_m, force_move=True) # Lower left posistion self._pather.traverse_nodes([151], self, time_out=2.5) self._cast_static() self._blizzard((-170, 70)) self._ice_blast((60, 70), spray=30) # Teledance 1 pos_m = self._screen.convert_abs_to_monitor((100, 50)) self.pre_move() self.move(pos_m, force_move=True) # Teledance attack 1 self._cast_static() self._blizzard((400, 100)) self._cast_static() self._blizzard((0, -250)) wait(0.3) # Teledance 2 pos_m = self._screen.convert_abs_to_monitor((150, -240)) self.pre_move() self.move(pos_m, force_move=True) # Teledance attack 2 self._cast_static() self._blizzard((-200, 75)) wait(0.3) # Shenk Kill self._pather.traverse_nodes([151], self, time_out=2.5) # Shenk attack 1 self._cast_static() self._blizzard((10, -70)) wait(0.3) # Shenk teledance 2 pos_m = self._screen.convert_abs_to_monitor((90, -170)) self.pre_move() self.move(pos_m, force_move=True) self._cast_static() # Move to items wait(self._cast_duration, self._cast_duration + 0.2) self._pather.traverse_nodes((Location.A5_SHENK_SAFE_DIST, Location.A5_SHENK_END), self, time_out=1.4, force_tp=True) return True def kill_council(self) -> bool: atk_len_trav = max(1, int(self._char_config["atk_len_trav"]) - 1) # Check out the node screenshot in assets/templates/trav/nodes to see where each node is at # Go inside cast stuff in general 
direction self._pather.traverse_nodes_fixed([(1262, 265)], self) self._pather.offset_node(229, [350, 100]) self._pather.traverse_nodes([229], self, time_out=2.5, force_tp=True) self._pather.offset_node(229, [-350, -100]) atk_pos_abs = self._pather.find_abs_node_pos(230, self._screen.grab()) if atk_pos_abs is None: Logger.debug("Could not find node [230]. Using static attack coordinates instead.") atk_pos_abs = [-300, -200] else: atk_pos_abs = [atk_pos_abs[0], atk_pos_abs[1] + 70] cast_pos_abs = np.array([atk_pos_abs[0] * 0.9, atk_pos_abs[1] * 0.9]) cast_pos_abs_bliz = np.array([atk_pos_abs[0] * 0.25, atk_pos_abs[1] * 0.25]) for _ in range(atk_len_trav): self._blizzard(cast_pos_abs_bliz, spray=120) self._ice_blast(cast_pos_abs, spray=90) self._cast_static() # move a bit back pos_m = self._screen.convert_abs_to_monitor((110, 30)) self.pre_move() self.move(pos_m, force_move=True) atk_pos_abs = self._pather.find_abs_node_pos(229, self._screen.grab()) if atk_pos_abs is None: Logger.debug("Could not find node [229]. 
Using static attack coordinates instead.") atk_pos_abs = [-200, -80] self._blizzard((-70, -40), spray=50) self._ice_blast(cast_pos_abs, spray=60) # Move outside # Move a bit back and another round self._pather.traverse_nodes([226], self, time_out=2.5, force_tp=True) cast_pos_abs = np.array([-100, -50]) for _ in range(atk_len_trav): self._blizzard(cast_pos_abs, spray=60) self._ice_blast(cast_pos_abs, spray=60) # move a bit back self.pre_move() self.move(pos_m, force_move=True) cast_pos_abs = np.array([-50, -100]) for _ in range(atk_len_trav): self._blizzard(cast_pos_abs, spray=40) self._ice_blast(cast_pos_abs, spray=30) self._blizzard(cast_pos_abs, spray=40) return True def kill_nihlatak(self, end_nodes: list[int]) -> bool: # Find nilhlatak position atk_sequences = max(2, int(self._char_config["atk_len_nihlatak"]) - 1) for i in range(atk_sequences): nihlatak_pos_abs = self._pather.find_abs_node_pos(end_nodes[-1], self._screen.grab()) if nihlatak_pos_abs is not None: cast_pos_abs = np.array([nihlatak_pos_abs[0] * 0.9, nihlatak_pos_abs[1] * 0.9]) self._blizzard(cast_pos_abs, spray=90) self._cast_static() # Do some tele "dancing" after each sequence if i < atk_sequences - 1: rot_deg = random.randint(-10, 10) if i % 2 == 0 else random.randint(170, 190) tele_pos_abs = unit_vector(rotate_vec(cast_pos_abs, rot_deg)) * 100 pos_m = self._screen.convert_abs_to_monitor(tele_pos_abs) self.pre_move() self.move(pos_m) # Move to items self._pather.traverse_nodes(end_nodes, self, time_out=0.8) self._blizzard((0, 0), spray=10) return True if __name__ == "__main__": import os import keyboard from screen import Screen from template_finder import TemplateFinder from pather import Pather keyboard.add_hotkey('f12', lambda: Logger.info('Force Exit (f12)') or os._exit(1)) keyboard.wait("f11") from config import Config from ui import UiManager config = Config() screen = Screen(config.general["monitor"]) t_finder = TemplateFinder(screen) pather = Pather(screen, t_finder) ui_manager = 
UiManager(screen, t_finder) char = BlizzSorc(config.blizz_sorc, config.char, screen, t_finder, ui_manager, pather) char.kill_council()
en
0.771726
# Move to items # Top left position # Lower left posistion # Teledance 1 # Teledance attack 1 # Teledance 2 # Teledance attack 2 # Shenk Kill # Shenk attack 1 # Shenk teledance 2 # Move to items # Check out the node screenshot in assets/templates/trav/nodes to see where each node is at # Go inside cast stuff in general direction # move a bit back # Move outside # Move a bit back and another round # move a bit back # Find nilhlatak position # Do some tele "dancing" after each sequence # Move to items
2.193133
2
tegrity/settings.py
kevmo314/tegrity
20
6625347
<filename>tegrity/settings.py import os import shutil # the CONFIG_PATH just points to the logs, cache, and saved configurations. # change this to 755 if you want your ~/.tegrity folder to be world readable import tegrity CONFIG_PATH_MODE = 0o700 # change this if conflict with an existing ~/.tegrity folder you want to keep DEFAULT_CONFIG_PATH = os.path.join(os.path.expanduser("~"), f".tegrity") # for download.py # todo: implement, since python bzip is really slow LBZIP2 = shutil.which('lbzip2') # for kernel.py DEFAULT_LOCALVERSION = '-tegrity' NANO_TX1_KERNEL_URL = "https://developer.nvidia.com/embedded/dlc/r32-3-1_Release_v1.0/Sources/T210/public_sources.tbz2" NANO_TX1_KERNEL_SHA512 = "f9729758ff44f9b18ec78a3e99634a8cac1ca165f40cda825bc18f6fdd0b088baac5a5c0868167a420993b3a7aed78bc9a43ecd7dc5bba2c75ca20c6635573a6" XAVIER_TX2_KERNEL_URL = "https://developer.nvidia.com/embedded/dlc/r32-3-1_Release_v1.0/Sources/T186/public_sources.tbz2" XAVIER_TX2_KERNEL_SHA512 = "78e6d3cc67dcbdf27cb21f4cbbabb7a5b89ca813f2aaeb60a06ed8f797e6ec46d06bb0e915bfc292302c721dbce9b27492dbf07ee4ae084ca748ecd65eaae994" # the path to the kernel_src.tbz2 inside public_sources.tbz2 KERNEL_TARBALL_PATH = ('Linux_for_Tegra', 'source', 'public', 'kernel_src.tbz2',) # the path to the kernel inside kernel_src.tbz2 KERNEL_PATH = ('kernel', 'kernel-4.9') # for rootfs.py # urls, shas, and supported model numbers for their rootfs L4T_ROOTFS_URL = "https://developer.nvidia.com/embedded/r32-2-3_Release_v1.0/t210ref_release_aarch64/Tegra_Linux_Sample-Root-Filesystem_R32.2.3_aarch64.tbz2" L4T_ROOTFS_SHA512 = "15075b90d2e6f981e40e7fdd5b02fc1e3bbf89876a6604e61b77771519bf3970308ee921bb39957158153ba8597a31b504f5d77c205c0a0c2d3b483aee9f0d4f" UBUNTU_BASE_URL = "http://cdimage.ubuntu.com/ubuntu-base/releases/18.04.3/release/ubuntu-base-18.04-base-arm64.tar.gz" UBUNTU_BASE_SHA_256 = "9193fd5f648e12c2102326ee6fdc69ac59c490fac3eb050758cee01927612021" NV_SOURCES_LIST = ('etc', 'apt', 'sources.list.d', 
"nvidia-l4t-apt-source.list") NV_SOURCES_LIST_TEMPLATE = """deb https://repo.download.nvidia.com/jetson/common r32 main deb https://repo.download.nvidia.com/jetson/{soc} r32 main""" # this is a mapping between board id and appropriate SOC to fill in the repo url BOARD_ID_TO_SOC = { tegrity.db.NANO_DEV_ID: 't210' }
<filename>tegrity/settings.py import os import shutil # the CONFIG_PATH just points to the logs, cache, and saved configurations. # change this to 755 if you want your ~/.tegrity folder to be world readable import tegrity CONFIG_PATH_MODE = 0o700 # change this if conflict with an existing ~/.tegrity folder you want to keep DEFAULT_CONFIG_PATH = os.path.join(os.path.expanduser("~"), f".tegrity") # for download.py # todo: implement, since python bzip is really slow LBZIP2 = shutil.which('lbzip2') # for kernel.py DEFAULT_LOCALVERSION = '-tegrity' NANO_TX1_KERNEL_URL = "https://developer.nvidia.com/embedded/dlc/r32-3-1_Release_v1.0/Sources/T210/public_sources.tbz2" NANO_TX1_KERNEL_SHA512 = "f9729758ff44f9b18ec78a3e99634a8cac1ca165f40cda825bc18f6fdd0b088baac5a5c0868167a420993b3a7aed78bc9a43ecd7dc5bba2c75ca20c6635573a6" XAVIER_TX2_KERNEL_URL = "https://developer.nvidia.com/embedded/dlc/r32-3-1_Release_v1.0/Sources/T186/public_sources.tbz2" XAVIER_TX2_KERNEL_SHA512 = "78e6d3cc67dcbdf27cb21f4cbbabb7a5b89ca813f2aaeb60a06ed8f797e6ec46d06bb0e915bfc292302c721dbce9b27492dbf07ee4ae084ca748ecd65eaae994" # the path to the kernel_src.tbz2 inside public_sources.tbz2 KERNEL_TARBALL_PATH = ('Linux_for_Tegra', 'source', 'public', 'kernel_src.tbz2',) # the path to the kernel inside kernel_src.tbz2 KERNEL_PATH = ('kernel', 'kernel-4.9') # for rootfs.py # urls, shas, and supported model numbers for their rootfs L4T_ROOTFS_URL = "https://developer.nvidia.com/embedded/r32-2-3_Release_v1.0/t210ref_release_aarch64/Tegra_Linux_Sample-Root-Filesystem_R32.2.3_aarch64.tbz2" L4T_ROOTFS_SHA512 = "15075b90d2e6f981e40e7fdd5b02fc1e3bbf89876a6604e61b77771519bf3970308ee921bb39957158153ba8597a31b504f5d77c205c0a0c2d3b483aee9f0d4f" UBUNTU_BASE_URL = "http://cdimage.ubuntu.com/ubuntu-base/releases/18.04.3/release/ubuntu-base-18.04-base-arm64.tar.gz" UBUNTU_BASE_SHA_256 = "9193fd5f648e12c2102326ee6fdc69ac59c490fac3eb050758cee01927612021" NV_SOURCES_LIST = ('etc', 'apt', 'sources.list.d', 
"nvidia-l4t-apt-source.list") NV_SOURCES_LIST_TEMPLATE = """deb https://repo.download.nvidia.com/jetson/common r32 main deb https://repo.download.nvidia.com/jetson/{soc} r32 main""" # this is a mapping between board id and appropriate SOC to fill in the repo url BOARD_ID_TO_SOC = { tegrity.db.NANO_DEV_ID: 't210' }
en
0.845396
# the CONFIG_PATH just points to the logs, cache, and saved configurations. # change this to 755 if you want your ~/.tegrity folder to be world readable # change this if conflict with an existing ~/.tegrity folder you want to keep # for download.py # todo: implement, since python bzip is really slow # for kernel.py # the path to the kernel_src.tbz2 inside public_sources.tbz2 # the path to the kernel inside kernel_src.tbz2 # for rootfs.py # urls, shas, and supported model numbers for their rootfs deb https://repo.download.nvidia.com/jetson/common r32 main deb https://repo.download.nvidia.com/jetson/{soc} r32 main # this is a mapping between board id and appropriate SOC to fill in the repo url
1.909679
2
util/bin-to-h.py
PokeyManatee4/splatood
98
6625348
<gh_stars>10-100 import sys use_rle = False if "--rle" in sys.argv: use_rle = True out = [] previous = "NO" count = 0 with open(sys.argv[1]) as f: x = f.read() for i in x: c = hex(ord(i)) if len(c) == len('0x0'): c = c[:2] + '0' + c[2] if use_rle: if c == previous: count += 1 else: if count: out.append( hex(count) ) out.append( previous ) count = 1 previous = c else: out.append( c ) if use_rle and count: out.append( hex(count) ) out.append( previous ) print "const unsigned char __rename_me__[",len(out),"] = {",",".join(out),"};"
import sys use_rle = False if "--rle" in sys.argv: use_rle = True out = [] previous = "NO" count = 0 with open(sys.argv[1]) as f: x = f.read() for i in x: c = hex(ord(i)) if len(c) == len('0x0'): c = c[:2] + '0' + c[2] if use_rle: if c == previous: count += 1 else: if count: out.append( hex(count) ) out.append( previous ) count = 1 previous = c else: out.append( c ) if use_rle and count: out.append( hex(count) ) out.append( previous ) print "const unsigned char __rename_me__[",len(out),"] = {",",".join(out),"};"
none
1
2.873965
3
control_gpio_pi/51low.py
Rahul14singh/remote_raspberrypigpio_control
10
6625349
import RPi.GPIO as ir print "PIN 3 Low" ir.setwarnings(False) ir.setmode(ir.BOARD) ir.setup(3,ir.OUT) ir.output(3,ir.LOW)
import RPi.GPIO as ir print "PIN 3 Low" ir.setwarnings(False) ir.setmode(ir.BOARD) ir.setup(3,ir.OUT) ir.output(3,ir.LOW)
none
1
2.878637
3
aprendizado/curso_em_video/desafios/desafio061.py
renatodev95/Python
0
6625350
<reponame>renatodev95/Python termo = int(input('Digite o primeiro termo: ')) razão = int(input('Razão da PA: ')) c = 1 while c <= 10: print('{} ➡'.format(termo), end=' ') termo += razão c += 1 print('FIM')
termo = int(input('Digite o primeiro termo: ')) razão = int(input('Razão da PA: ')) c = 1 while c <= 10: print('{} ➡'.format(termo), end=' ') termo += razão c += 1 print('FIM')
none
1
3.86653
4