index
int64 | repo_name
string | branch_name
string | path
string | content
string | import_graph
string |
|---|---|---|---|---|---|
14,431
|
Kyungpyo-Kang/EV
|
refs/heads/master
|
/Community/views.py
|
from django.shortcuts import render
def community(request):
    """Render the community landing page."""
    template_name = 'community.html'
    return render(request, template_name)
|
{"/EV/urls.py": ["/Community/views.py", "/Member/views.py", "/Recommand/views.py"], "/Member/views.py": ["/Member/models.py"]}
|
14,432
|
ashok-arjun/fsl_ssl
|
refs/heads/master
|
/methods/.ipynb_checkpoints/protonet-checkpoint.py
|
# This code is modified from https://github.com/jakesnell/prototypical-networks
import backbone
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from methods.meta_template import MetaTemplate
from model_resnet import *
from itertools import cycle
import wandb
class ProtoNet(MetaTemplate):
    # Prototypical network episodic classifier, optionally trained jointly with a
    # self-supervised auxiliary task (jigsaw permutation or rotation prediction).
    def __init__(self, model_func, n_way, n_support, jigsaw=False, lbda=0.0, rotation=False, tracking=False, use_bn=True, pretrain=False):
        """Build the ProtoNet model.

        Args:
            model_func: backbone constructor/identifier forwarded to MetaTemplate.
            n_way: number of classes per episode.
            n_support: number of support samples per class.
            jigsaw: if True, add heads for the 9-patch jigsaw task (35 permutations).
            lbda: auxiliary-loss weight; the proto loss is weighted by (1 - lbda).
            rotation: if True, add heads for the 4-way rotation task.
            tracking: forwarded to MetaTemplate (batch-norm stat tracking).
            use_bn: forwarded to MetaTemplate.
            pretrain: forwarded to MetaTemplate.
        """
        super(ProtoNet, self).__init__(model_func, n_way, n_support, use_bn, pretrain, tracking=tracking)
        self.loss_fn = nn.CrossEntropyLoss()
        self.jigsaw = jigsaw
        self.rotation = rotation
        self.lbda = lbda
        self.global_count = 0  # running batch counter, used as the wandb logging step
        if self.jigsaw:
            # fc6 embeds each patch feature, fc7 fuses the 9 concatenated patch
            # embeddings, classifier predicts one of 35 permutations.
            self.fc6 = nn.Sequential()
            self.fc6.add_module('fc6_s1',nn.Linear(512, 512))#for resnet
            self.fc6.add_module('relu6_s1',nn.ReLU(inplace=True))
            self.fc6.add_module('drop6_s1',nn.Dropout(p=0.5))
            self.fc7 = nn.Sequential()
            self.fc7.add_module('fc7',nn.Linear(9*512,4096))#for resnet
            self.fc7.add_module('relu7',nn.ReLU(inplace=True))
            self.fc7.add_module('drop7',nn.Dropout(p=0.5))
            self.classifier = nn.Sequential()
            self.classifier.add_module('fc8',nn.Linear(4096, 35))
        if self.rotation:
            # NOTE(review): if both jigsaw and rotation were enabled, these fc6/fc7
            # assignments would overwrite the jigsaw ones above — presumably the two
            # flags are mutually exclusive; confirm at call sites.
            self.fc6 = nn.Sequential()
            self.fc6.add_module('fc6_s1',nn.Linear(512, 512))#for resnet
            self.fc6.add_module('relu6_s1',nn.ReLU(inplace=True))
            self.fc6.add_module('drop6_s1',nn.Dropout(p=0.5))
            self.fc7 = nn.Sequential()
            self.fc7.add_module('fc7',nn.Linear(512,128))#for resnet
            self.fc7.add_module('relu7',nn.ReLU(inplace=True))
            self.fc7.add_module('drop7',nn.Dropout(p=0.5))
            self.classifier_rotation = nn.Sequential()
            self.classifier_rotation.add_module('fc8',nn.Linear(128, 4))
    def train_loop(self, epoch, train_loader, optimizer, writer, base_loader_u=None):
        """Run one training epoch.

        Args:
            epoch: epoch index, used to compute the global step.
            train_loader: episodic loader yielding labeled episodes.
            optimizer: optimizer stepped once per batch.
            writer: accepted but not used in this method (logging goes to wandb).
            base_loader_u: optional loader of unlabeled data for the auxiliary
                task; when given it is cycled alongside train_loader.
        """
        print_freq = 10
        avg_loss=0
        avg_loss_proto=0
        avg_loss_jigsaw=0
        avg_loss_rotation=0
        self.global_count = epoch * len(train_loader)
        if base_loader_u is not None:
            # Auxiliary task is computed on a separate unlabeled loader; cycle()
            # repeats it so it never runs out before train_loader does.
            for i,inputs in enumerate(zip(train_loader,cycle(base_loader_u))):
                self.global_count += 1
                x = inputs[0][0]
                self.n_query = x.size(1) - self.n_support
                if self.change_way:
                    self.n_way = x.size(0)
                optimizer.zero_grad()
                loss_proto, acc = self.set_forward_loss(x)
                if self.jigsaw:
                    loss_jigsaw, acc_jigsaw = self.set_forward_loss_unlabel(inputs[1][2], inputs[1][3])# torch.Size([5, 21, 9, 3, 75, 75]), torch.Size([5, 21])
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_jigsaw
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_jigsaw': float(loss_jigsaw.data.item())}, step=self.global_count)
                elif self.rotation:
                    loss_rotation, acc_rotation = self.set_forward_loss_unlabel(inputs[1][2], inputs[1][3])# torch.Size([5, 21, 9, 3, 75, 75]), torch.Size([5, 21])
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_rotation
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_rotation': float(loss_rotation.data.item())}, step=self.global_count)
                else:
                    loss = loss_proto
                loss.backward()
                optimizer.step()
                # NOTE(review): this branch accumulates loss.data while the
                # labeled-only branch below uses loss.item() — same value, but
                # inconsistent types (tensor vs float); consider unifying.
                avg_loss = avg_loss+loss.data
                wandb.log({'train/loss': float(loss.data.item())}, step=self.global_count)
                if self.jigsaw:
                    avg_loss_proto += loss_proto.data
                    avg_loss_jigsaw += loss_jigsaw.data
                    wandb.log({'train/acc_proto': acc}, step=self.global_count)
                    wandb.log({'train/acc_jigsaw': acc_jigsaw}, step=self.global_count)
                elif self.rotation:
                    avg_loss_proto += loss_proto.data
                    avg_loss_rotation += loss_rotation.data
                    wandb.log({'train/acc_proto': acc}, step=self.global_count)
                    wandb.log({'train/acc_rotation': acc_rotation}, step=self.global_count)
                if (i+1) % print_freq==0:
                    if self.jigsaw:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Proto {:f} | Loss Jigsaw {:f}'.\
                            format(epoch, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), avg_loss_jigsaw/float(i+1)))
                    elif self.rotation:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Proto {:f} | Loss Rotation {:f}'.\
                            format(epoch, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), avg_loss_rotation/float(i+1)))
                    else:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i+1, len(train_loader), avg_loss/float(i+1)))
        else:
            # Labeled-only path: auxiliary inputs (if any) come from the same loader.
            for i, inputs in enumerate(train_loader):
                self.global_count += 1
                x = inputs[0]
                self.n_query = x.size(1) - self.n_support
                if self.change_way:
                    self.n_way = x.size(0)
                optimizer.zero_grad()
                loss_proto, acc = self.set_forward_loss(x)
                if self.jigsaw:
                    loss_jigsaw, acc_jigsaw = self.set_forward_loss_unlabel(inputs[2], inputs[3])# torch.Size([5, 21, 9, 3, 75, 75]), torch.Size([5, 21])
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_jigsaw
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_jigsaw': float(loss_jigsaw.data.item())}, step=self.global_count)
                elif self.rotation:
                    loss_rotation, acc_rotation = self.set_forward_loss_unlabel(inputs[2], inputs[3])# torch.Size([5, 21, 9, 3, 75, 75]), torch.Size([5, 21])
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_rotation
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_rotation': float(loss_rotation.data.item())}, step=self.global_count)
                else:
                    loss = loss_proto
                loss.backward()
                optimizer.step()
                avg_loss = avg_loss+loss.item()
                wandb.log({'train/loss': float(loss.data.item())}, step=self.global_count)
                if self.jigsaw:
                    avg_loss_proto += loss_proto.data
                    avg_loss_jigsaw += loss_jigsaw.data
                    wandb.log({'train/acc_proto': acc}, step=self.global_count)
                    wandb.log({'train/acc_jigsaw': acc_jigsaw}, step=self.global_count)
                elif self.rotation:
                    avg_loss_proto += loss_proto.data
                    avg_loss_rotation += loss_rotation.data
                    wandb.log({'train/acc_proto': acc}, step=self.global_count)
                    wandb.log({'train/acc_rotation': acc_rotation}, step=self.global_count)
                if (i+1) % print_freq==0:
                    #print(optimizer.state_dict()['param_groups'][0]['lr'])
                    if self.jigsaw:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Proto {:f} | Loss Jigsaw {:f}'.\
                            format(epoch, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), avg_loss_jigsaw/float(i+1)))
                    elif self.rotation:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Proto {:f} | Loss Rotation {:f}'.\
                            format(epoch, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), avg_loss_rotation/float(i+1)))
                    else:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f}'.format(epoch, i+1, len(train_loader), avg_loss/float(i+1)))
    def test_loop(self, test_loader, record = None):
        """Evaluate episodic accuracy over test_loader.

        Prints mean accuracy with a 95% confidence interval and returns
        acc_mean (plus the auxiliary-task mean accuracy when jigsaw or
        rotation is enabled).  The `record` parameter is accepted but unused.
        """
        correct =0   # NOTE(review): never updated below — dead variable
        count = 0    # NOTE(review): never updated below — dead variable
        acc_all = []
        acc_all_jigsaw = []
        acc_all_rotation = []
        iter_num = len(test_loader)
        for i, inputs in enumerate(test_loader):
            x = inputs[0]
            self.n_query = x.size(1) - self.n_support
            if self.change_way:
                self.n_way = x.size(0)
            if self.jigsaw:
                correct_this, correct_this_jigsaw, count_this, count_this_jigsaw = self.correct(x, inputs[2], inputs[3])
            elif self.rotation:
                correct_this, correct_this_rotation, count_this, count_this_rotation = self.correct(x, inputs[2], inputs[3])
            else:
                correct_this, count_this = self.correct(x)
            acc_all.append(correct_this/ count_this*100)
            if self.jigsaw:
                acc_all_jigsaw.append(correct_this_jigsaw/ count_this_jigsaw*100)
            elif self.rotation:
                acc_all_rotation.append(correct_this_rotation/ count_this_rotation*100)
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        # 1.96 * std / sqrt(n): half-width of the 95% confidence interval.
        print('%d Test Protonet Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
        if self.jigsaw:
            acc_all_jigsaw = np.asarray(acc_all_jigsaw)
            acc_mean_jigsaw = np.mean(acc_all_jigsaw)
            acc_std_jigsaw = np.std(acc_all_jigsaw)
            print('%d Test Jigsaw Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean_jigsaw, 1.96* acc_std_jigsaw/np.sqrt(iter_num)))
            return acc_mean, acc_mean_jigsaw
        elif self.rotation:
            acc_all_rotation = np.asarray(acc_all_rotation)
            acc_mean_rotation = np.mean(acc_all_rotation)
            acc_std_rotation = np.std(acc_all_rotation)
            print('%d Test Rotation Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean_rotation, 1.96* acc_std_rotation/np.sqrt(iter_num)))
            return acc_mean, acc_mean_rotation
        else:
            return acc_mean
def correct(self, x, patches=None, patches_label=None):
scores = self.set_forward(x)
if self.jigsaw:
x_, y_ = self.set_forward_unlabel(patches=patches,patches_label=patches_label)
elif self.rotation:
x_, y_ = self.set_forward_unlabel(patches=patches,patches_label=patches_label)
y_query = np.repeat(range( self.n_way ), self.n_query )
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
top1_correct = np.sum(topk_ind[:,0] == y_query)
if self.jigsaw:
pred = torch.max(x_,1)
top1_correct_jigsaw = torch.sum(pred[1] == y_)
return float(top1_correct), float(top1_correct_jigsaw), len(y_query), len(y_)
elif self.rotation:
pred = torch.max(x_,1)
top1_correct_rotation = torch.sum(pred[1] == y_)
return float(top1_correct), float(top1_correct_rotation), len(y_query), len(y_)
else:
return float(top1_correct), len(y_query)
def set_forward(self,x,is_feature = False):
z_support, z_query = self.parse_feature(x,is_feature)
z_support = z_support.contiguous()
z_proto = z_support.view(self.n_way, self.n_support, -1 ).mean(1) #the shape of z is [n_data, n_dim]
z_query = z_query.contiguous().view(self.n_way* self.n_query, -1 )
dists = euclidean_dist(z_query, z_proto)
scores = -dists
return scores
def set_forward_unlabel(self, patches=None, patches_label=None):
if len(patches.size()) == 6:
Way,S,T,C,H,W = patches.size()#torch.Size([5, 15, 9, 3, 75, 75])
B = Way*S
elif len(patches.size()) == 5:
B,T,C,H,W = patches.size()#torch.Size([5, 15, 9, 3, 75, 75])
if self.jigsaw:
patches = patches.view(B*T,C,H,W).cuda()#torch.Size([675, 3, 64, 64])
if self.dual_cbam:
patch_feat = self.feature(patches, jigsaw=True)#torch.Size([675, 512])
else:
patch_feat = self.feature(patches)#torch.Size([675, 512])
x_ = patch_feat.view(B,T,-1)
x_ = x_.transpose(0,1)#torch.Size([9, 75, 512])
x_list = []
for i in range(9):
z = self.fc6(x_[i])#torch.Size([75, 512])
z = z.view([B,1,-1])#torch.Size([75, 1, 512])
x_list.append(z)
x_ = torch.cat(x_list,1)#torch.Size([75, 9, 512])
x_ = self.fc7(x_.view(B,-1))#torch.Size([75, 9*512])
x_ = self.classifier(x_)
y_ = patches_label.view(-1).cuda()
return x_, y_
elif self.rotation:
patches = patches.view(B*T,C,H,W).cuda()
x_ = self.feature(patches)#torch.Size([64, 512, 1, 1])
x_ = x_.squeeze()
x_ = self.fc6(x_)
x_ = self.fc7(x_)#64,128
x_ = self.classifier_rotation(x_)#64,4
pred = torch.max(x_,1)
y_ = patches_label.view(-1).cuda()
return x_, y_
def set_forward_loss(self, x):
y_query = torch.from_numpy(np.repeat(range( self.n_way ), self.n_query ))
scores = self.set_forward(x)
topk_scores, topk_labels = scores.data.topk(1, 1, True, True)
topk_ind = topk_labels.cpu().numpy()
acc = np.sum(topk_ind[:,0] == y_query.numpy())/len(y_query.numpy())
y_query = Variable(y_query.cuda())
return self.loss_fn(scores, y_query), acc
def set_forward_loss_unlabel(self, patches=None, patches_label=None):
if self.jigsaw:
x_, y_ = self.set_forward_unlabel(patches=patches,patches_label=patches_label)
pred = torch.max(x_,1)
acc_jigsaw = torch.sum(pred[1] == y_).cpu().numpy()*1.0/len(y_)
elif self.rotation:
x_, y_ = self.set_forward_unlabel(patches=patches,patches_label=patches_label)
pred = torch.max(x_,1)
acc_rotation = torch.sum(pred[1] == y_).cpu().numpy()*1.0/len(y_)
if self.jigsaw:
return self.loss_fn(x_,y_), acc_jigsaw
elif self.rotation:
return self.loss_fn(x_,y_), acc_rotation
    def parse_feature(self,x,is_feature):
        """Embed an episode and split it into support and query embeddings.

        Args:
            x: episode tensor of shape (n_way, n_support + n_query, ...) with
                raw images, or precomputed features when is_feature is True.
            is_feature: if True, x is already the embedded tensor z_all.

        Returns:
            (z_support, z_query) views of shape (n_way, n_support, -1) and
            (n_way, n_query, -1).
        """
        x = Variable(x.cuda())
        if is_feature:
            z_all = x
        else:
            # Flatten the episode into one batch before running the backbone.
            x = x.contiguous().view( self.n_way * (self.n_support + self.n_query), *x.size()[2:])
            z_all = self.feature(x)
            z_all = z_all.view( self.n_way, self.n_support + self.n_query, -1)
        z_support = z_all[:, :self.n_support]
        z_query = z_all[:, self.n_support:]
        return z_support, z_query
def euclidean_dist( x, y):
    """Pairwise squared euclidean distances.

    Args:
        x: tensor of shape (N, D).
        y: tensor of shape (M, D).

    Returns:
        (N, M) tensor where entry (i, j) is ||x[i] - y[j]||^2.
    """
    n, m = x.size(0), y.size(0)
    d = x.size(1)
    assert d == y.size(1)
    # Broadcast both inputs to (N, M, D), subtract, and reduce over D.
    diff = x.unsqueeze(1).expand(n, m, d) - y.unsqueeze(0).expand(n, m, d)
    return diff.pow(2).sum(2)
|
{"/.ipynb_checkpoints/train-checkpoint.py": ["/methods/baselinetrain.py"]}
|
14,433
|
ashok-arjun/fsl_ssl
|
refs/heads/master
|
/merge.py
|
import os
import shutil
# Flatten the miniImageNet/images directory: move every file out of its class
# subfolder directly into images/, then delete the now-empty subfolders.
subfolders = os.listdir("miniImageNet/images")
print(subfolders)
for folder in subfolders:
    files = os.listdir(os.path.join("miniImageNet/images", folder))
    for file in files:
        shutil.move(os.path.join("miniImageNet/images/"+folder, file), "miniImageNet/images/"+file)
    # os.rmdir only removes empty directories — safe here since all files were moved out.
    os.rmdir(os.path.join("miniImageNet/images", folder))
|
{"/.ipynb_checkpoints/train-checkpoint.py": ["/methods/baselinetrain.py"]}
|
14,434
|
ashok-arjun/fsl_ssl
|
refs/heads/master
|
/wandb_restore.py
|
# This script will ask for a wandb run ID and restore path
import wandb
RUN_ID = "27wluxlz"  # existing wandb run to resume
PATH = "ckpts/dogs/_resnet18_baseline_aug_tracking_lr0.0010/last_model.tar"  # file to pull from the run
# Resume the run, then download PATH from its cloud storage into the local run dir.
wandb.init(id=RUN_ID, project="fsl_ssl", resume=True)
wandb.restore(PATH)
|
{"/.ipynb_checkpoints/train-checkpoint.py": ["/methods/baselinetrain.py"]}
|
14,435
|
ashok-arjun/fsl_ssl
|
refs/heads/master
|
/.ipynb_checkpoints/io_utils-checkpoint.py
|
import numpy as np
import os
import glob
import argparse
import backbone
from model_resnet import *
# Maps CLI model names to backbone constructors. Note the resnet18/resnet50
# pytorch variants map to plain strings — resolved later by the consumer,
# not called directly like the backbone.* entries.
model_dict = dict(
    Conv4 = backbone.Conv4,
    Conv4S = backbone.Conv4S,
    Conv6 = backbone.Conv6,
    ResNet10 = backbone.ResNet10,
    ResNet18 = backbone.ResNet18,
    ResNet34 = backbone.ResNet34,
    ResNet50 = backbone.ResNet50,
    ResNet101 = backbone.ResNet101,
    resnet18 = 'resnet18',
    resnet18_pytorch = 'resnet18_pytorch',
    resnet50_pytorch = 'resnet50_pytorch'
)
def parse_args(script):
    """Build and parse command-line arguments for the given script.

    Args:
        script: name of the calling script; 'train' enables extra
            training-only options.

    Returns:
        argparse.Namespace with the parsed options.
    """
    parser = argparse.ArgumentParser(description= 'few-shot script %s' %(script))
    parser.add_argument('--dataset'     , default='miniImagenet',        help='CUB/cars/flowers/dogs/aircrafts/miniImagenet/tieredImagenet')
    parser.add_argument('--model'       , default='resnet18',      help='model: Conv{4|6} / ResNet{10|18|34|50|101}') # 50 and 101 are not used in the paper
    parser.add_argument('--method'      , default='protonet',   help='baseline/baseline++/protonet/matchingnet/relationnet{_softmax}/maml{_approx}') #relationnet_softmax replace L2 norm with softmax to expedite training, maml_approx use first-order approximation in the gradient for efficiency
    parser.add_argument('--train_n_way' , default=5, type=int,  help='class num to classify for training') #baseline and baseline++ would ignore this parameter
    parser.add_argument('--test_n_way'  , default=5, type=int,  help='class num to classify for testing (validation) ') #baseline and baseline++ only use this parameter in finetuning
    parser.add_argument('--n_shot'      , default=5, type=int,  help='number of labeled data in each class, same as n_support') #baseline and baseline++ only use this parameter in finetuning
    parser.add_argument('--train_aug'   , action='store_true',  help='perform data augmentation or not during training ') #still required for save_features.py and test.py to find the model path correctly
    parser.add_argument('--jigsaw'      , action='store_true',  help='multi-task training')
    parser.add_argument('--lbda'        , default=0.5, type=float,  help='lambda for the jigsaw loss, (1-lambda) for proto loss')
    parser.add_argument('--lr'          , default=0.001, type=float,help='learning rate')
    parser.add_argument('--optimization', default='Adam', type=str, help='Adam or SGD')
    parser.add_argument('--loadfile'    , default='', type=str, help='load pre-trained model')
    parser.add_argument('--finetune'    , action='store_true',  help='finetuning from jigsaw to protonet')
    parser.add_argument('--random'      , action='store_true',  help='random init net')
    parser.add_argument('--n_query'     , default=16, type=int, help='number of query, 16 is used in the paper')
    parser.add_argument('--image_size'  , default=224, type=int, help='224 is used in the paper')
    parser.add_argument('--date'        , default='', type=str, help='date of the exp')
    parser.add_argument('--rotation'    , action='store_true',  help='multi-task training')
    parser.add_argument('--tracking'    , action='store_true', default=True, help='tracking batchnorm stats')
    parser.add_argument('--split'       , default='novel', help='base/val/novel') #default novel, but you can also test base/val class accuracy if you want
    parser.add_argument('--save_iter'   , default=-1, type=int, help='saved feature from the model trained in x epoch, use the best model if x is -1')
    parser.add_argument('--adaptation'  , action='store_true', help='further adaptation in test time or not')
    parser.add_argument('--bs'          , default=16, type=int, help='batch size used for unlabeled dataset, also when method==baseline')
    parser.add_argument('--no_bn'       , action='store_true', help='not using batch norm if True')
    parser.add_argument('--pretrain'    , action='store_true', help='use imagenet pre-train model')
    parser.add_argument('--grey'        , action='store_true', help='use grey iamge')
    parser.add_argument('--test_bs'     , default=64, type=int, help='batch size for testing w/o batchnorm')
    parser.add_argument('--dataset_unlabel', default=None, help='CUB/cars/flowers/dogs/aircrafts/miniImagenet/tieredImagenet')
    parser.add_argument('--base'        , default='base', help='name of the json file of the base set')
    parser.add_argument('--base_unlabel'  , default='base', help='name of the json file of the base set for unlabeled dataset dataloader')
    # parser.add_argument("--device_ids", nargs="+", required=True, type=int) # [0] can be set as default
    if script == 'train':
        # Training-only options.
        # NOTE(review): the exact membership of this branch is reconstructed from
        # a source with flattened indentation — confirm against the repository.
        parser.add_argument('--num_classes' , default=200, type=int,help='total number of classes in softmax, only used in baseline') #make it larger than the maximum label value in base class
        parser.add_argument('--save_freq'   , default=10, type=int,help='Save frequency')
        parser.add_argument('--start_epoch' , default=0, type=int, help='Starting epoch')
        parser.add_argument('--stop_epoch'  , default=600, type=int,help='Stopping epoch') # for meta-learning methods, each epoch contains 100 episodes
        parser.add_argument('--resume'      , action='store_true', help='continue from previous trained model with largest epoch')
        parser.add_argument('--resume_wandb_id'      , default=None, help='wandb ID')
        parser.add_argument('--warmup'      , action='store_true', help='continue from baseline, neglected if resume is true') #never used in the paper
    parser.add_argument('--device'      , default="0", type=str, help='GPU id')
    parser.add_argument('--layer', default=-1, type=int)
    return parser.parse_args()
def get_assigned_file(checkpoint_dir,num):
    """Return the path of the checkpoint saved at epoch `num`."""
    return os.path.join(checkpoint_dir, '{:d}.tar'.format(num))
def get_resume_file(checkpoint_dir):
    """Return the epoch-numbered .tar checkpoint with the highest epoch.

    best_model.tar is excluded; returns None when the directory holds no
    .tar files at all.
    """
    candidates = glob.glob(os.path.join(checkpoint_dir, '*.tar'))
    if not candidates:
        return None
    epoch_files = [f for f in candidates if os.path.basename(f) != 'best_model.tar']
    # Filenames are '<epoch>.tar'; pick the numerically largest epoch.
    latest = max(int(os.path.splitext(os.path.basename(f))[0]) for f in epoch_files)
    return os.path.join(checkpoint_dir, '{:d}.tar'.format(latest))
def get_best_file(checkpoint_dir):
    """Return best_model.tar if present, else the latest resume checkpoint."""
    candidate = os.path.join(checkpoint_dir, 'best_model.tar')
    return candidate if os.path.isfile(candidate) else get_resume_file(checkpoint_dir)
|
{"/.ipynb_checkpoints/train-checkpoint.py": ["/methods/baselinetrain.py"]}
|
14,436
|
ashok-arjun/fsl_ssl
|
refs/heads/master
|
/data/dataset_unlabel.py
|
# This code is modified from https://github.com/facebookresearch/low-shot-shrink-hallucinate
import torch
from PIL import Image
import json
import numpy as np
import torchvision.transforms as transforms
import os
identity = lambda x:x  # default target_transform: pass labels through unchanged
import math
def get_patches(img, transform_jigsaw, transform_patch_jigsaw, permutations):
    """Cut an image into a 3x3 grid of patches and shuffle them by a random permutation.

    Args:
        img: PIL image.
        transform_jigsaw: transform applied to the whole image before tiling.
        transform_patch_jigsaw: transform applied to each individual tile.
        permutations: table of 9-element patch orderings.

    Returns:
        (data, order): stacked tensor of the 9 permuted patches and the index
        of the permutation used (the label for the jigsaw task).
    """
    # Random 30% chance of a greyscale-like conversion.
    if np.random.rand() < 0.30:
        img = img.convert('LA').convert('RGB')## this should be L instead....... need to change that!!
    img = transform_jigsaw(img)
    s = float(img.size[0]) / 3
    a = s / 2
    tiles = [None] * 9
    for n in range(9):
        i = int(n / 3)
        j = n % 3
        # Center of tile (i, j), then a crop box of side ~2a around it.
        c = [a * i * 2 + a, a * j * 2 + a]
        c = np.array([math.ceil(c[1] - a), math.ceil(c[0] - a), int(c[1] + a ), int(c[0] + a )]).astype(int)
        tile = img.crop(c.tolist())
        tile = transform_patch_jigsaw(tile)
        # Normalize the patches indipendently to avoid low level features shortcut
        m, s = tile.view(3, -1).mean(dim=1).numpy(), tile.view(3, -1).std(dim=1).numpy()
        s[s == 0] = 1
        norm = transforms.Normalize(mean=m.tolist(), std=s.tolist())
        tile = norm(tile)
        tiles[n] = tile
    order = np.random.randint(len(permutations))
    data = [tiles[permutations[order][t]] for t in range(9)]
    data = torch.stack(data, 0)
    return data, int(order)
def retrive_permutations(classes):
    """Load the `classes`-way jigsaw permutation table from permutations_<classes>.npy.

    Tables stored 1-based are shifted to 0-based indices.
    """
    perms = np.load('permutations_%d.npy' % (classes))
    if perms.min() == 1:
        perms = perms - 1
    return perms
class SimpleDataset:
    """Flat image dataset driven by a meta-json file.

    The json must contain 'image_names' (paths) and 'image_labels'.
    __getitem__ returns, depending on flags:
      - jigsaw:   (patches, permutation_order)
      - rotation: (stacked 4 rotated views, rotation labels 0..3)
      - default:  (transformed image, target label)
    """
    def __init__(self, data_file, transform, target_transform=identity, \
        jigsaw=False, transform_jigsaw=None, transform_patch_jigsaw=None, rotation=False, isAircraft=False, grey=False):
        with open(data_file, 'r') as f:
            self.meta = json.load(f)
        self.transform = transform
        self.target_transform = target_transform
        self.jigsaw = jigsaw
        self.transform_jigsaw = transform_jigsaw
        self.transform_patch_jigsaw = transform_patch_jigsaw
        # 35-permutation table for the jigsaw task.
        self.permutations = retrive_permutations(35)
        self.rotation = rotation
        self.isAircraft = isAircraft
        self.grey = grey
    def __getitem__(self,i):
        image_path = os.path.join(self.meta['image_names'][i])
        if self.grey:
            img = Image.open(image_path).convert('L').convert('RGB')
        else:
            img = Image.open(image_path).convert('RGB')
        if self.isAircraft:
            ## crop the banner
            img = img.crop((0,0,img.size[0],img.size[1]-20))
        if self.jigsaw:
            patches, order = get_patches(img, self.transform_jigsaw, self.transform_patch_jigsaw, self.permutations)
        if self.rotation:
            rotated_imgs = [
                self.transform(img),
                self.transform(img.rotate(90,expand=True)),
                self.transform(img.rotate(180,expand=True)),
                self.transform(img.rotate(270,expand=True))
            ]
            rotation_labels = torch.LongTensor([0, 1, 2, 3])
        img = self.transform(img)
        target = self.target_transform(self.meta['image_labels'][i])
        if self.jigsaw:
            return patches, order
        elif self.rotation:
            return torch.stack(rotated_imgs, dim=0), rotation_labels
        else:
            return img, target
    def __len__(self):
        return len(self.meta['image_names'])
class SetDataset:
    """Class-indexed dataset for episodic sampling.

    Groups images by label and builds one shuffled DataLoader per class;
    __getitem__(i) draws one batch (batch_size images) from class i's loader.
    """
    def __init__(self, data_file, batch_size, transform, jigsaw=False, \
        transform_jigsaw=None, transform_patch_jigsaw=None, rotation=False, isAircraft=False, grey=False):
        self.jigsaw = jigsaw
        self.transform_jigsaw = transform_jigsaw
        self.transform_patch_jigsaw = transform_patch_jigsaw
        self.rotation = rotation
        self.isAircraft = isAircraft
        self.grey = grey
        with open(data_file, 'r') as f:
            self.meta = json.load(f)
        self.cl_list = np.unique(self.meta['image_labels']).tolist()
        # Group image paths by class label.
        self.sub_meta = {}
        for cl in self.cl_list:
            self.sub_meta[cl] = []
        for x,y in zip(self.meta['image_names'],self.meta['image_labels']):
            self.sub_meta[y].append(x)
        self.sub_dataloader = []
        sub_data_loader_params = dict(batch_size = batch_size,
                                  shuffle = True,
                                  num_workers = 0, #use main thread only or may receive multiple batches
                                  pin_memory = False)
        for cl in self.cl_list:
            sub_dataset = SubDataset(self.sub_meta[cl], cl, transform = transform, jigsaw=self.jigsaw, \
                transform_jigsaw=self.transform_jigsaw, transform_patch_jigsaw=self.transform_patch_jigsaw, \
                rotation=self.rotation, isAircraft=self.isAircraft, grey=self.grey)
            self.sub_dataloader.append( torch.utils.data.DataLoader(sub_dataset, **sub_data_loader_params) )
    def __getitem__(self,i):
        # One fresh batch from class i's loader per access.
        return next(iter(self.sub_dataloader[i]))
    def __len__(self):
        return len(self.cl_list)
class SubDataset:
    """Images of a single class `cl`, listed by path in `sub_meta`.

    Unlike SimpleDataset, __getitem__ always includes (img, target) and
    appends the auxiliary-task payload when jigsaw or rotation is enabled.
    """
    def __init__(self, sub_meta, cl, transform=transforms.ToTensor(), target_transform=identity, \
        jigsaw=False, transform_jigsaw=None, transform_patch_jigsaw=None, rotation=False, isAircraft=False, grey=False):
        self.sub_meta = sub_meta
        self.cl = cl
        self.transform = transform
        self.target_transform = target_transform
        self.rotation = rotation
        self.isAircraft = isAircraft
        self.grey = grey
        self.jigsaw = jigsaw
        if jigsaw:
            self.permutations = retrive_permutations(35)
            self.transform_jigsaw = transform_jigsaw
            self.transform_patch_jigsaw = transform_patch_jigsaw
    def __getitem__(self,i):
        image_path = os.path.join(self.sub_meta[i])
        if self.grey:
            img = Image.open(image_path).convert('L').convert('RGB')
        else:
            img = Image.open(image_path).convert('RGB')
        if self.isAircraft:
            ## crop the banner
            img = img.crop((0,0,img.size[0],img.size[1]-20))
        if self.jigsaw:
            patches, order = get_patches(img, self.transform_jigsaw, self.transform_patch_jigsaw, self.permutations)
        if self.rotation:
            rotated_imgs = [
                self.transform(img),
                self.transform(img.rotate(90,expand=True)),
                self.transform(img.rotate(180,expand=True)),
                self.transform(img.rotate(270,expand=True))
            ]
            rotation_labels = torch.LongTensor([0, 1, 2, 3])
        img = self.transform(img)
        target = self.target_transform(self.cl)
        if self.jigsaw:
            return img, target, patches, order
        elif self.rotation:
            return img, target, torch.stack(rotated_imgs, dim=0), rotation_labels
        else:
            return img, target
    def __len__(self):
        return len(self.sub_meta)
class EpisodicBatchSampler(object):
    """Yield `n_episodes` batches, each a random pick of `n_way` distinct class indices."""
    def __init__(self, n_classes, n_way, n_episodes):
        self.n_classes = n_classes
        self.n_way = n_way
        self.n_episodes = n_episodes

    def __len__(self):
        return self.n_episodes

    def __iter__(self):
        for _ in range(self.n_episodes):
            # randperm guarantees the n_way classes within one episode are distinct
            yield torch.randperm(self.n_classes)[:self.n_way]
|
{"/.ipynb_checkpoints/train-checkpoint.py": ["/methods/baselinetrain.py"]}
|
14,437
|
ashok-arjun/fsl_ssl
|
refs/heads/master
|
/methods/baselinetrain.py
|
import backbone
import utils
import torch
import torch.nn as nn
from torch.autograd import Variable
import numpy as np
import torch.nn.functional as F
from model_resnet import *
from resnet_pytorch import *
import wandb
class BaselineTrain(nn.Module):
    # Standard-classification baseline trainer, optionally multi-tasked with a
    # jigsaw or rotation self-supervised head.
    def __init__(self, model_func, num_class, loss_type = 'softmax', jigsaw=False, lbda=0.0, rotation=False, tracking=True, pretrain=False):
        """Build the baseline classifier.

        Args:
            model_func: backbone constructor, or one of the string identifiers
                'resnet18' / 'resnet18_pytorch' / 'resnet50_pytorch'.
            num_class: number of output classes for the softmax/dist head.
            loss_type: 'softmax' (plain linear head) or 'dist' (baseline++).
            jigsaw: if True, add the jigsaw heads (35 permutations).
            lbda: auxiliary-loss weight; (1 - lbda) weights the supervised loss.
            rotation: if True, add the 4-way rotation heads.
            tracking: batch-norm stat tracking flag passed to the backbone.
            pretrain: use an ImageNet-pretrained backbone for the pytorch variants.
        """
        super(BaselineTrain, self).__init__()
        self.jigsaw = jigsaw
        self.lbda = lbda
        self.rotation = rotation
        self.tracking = tracking
        print('tracking in baseline train:',tracking)
        self.pretrain = pretrain
        print("USE pre-trained model:",pretrain)
        if isinstance(model_func,str):
            # String identifiers select a concrete resnet variant.
            if model_func == 'resnet18':
                self.feature = ResidualNet('ImageNet', 18, 1000, None, tracking=self.tracking)
                self.feature.final_feat_dim = 512
            elif model_func == 'resnet18_pytorch':
                self.feature = resnet18(pretrained=self.pretrain, tracking=self.tracking)
                self.feature.final_feat_dim = 512
            elif model_func == 'resnet50_pytorch':
                self.feature = resnet50(pretrained=self.pretrain, tracking=self.tracking)
                self.feature.final_feat_dim = 2048
        else:
            self.feature = model_func()
        if loss_type == 'softmax':
            self.classifier = nn.Linear(self.feature.final_feat_dim, num_class)
            self.classifier.bias.data.fill_(0)
        elif loss_type == 'dist': #Baseline ++
            self.classifier = backbone.distLinear(self.feature.final_feat_dim, num_class)
        self.loss_type = loss_type  #'softmax' #'dist'
        self.num_class = num_class
        self.loss_fn = nn.CrossEntropyLoss()
        self.global_count = 0  # running batch counter, used as the logging step
        if self.jigsaw:
            # fc6 embeds each patch, fc7 fuses the 9 patch embeddings,
            # classifier_jigsaw predicts one of 35 permutations.
            self.fc6 = nn.Sequential()
            self.fc6.add_module('fc6_s1',nn.Linear(512, 512))#for resnet
            self.fc6.add_module('relu6_s1',nn.ReLU(inplace=True))
            self.fc6.add_module('drop6_s1',nn.Dropout(p=0.5))
            self.fc7 = nn.Sequential()
            self.fc7.add_module('fc7',nn.Linear(9*512,4096))#for resnet
            self.fc7.add_module('relu7',nn.ReLU(inplace=True))
            self.fc7.add_module('drop7',nn.Dropout(p=0.5))
            self.classifier_jigsaw = nn.Sequential()
            self.classifier_jigsaw.add_module('fc8',nn.Linear(4096, 35))
        if self.rotation:
            # NOTE(review): if both flags were set, these fc6/fc7 assignments would
            # overwrite the jigsaw ones — presumably mutually exclusive; confirm.
            self.fc6 = nn.Sequential()
            self.fc6.add_module('fc6_s1',nn.Linear(512, 512))#for resnet
            self.fc6.add_module('relu6_s1',nn.ReLU(inplace=True))
            self.fc6.add_module('drop6_s1',nn.Dropout(p=0.5))
            self.fc7 = nn.Sequential()
            self.fc7.add_module('fc7',nn.Linear(512,128))#for resnet
            self.fc7.add_module('relu7',nn.ReLU(inplace=True))
            self.fc7.add_module('drop7',nn.Dropout(p=0.5))
            self.classifier_rotation = nn.Sequential()
            self.classifier_rotation.add_module('fc8',nn.Linear(128, 4))
def forward(self,x):
x = Variable(x.cuda())
out = self.feature(x)
scores = self.classifier(out.view(x.size(0), -1))
return scores
    def forward_loss(self, x=None, y=None, patches=None, patches_label=None, unlabel_only=False, label_only=False):
        """Compute supervised and/or self-supervised losses.

        Args:
            x, y: labeled images and targets (required unless unlabel_only).
            patches, patches_label: auxiliary-task inputs/labels.
            unlabel_only: return only (aux_loss, aux_acc).
            label_only: return only (supervised_loss, acc).

        Returns depend on flags; when both tasks are active the joint branch
        returns (supervised_loss, aux_loss, acc, aux_acc).

        NOTE(review): calling with unlabel_only=True while neither jigsaw nor
        rotation is enabled would hit the final else with `scores` undefined
        (NameError) — presumably never invoked that way; confirm at call sites.
        """
        # import ipdb; ipdb.set_trace()
        if not unlabel_only:
            scores = self.forward(x)
            y = Variable(y.cuda())
            pred = torch.argmax(scores, dim=1)
            if torch.cuda.is_available():
                acc = (pred == y).type(torch.cuda.FloatTensor).mean().item()
            else:
                acc = (pred == y).type(torch.FloatTensor).mean().item()
        if label_only:
            return self.loss_fn(scores, y), acc
        if self.jigsaw:
            B,T,C,H,W = patches.size()#torch.Size([16, 9, 3, 64, 64])
            patches = patches.view(B*T,C,H,W).cuda()#torch.Size([144, 3, 64, 64])
            patch_feat = self.feature(patches)#torch.Size([144, 512, 1, 1])
            x_ = patch_feat.view(B,T,-1)#torch.Size([16, 9, 512])
            x_ = x_.transpose(0,1)#torch.Size([9, 16, 512])
            # Embed each of the 9 patches independently, then fuse them.
            x_list = []
            for i in range(9):
                z = self.fc6(x_[i])#torch.Size([16, 512])
                z = z.view([B,1,-1])#torch.Size([16, 1, 512])
                x_list.append(z)
            x_ = torch.cat(x_list,1)#torch.Size([16, 9, 512])
            x_ = self.fc7(x_.view(B,-1))#torch.Size([16, 9*512])
            x_ = self.classifier_jigsaw(x_)
            y_ = patches_label.view(-1).cuda()
            pred = torch.max(x_,1)
            acc_jigsaw = torch.sum(pred[1] == y_).cpu().numpy()*1.0/len(y_)
            if unlabel_only:
                return self.loss_fn(x_,y_), acc_jigsaw
            else:
                return self.loss_fn(scores, y), self.loss_fn(x_,y_), acc, acc_jigsaw
        elif self.rotation:
            B,R,C,H,W = patches.size()#torch.Size([16, 4, 3, 224, 224])
            patches = patches.view(B*R,C,H,W).cuda()
            x_ = self.feature(patches)#torch.Size([64, 512, 1, 1])
            x_ = x_.squeeze()
            x_ = self.fc6(x_)
            x_ = self.fc7(x_)#64,128
            x_ = self.classifier_rotation(x_)#64,4
            pred = torch.max(x_,1)
            y_ = patches_label.view(-1).cuda()
            # NOTE(review): this variable holds ROTATION accuracy despite the
            # name acc_jigsaw (copied from the branch above) — value is correct,
            # name is misleading.
            acc_jigsaw = torch.sum(pred[1] == y_).cpu().numpy()*1.0/len(y_)
            if unlabel_only:
                return self.loss_fn(x_,y_), acc_jigsaw
            else:
                return self.loss_fn(scores, y), self.loss_fn(x_,y_), acc, acc_jigsaw
        else:
            return self.loss_fn(scores, y), acc
    def train_loop(self, epoch, train_loader, optimizer, writer, scheduler=None, base_loader_u=None):
        """Train for one epoch, logging to tensorboard and wandb.

        Args:
            epoch: current epoch index (used to derive the global step).
            train_loader: labeled loader yielding (x, y[, patches, patches_label]).
            optimizer: optimizer updating the embedding and auxiliary heads.
            writer: tensorboardX SummaryWriter (wandb is logged in parallel).
            scheduler: optional LR scheduler, stepped once per batch.
            base_loader_u: optional *unlabeled* loader; when given, the
                supervised loss comes from `train_loader` and the
                self-supervised loss from this loader (semi-supervised mode).
        """
        print_freq = min(50,len(train_loader))
        avg_loss=0
        avg_loss_proto=0
        avg_loss_jigsaw=0
        avg_loss_rotation=0
        avg_acc_proto=0
        avg_acc_jigsaw=0
        avg_acc_rotation=0
        # Global step is reset from the epoch so resumed runs keep a
        # consistent x-axis in tensorboard/wandb.
        self.global_count = epoch * len(train_loader)
        if base_loader_u is not None:
            # --- semi-supervised: zip labeled and unlabeled loaders ---
            for i,inputs in enumerate(zip(train_loader,base_loader_u)):
                self.global_count += 1
                x = inputs[0][0]
                y = inputs[0][1]
                optimizer.zero_grad()
                loss_proto, acc = self.forward_loss(x, y, label_only=True)
                if self.jigsaw:
                    loss_jigsaw, acc_jigsaw = self.forward_loss(patches=inputs[1][2], patches_label=inputs[1][3], unlabel_only=True)
                    # Convex combination of supervised and auxiliary losses.
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_jigsaw
                    writer.add_scalar('train/loss_proto', float(loss_proto.data.item()), self.global_count)
                    writer.add_scalar('train/loss_jigsaw', float(loss_jigsaw), self.global_count)
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_jigsaw': float(loss_jigsaw.data.item())}, step=self.global_count)
                elif self.rotation:
                    loss_rotation, acc_rotation = self.forward_loss(patches=inputs[1][2], patches_label=inputs[1][3], unlabel_only=True)
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_rotation
                    writer.add_scalar('train/loss_proto', float(loss_proto.data.item()), self.global_count)
                    writer.add_scalar('train/loss_rotation', float(loss_rotation), self.global_count)
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_rotation': float(loss_rotation.data.item())}, step=self.global_count)
                else:
                    loss, acc = self.forward_loss(x,y)
                    writer.add_scalar('train/loss', float(loss.data.item()), self.global_count)
                    wandb.log({'train/loss': float(loss.data.item())}, step=self.global_count)
                loss.backward()
                optimizer.step()
                if scheduler is not None:
                    # Per-batch LR schedule (not per-epoch).
                    scheduler.step()
                writer.add_scalar('train/lr', optimizer.param_groups[0]['lr'], self.global_count)
                wandb.log({'train/lr': optimizer.param_groups[0]['lr']}, step=self.global_count)
                avg_loss = avg_loss+loss.data#[0]
                avg_acc_proto = avg_acc_proto+acc
                writer.add_scalar('train/acc_cls', acc, self.global_count)
                wandb.log({'train/acc_cls': acc}, step=self.global_count)
                if self.jigsaw:
                    avg_loss_proto += loss_proto.data
                    avg_loss_jigsaw += loss_jigsaw
                    avg_acc_jigsaw = avg_acc_jigsaw+acc_jigsaw
                    writer.add_scalar('train/acc_jigsaw', acc_jigsaw, self.global_count)
                    wandb.log({'train/acc_jigsaw': acc_jigsaw}, step=self.global_count)
                elif self.rotation:
                    avg_loss_proto += loss_proto.data
                    avg_loss_rotation += loss_rotation
                    avg_acc_rotation = avg_acc_rotation+acc_rotation
                    writer.add_scalar('train/acc_rotation', acc_rotation, self.global_count)
                    wandb.log({'train/acc_rotation': acc_rotation}, step=self.global_count)
                if (i+1) % print_freq==0:
                    if self.jigsaw:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Cls {:f} | Loss Jigsaw {:f} | Acc Cls {:f} | Acc Jigsaw {:f}'.\
                            format(epoch+1, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), \
                            avg_loss_jigsaw/float(i+1), avg_acc_proto/float(i+1), avg_acc_jigsaw/float(i+1)))
                    elif self.rotation:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Cls {:f} | Loss Rotation {:f} | Acc Cls {:f} | Acc Rotation {:f}'.\
                            format(epoch+1, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), \
                            avg_loss_rotation/float(i+1), avg_acc_proto/float(i+1), avg_acc_rotation/float(i+1)))
                    else:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Acc Cls {:f}'.format(epoch+1, i+1, \
                            len(train_loader), avg_loss/float(i+1), avg_acc_proto/float(i+1) ))
        else:
            # --- fully supervised: auxiliary patches come from the same loader ---
            for i, inputs in enumerate(train_loader):
                self.global_count += 1
                x = inputs[0]
                y = inputs[1]
                optimizer.zero_grad()
                if self.jigsaw:
                    loss_proto, loss_jigsaw, acc, acc_jigsaw = self.forward_loss(x, y, inputs[2], inputs[3])
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_jigsaw
                    writer.add_scalar('train/loss_proto', float(loss_proto.data.item()), self.global_count)
                    writer.add_scalar('train/loss_jigsaw', float(loss_jigsaw), self.global_count)
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_jigsaw': float(loss_jigsaw.data.item())}, step=self.global_count)
                elif self.rotation:
                    loss_proto, loss_rotation, acc, acc_rotation = self.forward_loss(x, y, inputs[2], inputs[3])
                    loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_rotation
                    writer.add_scalar('train/loss_proto', float(loss_proto.data.item()), self.global_count)
                    writer.add_scalar('train/loss_rotation', float(loss_rotation), self.global_count)
                    wandb.log({'train/loss_proto': float(loss_proto.data.item())}, step=self.global_count)
                    wandb.log({'train/loss_rotation': float(loss_rotation.data.item())}, step=self.global_count)
                else:
                    loss, acc = self.forward_loss(x,y)
                    writer.add_scalar('train/loss', float(loss.data.item()), self.global_count)
                    wandb.log({'train/loss': float(loss.data.item())}, step=self.global_count)
                loss.backward()
                optimizer.step()
                if scheduler is not None:
                    scheduler.step()
                writer.add_scalar('train/lr', optimizer.param_groups[0]['lr'], self.global_count)
                wandb.log({'train/lr': optimizer.param_groups[0]['lr']}, step=self.global_count)
                avg_loss = avg_loss+loss.data
                avg_acc_proto = avg_acc_proto+acc
                writer.add_scalar('train/acc_cls', acc, self.global_count)
                wandb.log({'train/acc_cls': acc}, step=self.global_count)
                if self.jigsaw:
                    # NOTE(review): accumulating the raw loss tensor keeps its
                    # autograd graph alive until the epoch ends; `.data` (as
                    # used for avg_loss_proto) would avoid that.
                    avg_loss_proto += loss_proto.data
                    avg_loss_jigsaw += loss_jigsaw
                    avg_acc_jigsaw = avg_acc_jigsaw+acc_jigsaw
                    writer.add_scalar('train/acc_jigsaw', acc_jigsaw, self.global_count)
                    wandb.log({'train/acc_jigsaw': acc_jigsaw}, step=self.global_count)
                elif self.rotation:
                    avg_loss_proto += loss_proto.data
                    avg_loss_rotation += loss_rotation
                    avg_acc_rotation = avg_acc_rotation+acc_rotation
                    writer.add_scalar('train/acc_rotation', acc_rotation, self.global_count)
                    wandb.log({'train/acc_rotation': acc_rotation}, step=self.global_count)
                if (i+1) % print_freq==0:
                    if self.jigsaw:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Cls {:f} | Loss Jigsaw {:f} | Acc Cls {:f} | Acc Jigsaw {:f}'.\
                            format(epoch+1, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), \
                            avg_loss_jigsaw/float(i+1), avg_acc_proto/float(i+1), avg_acc_jigsaw/float(i+1)))
                    elif self.rotation:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Loss Cls {:f} | Loss Rotation {:f} | Acc Cls {:f} | Acc Rotation {:f}'.\
                            format(epoch+1, i+1, len(train_loader), avg_loss/float(i+1), avg_loss_proto/float(i+1), \
                            avg_loss_rotation/float(i+1), avg_acc_proto/float(i+1), avg_acc_rotation/float(i+1)))
                    else:
                        print('Epoch {:d} | Batch {:d}/{:d} | Loss {:f} | Acc Cls {:f}'.format(epoch+1, i+1, \
                            len(train_loader), avg_loss/float(i+1), avg_acc_proto/float(i+1) ))
def test_loop(self, val_loader=None):
if val_loader is not None:
num_correct = 0
num_total = 0
num_correct_jigsaw = 0
num_total_jigsaw = 0
for i, inputs in enumerate(val_loader):
x = inputs[0]
y = inputs[1]
if self.jigsaw:
loss_proto, loss_jigsaw, acc, acc_jigsaw = self.forward_loss(x, y, inputs[2], inputs[3])
loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_jigsaw
num_correct_jigsaw = int(acc_jigsaw*len(inputs[3]))
num_total_jigsaw += len(inputs[3].view(-1))
elif self.rotation:
loss_proto, loss_rotation, acc, acc_rotation = self.forward_loss(x, y, inputs[2], inputs[3])
loss = (1.0-self.lbda) * loss_proto + self.lbda * loss_rotation
num_correct_jigsaw = int(acc_jigsaw*len(inputs[3]))
num_total_jigsaw += len(inputs[3].view(-1))
else:
loss, acc = self.forward_loss(x,y)
num_correct += int(acc*x.shape[0])
num_total += len(y)
if self.jigsaw or self.rotation:
return num_correct*100.0/num_total, num_correct_jigsaw*100.0/num_total_jigsaw
else:
print("Validation loader inside BaselineTrain: ", num_correct*100.0/num_total)
return num_correct*100.0/num_total
else:
if self.jigsaw:
return -1, -1
elif self.rotation:
return -1, -1
else:
return -1 #no validation, just save model during iteration
|
{"/.ipynb_checkpoints/train-checkpoint.py": ["/methods/baselinetrain.py"]}
|
14,438
|
ashok-arjun/fsl_ssl
|
refs/heads/master
|
/.ipynb_checkpoints/train-checkpoint.py
|
import numpy as np
import torch
import torch.nn as nn
from torch.autograd import Variable
import torch.optim
import torch.optim.lr_scheduler as lr_scheduler
import time
import os
import glob
import random
import backbone
from data.datamgr import SimpleDataManager, SetDataManager
from methods.baselinetrain import BaselineTrain
from methods.baselinefinetune import BaselineFinetune
from methods.protonet import ProtoNet
from methods.matchingnet import MatchingNet
from methods.relationnet import RelationNet
from methods.maml import MAML
from io_utils import model_dict, parse_args, get_resume_file, get_best_file, get_assigned_file
from tensorboardX import SummaryWriter
import json
from model_resnet import *
import wandb
def train(base_loader, val_loader, model, optimizer, start_epoch, stop_epoch, params):
    """Train `model` for [start_epoch, stop_epoch) epochs with periodic validation.

    Args:
        base_loader: training data loader.
        val_loader: validation data loader (may be None for baselines).
        model: meta-learning model exposing train_loop / test_loop / global_count.
        optimizer: optimizer over model.parameters().
        start_epoch, stop_epoch: epoch range (stop exclusive).
        params: parsed arguments; uses checkpoint_dir, jigsaw, rotation, save_freq.

    Side effects: writes tensorboard logs and wandb metrics, and saves
    'best_model.tar' / 'last_model.tar' under params.checkpoint_dir.
    """
    eval_interval = 20
    max_acc = 0
    writer = SummaryWriter(log_dir=params.checkpoint_dir)
    for epoch in range(start_epoch, stop_epoch):
        model.train()
        model.train_loop(epoch, base_loader, optimizer, writer)  # model is mutated in place
        # BUG FIX: original condition was `epoch % eval_interval == True`,
        # which compares the remainder to True (== 1) and therefore validated
        # on epochs 1, 21, 41, ...; `== 0` validates every eval_interval epochs.
        if epoch % eval_interval == 0 or epoch == stop_epoch - 1:
            model.eval()
            if not os.path.isdir(params.checkpoint_dir):
                os.makedirs(params.checkpoint_dir)
            if params.jigsaw:
                acc, acc_jigsaw = model.test_loop(val_loader)
                writer.add_scalar('val/acc', acc, epoch)
                writer.add_scalar('val/acc_jigsaw', acc_jigsaw, epoch)
            elif params.rotation:
                acc, acc_rotation = model.test_loop(val_loader)
                writer.add_scalar('val/acc', acc, epoch)
                writer.add_scalar('val/acc_rotation', acc_rotation, epoch)
            else:
                acc = model.test_loop(val_loader)
                writer.add_scalar('val/acc', acc, epoch)
            wandb.log({"val/acc": acc}, step=model.global_count)
            # For baseline/baseline++ no validation is used, so acc = -1 and
            # no "best" checkpoint is ever written.
            if acc > max_acc:
                print("best model! save...")
                max_acc = acc
                outfile = os.path.join(params.checkpoint_dir, 'best_model.tar')
                torch.save({'epoch': epoch, 'state': model.state_dict(), 'optimizer': optimizer.state_dict()}, outfile)
                wandb.save(outfile)
        if ((epoch + 1) % params.save_freq == 0) or (epoch == stop_epoch - 1):
            # Single rolling "last" checkpoint (the useless `.format(epoch)`
            # on a plain string was dropped).
            outfile = os.path.join(params.checkpoint_dir, 'last_model.tar')
            torch.save({'epoch': epoch, 'state': model.state_dict(), 'optimizer': optimizer.state_dict()}, outfile)
            wandb.save(outfile)
    # only two models are uploaded in each run - the best one and the last one
    writer.close()  # flush pending tensorboard events
if __name__=='__main__':
    # ---- reproducibility -------------------------------------------------
    SEED = 10
    torch.manual_seed(SEED)
    torch.cuda.manual_seed(SEED)
    np.random.seed(SEED)
    random.seed(SEED)
    # NOTE(review): benchmark=True lets cuDNN autotune kernels, which can
    # break exact run-to-run determinism despite deterministic=True above.
    torch.backends.cudnn.deterministic = True
    torch.backends.cudnn.benchmark = True
    params = parse_args('train')
    os.environ["CUDA_VISIBLE_DEVICES"] = params.device
    isAircraft = (params.dataset == 'aircrafts')
    base_file = os.path.join('filelists', params.dataset, params.base+'.json')
    val_file = os.path.join('filelists', params.dataset, 'val.json')
    image_size = params.image_size
    # ---- build data loaders and the model for the chosen method ----------
    if params.method in ['baseline', 'baseline++'] :
        base_datamgr = SimpleDataManager(image_size, batch_size = params.bs, jigsaw=params.jigsaw, rotation=params.rotation, isAircraft=isAircraft)
        base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
        val_datamgr = SimpleDataManager(image_size, batch_size = params.bs, jigsaw=params.jigsaw, rotation=params.rotation, isAircraft=isAircraft)
        val_loader = val_datamgr.get_data_loader( val_file, aug = False)
        # Number of base-split classes for each supported dataset.
        if params.dataset == 'CUB':
            params.num_classes = 200
        elif params.dataset == 'cars':
            params.num_classes = 196
        elif params.dataset == 'aircrafts':
            params.num_classes = 100
        elif params.dataset == 'dogs':
            params.num_classes = 120
        elif params.dataset == 'flowers':
            params.num_classes = 102
        elif params.dataset == 'miniImagenet':
            params.num_classes = 100
        elif params.dataset == 'tieredImagenet':
            params.num_classes = 608
        if params.method == 'baseline':
            model = BaselineTrain( model_dict[params.model], params.num_classes, \
                jigsaw=params.jigsaw, lbda=params.lbda, rotation=params.rotation, tracking=params.tracking)
        elif params.method == 'baseline++':
            model = BaselineTrain( model_dict[params.model], params.num_classes, \
                loss_type = 'dist', jigsaw=params.jigsaw, lbda=params.lbda, rotation=params.rotation, tracking=params.tracking)
    elif params.method in ['protonet','matchingnet','relationnet', 'relationnet_softmax', 'maml', 'maml_approx']:
        n_query = max(1, int(params.n_query * params.test_n_way/params.train_n_way)) #if test_n_way is smaller than train_n_way, reduce n_query to keep batch size small
        train_few_shot_params = dict(n_way = params.train_n_way, n_support = params.n_shot, \
            jigsaw=params.jigsaw, lbda=params.lbda, rotation=params.rotation)
        base_datamgr = SetDataManager(image_size, n_query = n_query, **train_few_shot_params, isAircraft=isAircraft)
        base_loader = base_datamgr.get_data_loader( base_file , aug = params.train_aug )
        test_few_shot_params = dict(n_way = params.test_n_way, n_support = params.n_shot, \
            jigsaw=params.jigsaw, lbda=params.lbda, rotation=params.rotation)
        val_datamgr = SetDataManager(image_size, n_query = n_query, **test_few_shot_params, isAircraft=isAircraft)
        val_loader = val_datamgr.get_data_loader( val_file, aug = False)
        if params.method == 'protonet':
            model = ProtoNet( model_dict[params.model], **train_few_shot_params, use_bn=(not params.no_bn), pretrain=params.pretrain, tracking=params.tracking)
        elif params.method == 'matchingnet':
            model = MatchingNet( model_dict[params.model], **train_few_shot_params )
        elif params.method in ['relationnet', 'relationnet_softmax']:
            # RelationNet needs spatial feature maps, so the backbone is unflattened.
            feature_model = lambda: model_dict[params.model]( flatten = False )
            loss_type = 'mse' if params.method == 'relationnet' else 'softmax'
            model = RelationNet( feature_model, loss_type = loss_type , **train_few_shot_params )
        elif params.method in ['maml' , 'maml_approx']:
            # MAML requires the special "maml" variants of every backbone block.
            backbone.ConvBlock.maml = True
            backbone.SimpleBlock.maml = True
            backbone.BottleneckBlock.maml = True
            backbone.ResNet.maml = True
            BasicBlock.maml = True
            Bottleneck.maml = True
            ResNet.maml = True
            model = MAML( model_dict[params.model], approx = (params.method == 'maml_approx') , **train_few_shot_params )
    else:
        raise ValueError('Unknown method')
    # model = nn.DataParallel(model, device_ids = params.device_ids)
    model = model.cuda()
    # Arjun - defined optimizer here
    if params.optimization == 'Adam':
        optimizer = torch.optim.Adam(model.parameters(), lr=params.lr)
    elif params.optimization == 'SGD':
        optimizer = torch.optim.SGD(model.parameters(), lr=params.lr)
    elif params.optimization == 'Nesterov':
        optimizer = torch.optim.SGD(model.parameters(), lr=params.lr, nesterov=True, momentum=0.9, weight_decay=params.wd)
    else:
        raise ValueError('Unknown optimization, please define by yourself')
    # ---
    # ---- assemble a checkpoint directory name that encodes the config ----
    params.checkpoint_dir = 'ckpts/%s/%s_%s_%s' %(params.dataset, params.date, params.model, params.method)
    if params.train_aug:
        params.checkpoint_dir += '_aug'
    if not params.method in ['baseline', 'baseline++']:
        params.checkpoint_dir += '_%dway_%dshot_%dquery' %( params.train_n_way, params.n_shot, params.n_query)
    if params.dataset_unlabel is not None:
        params.checkpoint_dir += params.dataset_unlabel
        params.checkpoint_dir += str(params.bs)
    ## Track bn stats
    if params.tracking:
        params.checkpoint_dir += '_tracking'
    ## Add jigsaw
    if params.jigsaw:
        params.checkpoint_dir += '_jigsaw_lbda%.2f'%(params.lbda)
        params.checkpoint_dir += params.optimization
    ## Add rotation
    if params.rotation:
        params.checkpoint_dir += '_rotation_lbda%.2f'%(params.lbda)
        params.checkpoint_dir += params.optimization
    params.checkpoint_dir += '_lr%.4f'%(params.lr)
    if params.finetune:
        params.checkpoint_dir += '_finetune'
    print('Checkpoint path:',params.checkpoint_dir)
    if not os.path.isdir(params.checkpoint_dir):
        os.makedirs(params.checkpoint_dir)
    start_epoch = params.start_epoch
    stop_epoch = params.stop_epoch
    if params.method == 'maml' or params.method == 'maml_approx' :
        stop_epoch = params.stop_epoch * model.n_task #maml use multiple tasks in one update
    # ---- optionally resume or warm-start from earlier checkpoints --------
    if params.resume:
        resume_file = get_resume_file(params.checkpoint_dir)
        if resume_file is not None:
            print('Resuming model, epoch and optimizer from: ', resume_file)
            tmp = torch.load(resume_file)
            start_epoch = tmp['epoch']+1
            model.load_state_dict(tmp['state'])
            optimizer.load_state_dict(tmp['optimizer'])
            del tmp
    elif params.warmup: #We also support warmup from pretrained baseline feature, but we never used in our paper
        baseline_checkpoint_dir = 'checkpoints/%s/%s_%s' %(params.dataset, params.model, 'baseline')
        if params.train_aug:
            baseline_checkpoint_dir += '_aug'
        warmup_resume_file = get_resume_file(baseline_checkpoint_dir)
        tmp = torch.load(warmup_resume_file)
        if tmp is not None:
            state = tmp['state']
            state_keys = list(state.keys())
            for i, key in enumerate(state_keys):
                if "feature." in key:
                    newkey = key.replace("feature.","")  # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                    state[newkey] = state.pop(key)
                else:
                    state.pop(key)
            model.feature.load_state_dict(state)
        else:
            raise ValueError('No warm_up file')
    if params.loadfile != '':
        print('Loading model from: ' + params.loadfile)
        checkpoint = torch.load(params.loadfile)
        model.load_state_dict(checkpoint['state'])
    # NOTE(review): the file handle from open() is never closed here.
    json.dump(vars(params), open(params.checkpoint_dir+'/configs.json','w'))
    # Init WANDB
    if params.resume_wandb_id:
        print('Resuming from wandb ID: ', params.resume_wandb_id)
        wandb.init(project="fsl_ssl", id=params.resume_wandb_id, resume=True)
    else:
        print('Fresh wandb run')
        wandb.init(project="fsl_ssl")
    train(base_loader, val_loader, model, optimizer, start_epoch, stop_epoch, params)
    ##### from save_features.py (except maml)#####
    # ---- after training: extract features / evaluate on the novel split --
    split = 'novel'
    if params.save_iter != -1:
        split_str = split + "_" +str(params.save_iter)
    else:
        split_str = split
    iter_num = 600
    few_shot_params = dict(n_way = params.test_n_way , n_support = params.n_shot)
    acc_all = []
    if params.loadfile != '':
        modelfile = params.loadfile
        checkpoint_dir = params.loadfile
    else:
        checkpoint_dir = params.checkpoint_dir
        if params.save_iter != -1:
            modelfile = get_assigned_file(checkpoint_dir,params.save_iter)
        elif params.method in ['baseline', 'baseline++'] :
            modelfile = get_resume_file(checkpoint_dir)
        else:
            modelfile = get_best_file(checkpoint_dir)
    if params.method in ['maml', 'maml_approx']:
        # MAML evaluates episodically on raw images (no cached features).
        if modelfile is not None:
            tmp = torch.load(modelfile)
            state = tmp['state']
            state_keys = list(state.keys())
            for i, key in enumerate(state_keys):
                if "feature." in key:
                    newkey = key.replace("feature.","")  # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                    state[newkey] = state.pop(key)
                else:
                    state.pop(key)
            model.feature.load_state_dict(tmp['state'])
        print('modelfile:',modelfile)
        datamgr = SetDataManager(image_size, n_eposide = iter_num, n_query = params.n_query , **few_shot_params, isAircraft=isAircraft)
        loadfile = os.path.join('filelists', params.dataset, 'novel.json')
        novel_loader = datamgr.get_data_loader( loadfile, aug = False)
        if params.adaptation:
            model.task_update_num = 100 #We perform adaptation on MAML simply by updating more times.
        model.eval()
        acc_mean, acc_std = model.test_loop( novel_loader, return_std = True)
    else:
        # Other methods: dump features to HDF5, then evaluate on those features.
        if params.save_iter != -1:
            outfile = os.path.join( checkpoint_dir.replace("checkpoints","features"), "novel_" + str(params.save_iter)+ ".hdf5")
        else:
            outfile = os.path.join( checkpoint_dir.replace("checkpoints","features"), "novel.hdf5")
        datamgr = SimpleDataManager(image_size, batch_size = params.test_bs, isAircraft=isAircraft)
        loadfile = os.path.join('filelists', params.dataset, 'novel.json')
        data_loader = datamgr.get_data_loader(loadfile, aug = False)
        tmp = torch.load(modelfile)
        state = tmp['state']
        state_keys = list(state.keys())
        for i, key in enumerate(state_keys):
            if "feature." in key:
                newkey = key.replace("feature.","")  # an architecture model has attribute 'feature', load architecture feature to backbone by casting name from 'feature.trunk.xx' to 'trunk.xx'
                state[newkey] = state.pop(key)
            else:
                state.pop(key)
        model.feature.load_state_dict(state)
        model.eval()
        model = model.cuda()
        model.eval()
        dirname = os.path.dirname(outfile)
        if not os.path.isdir(dirname):
            os.makedirs(dirname)
        print('save outfile at:', outfile)
        from save_features import save_features
        save_features(model, data_loader, outfile)
        ### from test.py ###
        from test import feature_evaluation
        novel_file = os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +".hdf5") #defaut split = novel, but you can also test base or val classes
        print('load novel file from:',novel_file)
        import data.feature_loader as feat_loader
        cl_data_file = feat_loader.init_loader(novel_file)
        # 600 random episodes over the cached features.
        for i in range(iter_num):
            acc = feature_evaluation(cl_data_file, model, n_query = 15, adaptation = params.adaptation, **few_shot_params)
            acc_all.append(acc)
        acc_all = np.asarray(acc_all)
        acc_mean = np.mean(acc_all)
        acc_std = np.std(acc_all)
        # 1.96 * std / sqrt(N) = half-width of the 95% confidence interval.
        print('%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num)))
        with open(os.path.join( checkpoint_dir.replace("checkpoints","features"), split_str +"_test.txt") , 'a') as f:
            timestamp = time.strftime("%Y%m%d-%H%M%S", time.localtime())
            aug_str = '-aug' if params.train_aug else ''
            aug_str += '-adapted' if params.adaptation else ''
            if params.method in ['baseline', 'baseline++'] :
                exp_setting = '%s-%s-%s-%s%s %sshot %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str, params.n_shot, params.test_n_way )
            else:
                exp_setting = '%s-%s-%s-%s%s %sshot %sway_train %sway_test' %(params.dataset, split_str, params.model, params.method, aug_str , params.n_shot , params.train_n_way, params.test_n_way )
            acc_str = '%d Test Acc = %4.2f%% +- %4.2f%%' %(iter_num, acc_mean, 1.96* acc_std/np.sqrt(iter_num))
            f.write( 'Time: %s, Setting: %s, Acc: %s \n' %(timestamp,exp_setting,acc_str) )
|
{"/.ipynb_checkpoints/train-checkpoint.py": ["/methods/baselinetrain.py"]}
|
14,442
|
MeteoSwiss/python-TAMER
|
refs/heads/main
|
/python_tamer/subroutines.py
|
import numpy as np
import pandas as pd
import datetime as dt
import string
def assert_data_shape_24(data, reverse=False, force_second_dim=True):
    """Reshape `data` so the 24 hours of the day form the leading axis.

    With `reverse=True` the hour and day axes are merged back into one.
    With `force_second_dim=True`, data that already has a leading 24 gains
    an explicit singleton "day" axis.  Fortran (column-major) ordering is
    used throughout so hour/day pairing is preserved.
    """
    shape = np.shape(data)
    # TODO: Could be a big job, but this F ordering is weird and I should reconsider
    if shape[0] != 24 and not reverse:
        # Split a flat leading axis into (hour, day).
        target = (24, shape[0] // 24) + shape[1:]
    elif reverse:
        # Merge (hour, day) back into a single leading axis.
        target = [shape[0] * shape[1]] + list(shape[2:])
    elif force_second_dim:
        # Already 24-led: insert a singleton day axis.
        target = (24, 1) + shape[1:]
    else:
        # Nothing to do.
        return data
    return np.reshape(data, target, order='F')
def ER_Vernez_model_equation(Vis, mSZA):
    """Exposure Ratio from the Vis parameter and the minimal Solar Zenith Angle.

    Implements the linear model of Vernez et al., Journal of Exposure Science
    and Environmental Epidemiology (2015) 25, 113-118 (doi:10.1038/jes.2014.6),
    with predictors centred by the constants published there.

    Args:
        Vis (pandas.DataFrame): Vis parameter values (percentages, 0-100)
        mSZA (pandas.DataFrame): minimal Solar Zenith Angle in degrees for the
            given date and latitude (see min_solar_zenith_angle)

    Returns:
        pandas.DataFrame: the calculated Exposure Ratios.
    """
    scaled_vis = Vis / 10
    centred_vis = scaled_vis - 5.800
    centred_log_vis = np.log(scaled_vis) - 1.758
    centred_cos_cubed = np.cos(np.radians(mSZA))**3 - 0.315
    return (-3.396 * centred_log_vis
            + 10.714 * centred_vis
            - 9.199 * centred_cos_cubed
            + 56.991)
def min_solar_zenith_angle(date,lat) :
    """min_solar_zenith_angle calculates the minimal Solar Zenith Angle for a given date and latitude.

    This function is adapted from the SACRaM_astr MATLAB function written by Laurent Vuilleumier for MeteoSwiss.

    Args:
        date (pandas.DataFrame): A datetime column describing the specific day of exposure
        lat (panda.DataFrame): A column of latitude values in decimal degrees

    Returns:
        pandas.DataFrame: A column of minimal SZA values in degrees.
    """
    # Convert the date to the scaled time base expected by the hard-coded
    # polynomial coefficients below (constants inherited verbatim from the
    # original SACRaM_astr MATLAB routine — do not alter them).
    if type(date) is pd.core.series.Series :
        TrTim = date.apply(lambda x: x.toordinal() + 366).to_numpy() * 2.73785151e-05 - 18.9996356
    else : # adds support for single date input
        TrTim = (date.toordinal() +366) * 2.73785151e-05 - 18.9996356
        TrTim = np.array(TrTim)
    # Mean orbital angles (radians), reduced modulo 360 degrees before conversion.
    G = np.radians(np.mod( 358.475833 + 35999.04975 * TrTim - 0.000150 * TrTim**2 , 360))
    SL = np.radians(np.mod( 279.696678 + 36000.76892 * TrTim + 0.000303 * TrTim**2 , 360))
    SJ = np.radians(np.mod( 225.444651 + 3034.906654 * TrTim , 360))
    SN = np.radians(np.mod( 259.183275 - 1934.142008 * TrTim + 0.002078 * TrTim**2 , 360))
    SV = np.radians(np.mod( 212.603219 + 58517.803875 * TrTim + 0.001286 * TrTim**2 , 360))
    # Series expansions combined into the solar declination via arcsin(theta/sqrt(rho)).
    theta = (-( 0.00001 * TrTim * np.sin( G + SL ) + 0.00001 * np.cos( G - SL - SJ ) ) -
        0.000014 * np.sin( 2*G - SL ) - 0.00003 * TrTim * np.sin( G - SL ) -
        0.000039 * np.sin( SN - SL ) - 0.00004 * np.cos( SL ) +
        0.000042 * np.sin( 2*G + SL ) - 0.000208 * TrTim * np.sin( SL ) +
        0.003334 * np.sin( G+SL ) +
        0.009999 * np.sin( G-SL ) +
        0.39793 * np.sin( SL ))
    rho = (0.000027 * np.sin( 2*G - 2*SV ) - 0.000033 * np.sin( G - SJ )
        + 0.000084 * TrTim * np.cos( G ) - 0.00014 * np.cos( 2*G ) - 0.033503 * np.cos( G )
        + 1.000421)
    declination = np.arcsin(theta / np.sqrt(rho))
    # The daily minimum SZA occurs at solar noon: |latitude - declination|
    # in degrees (sign retained as in the original implementation).
    return lat - np.degrees(declination)
def find_nearest(array, value):
    """Return the index of the element of `array` closest to `value`."""
    arr = np.asarray(array)
    distances = np.abs(arr - value)
    return distances.argmin()
def convert_swiss_time_to_UTC(input_table,name) :
    """Convert a column of Swiss local times in `input_table` to UTC.

    Uses a crude daylight-saving rule: months April through October are
    treated as CEST (UTC+2), all other months as CET (UTC+1).

    Args:
        input_table (pandas.DataFrame): table containing a "Date" column and
            a time column named `name`.
        name (str): name of the time column to convert.

    Returns:
        pandas.Series: the converted times.
    """
    # TODO: Need a replacement for this, but argument the responsibility of the user?
    def convert_swiss_time_to_UTC_iter(time_in,Date) :
        # NOTE(review): months 4-10 are treated as summer time regardless of
        # the actual DST switch dates (last Sundays of March/October) — the
        # days around the transitions will be shifted by the wrong offset.
        if Date.month > 3 and Date.month < 11 :
            time_out = time_in
            # NOTE(review): replace(hour=...) raises ValueError when the
            # subtraction goes negative (e.g. 01:00 local in summer).
            time_out = time_out.replace(hour=time_in.hour - 2)
        else :
            time_out = time_in
            time_out = time_out.replace(hour=time_in.hour - 1)
        return time_out
    new_time = input_table.apply(lambda x: convert_swiss_time_to_UTC_iter(x[name],x["Date"]),axis='columns')
    return new_time
def hist_mean(counts, bin_centers):
    """Mean of a histogram: the count-weighted average of the bin centers.

    Args:
        counts (array): number of samples in each histogram bin
        bin_centers (array): central value of each bin (precomputed by the
            caller, e.g. bin_edges[:-1] + 0.5*np.diff(bin_edges), to keep
            this function as cheap as possible)

    Returns:
        float: the mean value of the histogram
    """
    return np.dot(counts, bin_centers) / np.sum(counts)
def hist_var(counts, bin_centers):
    """Variance of a histogram, computed as E[X^2] - E[X]^2.

    Args:
        counts (array): number of samples in each histogram bin
        bin_centers (array): central value of each bin (must support `**`,
            i.e. be a numpy array)

    Returns:
        float: the variance of the histogram
    """
    # Not really essential seeing as this would break the units
    total = np.sum(counts)
    first_moment = np.dot(counts, bin_centers) / total
    second_moment = np.dot(counts, bin_centers**2) / total
    return second_moment - first_moment**2
def hist_stdev(counts, bin_centers):
    """Standard deviation of a histogram.

    Computes the variance as E[X^2] - E[X]^2 and returns its square root.

    Args:
        counts (array): number of samples in each histogram bin
        bin_centers (array): central value of each bin (must support `**`,
            i.e. be a numpy array)

    Returns:
        float: the standard deviation of the histogram
    """
    total = np.sum(counts)
    first_moment = np.dot(counts, bin_centers) / total
    second_moment = np.dot(counts, bin_centers**2) / total
    return (second_moment - first_moment**2)**0.5
def hist_percentile(counts, bin_centers, prct):
    """Percentile of discretised (histogram) data.

    Args:
        counts (array): number of samples in each histogram bin
        bin_centers (array): central value of each bin
        prct (float): desired percentile as a fraction between 0.0 and 1.0

    Returns:
        float: the percentile value; when the target count falls between
            two bins, the average of their centers is returned.
    """
    total = np.sum(counts)
    cumulative = np.cumsum(counts)
    # TODO: Possibly unnecessary, but could probably improve efficiency of
    # this if statement (e.g. if i==j no need to take average)
    if prct == 0:
        # Special case: the minimum is the first bin with a nonzero count.
        return bin_centers[np.searchsorted(cumulative, 0, side='right')]
    if prct == 1:
        # Special case: the maximum is the last bin with a nonzero count.
        return bin_centers[np.searchsorted(cumulative, total)]
    lo = np.searchsorted(cumulative, total * prct)
    hi = np.searchsorted(cumulative, total * prct, side='right')
    return (bin_centers[lo] + bin_centers[hi]) / 2
def hist_min(counts, bin_centers):
    """Minimum value of a histogram.

    Shares the searchsorted machinery of hist_percentile: the minimum is the
    center of the first bin whose cumulative count is nonzero.

    Args:
        counts (array): number of samples in each histogram bin
        bin_centers (array): central value of each bin

    Returns:
        float: the minimum value of the histogram
    """
    cumulative = np.cumsum(counts)
    first_nonzero = np.searchsorted(cumulative, 0, side='right')
    return bin_centers[first_nonzero]
def hist_max(counts, bin_centers):
    """Maximum value of a histogram.

    Shares the searchsorted machinery of hist_percentile: the maximum is the
    center of the last bin whose cumulative count reaches the total.

    Args:
        counts (array): number of samples in each histogram bin
        bin_centers (array): central value of each bin

    Returns:
        float: the maximum value of the histogram
    """
    total = np.sum(counts)
    cumulative = np.cumsum(counts)
    last_nonzero = np.searchsorted(cumulative, total)
    return bin_centers[last_nonzero]
def ER_Vernez_2015(Anatomic_zone,
                   Posture,
                   Date=None,
                   Latitude=None,
                   Vis_table_path=None,
                   Vis_table=None) :
    """Calculates Exposure Ratios for a given anatomic zone, posture, and date.

    This function calculates ER as a percentage between 0 and 100 based on Anatomic_zone, Posture, Date, and Latitude
    information. This function contains hard-coded synonyms for certain anatomical zones, e.g. 'Forehead"
    maps to "Face'. See Vernez et al., Journal of Exposure Science and Environmental Epidemiology (2015)
    25, 113–118 (https://doi.org/10.1038/jes.2014.6) for further details on the model used for the calculation.

    Parameters
    ----------
    Anatomic_zone : list
        String or list of strings describing the anatomic zone for which the ER is to be calculated.
    Posture : list
        String or list of strings describing the posture for which the ER is to be calculated.
    Date : list, optional
        The date for which the ER is to be calculated. The date affects the minimum solar zenith
        angle in the Vernez et al. 2015 ER model. The specific year is not relevant. Defaults to
        March 20, the equinox.
    Latitude : list, optional
        The latitude is important for calculating the ER. Defaults to None, wherein the latitude
        of the centroid of Switzerland (46.8 degrees) is used.
    Vis_table_path : str, optional
        The full path to an alternative table for the Vis parameter.
        Must be a csv file. Defaults to None.
    Vis_table : str, optional
        An alternative table for the Vis parameter. Defaults to None.

    Returns
    -------
    numpy.array
        Returns ER values (fraction between 0 and 1) as a numpy array.

    Raises
    ------
    KeyError
        If an Anatomic_zone or Posture entry is not found in the Vis table
        (after synonym replacement).
    """

    # Normalise scalar inputs to lists so the code below can assume lists
    if isinstance(Anatomic_zone, str): Anatomic_zone = [Anatomic_zone]
    if isinstance(Posture, str): Posture = [Posture]
    if Latitude is None:
        Latitude = [46.8]  # Switzerland centroid
    if Date is None:
        Date = [dt.date(2015, 3, 20)]  # equinox
    if not isinstance(Latitude, list): Latitude = [Latitude]
    if not isinstance(Date, list): Date = [Date]

    d = {'Anatomic_zone': Anatomic_zone,
         'Posture': Posture,
         'Latitude': Latitude,
         'Date': Date}

    # Broadcast shorter inputs up to the longest by repetition.
    # NOTE(review): this only lines up when the longest length is an exact
    # multiple of each shorter length; otherwise the DataFrame constructor
    # below raises. Behaviour kept as in the original implementation.
    lengths = [len(x) for x in d.values()]
    max_length = max(lengths)
    for key in list(d.keys()):
        if len(d[key]) != max_length:
            d[key] = d[key] * (max_length // len(d[key]))
    table = pd.DataFrame(d)

    # This chunk of code checks if the default Vis table should be used or if the user enters some alternative table.
    if Vis_table is None and Vis_table_path is None :
        Vis_table = pd.DataFrame.from_records(
            columns=['Seated','Kneeling','Standing erect arms down','Standing erect arms up','Standing bowing'],
            index=['Face','Skull','Forearm','Upper arm','Neck','Top of shoulders','Belly','Upper back','Hand','Shoulder','Upper leg','Lower leg','Lower back'],
            data=[[53.7,28.7,46.6,44.9,19.2],
                  [56.2,66.6,61.1,58.4,67.5],
                  [62.3,56.5,49.4,53.1,62.1],
                  [51.7,60.5,45.9,65.3,61.6],
                  [58.3,84.3,67.6,65.2,81.6],
                  [35.9,50.3,48.6,45.7,85.3],
                  [58.1,45.1,50.3,49.6,15.2],
                  [35.9,50.3,48.6,45.7,85.3],
                  [59.2,58.8,42.4,55,58.5],
                  [68,62,63,67.1,64],
                  [65.4,45.4,50.9,51,43.5],
                  [32.8,63.4,49.7,50.3,50],
                  [44.9,51.6,56.6,53.4,86.9]])
        # The 'standing moving' posture must be dealt with somehow...
        # Vis_table['Standing moving']= (Vis_table['Standing erect arms down'] + Vis_table['Standing bowing']) / 2
        # TODO: add interpeter or force users to conform?
        Vis_table['Standing moving'] = Vis_table['Standing erect arms down']
        Vis_table['Standing'] = Vis_table['Standing erect arms down']
    elif Vis_table is None :
        Vis_table = pd.read_csv(Vis_table_path)

    # Below is a dictionary describing a range of synonyms for the anatomical zones defined in the Vis table.
    Anatomic_zone_synonyms_reverse = {
        'Forearm' : ['wrist',
                     'Left extern radial',
                     'Right extern radial',
                     'Left wrist: radius head',
                     'Right wrist: radius head',
                     'Left wrist',
                     'Right wrist'],
        'Face' : ['Forehead'],
        'Upper back' : ['Right trapezoid',
                        'Left trapezoid',
                        'trapezius'],
        'Belly' : ['Chest'],
        'Shoulder' : ['Left deltoid',
                      'Right deltoid',
                      'Left shoulder',
                      'Right shoulder'],
        'Upper arm' : ['Left elbow',
                       'Right elbow',
                       'Left biceps',
                       'Right biceps'],
        'Upper leg' : ['Left thigh',
                       'Right thigh',
                       'Left knee',
                       'Right knee'],
        'Lower back' : ['Low back']
    }
    # The dictionary is reversed so that the multiple synonyms can be mapped to the few correct terms for the Vis table.
    Anatomic_zone_synonyms = {keys: old_keys for old_keys, old_values in Anatomic_zone_synonyms_reverse.items() for keys in old_values}
    table = table.replace({'Anatomic_zone' : Anatomic_zone_synonyms})

    # With the correct anatomic zone names established, we can look up the Vis
    # values. DataFrame.lookup is deprecated/removed in modern pandas, so use
    # get_indexer-based fancy indexing, which is the documented replacement.
    row_idx = Vis_table.index.get_indexer(table['Anatomic_zone'])
    col_idx = Vis_table.columns.get_indexer(table['Posture'])
    # get_indexer marks unknown labels with -1; raise like lookup used to.
    if (row_idx < 0).any() or (col_idx < 0).any():
        raise KeyError("Unrecognised Anatomic_zone or Posture entry for Vis table")
    Vis = Vis_table.to_numpy()[row_idx, col_idx]

    # Next we must calculate the minimal Solar Zenith Angle for the given date
    mSZA = min_solar_zenith_angle(table.Date, table.Latitude)
    # With the Vis value and the SZA, we can calculate the ER according to the Vernez model
    ER = ER_Vernez_model_equation(Vis, mSZA) / 100
    return ER.to_numpy()
def format_filename(inp):
    """Takes a string and return a valid filename constructed from the string.

    Uses a whitelist approach: any characters not present in the whitelist are
    removed. Also spaces are replaced with underscores and colons with hyphens.

    Note: this method may produce invalid filenames such as ``, `.` or `..`
    When using this method, prepend a date string like '2009_01_15_19_46_32_'
    and append a file extension like '.txt', so to avoid the potential of using
    an invalid filename.

    Parameters
    ----------
    inp : str
        Input string to be converted to valid filename

    Returns
    -------
    str
    """
    # Allowed characters: ASCII letters, digits, and a small set of symbols.
    whitelist = set("-_.() ") | set(string.ascii_letters) | set(string.digits)
    # Spaces become underscores, colons become hyphens before filtering.
    sanitised = inp.replace(' ', '_').replace(':', '-')
    return ''.join(ch for ch in sanitised if ch in whitelist)
def str2daysofyear(inp) :
    """Interprets a string, list, or array into a list of arrays for days of the year

    An ExposureMapSequence object requires of a list of arrays describing the days of
    the year to be used in the creation of each histogram. This function simplifies the
    process of entering this information. The user can enter keywords to automatically
    generate the appropriate list of days.

    Parameters
    ----------
    inp : str or list or numpy.array
        The input to be interpreted. Numeric entries should be included in the output
        unmodified, while string entries should be replaced by numeric arrays.

    Returns
    -------
    list
        Produces a list of arrays that is interpretable by the ExposureMapSequence code,
        along with a list of the matched keyword names and a list of booleans flagging
        which entries were non-string (numeric) inputs.
    """
    def str2daysofyear_raw(inp) :
        # Lookup helper: maps one recognised keyword to day-of-year numbers.
        # 2010 is an arbitrary non-leap year, so Feb 29 is never produced.
        ayear = pd.date_range(start="2010-01-01",end="2010-12-31")
        winter = [x for i in [12,1,2] for x in ayear[ayear.month == i].dayofyear.values.tolist()]
        spring = [x for i in [3,4,5] for x in ayear[ayear.month == i].dayofyear.values.tolist()]
        summer = [x for i in [6,7,8] for x in ayear[ayear.month == i].dayofyear.values.tolist()]
        autumn = [x for i in [9,10,11] for x in ayear[ayear.month == i].dayofyear.values.tolist()]
        # "month" and "season"/"quarter" are collective keys returning a list
        # of lists (one per subdivision); all other keys return a flat list.
        keys_ref = {
            "month"  : [ayear[ayear.month == i].dayofyear.values.tolist() for i in range(1,13)],
            "season" : [spring,summer,autumn,winter],
            "quarter": [spring,summer,autumn,winter],
            "year"   : ayear.dayofyear.values.tolist(),
            "annual" : ayear.dayofyear.values.tolist(),
            "jan" : ayear[ayear.month == 1].dayofyear.values.tolist(),
            "feb" : ayear[ayear.month == 2].dayofyear.values.tolist(),
            "mar" : ayear[ayear.month == 3].dayofyear.values.tolist(),
            "apr" : ayear[ayear.month == 4].dayofyear.values.tolist(),
            "may" : ayear[ayear.month == 5].dayofyear.values.tolist(),
            "jun" : ayear[ayear.month == 6].dayofyear.values.tolist(),
            "jul" : ayear[ayear.month == 7].dayofyear.values.tolist(),
            "aug" : ayear[ayear.month == 8].dayofyear.values.tolist(),
            "sep" : ayear[ayear.month == 9].dayofyear.values.tolist(),
            "oct" : ayear[ayear.month == 10].dayofyear.values.tolist(),
            "nov" : ayear[ayear.month == 11].dayofyear.values.tolist(),
            "dec" : ayear[ayear.month == 12].dayofyear.values.tolist(),
            "winter" : winter,
            "autumn" : autumn,
            "fall"   : autumn,
            "spring" : spring,
            "summer" : summer,
        }
        return keys_ref[inp]
    # Recognised keywords; matching below is by substring ("January" matches
    # "jan"), so list order determines precedence when several keys match.
    keys = ["month","season","quarter","year","annual",
            "jan","feb","mar","apr","may","jun","jul","aug","sep","oct","nov","dec",
            "winter","autumn","fall","spring","summer"]
    if isinstance(inp,str) :
        # simple case, user has entered "monthly" or some such
        # there should be only one result from the filter anyway
        inp_flt = list(filter(lambda x: x in inp.lower(), keys))
        out = str2daysofyear_raw(inp_flt[0])
        # in case user hasn't selected one of the nice advanced options, must convert to list
        if inp_flt[0] not in ["month","season","quarter","year","annual"] :
            out = [out]
        # TODO: rewrite this function to do this first and then pass filtered input to raw
        # Expand collective keywords into one name per output entry so that
        # inp_flt stays parallel to out.
        if inp_flt[0] == 'month' :
            inp_flt = ['jan','feb','mar','apr','may','jun','jul','aug','sep','oct','nov','dec']
        elif inp_flt[0] in ['season','quarter'] :
            inp_flt = ['spring','summer','autumn','winter']
        elif inp_flt[0] == 'year' :
            inp_flt[0] = 'annual'
        # NOTE(review): for collective keywords this flag list has length 1
        # while out may have several entries — confirm callers tolerate that.
        nonstrings = [False]
    elif isinstance(inp,list) :
        # complex case, user has entered list
        # ["june","july","august","summer"] or some such
        out = []
        inp_flt = []
        nonstrings = []
        for inpx in inp :
            if isinstance(inpx,str) :
                inp_flt_temp = list(filter(lambda x: x in inpx.lower(), keys))[0]
                inp_flt.append(inp_flt_temp)
                out.append(str2daysofyear_raw(inp_flt_temp))
                nonstrings.append(False)
            else :
                # numeric entry (e.g. an array of days) passes through as-is
                inp_flt.append(inpx)
                out.append(inpx)
                nonstrings.append(True)
    # NOTE(review): an input that is neither str nor list (e.g. a bare numpy
    # array, despite the docstring) raises NameError here — confirm intent.
    # convert list of lists to list of arrays for consistency
    for i in range(len(out)) :
        out[i] = np.array(out[i])
    return out, inp_flt, nonstrings
|
{"/python_tamer/SpecificDoses.py": ["/python_tamer/subroutines.py"], "/test/test_dummy_library.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"], "/python_tamer/ExposureMap.py": ["/python_tamer/subroutines.py"], "/python_tamer/__init__.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"]}
|
14,443
|
MeteoSwiss/python-TAMER
|
refs/heads/main
|
/python_tamer/SpecificDoses.py
|
import pandas as pd
import numpy as np
import netCDF4 as nc
from .subroutines import *
class SpecificDoses(pd.DataFrame):
    """A class for specific dose estimates akin to dosimetry measurements

    High resolution data allows for personal and ambient dose estimation without the need for
    direct measurement. This class is structured like a table with a set of functions to add
    columns ultimately leading to dose estimates. Each row of this table represents a specific
    exposure instance, i.e. an individual at a specific location for a specific date and time
    with a specific exposure ratio. See Harris et al. 2021
    (https://doi.org/10.3390/atmos12020268) for more information on calculations appropriate
    for this class.

    Parameters
    ----------
    src_filename_format : str
        Describes the filename of the netCDF files containing the UV data with 'yyyy' in place
        of the year.
    src_directory : str
        The directory where the data is stored. Must end with a slash.

    Notes
    -----
    Presently, the class is inherited from a pandas.DataFrame which is somewhat restrictive
    and will likely be revised in a later update. For the time being, this means that the
    parameters cannot be set when initialising a `SpecificDoses` object, they must instead
    be adjusted after initialisation, like so::

        ExistingExposureMapObject.src_directory = "/new/data/directory/"

    Example
    -------
    In this example, we illustrate the process for calculating the doses in Harris et al. 2021
    (https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
    data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
    spreadsheet contains only local Swiss time and not UTC time. There are four important
    functions as part of this class, three for standardising and preparing the columns,
    and one for actually loading the data and performing the dose calculations. See below::

        import python_tamer as pt
        import pandas as pd
        example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
            header=2,index_col=0,usecols="B:K"))
        example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
        example = example.standard_column_names()
        example = example.schedule_constant_exposure().ER_from_posture()
        example = example.calculate_specific_dose()
    """

    # This property ensures that pandas operations (slicing, copying, etc.)
    # return a SpecificDoses object rather than a plain DataFrame.
    @property
    def _constructor(self):
        return SpecificDoses

    # Custom attributes that pandas should propagate through operations.
    _metadata = ["src_filename_format","src_directory"]
    src_filename_format = 'UVery.AS_ch02.lonlat_yyyy01010000.nc'
    src_directory = 'C:/Data/UV/' # TODO: set up __init__ for these options
    # It feels like this should be declared with __init__ as well but idk

    def standard_column_names(self) :
        """Limited function to standardise column names

        When loading tables to use as the basis for a SpecificDoses table, some columns may have
        slightly different names to what is expected. This function standardises the names but is
        very limited in terms of what it can recognise. The user is encouraged to ensure the
        columns are correctly labelled themselves and not to rely on this function.

        Returns
        -------
        SpecificDoses
            The table has its column names modified.
        """
        # Mapping from canonical column name to the known alternative labels.
        legend_dict_reverse = {'Point' : ['Lieu de mesure'],
                               'Date' : ['Date'],
                               'Time_start' : ['Heure début','Start_time','Start time'],
                               'Time_end' : ['Heure fin','End_time','End time'],
                               'Measured_dose' : ['Exposition [MED]','Exposure'],
                               'Anatomic_zone' : ['Zone anatomique','Body part','Anatomic zone'],
                               'Posture' : ['Body posture'],
                               'Latitude' : ['lat'],
                               'Longitude' : ['lon','lng']}
        # Invert so each alternative label maps to its canonical name.
        legend_dict = {keys: old_keys for old_keys, old_values in legend_dict_reverse.items() for keys in old_values}
        self = self.rename(columns=legend_dict)
        return self

    def schedule_constant_exposure(self) :
        """Generates exposure schedules given start and end times.

        This function generates exposure schedules based on simple continuous exposure, i.e.
        with a start time and an end time. The exposure schedule is a vector with length 24
        with each entry representing the proportion of the corresponding hour of the day that
        the subject is exposed.

        Returns
        -------
        python_tamer.SpecificDoses
            A Schedule column is created and is appended to the input
            `SpecificDoses` object or, if that column already exists, it is overwritten.

        Notes
        -----
        The input `SpecificDoses` object must contain the following columns:
        * ``Time_start``
        * ``Time_end``

        Example
        -------
        In this example, we illustrate the process for calculating the doses in Harris et al. 2021
        (https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
        data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
        spreadsheet contains only local Swiss time and not UTC time. There are four important
        functions as part of this class, three for standardising and preparing the columns,
        and one for actually loading the data and performing the dose calculations. See below::

            import python_tamer as pt
            import pandas as pd
            example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
                header=2,index_col=0,usecols="B:K"))
            example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
            example = example.standard_column_names()
            example = example.schedule_constant_exposure().ER_from_posture()
            example = example.calculate_specific_dose()
        """
        def schedule_constant_exposure_iter(Start_time,End_time) :
            """Iterates through rows of a SpecificDoses table to generate schedules.

            This function is designed to be applied to each row in a datatable to generate an
            exposure schedule based on a start time and end time

            Parameters
            ----------
            Start_time : datetime.time
                UTC time at which exposure period begins
            End_time : datetime.time
                UTC time at which exposure period ends

            Returns
            -------
            numpy.array
                24 length vector of values between 0 and 1 indicating proportion
                of time exposed for that corresponding hour of the day.
            """
            schedule = np.zeros(24)
            # Full exposure for every whole hour between start and end...
            schedule[Start_time.hour:End_time.hour] = 1
            # ...then modify start and end hours according to proportion of time exposed
            if Start_time.minute != 0 :
                schedule[Start_time.hour] = (1 - Start_time.minute/60)
            if End_time.minute != 0 :
                schedule[End_time.hour] = End_time.minute/60
            return schedule
        # With that function defined, we need just one line to apply it to the whole table
        self["Schedule"] = self.apply(lambda x: schedule_constant_exposure_iter(
            x["Time_start"],x["Time_end"]),axis='columns')
        return self

    def ER_from_posture(self,
    Vis_table_path=None,
    Vis_table=None) :
        """ER_from_posture calculates Exposure Ratios for a given anatomic zone, posture, and date.

        This function calculates ER as a percentage between 0 and 100 based on information from an input table.
        The input table must contain certain columns at a minimum. Those are: Date, Anatomic_zone, and Posture.
        This function contains hard-coded synonyms for certain anatomical zones, e.g. 'Forehead" maps to "Face'.
        See Vernez et al., Journal of Exposure Science and Environmental Epidemiology (2015) 25, 113–118
        (https://doi.org/10.1038/jes.2014.6) for further details on the model used for the calculation.

        Parameters
        ----------
        Vis_table_path : str, optional
            The full path to an alternative table for the Vis parameter.
            Must be a csv file. Defaults to None.
        Vis_table : str, optional
            An alternative table for the Vis parameter. Defaults to None.

        Returns
        -------
        SpecificDoses
            Returns input table appended with ER column

        Notes
        -----
        The SpecificDoses table used must contain columns for Date, Anatomic_zone, and Posture.
        The Date column should contain DateTime entries. The Anatomic_zone column should contain one string per
        row describing the exposed body part. The Posture column should contain one string per row describing
        one of six accepted postures.

        Example
        -------
        In this example, we illustrate the process for calculating the doses in Harris et al. 2021
        (https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
        data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
        spreadsheet contains only local Swiss time and not UTC time. There are four important
        functions as part of this class, three for standardising and preparing the columns,
        and one for actually loading the data and performing the dose calculations. See below::

            import python_tamer as pt
            import pandas as pd
            example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
                header=2,index_col=0,usecols="B:K"))
            example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
            example = example.standard_column_names()
            example = example.schedule_constant_exposure().ER_from_posture()
            example = example.calculate_specific_dose()
        """
        # This chunk of code checks if the default Vis table should be used or if the user enters some alternative table.
        if Vis_table is None and Vis_table_path is None :
            # Default Vis values (percentage visibility of each anatomic zone
            # per posture) from Vernez et al. 2015.
            Vis_table = pd.DataFrame.from_records(
                columns=['Seated','Kneeling','Standing erect arms down','Standing erect arms up','Standing bowing'],
                index=['Face','Skull','Forearm','Upper arm','Neck','Top of shoulders','Belly','Upper back','Hand','Shoulder','Upper leg','Lower leg','Lower back'],
                data=[[53.7,28.7,46.6,44.9,19.2],
                      [56.2,66.6,61.1,58.4,67.5],
                      [62.3,56.5,49.4,53.1,62.1],
                      [51.7,60.5,45.9,65.3,61.6],
                      [58.3,84.3,67.6,65.2,81.6],
                      [35.9,50.3,48.6,45.7,85.3],
                      [58.1,45.1,50.3,49.6,15.2],
                      [35.9,50.3,48.6,45.7,85.3],
                      [59.2,58.8,42.4,55,58.5],
                      [68,62,63,67.1,64],
                      [65.4,45.4,50.9,51,43.5],
                      [32.8,63.4,49.7,50.3,50],
                      [44.9,51.6,56.6,53.4,86.9]])
            # The 'standing moving' posture must be dealt with somehow...
            # Vis_table['Standing moving']= (Vis_table['Standing erect arms down'] + Vis_table['Standing bowing']) / 2
            # TODO: add interpeter or force users to conform?
            Vis_table['Standing moving']= Vis_table['Standing erect arms down']
        elif Vis_table is None :
            Vis_table = pd.read_csv(Vis_table_path)
        # Below is a dictionary describing a range of synonyms for the anatomical zones defined in the Vis table.
        Anatomic_zone_synonyms_reverse = {
            'Forearm' : ['wrist',
                         'Left extern radial',
                         'Right extern radial',
                         'Left wrist: radius head',
                         'Right wrist: radius head',
                         'Left wrist',
                         'Right wrist'],
            'Face' : ['Forehead'],
            'Upper back' : ['Right trapezoid',
                            'Left trapezoid',
                            'trapezius'],
            'Belly' : ['Chest'],
            'Shoulder' : ['Left deltoid',
                          'Right deltoid',
                          'Left shoulder',
                          'Right shoulder'],
            'Upper arm' : ['Left elbow',
                           'Right elbow',
                           'Left biceps',
                           'Right biceps'],
            'Upper leg' : ['Left thigh',
                           'Right thigh',
                           'Left knee',
                           'Right knee'],
            'Lower back' : ['Low back']
        }
        # The dictionary is reversed so that the multiple synonyms can be mapped to the few correct terms for the Vis table.
        Anatomic_zone_synonyms = {keys: old_keys for old_keys, old_values in Anatomic_zone_synonyms_reverse.items() for keys in old_values}
        self = self.replace({'Anatomic_zone' : Anatomic_zone_synonyms})
        # With the correct anatomic zone names established, we can lookup the Vis values from the table
        # TODO: lookup is deprecated in pandas, must replace with something new
        # (e.g. Index.get_indexer-based indexing)
        Vis = Vis_table.lookup(self['Anatomic_zone'],self['Posture'])
        # Next we must calculate the minimal Solar Zenith Angle for the given date
        mSZA = min_solar_zenith_angle(self.Date,self.Latitude)
        # With the Vis value and the SZA, we can calculate the ER according to the Vernez model
        # (divide by 100 to convert from percentage to fraction)
        self.loc[:,'ER'] = ER_Vernez_model_equation(Vis,mSZA) / 100
        return self

    def calculate_specific_dose(self) :
        """Calculates doses according to exposure schedule, ER, date, and location.

        This function takes the SpecificDoseEstimationTable and calculates the specific
        ambient and personal doses according to the exposure schedule and ER. There are
        a few key steps to this function. First it reads the Date column to determine
        which years of data must be loaded. It then iterates through each year, loading
        only the necessary dates. It applies the exposure schedule and the ER to
        calculate the ambient and personal doses.

        Returns
        -------
        SpecificDoses
            The input table is appended with a Ambient_dose and Personal_dose column.

        Notes
        -----
        The input SpecificDoses object must include Date, Schedule, ER, Latitude,
        and Longitude columns.
        Consult Harris et al. 2021 (https://doi.org/10.3390/atmos12020268) for more
        information on how this function can be used in the context of mimicking UV
        dosimetry measurements.

        Example
        -------
        In this example, we illustrate the process for calculating the doses in Harris et al. 2021
        (https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
        data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
        spreadsheet contains only local Swiss time and not UTC time. There are four important
        functions as part of this class, three for standardising and preparing the columns,
        and one for actually loading the data and performing the dose calculations. See below::

            import python_tamer as pt
            import pandas as pd
            example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
                header=2,index_col=0,usecols="B:K"))
            example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
            example = example.standard_column_names()
            example = example.schedule_constant_exposure().ER_from_posture()
            example = example.calculate_specific_dose()
        """
        # First step is find unique years to avoid loading unnecessary data
        years = pd.DatetimeIndex(self.Date).year
        unique_years = sorted(set(years))
        self['Ambient_dose'] = np.nan
        self['Personal_dose'] = np.nan
        for year in unique_years :
            # Load netCDF file
            print("Processing year "+str(year))
            dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year)))
            dataset.set_auto_mask(False) # This is important for nans to import correctly
            # Make temporary table for yearly subset
            temp_table = self[years == year].copy()
            # find all unique days in year to be loaded
            unique_days,unique_days_idx = np.unique(pd.DatetimeIndex(temp_table.Date).dayofyear,
                return_inverse=True)
            temp_table['unique_days_idx'] = unique_days_idx
            #pd.DatetimeIndex(nc.num2date(dataset.variables["time"][:],dataset.variables["time"].units,only_use_cftime_datetimes=False))
            if dataset.dimensions['time'].size == 24 :
                # needed if just a single day
                time_subset = [True for i in range(dataset.dimensions['time'].size)]
            else :
                # Next we pull a subset from the netCDF file
                # declare false array with same length of time dimension from netCDF
                time_subset = [False for i in range(dataset.dimensions['time'].size)]
                # reshape false array to have first dimension 24 (hours in day)
                time_subset = assert_data_shape_24(time_subset)
                # set the appropriate days as true
                time_subset[:,unique_days-1] = True
                # flatten time_subset array back to one dimension
                time_subset = time_subset.flatten(order='F')
            # data is shaped (hour, day, lat, lon) after the reshape
            data = assert_data_shape_24(dataset['UV_AS'][time_subset,:,:])
            # TODO: improve comprehension of raw data units rather than assuming
            # convert lat lon into pixel coordinates
            # TODO: consider is necessary to load entire maps for just a few required pixels
            lat = dataset['lat'][:]
            lon = dataset['lon'][:]
            temp_table['pixel_lat'] = temp_table.apply(lambda x:
                find_nearest(lat,x['Latitude']),axis='columns')
            temp_table['pixel_lon'] = temp_table.apply(lambda x:
                find_nearest(lon,x['Longitude']),axis='columns')
            # calculate doses: ambient weights the 24-hour data by the exposure
            # schedule alone, personal additionally scales by the exposure ratio
            temp_table['Ambient_dose'] = temp_table.apply(lambda x:
                np.sum(data[:,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] *
                x['Schedule']),axis='columns')
            temp_table['Personal_dose'] = temp_table.apply(lambda x:
                np.sum(data[:,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] *
                (x['Schedule'] * x['ER'])),axis='columns')
            # extra step necessary to ensure correct assignment
            self.loc[temp_table.index,'Ambient_dose'] = temp_table['Ambient_dose'].values
            self.loc[temp_table.index,'Personal_dose'] = temp_table['Personal_dose'].values
        # TODO: improve units options here
        # Unit conversion to SED — assumes source data is in UV Index
        # (UVI/40 = W m-2, x3600 s per hour = J m-2, /100 J m-2 = 1 SED);
        # TODO confirm source units.
        self['Ambient_dose'] = self['Ambient_dose']/40*3600/100 # SED
        self['Personal_dose'] = self['Personal_dose']/40*3600/100 # SED
        return self

    def analyse_variable(self,
    variable="UV_AS",
    statistic="Mean",
    src_filename_format=None,
    src_directory=None) :
        """Basic calculations for specific exposure instances

        This function is for calculating information other than ambient and personal
        doses that corresponds to specific exposure instances.

        Parameters
        ----------
        variable : str, optional
            The name of the variable to be analysed. This informs what data should be
            pulled from the source netCDF files. This also informs the name of the column(s)
            that will be created by this function. Defaults to "UV_AS", i.e. the All-Sky
            UV data that is used in the calculate_specific_dose function.
        statistic : str or list, optional
            The statistic to be calculated, options include: mean, median, stdev, variance,
            min, max, weighted_mean, and sum. Not case sensitive. Can be a single string or
            a list of strings whereby multiple columns will be calculated. Defaults to "Mean".
        src_filename_format : str, optional
            Allows the user to select different source data. This may be useful in cases where
            the user wants to compare doses calculated with one dataset to (say) cloud cover
            from another dataset. Defaults to None, where the function uses the source files
            specified by the object's metadata.
        src_directory : str, optional
            Allows the user to select different source data. This may be useful in cases where
            the user wants to compare doses calculated with one dataset to (say) cloud cover
            from another dataset. Defaults to None, where the function uses the source files
            specified by the object's metadata.

        Returns
        -------
        SpecificDoses
            The table is appended with new columns named [variable]_[statistic].

        Raises
        ------
        TypeError
            If statistic is neither a string nor a list of strings.

        Example
        -------
        In this example, we illustrate the process for calculating the doses in Harris et al. 2021
        (https://doi.org/10.3390/atmos12020268) from the spreadsheet supplied as supplementary
        data (https://www.mdpi.com/2073-4433/12/2/268/s1). Note that results will differ as the
        spreadsheet contains only local Swiss time and not UTC time. Additionally, to demonstrate
        the analyse_variable function, we also calculate the weighted mean CMF assuming it to be
        an additional variable in the source data files. See below::

            import python_tamer as pt
            import pandas as pd
            example = pt.SpecificDoses(pd.read_excel(r'atmosphere-12-00268-s001.xlsx',
                header=2,index_col=0,usecols="B:K"))
            example.src_directory = 'C:/enter_the_directory_of_your_dataset_here'
            example = example.standard_column_names()
            example = example.schedule_constant_exposure().ER_from_posture()
            example = example.calculate_specific_dose()
            example = example.analyse_variable(variable="CMF",statistic="weighted_mean")
        """
        # users have option to load different files, otherwise defaults to metadata
        if src_filename_format is None :
            src_filename_format = self.src_filename_format
        if src_directory is None :
            src_directory = self.src_directory
        # First step is find unique years to avoid loading unnecessary data
        years = pd.DatetimeIndex(self.Date).year
        unique_years = sorted(set(years))
        if isinstance(statistic,str) :
            self[variable+"_"+statistic] = np.nan
            # convert to list to simplify code later
            statistic = [statistic]
        elif isinstance(statistic,list) :
            for x in statistic :
                self[variable+"_"+x]=np.nan
        else :
            raise TypeError("statistic input must be str or list of str")
        for year in unique_years :
            # Load netCDF file
            print("Processing year "+str(year))
            dataset=nc.Dataset(src_directory+src_filename_format.replace('yyyy',str(year)))
            dataset.set_auto_mask(False) # This is important for nans to import correctly
            # Make temporary table for yearly subset
            temp_table = self[years == year].copy()
            # find all unique days in year to be loaded
            unique_days,unique_days_idx = np.unique(pd.DatetimeIndex(temp_table.Date).dayofyear,
                return_inverse=True)
            temp_table['unique_days_idx'] = unique_days_idx
            #pd.DatetimeIndex(nc.num2date(dataset.variables["time"][:],dataset.variables["time"].units,only_use_cftime_datetimes=False))
            if dataset.dimensions['time'].size == 24 :
                # needed if just a single day
                time_subset = [True for i in range(dataset.dimensions['time'].size)]
            else :
                # Next we pull a subset from the netCDF file
                # declare false array with same length of time dimension from netCDF
                time_subset = [False for i in range(dataset.dimensions['time'].size)]
                # reshape false array to have first dimension 24 (hours in day)
                time_subset = assert_data_shape_24(time_subset)
                # set the appropriate days as true
                time_subset[:,unique_days-1] = True
                # flatten time_subset array back to one dimension
                time_subset = time_subset.flatten(order='F')
            # data is shaped (hour, day, lat, lon) after the reshape
            data = assert_data_shape_24(dataset[variable][time_subset,:,:])
            # TODO: improve comprehension of raw data units rather than assuming
            # convert lat lon into pixel coordinates
            # TODO: consider is necessary to load entire maps for just a few required pixels
            lat = dataset['lat'][:]
            lon = dataset['lon'][:]
            temp_table['pixel_lat'] = temp_table.apply(lambda x:
                find_nearest(lat,x['Latitude']),axis='columns')
            temp_table['pixel_lon'] = temp_table.apply(lambda x:
                find_nearest(lon,x['Longitude']),axis='columns')
            # calculate each requested statistic; all but weighted_mean restrict
            # to the hours where the exposure schedule is non-zero
            for stat in statistic :
                # mean
                if stat.lower() in ["mean",'average','avg'] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.mean(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # median
                elif stat.lower() in ["median","med"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.median(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # stdev
                elif stat.lower() in ["std","sd","stdev"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.std(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # variance
                elif stat.lower() in ["var","variance"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.var(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # minimum
                elif stat.lower() in ["min",'minimum'] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.amin(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # maximum
                elif stat.lower() in ["max","maximum"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.amax(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # weighted mean (weights by the schedule across all 24 hours)
                elif stat.lower() in ["weighted_mean","weighted_average","mean_weighted","average_weighted","avg_weighted"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.average(data[:,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']],weights=x['Schedule']),axis='columns')
                # sum
                elif stat.lower() in ["sum","total"] :
                    temp_table[variable+"_"+stat] = temp_table.apply(lambda x:
                        np.sum(data[x['Schedule']!=0,x['unique_days_idx'],x['pixel_lat'],x['pixel_lon']] ),axis='columns')
                # NOTE(review): an unrecognised statistic string is silently
                # skipped here, leaving its column as NaN — confirm intent.
                # extra step necessary to ensure correct assignment
                self.loc[temp_table.index,variable+"_"+stat] = temp_table[variable+"_"+stat].values
        return self
|
{"/python_tamer/SpecificDoses.py": ["/python_tamer/subroutines.py"], "/test/test_dummy_library.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"], "/python_tamer/ExposureMap.py": ["/python_tamer/subroutines.py"], "/python_tamer/__init__.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"]}
|
14,444
|
MeteoSwiss/python-TAMER
|
refs/heads/main
|
/test/test_dummy_library.py
|
import pytest
import datetime as dt
from python_tamer.ExposureMap import *
from python_tamer.SpecificDoses import *
def test_ExposureMap_max():
    """End-to-end ExposureMap check: per-pixel max UVI for a single day.

    Fix: the keyword was previously ``data_directory``, which is not a
    parameter of ``ExposureMap.__init__`` (the class accepts
    ``src_directory``), so the test raised TypeError before running.
    """
    test = ExposureMap(date_selection=pd.date_range(start="2018-07-01",end="2018-07-01"),
        units = "UVI",
        statistic = "max",
        src_directory="test/",
        src_filename_format="UV_test_data_yyyy.nc",
        bin_width=0.1
    ).collect_data().calculate_map()
    # reference value taken from the bundled UV test dataset
    assert test.map[50,50] == 8.05
def test_SpecificDoses():
    """Single-row dose table: ambient dose should match the reference value."""
    exposure_record = {
        "Date" : [dt.date(2018,7,1)],
        "Time_start" : [dt.time(11,0,0)],
        "Time_end" : [dt.time(12,0,0)],
        "Anatomic_zone" : ["Forehead"],
        "Posture" : ["Standing erect arms down"],
        "Latitude" : [46.79166],
        "Longitude" : [6.79167]
    }
    doses = SpecificDoses(pd.DataFrame(exposure_record))
    doses.data_directory = "test/"
    doses.src_filename_format = "UV_test_data_yyyy.nc"
    doses = doses.ER_from_posture().schedule_constant_exposure()
    doses = doses.calculate_specific_dose()
    # 8.08847 UVIh converted to SED via the 0.9 factor; 5% tolerance
    assert doses['Ambient_dose'][0] == pytest.approx(8.08847 * 0.9, 0.05)
def test_str2daysofyear_mix_type():
    """Mixed list input: 'January' must expand to days 1-31 labelled 'jan'."""
    days, labels, keyword_flags = str2daysofyear([np.arange(1,8), "January"])
    # second entry was the keyword "January" -> all 31 days of that month
    assert all(days[1] == np.arange(1,32))
    assert labels[1] == 'jan'
    assert keyword_flags == [True, False]
|
{"/python_tamer/SpecificDoses.py": ["/python_tamer/subroutines.py"], "/test/test_dummy_library.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"], "/python_tamer/ExposureMap.py": ["/python_tamer/subroutines.py"], "/python_tamer/__init__.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"]}
|
14,445
|
MeteoSwiss/python-TAMER
|
refs/heads/main
|
/python_tamer/ExposureMap.py
|
from numpy.core.numeric import True_
import pandas as pd
import numpy as np
import datetime as dt
import os
import re
import netCDF4 as nc
import matplotlib.pyplot as plt
import cartopy.crs as ccrs
import cartopy.feature as cfeat
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.projections import get_projection_class
from cartopy.io import shapereader
from scipy.interpolate import interp2d
from scipy.ndimage import zoom
from .subroutines import *
class ExposureMap:
""" A class for calculating maps based on user specifications
Each instance of this class contains information required to calculate and illustrate a map
of exposure information, be that simple averages or more advanced mathematical representations
of exposure risk. The class is designed to have three core functions run in sequence, with
room for flexibility should more advanced users desire it. First, the data is read and a pixel
histogram is calculated. This allows much more data to be stored in memory and is the basis
for performing this kind of data analysis on personal computers.
Parameters
----------
units : str
The units of the quantity to be mapped. Must be "SED", "J m-2" or "UVIh" for doses or "UVI",
"W m-2" or "mW m-2" for irradiances. Defaults to "SED".
exposure_schedule : array
A vector of values describing the relative exposure of each hour of the day. 0 indicates no
exposure, 1 indicates full exposure, and a fractional value such as 0.5 would indicate
exposure for a total of 30 minutes within the hour, or a 50% partial exposure for the full
hour, or anything equivalent. Values greater than 1 are allowed. Must have a length of 24 (for
each hour of the day) although a length of 1 is also allowed, in which case that number will
be immediately replicated across a 24-length vector. When not calculating doses, hours with
any non-zero entry in this vector are included, with the irradiance values being
multiplied by the corresponding non-zero value in the exposure schedule.
bin_width : float
The width of each bin in the pixel histogram. Value assumed to be in the same units as
defined by the units parameter. *Making bin_width excessively small can lead to high
memory usage,* consider the underlying accuracy of the source data and be sure not to
substantially exceed its precision with this parameter.
statistic : str
The statistical descriptor to be calculated from the pixel histograms to be later
represented on the rendered map. Must contain at least one of these keywords:
"mean", "median" or "med", "sd" or "std" or "stdev", "max" or "maximum", "min" or
"minimum".
*Planned:* the string can be a formula using any of the keywords above,
as well at "prct" or "percentile" preceeded by a number between 0 and 100, and
basic mathematical operators (+, -, *, /, **) and numeric factors.
date_selection : list of dates
The dates for which the irradiances are retrieved or the daily doses are calculated.
Defaults to None whereby the program selects all data within the src_directory that
matches the src_filename_format.
src_filename_format : str
Describes the filename of the netCDF files containing the data with 'yyyy' in place
of the year.
src_directory : str
The directory where the data is stored. Must end with a slash.
Example
-------
The code below shows a typical use case for the ExposureMap class. The long-term average daily doses
(i.e. the chronic doses) for typical school children are calculated across Switzerland assuming certain
hours of exposure for journeying to and from school and having breaks for morning tea and lunch time. ::
import python_tamer as pt
import pandas as pd
import numpy as np
src_directory = 'C:/enter_your_src_directory_here'
ER = pt.ER_Vernez_2015("Forehead","Standing") # Long-term average ER for foreheads in standing posture
map = pt.ExposureMap(
src_directory=src_directory,
units = "J m-2",
exposure_schedule = np.array([0 ,0 ,0 ,0 ,0 ,0 ,
0 ,0 ,0.5,0 ,0.5,0 ,
0.5,0.5,0 ,0 ,0.5,0 ,
0 ,0 ,0 ,0 ,0 ,0 ])*ER,
bin_width = 25,
date_selection = pd.date_range(start="2005-01-01",end="2014-12-31"),
statistic = "mean",
map_options={"title": "Chronic daily UV dose for typical school children, 2005-2014",
"save": False})
map = map.collect_data().calculate_map()
map.plot_map()
"""
def __init__(self,units="SED",
exposure_schedule=1,
statistic="mean",
bin_width = None,
date_selection=None,
map_options=None,
src_filename_format='UVery.AS_ch02.lonlat_yyyy01010000.nc',
src_directory='C:/Data/UV/'):
# assigning options to fields in class with a few basic checks
self.units = units
self.exposure_schedule=np.array(exposure_schedule)
if len(np.atleast_1d(self.exposure_schedule)) == 1 :
self.exposure_schedule = np.repeat(self.exposure_schedule,24)
self.statistic = statistic
self.map_options = {
"title" : "Test map",
"save" : True,
"img_size" : [20,15],
"img_dpi" : 300,
"img_dir" : "",
"img_filename" : "timestamp",
"img_filetype" : "png",
"brdr_nation" : True,
"brdr_nation_rgba" : [0,0,0,0],
"brdr_state" : False,
"brdr_state_rgba" : [0,0,0,0.67],
"cmap" : "jet",
"cmap_limits" : None,
"cbar" : True,
"cbar_limits" : None
}
if map_options is not None :
self.map_options.update(map_options)
self.src_filename_format = src_filename_format
self.src_directory = src_directory
self.date_selection = date_selection
if bin_width is None :
self.bin_width = {
"SED" : 0.1,
"J m-2" : 10,
"UVI" : 0.1,
"W m-2" : 0.0025,
"mW m-2" : 2.5
}[self.units]
else :
self.bin_width = bin_width
def collect_data(self, src_directory=None,src_filename_format=None,
    date_selection=None,units=None,exposure_schedule=None,bin_width=None) :
    """Loads and manipulates data into histograms for each pixel of the underlying data

    In order to handle large amounts of data without exceeding memory limitations, files are
    loaded one at a time and the time dimension is removed, either by calculating daily doses
    or by simply taking the data as is. The resulting information is then stored not as a
    list of specific values but rather binned into a histogram for each pixel. This process
    is repeated for each file required by the user input, building up the pixel histograms
    with more information that does not require additional memory.

    Parameters
    ----------
    src_filename_format : str
        Describes the filename of the netCDF files containing the data with 'yyyy' in place
        of the year.
    src_directory : str
        The directory where the data is stored. Must end with a slash.
    date_selection : list of dates
        The dates for which the irradiances are retrieved or the daily doses are calculated.
        Defaults to None whereby the program selects all data within the src_directory that
        matches the src_filename_format.
    units : str
        Name of units of desired output. This also indicates whether daily doses must be
        calculated or not. Units of "SED", "J m-2", or "UVIh" will produce daily doses,
        units of "UVI", "W m-2" or "mW m-2" will not.
    exposure_schedule : array
        A vector of values describing the relative exposure of each hour of the day. 0 indicates no
        exposure, 1 indicates full exposure, and a fractional value such as 0.5 would indicate
        exposure for a total of 30 minutes within the hour, or a 50% partial exposure for the full
        hour, or anything equivalent. Values greater than 1 are allowed. Must have a length of 24 (for
        each hour of the day) although a length of 1 is also allowed, in which case that number will
        be immediately replicated across a 24-length vector. When not calculating doses, hours with
        any non-zero entry in this vector are included, with the irradiance values being
        multiplied by the corresponding non-zero value in the exposure schedule.
    bin_width : float
        The width of each bin in the pixel histogram. Value assumed to be in the same units as
        defined by the units parameter. *Making bin_width excessively small can lead to high
        memory usage,* consider the underlying accuracy of the source data and be sure not to
        substantially exceed its precision with this parameter.

    Returns
    -------
    python_tamer.ExposureMap
        The input ExposureMap object is appended with new fields, `pix_hist` contains
        the counts for the histogram, and `bin_edges`, `bin_centers`, and `num_bins`
        all serve as metadata for the pixel histograms. `lat` and `lon` are also
        added from the multi-file dataset to inform the pixel locations for map making
        further down the typical pipeline.

    Example
    -------
    The example code below shows how an ExposureMap class can be declared with the default parameters that
    can then be later redefined by collect_data() and the other class functions. ::

        import python_tamer as pt
        import pandas as pd
        import numpy as np
        src_directory = 'C:/enter_your_src_directory_here'
        ER = pt.ER_Vernez_2015("Forehead","Standing") # Long-term average ER for foreheads in standing posture
        map = pt.ExposureMap()
        map = map.collect_data(
            src_directory=src_directory,
            units = "J m-2",
            exposure_schedule = np.array([0  ,0  ,0  ,0  ,0  ,0  ,
                                          0  ,0  ,0.5,0  ,0.5,0  ,
                                          0.5,0.5,0  ,0  ,0.5,0  ,
                                          0  ,0  ,0  ,0  ,0  ,0  ])*ER,
            bin_width = 25,
            date_selection = pd.date_range(start="2005-01-01",end="2014-12-31")
        )
        map = map.calculate_map(statistic = "mean")
        map.plot_map(map_options={"title": "Chronic daily UV dose for typical school children, 2005-2014",
                                  "save": False})
    """
    # Any argument given here overrides the value stored on the object.
    # TODO: There must be a better way to do this
    if not (src_directory is None) :
        self.src_directory = src_directory
    if not (src_filename_format is None) :
        self.src_filename_format = src_filename_format
    if not (date_selection is None) :
        self.date_selection = date_selection
    if not (units is None) :
        self.units = units
    if not (exposure_schedule is None) :
        self.exposure_schedule = exposure_schedule
    if not (bin_width is None) :
        self.bin_width = bin_width

    # first we read the src_directory to check the total number of unique years available
    data_dir_contents = os.listdir(self.src_directory)
    # TODO: improve jankiness of this format-matching search for filenames
    char_year = self.src_filename_format.find('yyyy')
    dataset_years = [ x for x in data_dir_contents if re.findall(self.src_filename_format.replace("yyyy","[0-9]{4}"),x)]
    # the 4 characters at the 'yyyy' position of each matching filename are the year
    dataset_years = [ int(x[char_year:char_year+4]) for x in dataset_years ]

    # Now we can handle default options like "all"
    if type(self.date_selection) == str and self.date_selection == "all" :
        date_selection = pd.date_range(start=str(dataset_years[0])+"-01-01",
            end=str(dataset_years[-1])+"-12-31")
    else :
        date_selection = self.date_selection # TODO: much more interpretation options here

    #now we find unique years
    list_of_years = sorted(set(date_selection.year))

    # one netCDF file per year; processed sequentially to bound memory usage
    for i in range(len(list_of_years)) :
        year = list_of_years[i]
        print("Processing year "+str(year)) #should use logging, don't yet know how
        dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year)))
        dataset.set_auto_mask(False) #to get normal arrays (faster than default masked arrays)

        if dataset.dimensions['time'].size == 24 :
            # needed if just a single day
            time_subset = [True for i in range(dataset.dimensions['time'].size)]
        else :
            # Next we pull a subset from the netCDF file
            # declare false array with same length of time dimension from netCDF
            time_subset = [False for i in range(dataset.dimensions['time'].size)]
            # reshape false array to have first dimension 24 (hours in day)
            # NOTE(review): assert_data_shape_24 comes from .subroutines (not
            # shown here) — assumed to reshape a flat hourly axis to (24, days)
            time_subset = assert_data_shape_24(time_subset)
            # set the appropriate days as true
            time_subset[:,date_selection[date_selection.year == year].dayofyear-1] = True
            # flatten time_subset array back to one dimension
            time_subset = time_subset.flatten(order='F')

        # load subset of data
        print("   Slicing netcdf data with time subset")
        data = dataset['UV_AS'][time_subset,:,:] #work in UVI by default because it's easy to read
        # TODO: check units of dataset files, CF conventions for UVI or W/m2

        # now to calculate doses if requested
        if self.units in ["SED","J m-2","UVIh"] :
            # if calculating doses
            print('   Calculating doses')
            data = assert_data_shape_24(data)
            # weighted sum over the hour axis collapses time into daily doses
            data = np.sum(np.reshape(self.exposure_schedule,[24,1,1,1]) * data,axis=0)

        elif (self.exposure_schedule != np.ones(24)).any() :
            # assume elsewise calculating intensity (i.e. UV-index) then limit data selection according
            # to schedule (remembering that default schedule is just ones)
            print('   Slicing data with exposure schedule')
            # reshape so first dimension is 24 hours
            data = assert_data_shape_24(data)
            # select only those hours with nonzero entry in exposure schedule
            data = data[self.exposure_schedule != 0,:,:,:]
            # select nonzero values from exposure schedule
            exposure_schedule_nonzero = self.exposure_schedule[self.exposure_schedule != 0]

            # if any nonzero entries aren't 1, multiply data accordingly
            if (exposure_schedule_nonzero != 1).any() :
                data *= np.reshape(exposure_schedule_nonzero,[len(exposure_schedule_nonzero),1,1,1])

            # recombine first two dimensions (hour and day) back into time ready for histogram
            data = assert_data_shape_24(data,reverse=True)

        # now multiply data by conversion factor according to desired units
        # TODO: Should expand upon this in reference files
        data *= {"SED":0.9, "J m-2":90, "UVIh":1, "UVI":1, "W m-2":0.025, "mW m-2":25}[self.units]

        # if this is the first iteration, declare a hist
        if i == 0 :
            # seems like useful metadata to know bin n and edges
            # TODO: reconsider where this belongs in the code (__init__?)
            # "+ 2" leaves headroom above the observed maximum value
            self.num_bins = int(np.nanmax(data) // self.bin_width ) + 2
            self.bin_edges = (np.array(range(self.num_bins+1)) - 0.5) * self.bin_width
            # this form allows for weird custom bin edges, but probably will never use that
            self.bin_centers = self.bin_edges[:-1] + 0.5 * np.diff(self.bin_edges)

            # TODO: think about possible cases where dimensions could differ
            # int16 keeps the histogram array small (counts per pixel per bin)
            self.pix_hist=np.zeros([self.num_bins,
                np.shape(data)[-2],np.shape(data)[-1]], dtype=np.int16)

            # TODO: this should also be done by some initial dataset analysis, but that's a drastic
            # design overhaul
            self.lat = dataset['lat'][:]
            self.lon = dataset['lon'][:]
        else :
            new_num_bins = int(np.nanmax(data) // self.bin_width) + 2 - self.num_bins
            # check if new data requires extra bins in pix_hist
            if new_num_bins > 0 :
                # append zeros to pix hist to make room for larger values
                self.pix_hist = np.concatenate((self.pix_hist,np.zeros(
                    [new_num_bins,np.shape(self.pix_hist)[-2],np.shape(self.pix_hist)[-1]],
                    dtype=np.int16)),axis=0)
                # update bin information
                self.num_bins = self.num_bins + new_num_bins
                self.bin_edges = (np.array(range(self.num_bins+1)) - 0.5) * self.bin_width
                self.bin_centers = self.bin_edges[:-1] + 0.5 * np.diff(self.bin_edges)

        # TODO: Add check in case bins get "full" (i.e. approach int16 max value)
        # now put data into hist using apply_along_axis to perform histogram for each pixel
        print("   Calculating and adding to pixel histograms")
        self.pix_hist[:,:,:] += np.apply_along_axis(lambda x:
            np.histogram(x,bins=self.bin_edges)[0],0,data)

    return self
def calculate_map(self,pix_hist=None,statistic=None,bin_centers=None) :
    """Calculates statistical descriptor values for pixel histograms to produce a map

    This function interprets the statistic string, which can either be a simple command
    such as "mean" or a percentile request such as "95prct". The corresponding function is
    applied to each pixel of the pix_hist object within the ExposureMap class, essentially
    removing the first dimension and resulting in a straightforward map to be plotted.

    Parameters
    ----------
    pix_hist : array
        A 3D array with the first dimension containing vectors of counts for histograms
        and the next two dimensions serving as pixel coordinates. See
        `ExposureMap.collect_data()` for more information.
    statistic : str
        The statistical descriptor to be calculated from the pixel histograms to be later
        represented on the rendered map. Accepted keywords are:
        "mean", "median" or "med", "sd" or "std" or "stdev", "max" or "maximum", "min" or
        "minimum", as well as "prct" or "percentile" preceded by a number between
        0 and 100 (e.g. "95prct").
    bin_centers : array
        The central numeric values corresponding to the bins in pix_hist. The
        `ExposureMap.collect_data` function typically calculates these values from the
        given `bin_width` input.

    Returns
    -------
    python_tamer.ExposureMap
        The ExposureMap class object is appended with a map field containing a 2D array
    """
    # arguments override previously stored fields
    if not (pix_hist is None) :
        self.pix_hist = pix_hist
    if not (statistic is None) :
        self.statistic = statistic
    if not (bin_centers is None) :
        self.bin_centers = bin_centers

    # Begin by defining the easy options that only require two inputs
    basic_descriptor_functions = {
        "mean": hist_mean,
        "median": lambda x,y: hist_percentile(x,y,0.5),
        "med": lambda x,y: hist_percentile(x,y,0.5),
        "sd": hist_stdev,
        "std": hist_stdev,
        "stdev": hist_stdev,
        "max": hist_max,
        "maximum": hist_max,
        "min": hist_min,
        "minimum":hist_min
    }

    statistic_lower = self.statistic.lower().strip()
    # percentile requests look like "5prct", "95prct", or "42 percentile";
    # the previous implementation assumed a two-digit prefix and then read
    # only the first digit ("95prct" became the 9th percentile), so parse
    # with a regex instead
    percentile_match = re.fullmatch(r"([0-9]{1,3})\s*(?:prct|percentile)", statistic_lower)

    # we can check if the chosen statistic is basic or advanced
    if statistic_lower in basic_descriptor_functions.keys() :
        # in this case, we can simply select the basic function from the dict...
        descriptor_function = basic_descriptor_functions[statistic_lower]
        # ...and execute it across the map
        self.map = np.apply_along_axis(lambda x: descriptor_function(x,self.bin_centers),0,self.pix_hist)
    elif percentile_match :
        prct = int(percentile_match.group(1)) / 100
        self.map = np.apply_along_axis(lambda x: hist_percentile(x,self.bin_centers,prct),0,self.pix_hist)
    else :
        # TODO: interpret self.statistic to build advanced formula functions
        print("WARNING: ExposureMap.statistic not recognised.")

    return self
def plot_map(self,map_options=None) :
    """Renders and optionally saves a map of the ``map`` field in an ExposureMap object

    This function caps off the typical workflow for the ExposureMap class by rendering the
    contents of the map field. Many aesthetic factors are accounted for, contained within
    the `ExposureMap.map_options` dictionary.

    Parameters
    ----------
    map_options : dict, optional
        A collection of many typical options such as image and font sizes, colormaps, etc.
        The full range of options is listed below with their default values.

        "title" : "Test map"
            The title to be rendered above the map. Can be left blank for no title.
        "save" : True
            Boolean to declare whether the map should be saved as an image file or not.
        "img_size" : [20,15]
            The size [width,height] of the image in cm.
        "img_dpi" : 300
            The dots per inch of the saved image.
        "img_dir" : ""
            The directory for the image to be saved in, leaving it blank should result
            in images being saved in the working directory.
        "img_filename" : "timestamp"
            The image filename as a string. The default value of "timestamp" is a keyword
            indicating that the function should generate a filename based on the time at
            the moment of the calculation, specified with the format %Y%m%d_%H%M%S_%f
            which includes millisecond precision. Any other string is used directly
            as the filename.
        "img_filetype" : "png"
            The image filetype, must be acceptable to `matplotlib.pyplot.savefig()`.
        "brdr_nation" : True
            Boolean for drawing national borders on the map.
        "brdr_nation_rgba" : [0,0,0,0]
            The red, green, blue, and alpha values for the national borders.
        "brdr_state" : False
            Boolean for drawing state borders as defined by Natural Earth dataset.
        "brdr_state_rgba" : [0,0,0,0.67]
            The red, green, blue, and alpha values for the state borders.
        "cmap" : "jet"
            The name of the colourmap to be used when rendering the map.
        "cmap_limits" : None
            The numeric limits of the colourmap. Defaults to None, where the lower
            and upper limits of the plotted data are used as the colourmap limits.
        "cbar" : True
            Boolean for rendering a colourbar.
        "cbar_limits" : None
            The numeric limits of the colourbar. Defaults to None, where the lower
            and upper limits of the plotted data are used as the colourbar limits.

    Returns
    -------
    python_tamer.ExposureMap
        Returns the ExposureMap object that was input with an updated map_options
        field (if the user has specified any changes to the default map_options).
    """
    if map_options is not None :
        self.map_options.update(map_options)

    # TODO: Add custom sizing and resolution specifications
    fig = plt.figure(figsize=(self.map_options['img_size'][0]/2.54,
        self.map_options['img_size'][1]/2.54))

    # TODO: Accept custom projections
    proj = ccrs.Mercator()

    # TODO: Add support for multiple plots per figure (too complex? consider use cases)
    ax = fig.add_subplot(1,1,1,projection = proj)

    # TODO: Increase flexibility of borders consideration
    if self.map_options['brdr_nation'] :
        ax.add_feature(cfeat.BORDERS)

    # TODO: Consider first-last versus min-max - how can we avoid accidentally flipping images
    extents=[self.lon[0],self.lon[-1],self.lat[0],self.lat[-1]]
    ax.set_extent(extents)

    # Confusingly, this code correctly translates the lat/lon limits into the projected coordinates
    extents_proj = proj.transform_points(ccrs.Geodetic(),np.array(extents[:2]),np.array(extents[2:]))
    extents_proj = extents_proj[:,:2].flatten(order='F')

    # TODO: Custom colormaps, interpolation, cropping
    im = ax.imshow(self.map,extent=extents_proj,transform=proj,origin='lower',
        cmap=self.map_options['cmap'],interpolation='bicubic')

    # TODO: Add more advanced title interpretation (i.e. smart date placeholder)
    if self.map_options['title'] is not None :
        ax.set_title(self.map_options['title'])

    # TODO: Add support for horizontal
    if self.map_options['cbar'] :
        cb = plt.colorbar(im, ax=ax, orientation='horizontal',pad=0.05,fraction=0.05)
        cb.ax.set_xlabel(self.units)

    # TODO: Add plot title, small textbox description, copyright from dataset, ticks and gridlines
    if self.map_options['save'] :
        # Generate timestamp filename if relying on default, otherwise honour
        # the user-supplied filename. (Previously a custom img_filename left
        # the local variable unassigned, raising NameError at savefig.)
        if self.map_options['img_filename'] == "timestamp" :
            img_filename=dt.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
        else :
            img_filename = self.map_options['img_filename']
        plt.savefig(self.map_options['img_dir']+img_filename+"."+self.map_options['img_filetype'],
            bbox_inches="tight",dpi=self.map_options['img_dpi'])

    plt.show()

    return self
class ExposureMapSequence :
""" Class for generating multiple Exposure Maps in a single operation
The ExposureMapSequence class is a framework for generating multiple maps following
a given sequence. The basic workflow begins by declaring an object of this class and
collecting the data from the source NetCDF files. The process is designed with
memory efficiency in mind, so data is loaded one year at a time and put into pixel
histograms akin to the ExposureMap class behaviour. However, in this class we allow for
multiple histograms to be stored within a single ExposureMapSequence object. Next,
the maps are calculated by the calculate_maps function. Multiple maps can be calculated for each histogram if the user
has specified multiple statistics that they want to calculate. Lastly, the maps are
rendered and saved by the save_maps function.
Parameters
----------
src_filename_format : str
Describes the filename of the netCDF files containing the data with 'yyyy' in place
of the year.
src_directory : str
The directory where the data is stored. Must end with a slash.
Example
-------
In this example, we produce a basic sequence of monthly average doses for 2020::
example = ExposureMapSequence()
example = example.collect_data('monthly',year_selection=[2020],units=["SED"])
example = example.calculate_maps(statistic='Mean')
example.save_maps(save=True,show=True)
In this example, we produce a basic sequence of annual average doses for each year
of the dataset::
example = ExposureMapSequence()
example = example.collect_data(['annual'],year_selection=[0],units=["SED"])
example = example.calculate_maps(statistic='Mean')
example.save_maps(save=True,show=True)
"""
def __init__(self,
src_filename_format='UVery.AS_ch02.lonlat_yyyy01010000.nc',
src_directory='C:/Data/UV/',
units=None,
bin_width=None,
map_options=None,
):
# start with data location to quickly get some metadata
self.src_filename_format = src_filename_format
self.src_directory = src_directory
# first we read the src_directory to check the total number of unique years available
data_dir_contents = os.listdir(self.src_directory)
# match filename format to find years
dataset_years = [ x for x in data_dir_contents
if re.findall(self.src_filename_format.replace("yyyy","[1-2][0-9]{3}"),x)]
char_year = self.src_filename_format.find('yyyy')
self.dataset_years = [ int(x[char_year:char_year+4]) for x in dataset_years ]
self.bin_width = bin_width
self.units = units
# declare an empty dictionary for map options
self.map_options={}
# if any input, update dictionary
if map_options is not None :
self.map_options = self.map_options.update(map_options)
def interpret_parameters(self) :
    """Interprets some parameter inputs and adjusts for consistency

    This function will check that parameters are correctly entered and do some basic interpretation.
    It checks the exposure_schedule, year_selection, units, and bin_width input. All input is converted
    to lists as required (one list entry per histogram/map in the sequence).
    """
    # --- exposure_schedule: normalise to a list of length-24 numpy arrays ---
    if hasattr(self,'exposure_schedule') and self.exposure_schedule is not None :
        if isinstance(self.exposure_schedule,float) :
            # a lone float is a constant exposure fraction for every hour
            self.exposure_schedule = [np.repeat(self.exposure_schedule,24)]
        elif isinstance(self.exposure_schedule,int) :
            # a lone int selects a single hour of full exposure
            temp = self.exposure_schedule
            self.exposure_schedule = [np.zeros(24)]
            self.exposure_schedule[0][temp] = 1
        elif isinstance(self.exposure_schedule,dict) :
            # a dict maps hour-of-day (key) to exposure fraction (value)
            temp = self.exposure_schedule
            self.exposure_schedule = [np.zeros(24)]
            for x in temp.items() :
                self.exposure_schedule[0][int(x[0])] = x[1]
        elif isinstance(self.exposure_schedule,np.ndarray) :
            if len(np.shape(self.exposure_schedule)) == 1 and np.shape(self.exposure_schedule)[0] == 24 :
                self.exposure_schedule = [self.exposure_schedule]
            elif len(np.shape(self.exposure_schedule)) == 2 and np.shape(self.exposure_schedule)[1] == 24 :
                # split an array of multiple schedules into a list of single schedule arrays
                self.exposure_schedule = np.split(self.exposure_schedule,np.shape(self.exposure_schedule)[0])
            else :
                raise ValueError("Exposure schedule not a comprehensible numpy array, " +
                    "must be length 24 in first or second dimension")
        elif isinstance(self.exposure_schedule,list) :
            # a flat list of 24 numbers is one schedule, not 24 schedules
            if len(self.exposure_schedule) == 24 and all(isinstance(x,(int,float)) for x in self.exposure_schedule) :
                self.exposure_schedule = [np.array(self.exposure_schedule)]
            # each list entry is normalised with the same rules as above
            for i in range(len(self.exposure_schedule)) :
                if isinstance(self.exposure_schedule[i],float) :
                    self.exposure_schedule[i] = np.repeat(self.exposure_schedule[i],24)
                elif isinstance(self.exposure_schedule[i],int) :
                    temp = self.exposure_schedule[i]
                    self.exposure_schedule[i] = np.zeros(24)
                    self.exposure_schedule[i][temp] = 1
                elif isinstance(self.exposure_schedule[i],dict) :
                    temp = self.exposure_schedule[i]
                    self.exposure_schedule[i] = np.zeros(24)
                    for x in temp.items() :
                        self.exposure_schedule[i][int(x[0])] = x[1]
                elif isinstance(self.exposure_schedule[i],np.ndarray) :
                    if not (len(np.shape(self.exposure_schedule[i])) == 1
                    and np.shape(self.exposure_schedule[i])[0] == 24 ):
                        raise ValueError("Exposure schedule list contains an incomprehensible entry, " +
                            "a numpy array that is not length 24")
                elif isinstance(self.exposure_schedule[i],list) :
                    if len(self.exposure_schedule[i]) == 24 :
                        self.exposure_schedule[i] = np.array(self.exposure_schedule[i])
                    else :
                        raise ValueError("Exposure schedule list contains an incomprehensible entry, " +
                            "a list that is not length 24")
                else :
                    raise TypeError("Exposure schedule list contains an incomprehensible entry")
        else :
            raise TypeError("Exposure schedule must be a list of length-24 numpy arrays or similar")

    ######################################################################################################
    # --- year_selection: normalise to a list of numpy arrays of years; the
    # sentinel value 0 expands to every year available in the dataset ---
    if hasattr(self,'year_selection') and self.year_selection is not None :
        if isinstance(self.year_selection,int) :
            if self.year_selection==0:
                # 0 alone: one single-year entry per available dataset year
                self.year_selection = [np.array([x]) for x in self.dataset_years]
            else:
                self.year_selection = [np.array([self.year_selection])]
        elif isinstance(self.year_selection,np.ndarray) :
            if len(np.shape(self.year_selection)) == 1 :
                self.year_selection = [self.year_selection]
            else :
                raise ValueError("Year selection should be a list of numpy arrays, " +
                    "provided numpy array has incomprehensible shape")
        elif isinstance(self.year_selection,list) :
            if all([isinstance(x,int) for x in self.year_selection]) and all(x!=0 for x in self.year_selection) :
                # a plain list of nonzero ints is a single multi-year entry
                self.year_selection = [np.array(self.year_selection)]
            else :
                # mixed content: walk the list, expanding each 0 sentinel into
                # one entry per dataset year; i tracks the write position while
                # k counts the original entries
                i=0
                for k in range(len(self.year_selection)) :
                    if isinstance(self.year_selection[i],int) :
                        if self.year_selection[i] == 0 :
                            temp = self.year_selection[0:i] + [np.array([x]) for x in self.dataset_years]
                            if i != len(self.year_selection)-1 :
                                temp = temp + self.year_selection[i+1:]
                            self.year_selection = temp
                            # skip past the entries just inserted
                            i = i + len(self.dataset_years) - 1
                        else :
                            self.year_selection[i] = np.array([self.year_selection[i]])
                    elif isinstance(self.year_selection[i],list) :
                        self.year_selection[i] = np.array(self.year_selection[i])
                    elif not isinstance(self.year_selection[i],np.ndarray) :
                        raise TypeError("Year selection list must contain ints, lists, or numpy arrays")
                    i=i+1
        else :
            raise TypeError("Year selection must be an int, numpy array, or list of numpy arrays")
        # an all-zero array entry also means "all dataset years"
        for i in range(len(self.year_selection)) :
            if all(self.year_selection[i] == 0) :
                self.year_selection[i] = np.array(self.dataset_years)

    #####################################################################################################
    # --- units: normalise to a list of accepted unit strings ---
    if hasattr(self,'units') and self.units is not None :
        if isinstance(self.units,str) :
            self.units = [self.units]
        elif isinstance(self.units,list) :
            if not all(isinstance(x,str) for x in self.units) :
                raise TypeError("Units input must be a list of strings")
        else :
            raise TypeError("Units input must be a list of strings")
        for i in range(len(self.units)) :
            if not isinstance(self.units[i],str) :
                raise TypeError("Units input must be a list of strings")
            if self.units[i] not in ["SED","UVIh","UVI","J m-2","W m-2","mW m-2"] :
                raise ValueError("Units input must be list of accepted unit strings, " +
                    "those being SED, UVIh, J m-2, UVI, W m-2, or mW m-2")

    # --- bin_width: default per-unit widths when None, wrap a scalar in a list ---
    if hasattr(self,'bin_width') :
        if self.bin_width is None :
            self.bin_width = []
            for unit in self.units :
                self.bin_width.append({
                    "SED" : 0.1,
                    "J m-2" : 10,
                    "UVI" : 0.1,
                    "W m-2" : 0.0025,
                    "mW m-2" : 2.5
                }[unit])
        elif isinstance(self.bin_width,(int,float)) :
            self.bin_width = [self.bin_width]

    return self
def collect_data(self,
                 day_selection,
                 exposure_schedule=None,
                 year_selection=None,
                 units=("SED",),
                 bin_width=None):
    """Loads data into multiple pixel histograms

    This function loads all of the necessary data and compiles it into one or
    multiple histograms. All parameters are designed to be interpreted as lists
    of arrays or lists of strings, where each list entry corresponds to a
    different histogram in the sequence. So to create a sequence of maps
    corresponding to the months of the year, the day_selection input would be a
    list of 12 arrays, the first containing numbers from 1 to 31, the second
    containing numbers from 32 to 59, and so on.

    The user specifies the day_selection and the year_selection as two separate
    numerical inputs, rather than specifying dates. This makes the interpretation
    of the sequence simpler. However, to simplify the user experience, the
    day_selection input can include keywords to be automatically interpreted as
    days of the year.

    Parameters
    ----------
    day_selection : list, str, array
        A list of arrays and/or strings. Keywords interpretable in such a list
        include the (english) names of the 12 months (at least the first three
        letters), the names of the four seasons (fall or autumn is accepted),
        or the words "year" or "annual" to indicate the full year. These
        keywords are replaced by the corresponding array of days in the year.
        Note that the 29th of February is removed from consideration should it
        arise. Note also that the seasons are the meteorological seasons, i.e.
        the three month blocks JJA, SON, DJF, and MAM for summer, autumn,
        winter, and spring respectively.

        The user can alternatively enter a special string instead of a list.
        The string "monthly" generates a list of 12 arrays according to the
        months whereas the string "seasons" generates a list of 4 arrays
        according to the four seasons.

    exposure_schedule : list, float, int, dict, array, optional
        Defaults to a uniform schedule of ones, i.e. [1.0].

        If the user enters a float, this float value will be repeated across a
        length-24 array to make the exposure schedule. For example, entering
        1.0 (not 1) will generate an array of 24 ones.

        If the user enters an int, i, a length-24 array of zeroes will be
        generated with the ith entry being set to 1. For example, entering 1
        (not 1.0) will generate an array that reads [0,1,0,0...] (length 24).

        If the user enters a dict, they can specify the values of a few
        particular entries in a length-24 array where unspecified entries have
        a value of zero. For example, entering {0:0.5, 2:0.8, 3:1} will
        generate an array that reads [0.5, 0, 0.8, 1, 0...] (length 24).

        If the user enters an array, it must be 1 dimensional with length 24
        or 2 dimensional with the second dimension having length 24 (allowing
        the user to specify multiple schedules).

        If the user enters a list, each entry of that list is interpreted using
        the rules listed above, with the caveat that arrays within a list cannot
        be 2 dimensional.

    year_selection : list, array, int, optional
        The years across which the data should be pulled. Input should be a list
        of arrays of ints corresponding to years available in the dataset. Each
        list entry corresponds to a pixel histogram. The user can enter 0 as a
        shortcut for using all available years. Defaults to [0].

    units : list, optional
        The list of units for each pixel histogram. Acceptable strings are "SED",
        "J m-2", "UVIh", "UVI", "W m-2", and "mW m-2". Defaults to SED.
        Passing None leaves any previously set units untouched.

    bin_width : list, optional
        The bin width for each histogram. By default, these values are defined
        automatically according to the units input.

    Returns
    -------
    ExposureMapSequence
        The object has the hist_specs and hists fields added detailing the pixel
        histograms.

    Example
    -------
    Produce a basic sequence of monthly average doses for 2020::

        example = ExposureMapSequence()
        example = example.collect_data('monthly',year_selection=[2020],units=["SED"])
        example = example.calculate_maps(statistic='Mean')
        example.save_maps(save=True,show=True)
    """
    # Mutable default arguments are a classic Python pitfall (the default
    # object is shared between calls); create fresh defaults per call instead.
    if exposure_schedule is None :
        exposure_schedule = [1.0]
    if year_selection is None :
        year_selection = [0]
    # this subroutine handles keyword inputs (monthly, seasonal, etc)
    self.day_selection, self.day_input_flt, self.day_nonstring = str2daysofyear(day_selection)
    self.exposure_schedule = exposure_schedule
    self.year_selection = year_selection
    if units is not None :
        # copy list/tuple input so later in-place edits of self.units cannot
        # corrupt the caller's list or the shared default
        self.units = list(units) if isinstance(units,(list,tuple)) else units
    if bin_width is not None :
        self.bin_width = bin_width
    self = self.interpret_parameters()
    ############################################################################
    # Every input is either length 1 (shared by all histograms) or length
    # num_hists (one entry per histogram).
    lengths = {'day_selection' : len(self.day_selection),
        'exposure_schedule' : len(self.exposure_schedule),
        'year_selection' : len(self.year_selection),
        'units' : len(self.units),
        'bin_width' : len(self.bin_width)}
    self.num_hists = max(lengths.items(), key=lambda x: x[1])[1]
    assert all(x == self.num_hists or x == 1 for x in lengths.values()), (
        "Inputs must be lists of length 1 or num_hists")
    self.iterators = [x[0] for x in lengths.items() if x[1]==self.num_hists]
    self.hist_specs = []
    for i in range(self.num_hists) :
        hist_spec = {
            'day_selection' : self.day_selection[0],
            'exposure_schedule' : self.exposure_schedule[0],
            'year_selection' : self.year_selection[0],
            'units' : self.units[0],
            'bin_width' : self.bin_width[0]}
        for x in self.iterators :
            hist_spec[x] = self.__dict__[x][i]
        self.hist_specs = self.hist_specs + [hist_spec]
    # find unique years to be loaded (probably all years but have to check)
    unique_years = set(self.year_selection[0])
    if len(self.year_selection) > 1 :
        for i in range(1,len(self.year_selection)) :
            unique_years.update(self.year_selection[i])
    unique_years = sorted(unique_years)
    # declare empty hists
    self.hists = [None for x in range(self.num_hists)]
    for i in range(len(unique_years)) :
        year = unique_years[i]
        print("Processing year "+str(year)) #should use logging, don't yet know how
        dataset=nc.Dataset(self.src_directory+self.src_filename_format.replace('yyyy',str(year)))
        dataset.set_auto_mask(False) #to get normal arrays (faster than default masked arrays)
        if i == 0 :
            # TODO: this should also be done by some initial dataset analysis, but that's a drastic
            # design overhaul
            self.lat = dataset['lat'][:]
            self.lon = dataset['lon'][:]
        # now to determine the unique days for the specific year
        unique_days = set()
        for j in range(self.num_hists) :
            if year in self.hist_specs[j]['year_selection'] :
                unique_days.update(self.hist_specs[j]['day_selection'])
        unique_days = sorted(unique_days)
        # TODO: when metadata fixed, update this to actually interpret dates (cftime)
        # reformat to index for netCDF
        nc_day_sel = [False for _ in range(365*24)]
        # reshape false array to have first dimension 24 (hours in day)
        nc_day_sel = assert_data_shape_24(nc_day_sel)
        # set the appropriate days as true
        nc_day_sel[:,np.array(unique_days)-1] = True
        # correct for leap years (skip feb 29)
        if year % 4 == 0 :
            nc_day_sel = np.concatenate(
                (nc_day_sel[:,0:59],np.full((24,1),False),nc_day_sel[:,59:]),axis=1)
        # flatten time_subset array back to one dimension
        nc_day_sel = nc_day_sel.flatten(order='F')
        #load data
        data_year = assert_data_shape_24(dataset['UV_AS'][nc_day_sel,:,:])
        #sort data into histograms
        for j in range(self.num_hists) :
            if year in self.hist_specs[j]['year_selection'] :
                sub_day_sel = [ True if x in self.hist_specs[j]['day_selection']
                    else False for x in unique_days ]
                temp_data = data_year[:,sub_day_sel,:,:]
                # Apply the exposure schedule, differently for doses vs intensity
                if self.hist_specs[j]['units'] in ["SED","J m-2","UVIh"] :
                    # if calculating doses: weighted sum over the hour axis
                    print(' Calculating doses')
                    temp_data = np.sum(np.reshape(
                        self.hist_specs[j]['exposure_schedule'],[24,1,1,1]) * temp_data,axis=0)
                else :
                    # assume elsewise calculating intensity (i.e. UV-index) then limit data selection
                    # to schedule (remembering that default schedule is just ones)
                    print(' Slicing data with exposure schedule')
                    # select only those hours with nonzero entry in exposure schedule
                    temp_data = temp_data[self.hist_specs[j]['exposure_schedule'] != 0,:,:,:]
                    # select nonzero values from exposure schedule
                    exposure_schedule_nonzero = self.hist_specs[j]['exposure_schedule'][
                        self.hist_specs[j]['exposure_schedule'] != 0]
                    # if any nonzero entries aren't 1, multiply data accordingly
                    if (exposure_schedule_nonzero != 1).any() :
                        temp_data *= np.reshape(exposure_schedule_nonzero,[len(exposure_schedule_nonzero),1,1,1])
                    # recombine first two dimensions (hour and day) back into time ready for histogram
                    temp_data = assert_data_shape_24(temp_data,reverse=True)
                # now multiply data by conversion factor according to desired units
                # TODO: Should expand upon this in reference files
                temp_data *= {"SED":0.9, "J m-2":90, "UVIh":1, "UVI":1, "W m-2":0.025, "mW m-2":25}[self.hist_specs[j]['units']]
                # if this is the first iteration, declare a hist
                if 'num_bins' not in self.hist_specs[j] :
                    # seems like useful metadata to know bin n and edges
                    self.hist_specs[j]['num_bins'] = int(np.nanmax(temp_data) // self.hist_specs[j]['bin_width'] ) + 2
                    self.hist_specs[j]['bin_edges'] = (np.array(range(self.hist_specs[j]['num_bins']+1))
                        - 0.5) * self.hist_specs[j]['bin_width']
                    # this form allows for weird custom bin edges, but probably will never use that
                    self.hist_specs[j]['bin_centers'] = (self.hist_specs[j]['bin_edges'][:-1]
                        + 0.5 * np.diff(self.hist_specs[j]['bin_edges']))
                    # TODO: think about possible cases where dimensions could differ
                    self.hists[j]=np.zeros([self.hist_specs[j]['num_bins'],
                        np.shape(temp_data)[-2],np.shape(temp_data)[-1]], dtype=np.int16)
                else :
                    new_num_bins = int(np.nanmax(temp_data) // self.hist_specs[j]['bin_width']) + 2 - self.hist_specs[j]['num_bins']
                    # check if new data requires extra bins in pix_hist
                    if new_num_bins > 0 :
                        # append zeros to pix hist to make room for larger values
                        self.hists[j] = np.concatenate((self.hists[j],np.zeros(
                            [new_num_bins,np.shape(self.hists[j])[-2],np.shape(self.hists[j])[-1]],
                            dtype=np.int16)),axis=0)
                        # update bin information
                        self.hist_specs[j]['num_bins'] = self.hist_specs[j]['num_bins'] + new_num_bins
                        self.hist_specs[j]['bin_edges'] = (np.array(range(self.hist_specs[j]['num_bins']+1))
                            - 0.5) * self.hist_specs[j]['bin_width']
                        self.hist_specs[j]['bin_centers'] = (self.hist_specs[j]['bin_edges'][:-1]
                            + 0.5 * np.diff(self.hist_specs[j]['bin_edges']))
                # TODO: Add check in case bins get "full" (i.e. approach int16 max value)
                # now put data into hist using apply_along_axis to perform histogram for each pixel
                print(" Calculating and adding to pixel histograms")
                self.hists[j][:,:,:] += np.apply_along_axis(lambda x:
                    np.histogram(x,bins=self.hist_specs[j]['bin_edges'])[0],0,temp_data)
    return self
def calculate_maps(self,statistic=None,titles=None,filenames="auto") :
    """Calculate the maps from the pixel histograms

    Computes one map per (statistic, histogram) pair, along with an
    automatically generated title and filename for each, unless the user
    supplies their own. The number of maps is therefore the number of
    histograms times the number of statistics requested.

    Parameters
    ----------
    statistic : list, str
        The statistical descriptor to be calculated from the pixel histograms to be
        later represented on the rendered map. Must contain at least one of these
        keywords: "mean", "median" or "med", "sd" or "std" or "stdev", "max" or
        "maximum", "min" or "minimum". The keyword "prct" or "percentile" is also
        accepted so long as it is preceded by a two-digit integer specifying the
        desired percentile from 01 to 99.
    titles : list, optional
        Manually specified map titles (one string per map); if omitted, titles
        are generated automatically.
    filenames : str, optional
        Filenames are generated to match the titles by default ("auto"), but the
        user can alternatively enter them manually with this parameter.

    Returns
    -------
    ExposureMapSequence
        The object is appended with maps, map_specs, and num_maps fields.
    """
    if statistic is not None :
        self.statistic = statistic
    if isinstance(self.statistic,str) :
        self.statistic = [self.statistic]
    total_maps = self.num_hists * len(self.statistic)
    # allocate a nan-filled stack, one 2D slab per output map
    self.maps = np.full([total_maps] + list(np.shape(self.hists[0])[1:]),np.nan)
    # placeholder labels (string indices) get overwritten in the loop below
    # whenever automatic generation is in play
    self.titles = titles if titles is not None else [str(x) for x in range(total_maps)]
    auto_names = isinstance(filenames,str) and filenames == "auto"
    self.filenames = [str(x) for x in range(total_maps)] if auto_names else filenames
    hist_inds = []
    stat_inds = []
    mapnum = 0
    for stat_idx, stat in enumerate(self.statistic) :
        for hist_idx in range(self.num_hists) :
            spec = self.hist_specs[hist_idx]
            self.maps[mapnum,:,:] = calculate_map_from_hists(
                self.hists[hist_idx],stat,spec['bin_centers'])
            if titles is None :
                if auto_names :
                    self.titles[mapnum], self.filenames[mapnum] = gen_map_title(
                        **{**spec,'statistic':stat},filename=True)
                else :
                    self.titles[mapnum] = gen_map_title(
                        **{**spec,'statistic':stat},filename=False)
            hist_inds.append(hist_idx)
            stat_inds.append(stat_idx)
            mapnum += 1
    self.num_maps = mapnum
    self.map_specs = {'hist' : hist_inds, 'statistic' : stat_inds}
    return self
def save_maps(self,map_options=None,save=None,show=True,match_cmap_limits=True,schedule_diagram=True) :
    """Renders and saves the pre-calculated maps stored in the object

    Thin wrapper around render_map: applies shared map options, optionally
    harmonises the colourmap limits across the whole sequence, and renders
    each map in turn.

    Parameters
    ----------
    map_options : dict, optional
        A dictionary of options merged into self.map_options and forwarded to
        render_map.
    save : bool, optional
        Convenience flag overriding map_options['save'].
    show : bool, optional
        Whether to show each map in a python figure window.
    match_cmap_limits : bool, optional
        If True, use one shared pair of colourmap limits across all maps in
        the sequence.
    schedule_diagram : bool, optional
        If True, a circular diagram is rendered on each map illustrating the
        schedule that generated it.
    """
    if map_options is not None :
        self.map_options.update(map_options)
    if save is not None and isinstance(save,bool) :
        self.map_options['save'] = save
    if match_cmap_limits :
        limits = [np.nanmin(self.maps),np.nanmax(self.maps)]
        # snap the lower limit to zero when it is small relative to the upper
        if limits[0] < 0.1 * limits[1] :
            limits[0] = 0
        self.map_options['cmap_limits'] = limits
    for i in range(self.num_maps) :
        # note: opts aliases self.map_options, so per-map keys accumulate
        # there across iterations (matches original behaviour)
        opts = self.map_options
        opts['title'] = self.titles[i]
        if self.filenames is not None :
            opts['img_filename'] = self.filenames[i]
        hist_idx = self.map_specs['hist'][i]
        if schedule_diagram :
            opts['schedule'] = self.hist_specs[hist_idx]['exposure_schedule']
        render_map(
            self.maps[i,:,:],
            lat=self.lat,
            lon=self.lon,
            cbar_label=self.hist_specs[hist_idx]['units'],
            show=show,
            **opts)
# NOTE(review): mutable default arguments below ([20,15], rgba lists) are shared
# between calls — safe only as long as they are never mutated in place; consider
# switching to tuples.
def render_map(map,
               lat=None,
               lon=None,
               title=None,
               save=True,
               show=True,
               schedule=None,
               schedule_bbox=(-0.03,0,1,0.91),
               img_filename=None,
               img_dir="",
               img_size=[20,15],
               img_dpi=300,
               img_filetype="png",
               brdr_nation=True,
               brdr_nation_rgba=[0,0,0,1],
               brdr_state=True,
               brdr_state_rgba=[0,0,0,0.75],
               cmap="gist_ncar",
               cmap_limits=None,
               cbar=True,
               cbar_limits=None,
               cbar_label=None,
               country_focus="CHE",
               gridlines=True,
               gridlines_dms=False,
               mch_logo=True) :
    """Renders and saves a single map with a wide variety of aesthetic options.

    Plots a 2D lat/lon field on an orthographic projection centred on the data,
    with optional national/state borders, a focus-country mask, a colorbar, an
    inset exposure-schedule clock, and a MeteoSwiss logo.

    Parameters
    ----------
    map : array
        The 2D field to be rendered; indexed [lat, lon] (plotted with
        origin='lower' over the lat/lon extents).
    lat : array, optional
        1D vector of latitudes corresponding to the rows of map.
    lon : array, optional
        1D vector of longitudes corresponding to the columns of map.
    title : str, optional
        Title drawn above the axes; also used to derive the filename when
        saving without an explicit img_filename.
    save : bool, optional
        Whether to write the figure to disk, by default True.
    show : bool, optional
        Whether to display the figure window, by default True.
    schedule : array, optional
        Length-24 exposure schedule; when given, an inset polar "clock"
        diagram of the schedule is drawn (see schedule_clock).
    schedule_bbox : tuple, optional
        bbox_to_anchor for the inset schedule axes, by default (-0.03,0,1,0.91).
    img_filename : str, optional
        Output filename (without extension); "timestamp" or None fall back to
        a title-derived or timestamp-based name.
    img_dir : str, optional
        Directory prefix for the saved image, by default "".
    img_size : list, optional
        Figure size [width, height]; divided by 2.54 for figsize, so the
        values are in centimetres. By default [20,15].
    img_dpi : int, optional
        Resolution of the saved image, by default 300.
    img_filetype : str, optional
        File extension/format for saving, by default "png".
    brdr_nation : bool, optional
        Draw national borders, by default True.
    brdr_nation_rgba : list, optional
        RGBA colour of national borders, by default [0,0,0,1].
    brdr_state : bool, optional
        Draw state/province borders (dashed), by default True.
    brdr_state_rgba : list, optional
        RGBA colour of state borders, by default [0,0,0,0.75].
    cmap : str, optional
        Matplotlib colormap name, by default "gist_ncar".
    cmap_limits : list, optional
        [vmin, vmax] colour limits; autoscaled when None.
    cbar : bool, optional
        Draw a horizontal colorbar, by default True.
    cbar_limits : list, optional
        Currently unused by this function (accepted for interface
        compatibility).
    cbar_label : str, optional
        Label below the colorbar (typically the units).
    country_focus : str, optional
        ISO-3166 alpha-3 code of a country; everything outside it is dimmed
        with a semi-transparent white mask. By default "CHE".
    gridlines : bool, optional
        Draw labelled gridlines, by default True. NOTE(review): the y
        gridline locations are hard-coded to Swiss latitudes ([46..47.5]).
    gridlines_dms : bool, optional
        Use degrees/minutes/seconds gridline labels, by default False.
    mch_logo : bool, optional
        Stamp the MeteoSwiss logo in the lower-right corner, by default True.
    """
    # TODO: Add custom sizing and resolution specifications
    fig = plt.figure(figsize=(img_size[0]/2.54,img_size[1]/2.54))
    # TODO: Accept custom projections
    # proj = ccrs.Mercator()
    # orthographic projection centred on the middle of the data extents
    proj = ccrs.Orthographic(central_longitude=(lon[0]+lon[-1])/2, central_latitude=(lat[0]+lat[-1])/2)
    # TODO: Add support for multiple plots per figure (too complex? consider use cases)
    ax = fig.add_subplot(1,1,1,projection = proj)
    # TODO: Increase flexibility of borders consideration
    if brdr_state :
        state_brdrs = cfeat.NaturalEarthFeature(
            category='cultural',
            name='admin_1_states_provinces_lines',
            scale='10m',
            facecolor='none')
        ax.add_feature(state_brdrs,linestyle="--",edgecolor=tuple(brdr_state_rgba),linewidth=0.5)
    if brdr_nation :
        ax.add_feature(cfeat.BORDERS,edgecolor=tuple(brdr_nation_rgba))
    if country_focus is not None :
        shpfilename = shapereader.natural_earth(resolution='10m',
            category='cultural',name='admin_0_countries')
        reader = shapereader.Reader(shpfilename)
        countries = reader.records()
        # this is a very janky search for Switzerland, but it's ultimately simpler than
        # making geopandas a requirement for the library
        for country in countries :
            if country.attributes['ADM0_A3'] == country_focus :
                break
        assert country.attributes['ADM0_A3'] == country_focus, "country_focus input not recognised"
        poly = country.geometry
        msk_proj = proj.project_geometry (poly, ccrs.Geodetic()) # project geometry to the projection used by stamen
        # plot the mask using semi-transparency (alpha=0.65) on the masked-out portion
        ax.add_geometries( msk_proj, proj, facecolor='white', edgecolor='none', alpha=0.8)
    # TODO: Consider first-last versus min-max - how can we avoid accidentally flipping images
    extents=[lon[0],lon[-1],lat[0],lat[-1]]
    ax.set_extent(extents,crs=ccrs.Geodetic())
    # this code correctly translate the lat/lon limits into the projected coordinates
    extents_proj = proj.transform_points(ccrs.Geodetic(),np.array(extents[:2]),np.array(extents[2:]))
    extents_proj = extents_proj[:,:2].flatten(order='F')
    if gridlines :
        ax.gridlines(draw_labels=True, dms=gridlines_dms, x_inline=False, y_inline=False,linewidth=0.25,
            ylocs=[46,46.5,47,47.5])
    # TODO: Custom colormaps, interpolation, cropping
    # Upscale matrix for better reprojection
    # f = interp2d(lon, lat, map, kind='linear')
    # latnew = np.linspace(lat[0], lat[-1], (len(lat)-1)*3+1)
    # lonnew = np.linspace(lon[0], lon[-1], (len(lon)-1)*3+1)
    # mapnew = f(lonnew, latnew)
    # Upscale matrix for better reprojection
    mapnew = zoom(map,3)
    # show map with given cmap and set cmap limits
    im = ax.imshow(mapnew,extent=extents,transform=ccrs.PlateCarree(),
        origin='lower',cmap=cmap)
    if cmap_limits is not None :
        im.set_clim(cmap_limits[0],cmap_limits[1])
    # colorbar
    # TODO: Add support for horizontal vertical option
    if cbar :
        cb = plt.colorbar(im, ax=ax, orientation='horizontal',pad=0.05,fraction=0.05)
        cb.ax.set_xlabel(cbar_label)
    # show schedule diagram
    if schedule is not None :
        ax2 = inset_axes(ax, width="25%", height="25%", loc=2,
            axes_class = get_projection_class('polar'),
            bbox_to_anchor=tuple(schedule_bbox),
            bbox_transform=ax.transAxes)
        schedule_clock(ax2,schedule,title="Exposure schedule")
    # TODO: Add more advanced title interpretation (i.e. smart date placeholder)
    if title is not None :
        ax.set_title(title)
    if mch_logo :
        ex = ax.get_extent()
        mch_logo_img = plt.imread('python_tamer/mch_logo.png')
        mch_logo_width = 0.15
        mch_logo_pad = 0
        # some maths to work out position, note image aspect ratio 5:1
        mch_extents = [ex[1]-(ex[1]-ex[0])*mch_logo_width-(ex[1]-ex[0])*mch_logo_pad,
            ex[1]-(ex[1]-ex[0])*mch_logo_pad,
            ex[2]+(ex[3]-ex[2])*mch_logo_pad,
            ex[2]+0.2*(ex[1]-ex[0])*mch_logo_width+(ex[3]-ex[2])*mch_logo_pad]
        # zorder puts image on top (behind mask otherwise for some reason)
        ax.imshow(mch_logo_img,extent=mch_extents,zorder=12)
    # TODO: Add plot title, small textbox description, copyright from dataset, ticks and gridlines
    if save :
        # Generate timestamp filename if relying on default
        if img_filename is None :
            if title is not None :
                img_filename = format_filename(title)
            else :
                img_filename=dt.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
        elif img_filename == "timestamp" :
            img_filename=dt.datetime.now().strftime('%Y%m%d_%H%M%S_%f')
        plt.savefig(img_dir+img_filename+"."+img_filetype,
            bbox_inches="tight",dpi=img_dpi)
    if show :
        plt.show()
def schedule_clock(axes,schedule,title=None,title_size=9,center=0.25,rmax=1) :
    """Generates a clock-style representation of an exposure schedule

    Draws a 24-hour polar bar chart ("clock") of an exposure schedule onto
    pre-existing polar axes, with hour 0 at the top and hours running
    clockwise. Bars sit on a solid black central disk of radius `center`.

    Parameters
    ----------
    axes : matplotlib.axes.Axes
        The polar axes upon which the clock will be plotted
    schedule : list or numpy.ndarray
        The exposure schedule - a length-24 vector of hourly exposure proportions
    title : str, optional
        Title of the exposure clock
    title_size : int, optional
        Font size of the title, by default 9
    center : float, optional
        Radius of the solid central disk the bars are drawn on, by default 0.25
    rmax : int, optional
        Radial extent of the plot beyond the central disk, by default 1

    Returns
    -------
    matplotlib.axes.Axes
        The input axes with the clock drawn on them
    """
    axes.bar(
        np.arange(24)/24*2*np.pi,
        schedule,
        width=2*np.pi/24,
        align='edge',
        bottom=center)
    # central disk: radius must track the `center` parameter (was a hard-coded
    # 0.25, which misaligned the disk with the bars for any other center value)
    axes.bar(0,center,width=2*np.pi,color='k')
    # Set the circumference labels
    axes.set_xticks(np.linspace(0, 2*np.pi, 8, endpoint=False))
    axes.set_xticklabels(np.linspace(0, 24, 8, endpoint=False,dtype=int),fontsize=8)
    axes.tick_params(axis='both',which='major', pad=-3)
    axes.set_yticks([0.5+center])
    axes.set_yticklabels(['0.5'],fontsize=5)
    axes.grid(True,color='black',linewidth=0.25)
    # Make the labels go clockwise
    axes.set_theta_direction(-1)
    # Place 0 at the top
    axes.set_theta_offset(np.pi/2)
    # format grid and fake ticks
    for t in np.linspace(0, 2*np.pi, 24, endpoint=False):
        axes.plot([t,t], np.array([0.95,1.1])*rmax+center, lw=0.5, color="k")
    for t in np.linspace(0, 2*np.pi, 8, endpoint=False):
        axes.plot([t,t], np.array([0.9,1.1])*rmax+center, color="k")
    if title is not None :
        axes.set_title(title,fontsize=title_size)
    axes.set_rmax(rmax+center)
    return axes
def gen_map_title(
    statistic = None,
    exposure_schedule=None,
    hour=None,
    units=None,
    year_selection=None,
    day_selection=None,
    filename=False,
    **kwargs) :
    """Generate an automatic map title (and optionally a matching filename)
    from a histogram specification.

    The title combines the statistic, the quantity implied by the units
    (doses vs intensity), the recognised day selection (year, season, or
    month), and the year selection. When filename=True a filesystem-friendly
    name is returned alongside the title.
    """
    dose_units = ['SED','J m-2','UVIh']
    intensity_units = ['W m-2','UVI','mW m-2']
    if units in dose_units :
        # a schedule of all ones means a full-day integral, i.e. a daily dose
        if all(exposure_schedule == np.ones(24)) :
            base = 'UV daily doses'
        else :
            base = 'UV doses'
    elif units in intensity_units :
        if np.sum(exposure_schedule)==1 and all(x in [0,1] for x in exposure_schedule) :
            # user chose just one hour
            hour=np.where(exposure_schedule)[0][0]
            base = 'UV intensity between ' + str(hour) + 'h-' + str(hour+1) + 'h'
        else :
            base = 'UV intensity'
    else :
        raise ValueError('Units must be SED, J m-2, UVIh, UVI, W m-2, or mW m-2')
    title = statistic + ' of ' + base
    # build the lookup of recognised day selections (whole year, seasons,
    # months) from a reference non-leap year; insertion order matters because
    # the first match wins below
    ayear = pd.date_range(start="2010-01-01",end="2010-12-31")
    ds = {'year' : ayear.dayofyear.values.tolist()}
    seasons = {'winter (DJF)' : [12,1,2],
               'spring (MAM)' : [3,4,5],
               'summer (JJA)' : [6,7,8],
               'autumn (SON)' : [9,10,11]}
    for season_name, season_months in seasons.items() :
        ds[season_name] = [d for m in season_months
                           for d in ayear[ayear.month == m].dayofyear.values.tolist()]
    month_names = ["January","February","March","April","May","June","July",
                   "August","September","October","November","December"]
    for month_num, month_name in enumerate(month_names, start=1) :
        ds[month_name] = ayear[ayear.month == month_num].dayofyear.values.tolist()
    day_str = next((name for name, days in ds.items()
                    if set(day_selection) == set(days)), None)
    single_year = len(year_selection) == 1
    consecutive = (not single_year) and all(np.diff(year_selection)==1)
    if day_str == 'year' :
        if single_year :
            title = title + ' for the year of ' + str(year_selection[0])
        elif consecutive :
            title = title + ' for the years ' + str(np.min(year_selection)) + '-' + str(np.max(year_selection))
        else :
            title = title + ' for the years: ' + np.array2string(year_selection,separator=', ')
    elif day_str is not None :
        title = title + ' for ' + day_str
        if single_year :
            title = title + ' ' + str(year_selection[0])
        elif consecutive :
            title = title + ', ' + str(np.min(year_selection)) + '-' + str(np.max(year_selection))
        else :
            title = title + ' ' + np.array2string(year_selection,separator=', ')
    else :
        # TODO: potentially make this workable with "custom day selection" placeholder in title
        raise ValueError("Day selection not recognised, auto-title cannot proceed")
    if not filename :
        return title
    custom = False
    fname = "UV." + units + '.' + statistic + '.'
    if single_year :
        fname = fname + str(year_selection[0]) + '.'
    elif consecutive :
        fname = fname + str(np.min(year_selection)) + '-' + str(np.max(year_selection)) + '.'
    else :
        fname = fname + str(year_selection[0]) + '-custom' + '.'
        custom = True
    # months map to two-digit numbers, seasons all share the "s-" tag
    if day_str in month_names :
        fname = fname + "%02d" % (month_names.index(day_str) + 1)
    elif day_str == 'year' :
        fname = fname + "year"
    else :
        fname = fname + "s-"
    if hour is not None :
        fname = fname + '.' + str(hour) + 'h'
    if custom :
        fname = fname + '.created_' + dt.datetime.now().strftime('%Y%m%d_%H%M%S')
    fname = format_filename(fname)
    return title, fname
def calculate_map_from_hists(pix_hist,statistic,bin_centers) :
    """Collapse a stack of pixel histograms into a single 2D map.

    Parameters
    ----------
    pix_hist : array
        3D array of histogram counts with shape (num_bins, lat, lon).
    statistic : str
        One of "mean", "median"/"med", "sd"/"std"/"stdev", "max"/"maximum",
        "min"/"minimum", or a two-digit percentile such as "50prct",
        "95 prct", or "50percentile".
    bin_centers : array
        Centre value of each histogram bin (length num_bins).

    Returns
    -------
    array
        2D map (lat, lon) of the requested statistic per pixel.

    Raises
    ------
    ValueError
        If the statistic string is not recognised.
    """
    # Begin by defining the easy options that only require two inputs
    basic_descriptor_functions = {
        "mean": hist_mean,
        "median": lambda x,y: hist_percentile(x,y,0.5),
        "med": lambda x,y: hist_percentile(x,y,0.5),
        "sd": hist_stdev,
        "std": hist_stdev,
        "stdev": hist_stdev,
        "max": hist_max,
        "maximum": hist_max,
        "min": hist_min,
        "minimum":hist_min
    }
    stat_key = statistic.lower()
    # we can check if the chosen statistic is basic or advanced
    if stat_key in basic_descriptor_functions :
        # in this case, we can simply select the basic function from the dict...
        descriptor_function = basic_descriptor_functions[stat_key]
        # ...and execute it across the map
        out_map = np.apply_along_axis(lambda x: descriptor_function(x,bin_centers),0,pix_hist)
    elif stat_key.endswith(("prct","percentile")) :
        # BUGFIX: the previous check compared statistic.lower()[3:] to "prct",
        # which only matched strings like "50 prct" (with a space) and silently
        # rejected "50prct"/"50percentile". endswith() accepts both forms.
        prct = int(statistic[0:2]) / 100
        out_map = np.apply_along_axis(lambda x: hist_percentile(x,bin_centers,prct),0,pix_hist)
    else :
        # TODO: interpret self.statistic to build advanced functions (y i k e s)
        raise ValueError("Statistic string not recognised")
    return out_map
|
{"/python_tamer/SpecificDoses.py": ["/python_tamer/subroutines.py"], "/test/test_dummy_library.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"], "/python_tamer/ExposureMap.py": ["/python_tamer/subroutines.py"], "/python_tamer/__init__.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"]}
|
14,446
|
MeteoSwiss/python-TAMER
|
refs/heads/main
|
/setup.py
|
from setuptools import setup, find_packages

# Third-party packages required at runtime.
requirements = ['pandas', 'netcdf4', 'numpy', 'cartopy', 'matplotlib']

# Packages needed only by the setup/test machinery.
setup_requirements = ['pytest-runner']
test_requirements = ['pytest-cov']
extras = {'test': test_requirements}

# Package discovery and bundled data files.
packages = find_packages(include=['python_tamer'], exclude=['test', 'doc'])
package_dir = {'python-TAMER': 'python_tamer'}
package_data = {'test': ["UV_test_data_2018.nc"], 'python_tamer': ["mch_logo.png"]}

setup(
    name='python-TAMER',
    version="0.4.0",
    description="Toolkit for Analysis and Maps of Exposure Risk",
    long_description=open('README.rst').read() + '\n\n' +
                     open('HISTORY.rst').read(),
    author="Todd C. Harris",
    author_email='todd.harris@meteoswiss.ch',
    url='https://github.com/tch521/python-TAMER',
    license="BSD-3-Clause license",
    keywords='python-TAMER UV',
    classifiers=[
        'Development Status :: 3 - Alpha',
        'Intended Audience :: Developers',
        'Intended Audience :: Science/Research',
        'License :: OSI Approved :: BSD License',
        'Natural Language :: English',
        'Programming Language :: Python :: 3',
    ],
    packages=packages,
    package_dir=package_dir,
    package_data=package_data,
    include_package_data=True,
    zip_safe=False,
    entry_points={},
    scripts=[],
    install_requires=requirements,
    setup_requires=setup_requirements,
    tests_require=test_requirements,
    extras_require=extras,
    test_suite='test',
)
|
{"/python_tamer/SpecificDoses.py": ["/python_tamer/subroutines.py"], "/test/test_dummy_library.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"], "/python_tamer/ExposureMap.py": ["/python_tamer/subroutines.py"], "/python_tamer/__init__.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"]}
|
14,447
|
MeteoSwiss/python-TAMER
|
refs/heads/main
|
/python_tamer/__init__.py
|
""" Initializations """
from pkg_resources import get_distribution, DistributionNotFound # What's this?
from .ExposureMap import *
from .SpecificDoses import *
# from yaconfigobject import Config
# try:
# __version__ = get_distribution(__name__).version
# except DistributionNotFound:
# # package is not installed
# pass
__author__ = """Todd C. Harris"""
__email__ = 'todd.harris@meteoswiss.ch'
__version__ = "0.4.0"
# CONFIG = Config(name='python_tamer.conf')
|
{"/python_tamer/SpecificDoses.py": ["/python_tamer/subroutines.py"], "/test/test_dummy_library.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"], "/python_tamer/ExposureMap.py": ["/python_tamer/subroutines.py"], "/python_tamer/__init__.py": ["/python_tamer/ExposureMap.py", "/python_tamer/SpecificDoses.py"]}
|
14,450
|
Sindroc/coding_problems
|
refs/heads/main
|
/rotate_three_quart.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 14:37:00 2021
@author: sindy
"""
def three_quarter_rotate(original_matrix):
    """Rotate a square N x N matrix by three quarter turns.

    Parameters
    ----------
    original_matrix : list[list]
        Square matrix; it is not modified.

    Returns
    -------
    list[list]
        New rotated matrix.
    """
    length = len(original_matrix)
    # Each result row reads a column of the original bottom-up.  (The former
    # debug `print` of the intermediate matrix — which also shadowed the
    # function name with a local variable — was removed; the sibling rotate
    # modules do not print either.)
    return [[original_matrix[length - col - 1][row] for col in range(length)]
            for row in range(length)]


original_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
three_quarter_rotate(original_matrix)
|
{"/main.py": ["/rotate_right.py", "/rotate_left.py", "/rotate_half.py", "/rotate_three_quart.py"]}
|
14,451
|
Sindroc/coding_problems
|
refs/heads/main
|
/rotate_half.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 10:26:55 2021
rotate 180.
@author: sindy
"""
def half_rotate(original_matrix):
    """Return a new matrix equal to ``original_matrix`` rotated by 180 degrees."""
    size = len(original_matrix)
    rotated = [[None] * size for _ in range(size)]
    for r in range(size):
        for c in range(size):
            # A half turn maps (r, c) onto the diagonally opposite cell.
            rotated[r][c] = original_matrix[size - r - 1][size - c - 1]
    return rotated


original_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
half_rotate(original_matrix)
|
{"/main.py": ["/rotate_right.py", "/rotate_left.py", "/rotate_half.py", "/rotate_three_quart.py"]}
|
14,452
|
Sindroc/coding_problems
|
refs/heads/main
|
/rotate_right.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Mon Mar 1 23:28:20 2021
Given an image represented by an NxN matrix, where
each pixel in the image is 4 bytes, write a method to
rotate the image by 90 degrees. Can you do this in place?
@author: sindy
"""
def right_rotate(original_matrix):
    """Return ``original_matrix`` rotated 90 degrees clockwise as a new matrix."""
    size = len(original_matrix)
    rotated = [[None] * size for _ in range(size)]
    for r in range(size):
        for c in range(size):
            # Row r of the input becomes column r of the output, read upward.
            rotated[c][r] = original_matrix[size - r - 1][c]
    return rotated


# A = [[1,2,3], [4,5,6], [7,8,9], [10,11,12]]
original_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
right_rotate(original_matrix)
|
{"/main.py": ["/rotate_right.py", "/rotate_left.py", "/rotate_half.py", "/rotate_three_quart.py"]}
|
14,453
|
Sindroc/coding_problems
|
refs/heads/main
|
/main.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 09:50:03 2021
@author: sindy
"""
import unittest
import rotate_right
import rotate_left
import rotate_half
import rotate_three_quart
class Test(unittest.TestCase):
    """Checks each rotation helper against a hand-computed 3x3 result."""

    def test_left(self):
        source = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        expected = [[3, 6, 9], [2, 5, 8], [1, 4, 7]]
        self.assertEqual(rotate_left.left_rotate(source), expected)

    def test_right(self):
        source = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        expected = [[7, 4, 1], [8, 5, 2], [9, 6, 3]]
        self.assertEqual(rotate_right.right_rotate(source), expected)

    def test_half(self):
        source = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        expected = [[9, 8, 7], [6, 5, 4], [3, 2, 1]]
        self.assertEqual(rotate_half.half_rotate(source), expected)

    def test_three_quarter(self):
        source = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
        expected = [[7, 4, 1], [8, 5, 2], [9, 6, 3]]
        self.assertEqual(rotate_three_quart.three_quarter_rotate(source),
                         expected)


if __name__ == "__main__":
    unittest.main()
|
{"/main.py": ["/rotate_right.py", "/rotate_left.py", "/rotate_half.py", "/rotate_three_quart.py"]}
|
14,454
|
Sindroc/coding_problems
|
refs/heads/main
|
/rotate_left.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Wed Mar 3 09:47:38 2021
@author: sindy
"""
def left_rotate(original_matrix):
    """Return ``original_matrix`` rotated 90 degrees counter-clockwise as a new matrix."""
    size = len(original_matrix)
    # Result row r is the original column (size - r - 1) read top to bottom.
    rotated = [[original_matrix[c][size - r - 1] for c in range(size)]
               for r in range(size)]
    return rotated


original_matrix = [[1, 2, 3], [4, 5, 6], [7, 8, 9]]
left_rotate(original_matrix)
|
{"/main.py": ["/rotate_right.py", "/rotate_left.py", "/rotate_half.py", "/rotate_three_quart.py"]}
|
14,456
|
reneichhorn/gameoflife
|
refs/heads/master
|
/main.py
|
import pyglet
from pyglet import shapes
import random

from cell import Cell

# Window / grid geometry: `scale` is the pixel edge length of one cell.
width = 2500
height = 1300
window = pyglet.window.Window(width, height)
scale = 10
cols = width // scale
rows = height // scale
batch = pyglet.graphics.Batch()
# Shrink by one cell so the bottom/right rows stay inside the window.
width = width - scale
height = height - scale

DEAD = [0, 0, 0]
ALIVE = [255, 255, 255]
PAUSED = False
# Probability threshold used when randomly seeding the initial board.
LIVELIHOOD = 0.45

cells = []
alreadyvisited = []    # cell indices already toggled during the current drag
lastshownplace = None  # grid position where a figure preview was last drawn
mousepos = 0, 0
figureToPlace = None   # currently unused; see the commented lines below

# Predefined figures as (row-offset, col-offset) lists relative to the cursor.
figures = [
    [(0, -1), (1, -1), (1, 0), (0, 0)],
    [(-4, 0), (-4, -1), (-4, 1), (-3, -2), (-3, 2), (-2, -3), (-2, 3), (-1, -3), (-1, 3), (0, 0), (1, -2), (1, 2), (2, -1), (2, 0), (2, 1), (3, 0)],
    [(-7, -8), (-7, 7), (-7, -7), (-7, 6), (-7, -6), (-7, 5),
     (-6, -5), (-6, 4), (-6, -8), (-6, 7),
     (-5, -8), (-5, 7),
     (-4, -4), (-4, 3), (-4, -8), (-4, 7),
     (-3, -4), (-3, 3), (-3, -8), (-3, 7),
     (-2, -3), (-2, 2), (-2, 6), (-2, -7),
     (-1, -2), (-1, 1),
     (0, 0), (0, -1),
     (1, 1), (1, -2), (1, -4), (1, 3),
     (2, -4), (2, 3),
     (3, -3), (3, 2), (3, -2), (3, 1),
     (4, -5), (4, 4), (4, -4), (4, 3),
     (5, -6), (5, 5), (5, -5), (5, 4), (5, -4), (5, 3), (5, -2), (5, 1),
     (6, -7), (6, 6), (6, -6), (6, 5), (6, -3), (6, 2),
     (7, -7), (7, 6), (7, -5), (7, 4),
     (8, -6), (8, 5), (8, -5), (8, 4), (8, -4), (8, 3),
     (9, -6), (9, 5), (9, -5), (9, 4), (9, -4), (9, 3),
     (10, -6), (10, 5), (10, -5), (10, 4)
    ]
]

# Seed the board: each cell gets a rectangle shape plus a Cell wrapper.
for j in range(0, rows):
    for i in range(0, cols):
        state = DEAD
        if random.random() > LIVELIHOOD:
            state = ALIVE
        # Rectangle origin is bottom-left; rows are laid out top-down.
        gameobject = shapes.Rectangle(0+i*scale, height-j*scale,
                                      scale-1, scale-1, color=state,
                                      batch=batch)
        cells.append(Cell(gameobject, i, j, state))

# Pre-compute each cell's neighbour list once.
for cell in cells:
    cell.getNeighbours(cells, cols)


def calculateNextBoard():
    """Advance the whole board by one generation (count first, then apply)."""
    global cells
    for cell in cells:
        cell.countAliveNeighbours()
    for cell in cells:
        cell.setState()
        cell.setColor()


def cellCountAliveNeighbours(cells):
    # Helper mirroring the first phase of calculateNextBoard().
    for cell in cells:
        cell.countAliveNeighbours()


def cellSetState(cells):
    # Helper mirroring the second phase of calculateNextBoard().
    for cell in cells:
        cell.setState()


def cleanEverything():
    """Kill every cell on the board."""
    global cells
    for cell in cells:
        cell.die()


@window.event
def on_draw():
    window.clear()
    batch.draw()


@window.event
def on_key_press(symbol, modifiers):
    # Keyboard controls: p = pause/resume, c = clear, 1-3 = preview a figure.
    global PAUSED, figures
    if chr(symbol) == 'p':
        PAUSED = not PAUSED
    if chr(symbol) == 'c':
        PAUSED = True
        cleanEverything()
    if chr(symbol) == '1':
        PAUSED = True
        # figureToPlace = figures[1]
        showFigure(figures[0])
    if chr(symbol) == '2':
        PAUSED = True
        # figureToPlace = figures[1]
        showFigure(figures[1])
    if chr(symbol) == '3':
        PAUSED = True
        # figureToPlace = figures[1]
        showFigure(figures[2])


def showFigure(figure):
    """Draw `figure` around the current mouse cell, erasing the previous preview."""
    global cells, mousepos, height, cols, lastshownplace
    i, j = mousepos
    # Convert pixel coordinates to grid coordinates (y axis is flipped).
    x = (i // scale)
    y = (height - j + scale//2)//scale
    lookup = cols * y + x
    origin = cells[lookup]
    offsets = figure
    if lastshownplace != (x, y) and lastshownplace is not None:
        # Erase the preview drawn at the previous cursor position.
        for offset in offsets:
            xoffset = offset[0]
            yoffset = offset[1]
            lastshownplacex = lastshownplace[0]
            lastshownplacey = lastshownplace[1]
            # NOTE(review): `% len(cells) - 1` parses as
            # `(lookup % len(cells)) - 1` — confirm the trailing `- 1` is
            # intended and not an off-by-one.
            neighbourlookup = (cols * (lastshownplacey + yoffset) + (lastshownplacex + xoffset)) % len(cells) - 1
            neighbour = cells[neighbourlookup]
            neighbour.state = DEAD
            neighbour.gameobject.color = DEAD
    # Draw the figure at the current position (indices wrap via modulo).
    for offset in offsets:
        xoffset = offset[0]
        yoffset = offset[1]
        neighbourlookup = (cols * (y + yoffset) + (x + xoffset)) % len(cells) - 1
        neighbour = cells[neighbourlookup]
        neighbour.state = ALIVE
        neighbour.gameobject.color = ALIVE
    lastshownplace = (x, y)
    pass


@window.event
def on_mouse_press(x, y, button, modifiers):
    # Toggle the cell under the mouse between alive and dead.
    print('mousepress')
    global height
    x = x//scale
    y = (height - y + scale//2)//scale
    global cells, cols, ALIVE, DEAD
    lookup = cols * y + x
    print(cells[lookup])
    state = cells[lookup].state
    if state != DEAD:
        state = DEAD
    elif state == DEAD:
        state = ALIVE
    print(state)
    cells[lookup].state = state
    # NOTE(review): assigns `.state` on the pyglet shape; presumably `.color`
    # was intended so the toggle becomes visible — confirm.
    cells[lookup].gameobject.state = state
    print(cells[lookup].state, cells[lookup].gameobject.state)


@window.event
def on_mouse_drag(x, y, dx, dy, buttons, modifiers):
    # Toggle cells while dragging; `alreadyvisited` prevents re-toggling the
    # same cell within one drag gesture.
    print('mousedrag')
    global height, cells, cols, ALIVE, DEAD, alreadyvisited
    x = x//scale
    y = (height - y + scale//2)//scale
    lookup = cols * y + x
    if lookup not in alreadyvisited:
        state = cells[lookup].state
        if state != DEAD:
            state = DEAD
        elif state == DEAD:
            state = ALIVE
        cells[lookup].state = state
        # NOTE(review): same `.state` vs `.color` question as on_mouse_press.
        cells[lookup].gameobject.state = state
        alreadyvisited.append(lookup)


@window.event
def on_mouse_release(x, y, button, modifiers):
    # End of a drag gesture: allow all cells to be toggled again.
    global alreadyvisited
    alreadyvisited = []


@window.event
def on_mouse_motion(x, y, dx, dy):
    # Track the cursor for figure previews.
    global mousepos
    mousepos = x, y


def update(dt):
    """Per-tick callback: redraw, and advance the simulation unless paused."""
    window.clear()
    batch.draw()
    global PAUSED
    if not PAUSED:
        calculateNextBoard()


# Run the simulation at 20 ticks per second.
pyglet.clock.schedule_interval(update, 1/20)
pyglet.app.run()
|
{"/main.py": ["/cell.py"]}
|
14,457
|
reneichhorn/gameoflife
|
refs/heads/master
|
/cell.py
|
# Cell state colours (RGB lists); ALIVESTATE1..8 shade live cells by age.
DEAD = [0, 0, 0]
ALIVE = [255, 255, 255]
ALIVESTATE1 = [255, 0, 0]
ALIVESTATE2 = [255, 165, 0]
ALIVESTATE3 = [255, 255, 0]
ALIVESTATE4 = [0, 128, 0]
ALIVESTATE5 = [0, 0, 255]
ALIVESTATE6 = [75, 0, 130]
ALIVESTATE7 = [238, 130, 238]
ALIVESTATE8 = [255, 255, 255]
# Toggle for the age-based colouring in setColor().
WITHCOLOR = True


class Cell:
    """One grid cell: wraps its drawable shape plus Game-of-Life state."""

    def __init__(self, gameobject, i, j, state):
        self.gameobject = gameobject  # shape drawn for this cell
        self.i = i                    # column index on the board
        self.j = j                    # row index on the board
        self.neighbours = []          # filled by getNeighbours()
        self.hasChanged = True        # True when state changed last step
        self.state = state            # DEAD or ALIVE colour list
        self.aliveNeighbours = 0      # cached count from countAliveNeighbours()
        self.aliveTime = 0            # steps this cell has stayed alive

    def getNeighbours(self, cells, cols):
        """Cache the 8 surrounding cells (board wraps via modulo arithmetic)."""
        i = self.i
        j = self.j
        for rowoffset in range(-1, 2):
            for coloffset in range(-1, 2):
                # NOTE(review): the modulo uses `len(cells) - 1`, so the very
                # last cell index can never be produced — looks like an
                # off-by-one; confirm the intended wrap-around.
                lookup = ((cols * (j + rowoffset)) + (i + coloffset)) % (len(cells) - 1)
                if (coloffset == 0 and rowoffset==0):
                    # Skip the cell itself.
                    continue
                self.neighbours.append(cells[lookup])

    def countAliveNeighbours(self):
        """Count live neighbours; stops early past 3 (the only threshold used)."""
        alivecount = 0
        for neighbour in self.neighbours:
            # Unchanged dead neighbours cannot contribute; skip them cheaply.
            if not neighbour.hasChanged and neighbour.state == DEAD:
                continue
            # NOTE(review): liveness is judged via the shape's colour, not
            # `neighbour.state`; with age-based colouring live cells are any
            # non-DEAD colour — confirm the two never disagree.
            if neighbour.gameobject.color != DEAD:
                alivecount += 1
            if alivecount > 3:
                break
        self.aliveNeighbours = alivecount

    def setState(self):
        """Apply Conway's rules using the cached neighbour count."""
        self.hasChanged = False
        if self.state != DEAD:
            if self.aliveNeighbours < 2:
                # Underpopulation.
                self.hasChanged = True
                self.state = DEAD
                self.aliveTime = 0
            elif self.aliveNeighbours > 3:
                # Overpopulation.
                self.state = DEAD
                self.hasChanged = True
                self.aliveTime = 0
        else:
            if self.aliveNeighbours == 3:
                # Reproduction.
                self.state = ALIVE
                self.hasChanged = True
        if self.hasChanged:
            self.gameobject.color = self.state
            self.setColor()

    def die(self):
        """Force this cell dead and repaint it."""
        self.state = DEAD
        self.gameobject.color = self.state

    def setColor(self):
        """Shade live cells by age (red through the spectrum to white)."""
        if WITHCOLOR:
            if self.state != DEAD:
                self.aliveTime += 1
                if self.aliveTime < 40:
                    self.gameobject.color = ALIVESTATE1
                elif self.aliveTime < 80:
                    self.gameobject.color = ALIVESTATE2
                elif self.aliveTime < 160:
                    self.gameobject.color = ALIVESTATE3
                elif self.aliveTime < 320:
                    self.gameobject.color = ALIVESTATE4
                elif self.aliveTime < 640:
                    self.gameobject.color = ALIVESTATE5
                elif self.aliveTime < 1280:
                    self.gameobject.color = ALIVESTATE6
                elif self.aliveTime < 2560:
                    self.gameobject.color = ALIVESTATE7
                elif self.aliveTime < 5120:
                    self.gameobject.color = ALIVESTATE8
                # NOTE(review): aliveTime was already incremented above, so a
                # live cell ages by 2 per call — confirm whether the double
                # increment is intended.
                self.aliveTime += 1

    def setStateManually(self, state):
        """Force `state` onto this cell (used by mouse/keyboard editing)."""
        self.hasChanged = True
        self.state = state
        # NOTE(review): assigns to `gameobject.state`, not `gameobject.color`,
        # so the repaint will not happen — confirm whether `.color` was meant.
        self.gameobject.state = self.state
        print(self.state)
|
{"/main.py": ["/cell.py"]}
|
14,464
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/recipes.py
|
# coding: utf-8
u'''
Helper functions to process Conda recipes.
.. versionadded:: 0.18
'''
from __future__ import absolute_import, unicode_literals, print_function
from ruamel.yaml import YAML
from ruamel.yaml.constructor import DuplicateKeyError
import pydash as _py
def find_requirements(recipe_obj, package_name=None):
    '''
    Find all ``requirements`` sections in the Conda build recipe.

    Returns a list of ``(package name, full spec string, path)`` tuples for
    every requirement entry, optionally restricted to ``package_name`` (a
    single name or a list of names).
    '''
    if isinstance(package_name, str):
        package_name = [package_name]
    recipe_obj = _py.clone_deep(recipe_obj)
    matches = []

    def _collect(value, path):
        # Requirement entries sit two levels below a `requirements` key.
        if (len(path) > 2 and path[-3] == 'requirements'
                and isinstance(value, str)):
            name = value.split(' ')[0]
            if package_name is None or name in package_name:
                matches.append((name, value, path))

    _py.map_values_deep(recipe_obj, iteratee=_collect)
    return matches
def recipe_objs(recipe_str):
    '''
    Parameters
    ----------
    recipe_str : str
        Conda recipe text.

    Returns
    -------
    list<collections.OrderedDict>
        List of outputs decoded from recipe. While most recipes result in a
        single output, Conda recipes can describe multiple outputs (see the
        `outputs section <https://conda.io/docs/user-guide/tasks/build-packages/define-metadata.html#outputs-section>`_
        in the ``conda build`` documentation).
    '''
    try:
        return [YAML().load(recipe_str)]
    except DuplicateKeyError:
        # Multiple outputs: the duplicated keys mean the text is really a
        # concatenation of recipes.  Split at each `package:` header and parse
        # every section on its own.
        lines = recipe_str.splitlines()
        starts = [i for i, line in enumerate(lines)
                  if line.startswith('package:')]
        bounds = zip(starts, starts[1:] + [len(lines)])
        return [YAML().load('\n'.join(lines[begin:end]))
                for begin, end in bounds]
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,465
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/__init__.py
|
# coding: utf-8
'''
.. versionchanged:: 0.13
Add support for Python 3.
.. versionchanged:: 0.21
Add support for ``conda>=4.4``.
'''
from __future__ import absolute_import, print_function, unicode_literals
from ._version import get_versions
from .exe_api import *
from .py_api import *
__version__ = get_versions()['version']
del get_versions
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,466
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/__main__.py
|
# coding: utf-8
u'''
condac - Execute Conda commands, reusing cached output if available.
'''
from __future__ import absolute_import, unicode_literals, print_function
from argparse import ArgumentParser
from collections import OrderedDict
from functools import wraps
import datetime as dt
import re
import subprocess as sp
import sys
import colorama as _C
import joblib as jl
import path_helpers as ph
import six
import conda_helpers as ch
import conda_helpers.exe_api
# Options shared by every `condac` sub-command (render, build, ...).
BASE_PARSER = ArgumentParser(add_help=False)
BASE_PARSER.add_argument('--cache-dir', type=ph.path, help='Cache directory '
                         '(default=`%(default)s`).',
                         default=ph.path('~/.conda-helpers-cache').expand())
# Fixed help-text typo: "do not used" -> "do not use".
BASE_PARSER.add_argument('-f', '--force', action='store_true', help='Force '
                         'execution of command (do not use cached result).')
BASE_PARSER.add_argument('-v', '--verbose', action='store_true')
def git_src_info(meta_path):
    '''
    Parameters
    ----------
    meta_path : str
        Path to ``meta.yaml`` Conda recipe file.

    Returns
    -------
    tuple(path, git describe, HEAD hash) or None
        Return ``None`` if no ``git_url`` is specified in the ``meta.yaml``
        file (or if the resolved source directory does not exist).
        Otherwise, return ``git`` info for recipe source.
    '''
    meta_path = ph.path(meta_path)
    recipe_path = meta_path.parent
    match = re.search(r'git_url: +(?P<git_url>[\S^#]*).*$', meta_path.text(),
                      flags=re.MULTILINE)
    if match is None:
        # No `git_url` entry in the recipe.  (Previously this fell through to
        # `match.group(...)` and raised AttributeError instead of returning
        # None as documented.)
        return None
    git_url = ph.path(match.group('git_url'))
    if git_url.isabs():
        git_dir = git_url
    else:
        # Relative URLs are resolved against the recipe directory.
        git_dir = recipe_path.joinpath(git_url).realpath()
    if git_dir.isdir():
        # Pass argument *lists* rather than single command strings: without
        # `shell=True`, a plain string is treated as the program name on
        # POSIX and the calls would fail there.
        describe = sp.check_output(['git', 'describe', '--tags', '--dirty'],
                                   cwd=git_dir).strip()
        head = sp.check_output(['git', 'rev-parse', 'HEAD'],
                               cwd=git_dir).strip()
        return git_dir, describe, head
@wraps(ch.exe_api.conda_exec)
def conda_exec_memoize(*args, **kwargs):
    '''
    Memoizable wrapper around :func:`conda_helpers.exe_api.conda_exec`.

    File and directory arguments are hashed into the memoization key so the
    cache is invalidated whenever their contents change; ``meta.yaml``
    recipes additionally contribute their git source revision to the key.
    '''
    global conda_exec
    __file_hashes__ = kwargs.pop('__file_hashes__', tuple())
    __ignore_paths__ = kwargs.pop('__ignore_paths__', tuple())
    # Get absolute path for each ignore path.
    __ignore_paths__ = tuple([ph.path(p).realpath() for p in __ignore_paths__])
    __force_exec__ = kwargs.pop('__force_exec__', False)
    verbose = kwargs.pop('verbose', False)
    cmd_args = list(args)
    __git_revisions__ = tuple()
    # Inspect each string argument; existing paths contribute content hashes.
    for i, a in enumerate(args):
        if isinstance(a, six.string_types):
            if i > 0 and args[i - 1] == '--croot':
                # Ignore `croot` directory.
                continue
            a = ph.path(a)
            if a.exists() and a.realpath() not in __ignore_paths__:
                cmd_args[i] = a.realpath()
                if a.isfile():
                    # Argument is a path to a file that exists and is not
                    # explicitly ignored. Add hash of file contents to
                    # arguments to allow for content-specific memoization.
                    __file_hashes__ += a.realpath(), a.read_hexhash('sha1')
                    if a.name == 'meta.yaml':
                        git_info = git_src_info(a)
                        __git_revisions__ += (git_info, )
                elif a.isdir():
                    # Argument is a path to a directory that exists and is not
                    # explicitly ignored. Add hashes of directory contents to
                    # arguments to allow for content-specific memoization.
                    files = []
                    for f in a.walkfiles():
                        files.append((f.realpath(), f.read_hexhash('sha1')))
                        if f.name == 'meta.yaml':
                            git_info = git_src_info(f)
                            __git_revisions__ += (git_info, )
                    __file_hashes__ += (a.realpath(), tuple(files))
    # NOTE(review): unconditionally forces verbose output of the wrapped
    # call, overriding the `verbose` value popped above — confirm intended.
    kwargs['verbose'] = True
    if __git_revisions__:
        kwargs['__git_revisions__'] = __git_revisions__
        if verbose:
            for git_dir_i, describe_i, head_i in __git_revisions__:
                print(_C.Fore.MAGENTA + '  git source:',
                      (_C.Fore.WHITE + '{}@'.format(git_dir_i.name)) +
                      (_C.Fore.LIGHTGREEN_EX + '{}'.format(describe_i
                                                           .decode('utf8'))),
                      (_C.Fore.LIGHTCYAN_EX + '({})'.format(head_i[:8]
                                                            .decode('utf8'))),
                      file=sys.stderr)
            # Redundant re-assignment (same value as above); kept as-is.
            kwargs['__git_revisions__'] = __git_revisions__
    kwargs['__file_hashes__'] = __file_hashes__
    # Resolve where the memoized result for these arguments would live.
    output_dir, argument_hash = conda_exec._get_output_dir(*cmd_args, **kwargs)
    if ph.path(output_dir).joinpath('output.pkl').isfile():
        # Cache result exists.
        if __force_exec__:
            # Delete cached output file.
            ph.path(output_dir).joinpath('output.pkl').remove()
            if verbose:
                print(_C.Fore.RED + 'Deleted cached result (`--force` was '
                      'specified.)', file=sys.stderr)
            cached = False
        else:
            cached = True
    else:
        cached = False
    if verbose:
        print(_C.Fore.MAGENTA + 'Command:', _C.Fore.WHITE +
              sp.list2cmdline(args), file=sys.stderr)
        if cached:
            print(_C.Fore.MAGENTA + 'Reusing cached result...',
                  file=sys.stderr)
        else:
            print(_C.Fore.MAGENTA + 'Executing function (no cache found)...',
                  file=sys.stderr)
    if verbose:
        print(_C.Fore.MAGENTA + '\nOutput\n======', file=sys.stderr)
    # **Note: `conda_exec` is created dynamically in `main()` function to
    # use a dynamically-specified memoize cache directory.**
    output = conda_exec(*cmd_args, **kwargs).replace('\r\n', '\n')
    if cached:
        # Result was loaded from cache.  Since function was not actually
        # run, need to print output.
        sys.stdout.write(output)
    return output
def main(args=None):
    '''
    Entry point for the ``condac`` command-line tool.

    Parameters
    ----------
    args : list, optional
        Command-line arguments; defaults to ``sys.argv[1:]`` when not given,
        which keeps the function usable both as a console entry point and as
        a directly-called function.
    '''
    global conda_exec
    _C.init(autoreset=True)
    # Fix: `args` used to be unconditionally overwritten with `sys.argv[1:]`
    # before the None-check, which made the `args` parameter dead code.
    if args is None:
        args = sys.argv[1:]
    # Arguments after a literal `--` are forwarded verbatim to `conda`.
    if '--' in args:
        cmd_args = args[args.index('--') + 1:]
        parser_args = args[:args.index('--')]
    else:
        cmd_args = []
        parser_args = args
    parser = ArgumentParser(prog='condac', epilog='Version %s' %
                            ch.__version__, description='Cached Conda Memoized'
                            ' Conda commands.')
    parser.add_argument('--version', action='store_true')
    sub = parser.add_subparsers(dest='command')
    supported_commands = ['render', 'build']
    subparsers = OrderedDict([(subparser_name_i,
                               sub.add_parser(subparser_name_i,
                                              parents=[BASE_PARSER]))
                              for subparser_name_i in supported_commands])
    args = parser.parse_args(parser_args)
    if args.version:
        print(ch.__version__)
        return
    if not args.command:
        parser.error('No command specified. Must specify one of: `{}`'
                     .format(', '.join(subparsers.keys())))
    if args.verbose:
        # NOTE(review): cache-dir normalisation happens only under
        # `--verbose` — confirm whether it should run unconditionally.
        if args.cache_dir == '-':
            print(_C.Fore.MAGENTA + 'Cache disabled.', file=sys.stderr)
            args.cache_dir = None
        elif not args.cache_dir.isdir():
            print(_C.Fore.MAGENTA + 'Creating cache dir:',
                  _C.Fore.WHITE + args.cache_dir.realpath(), file=sys.stderr)
            args.cache_dir = args.cache_dir.realpath()
        else:
            print(_C.Fore.MAGENTA + 'Using cache dir:',
                  _C.Fore.WHITE + args.cache_dir.realpath(), file=sys.stderr)
            args.cache_dir = args.cache_dir.realpath()
    memory = jl.Memory(cachedir=args.cache_dir, verbose=0)
    # **Note: `conda_exec` function is created dynamically to use the
    # dynamically-specified memoize cache directory.**
    conda_exec = memory.cache(ch.exe_api.conda_exec)
    start = dt.datetime.now()
    try:
        conda_exec_memoize(args.command, *cmd_args, verbose=args.verbose,
                           __force_exec__=args.force)
        end = dt.datetime.now()
        if args.verbose:
            exe_time = (end - start)
            print(_C.Fore.MAGENTA + '\nExecution time: %s' % exe_time,
                  file=sys.stderr)
    finally:
        # Always restore the terminal colour state.
        print(_C.Style.RESET_ALL, end='')


if __name__ == '__main__':
    main()
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,467
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/_async_py27.py
|
from __future__ import absolute_import, print_function, unicode_literals
from backports.shutil_get_terminal_size import get_terminal_size
from functools import partial
import io
import itertools as it
import subprocess as sp
import sys
import colorama as co
import trollius as asyncio
@asyncio.coroutine
def _read_stream(stream, callback=None, buffer_size=None):
    '''Read `stream` until EOF, invoking `callback` with each chunk.

    Chunks are at most `buffer_size` bytes (default: one byte at a time).
    '''
    while True:
        data = yield asyncio.From(stream.read(buffer_size or 1))
        if data:
            if callback is not None:
                callback(data)
        else:
            # Empty read means end of stream.
            break
@asyncio.coroutine
def run_command(cmd, *args, **kwargs):
    '''
    Run `cmd` as a subprocess, streaming and capturing its output.

    Parameters
    ----------
    cmd : str or list
        Command to execute (a list is joined with ``list2cmdline``).
    shell : bool, optional
        Run through the shell (default ``True``).
    verbose : bool or None, optional
        ``True`` echoes output as it arrives; ``None`` shows a spinning wait
        indicator instead.
    buffer_size : int, optional
        Read size used when draining stdout/stderr.

    Returns (via ``asyncio.Return``)
    --------------------------------
    (int, str, str)
        Return code, captured stdout, captured stderr.

    .. versionchanged:: 0.18
        Display wait indicator if ``verbose`` is set to ``None`` (default).
    '''
    shell = kwargs.pop('shell', True)
    verbose = kwargs.pop('verbose', True)
    buffer_size = kwargs.pop('buffer_size', io.DEFAULT_BUFFER_SIZE)
    if isinstance(cmd, list):
        cmd = sp.list2cmdline(cmd)
    _exec_func = (asyncio.subprocess.create_subprocess_shell
                  if shell else asyncio.subprocess.create_subprocess_exec)
    process = yield asyncio.From(_exec_func(cmd, *args,
                                            stdout=asyncio.subprocess.PIPE,
                                            stderr=asyncio.subprocess.PIPE))
    stdout_ = io.StringIO()
    stderr_ = io.StringIO()
    # Trim the status message so it fits the current terminal width.
    terminal_size = get_terminal_size()
    message = [co.Fore.MAGENTA + 'Executing:', co.Fore.WHITE + cmd]
    if sum(map(len, message)) + 2 > terminal_size.columns:
        cmd_len = terminal_size.columns - 2 - sum(map(len, ('...',
                                                            message[0])))
        message[1] = co.Fore.WHITE + cmd[:cmd_len] + '...'
    waiting_indicator = it.cycle(r'\|/-')
    cmd_finished = asyncio.Event()

    @asyncio.coroutine
    def display_status():
        '''
        Display status while executing command.
        '''
        # Update no faster than `stderr` flush interval (if set).
        update_interval = 2 * getattr(sys.stderr, 'flush_interval', .2)
        while not cmd_finished.is_set():
            print('\r' + co.Fore.WHITE + next(waiting_indicator), *message,
                  end='', file=sys.stderr)
            yield asyncio.From(asyncio.sleep(update_interval))
        print('\r' + co.Fore.GREEN + 'Finished:', co.Fore.WHITE + cmd,
              file=sys.stderr)

    def dump(output, data):
        # Mirror subprocess output into `output`, echoing it when verbose.
        text = data.decode('utf8')
        if verbose:
            print(text, end='')
        output.write(text)

    if verbose is None:
        # Display status while executing command.
        status_future = asyncio.ensure_future(display_status())
    # Drain both pipes concurrently until the process closes them.
    yield asyncio.From(asyncio.wait([_read_stream(process.stdout,
                                                  partial(dump, stdout_),
                                                  buffer_size=buffer_size),
                                     _read_stream(process.stderr,
                                                  partial(dump, stderr_),
                                                  buffer_size=buffer_size)]))
    # Notify that command has completed execution.
    cmd_finished.set()
    if verbose is None:
        # Wait for status to display "Finished: ..."
        yield asyncio.From(status_future)
    return_code = yield asyncio.From(process.wait())
    raise asyncio.Return(return_code, stdout_.getvalue(), stderr_.getvalue())
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,468
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/exe_api.py
|
# coding: utf-8
'''
.. versionadded:: 0.21
This module contains functions that require a `conda` executable to be
available on the system path.
'''
from __future__ import absolute_import, print_function, unicode_literals
import itertools as it
import io
import logging
import platform
import re
import sys
import subprocess as sp
import tempfile as tmp
import colorama as co
import path_helpers as ph
import whichcraft
from .asyncio_util import run_command, with_loop
from .py_api import conda_list, conda_prefix
from .recipes import recipe_objs, find_requirements
logger = logging.getLogger(__name__)

'''
.. versionadded:: 0.12.3

Match progress messages from Conda install output log.

For example:

    {"maxval": 133256, "finished": false, "fetch": "microdrop-laun", "progress": 0}

See `issue #5 <https://github.com/sci-bots/conda-helpers/issues/5>`_.
'''
# Matches the JSON progress records Conda interleaves with its install log.
cre_json_progress = re.compile(r'{"maxval":[^,]+,\s+"finished":[^,]+,'
                               r'\s+"fetch":\s+[^,]+,\s+"progress":[^}]+}')

'''
.. versionadded:: 0.12.3

Match non-JSON messages, e.g., `Conda menuinst log messages <https://github.com/ContinuumIO/menuinst/issues/49>`_.

For example:

    INFO menuinst_win32:__init__(182): Menu: name: 'MicroDrop', prefix: 'dropbot.py', env_name: 'dropbot.py', mode: 'None', used_mode: 'user'

See also
--------
https://groups.google.com/a/continuum.io/forum/#!topic/anaconda/RWs9of4I2KM
https://github.com/sci-bots/conda-helpers/issues/5
'''
# Matches lines starting with a word character, i.e. plain (non-JSON) output.
cre_non_json = re.compile(r'^\w')


class NotInstalled(Exception):
    '''Raised when a requested Conda package is not installed.'''
    pass
def f_major_version(version):
    '''
    Parameters
    ----------
    version : str
        Version string (e.g., ``'0.1.0'``, ``'1.0'``).

    Returns
    -------
    int
        Number before first dot in version string (i.e., major version
        number).
    '''
    major, _, _ = version.partition('.')
    return int(major)
def conda_executable():
    '''
    Locate the first ``conda`` executable on the system path.

    Searching the whole system path (rather than only the environment's
    ``Scripts`` directory) supports environments created with ``conda>=4.4``,
    where the root ``conda`` executable is no longer linked into child
    environments.

    Returns
    -------
    path_helpers.path
        Path to Conda executable.

    Raises
    ------
    IOError
        If no ``conda`` executable can be found.

    .. versionadded:: 0.2.post5
    .. versionchanged:: 0.21
        Search for first Conda executable on system path.
    '''
    located = whichcraft.which('conda')
    if located is None:
        raise IOError('Could not locate `conda` executable.')
    return ph.path(located)
def conda_root():
    '''
    .. versionadded:: 0.3.post2

    .. versionchanged:: 0.21
        Look up ``conda`` executable path using :func:`conda_executable`.

    Returns
    -------
    path_helpers.path
        Path to Conda **root** environment.
    '''
    # NOTE(review): an argument *list* combined with `shell=True` only
    # behaves as intended on Windows; on POSIX the list items after the first
    # become arguments to the shell itself — confirm the supported platforms.
    return ph.path(sp.check_output([conda_executable(), 'info', '--root'],
                                   shell=True).strip())
def conda_activate_command():
    '''
    Build the command prefix that activates the running Conda environment.

    Returns
    -------
    list
        ``['call', <activate script>, <environment prefix>]`` — prepend this
        to a command list to run the command inside the Conda environment of
        the current Python executable.

    Raises
    ------
    IOError
        If no ``activate`` executable can be found on the system path.

    .. versionadded:: 0.3.post2
    .. versionchanged:: 0.21
        Search for first ``activate`` executable on system path (supports
        environments created with ``conda>=4.4``).
    '''
    script = whichcraft.which('activate')
    if script is None:
        raise IOError('Could not locate Conda `activate` executable.')
    return ['call', script, conda_prefix()]
def conda_upgrade(package_name, match_major_version=False, channels=None):
    '''
    Upgrade Conda package.

    Parameters
    ----------
    package_name : str
        Package name.
    match_major_version : bool, optional
        Only upgrade to versions within the same major version.
    channels : list, optional
        Anaconda channels to add to install command.

    Returns
    -------
    dict
        Dictionary containing:
         - :data:`original_version`: Package version before upgrade.
         - :data:`new_version`: Package version after upgrade (`None` if
           package was not upgraded).
         - :data:`installed_dependencies`: List of dependencies installed
           during package upgrade.  Each dependency is represented as a
           dictionary of the form ``{'package': ..., 'version': ...}``.

    Raises
    ------
    NotInstalled
        If package not installed.
    IOError
        If Conda executable not found in Conda environment.
    subprocess.CalledProcessError
        If `conda search` command fails (in Conda environment).

        This happens, for example, if no internet connection is available.

    See also
    --------
    :func:`pip_helpers.upgrade`

    .. versionchanged:: 0.15
        Use asynchronous :func:`run_command` coroutine to better stream
        ``stdout`` and ``stderr``.
    '''
    # Default result, reported when `conda` cannot even be located.
    result = {'package': package_name,
              'original_version': None,
              'new_version': None,
              'installed_dependencies': []}
    try:
        version_info = conda_version_info(package_name)
    except IOError:
        # Could not locate `conda` executable.
        return result
    result = {'package': package_name,
              'original_version': version_info['installed'],
              'new_version': None,
              'installed_dependencies': []}
    if result['original_version'] is None:
        # Package is not installed.
        raise NotInstalled(package_name)
    if match_major_version:
        # Restrict candidates to the installed major version.
        installed_major_version = f_major_version(version_info['installed'])
        latest_version = [v for v in version_info['versions']
                          if f_major_version(v) == installed_major_version][-1]
    else:
        latest_version = version_info['versions'][-1]
    if result['original_version'] == latest_version:
        # Latest version already installed.
        return result
    if channels is None:
        channels_args = []
    else:
        # Interleave `-c <channel>` pairs for each requested channel.
        channels_args = list(it.chain(*[['-c', c] for c in channels]))
    # Running in a Conda environment.
    command = (['conda', 'install'] + channels_args +
               ['-y', '{}=={}'.format(package_name, latest_version)])
    returncode, stdout, stderr = with_loop(run_command)(command, shell=True,
                                                        verbose=True)
    if returncode != 0:
        message = ('Error executing: `{}`.\nstdout\n------\n\n{}\n\n'
                   'stderr\n------\n\n{}'.format(sp.list2cmdline(command),
                                                 stdout, stderr))
        logger.error(message)
        raise RuntimeError(message)
    if '# All requested packages already installed.' in stdout:
        pass
    elif 'The following NEW packages will be INSTALLED' in stdout:
        # Parse the install log to recover the upgraded version and any
        # freshly installed dependencies.
        match = re.search(r'The following NEW packages will be INSTALLED:\s+'
                          r'(?P<packages>.*)\s+Linking packages', stdout,
                          re.MULTILINE | re.DOTALL)
        cre_package = re.compile(r'\s*(?P<package>\S+):\s+'
                                 r'(?P<version>\S+)-[^-]+\s+')
        packages_str = match.group('packages')
        packages = [match_i.groupdict()
                    for match_i in cre_package.finditer(packages_str)]
        for package_i in packages:
            if package_i['package'] == package_name:
                result['new_version'] = package_i['version']
        installed_dependencies = [p for p in packages
                                  if p['package'] != package_name]
        result['installed_dependencies'] = installed_dependencies
    return result
def conda_version_info(package_name, channels=None):
    '''
    Look up available and installed versions of a Conda package.

    Parameters
    ----------
    package_name : str
        Conda package name.
    channels : list, optional
        Anaconda channels to add to install command.

    Returns
    -------
    dict
        Version information:

        - ``versions``: Available versions, in the order reported by
          ``conda search`` (ascending).
        - ``installed``: Installed version (`None` if not installed).

    Raises
    ------
    IOError
        If Conda executable not found.
    subprocess.CalledProcessError
        If `conda search` command fails.

        This happens, for example, if no internet connection is available.

    .. versionchanged:: 0.21
        Use :func:`conda_list` to check for currently installed version of
        package.  This is necessary since format of ``conda search`` has
        changed and no longer uses a ``*`` to indicate the currently installed
        version.
    '''
    if channels is None:
        channels_args = []
    else:
        # Interleave channel flags, e.g., ['-c', 'a', '-c', 'b'].
        channels_args = list(it.chain(*[['-c', c] for c in channels]))
    # Use `-f` flag to search for package, but *no other packages that have
    # `<package_name>` in the name*.
    # NOTE(review): a *list* command combined with ``shell=True`` only behaves
    # as intended on Windows; on POSIX the extra list items become arguments
    # to the shell itself — confirm this code path is Windows-only.
    output = sp.check_output([conda_executable(), 'search'] + channels_args +
                             ['-f', package_name], shell=True)
    output_lines = output.strip().splitlines()
    # Skip the header row.  Column 1 normally holds the version; if column 1
    # is a `*`/`.` marker, the version is in column 2 instead.
    line_tokens = [re.split(r'\s+', v) for v in output_lines[1:]]
    versions = [tokens_i[2] if tokens_i[1] in ('*', '.') else tokens_i[1]
                for tokens_i in line_tokens]
    # Installed version comes from the local `conda-meta` index, not from the
    # `conda search` output (see 0.21 changelog note above).
    installed_version = conda_list(package_name).get(package_name,
                                                     {}).get('version')
    return {'installed': installed_version, 'versions': versions}
def conda_exec(*args, **kwargs):
    r'''
    Execute command using ``conda`` executable in active Conda environment.

    .. versionchanged:: 0.7.3
        Do not escape ``<``, ``>`` characters in ``conda_exec``, since these
        characters are required for less than or greater than version
        specifiers.

        For example, ``"foo >2.0"``, ``"foobar <3.0"``.

    .. versionchanged:: 0.10
        Log executed command as a string, rather than a list of arguments.
        This should make it easier, for example, to copy and paste a command to
        run manually.

    .. versionchanged:: 0.12.2
        Escape ``&``, ``\``, ``|``, ``^``, ``<``, and ``<`` characters, but
        **only** if there is not a space in an argument.  The reason is that if
        there is a space in the argument, the argument will automatically be
        quoted so character escaping is not necessary.

    .. versionchanged:: 0.12.3
        By default, strip non-json lines from output when ``--json`` arg is
        specified.

        See `issue #5 <https://github.com/sci-bots/conda-helpers/issues/5>`_.

    Parameters
    ----------
    *args : list(str)
        Command line arguments to pass to ``conda`` executable.
    verbose : bool, optional
        Forwarded to :func:`run_command` to control output display.
    json_fix : bool, optional
        If ``True`` (default), strip non-JSON lines from the output whenever
        ``--json`` is among :data:`args`.

    Returns
    -------
    str
        Output from command (both ``stdout`` and ``stderr``).

    Raises
    ------
    RuntimeError
        If the ``conda`` process exits with a non-zero return code.

    .. versionchanged:: 0.15
        Use asynchronous :func:`run_command` coroutine to better stream
        ``stdout`` and ``stderr``.
    '''
    verbose = kwargs.get('verbose')
    # By default, strip non-json lines from output when `--json` arg is
    # specified.
    # See https://github.com/sci-bots/microdrop/issues/249.
    json_fix = kwargs.get('json_fix', True)
    # Only escape characters for arguments that do not include a space.  See
    # docstring for details.
    # `^` is the cmd.exe escape character; `\` is the POSIX shell one.
    escape_char = '^' if platform.system() == 'Windows' else '\\'
    args = [arg_i if ' ' in arg_i else
            re.sub(r'([&\\^\|<>])', r'{}\1'.format(escape_char), arg_i)
            for arg_i in args]
    # Running in a Conda environment.
    command = [conda_executable()] + list(args)
    logger.debug('Executing command: `%s`', sp.list2cmdline(command))
    returncode, stdout, stderr = with_loop(run_command)(command, shell=True,
                                                        verbose=verbose)
    if returncode != 0:
        message = ('Error executing: `{}`.\nstdout\n------\n\n{}\n\n'
                   'stderr\n------\n\n{}'.format(sp.list2cmdline(command),
                                                 stdout, stderr))
        logger.error(message)
        raise RuntimeError(message)
    # Strip non-json lines from output when `--json` arg is specified.
    # (`cre_json_progress` and `cre_non_json` are module-level patterns.)
    if '--json' in args and json_fix:
        stdout = '\n'.join(line_i for line_i in stdout.splitlines()
                           if not any(cre_j.search(line_i)
                                      for cre_j in (cre_json_progress,
                                                    cre_non_json)))
    # Strip extraneous output from activate script:
    #  - `"Found VS2014 at C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\Tools\"`
    stdout = re.sub('^"Found VS.*$', '', stdout, flags=re.MULTILINE)
    #  - `ERROR: The system was unable to find the specified registry key or value.`
    stdout = re.sub('^ERROR: The system.*$', '', stdout, flags=re.MULTILINE)
    return stdout
def development_setup(recipe_dir, *args, **kwargs):
    '''
    Install build and run-time dependencies for specified Conda build recipe.

    Parameters
    ----------
    recipe_dir : str
        Path to Conda build recipe.
    *args
        Additional arguments to pass to ``conda install`` command.
    verbose : bool, optional
        If ``True``, display output of ``conda install`` command.

        If ``False``, do not display output of ``conda install`` command.

        If ``None``, display ``.`` characters to indicate progress during
        ``conda install`` command.

    .. versionchanged:: 0.13.1
        Strip build string (where necessary) from rendered recipe package
        specifiers.  Fixes `issue #4 <https://github.com/sci-bots/conda-helpers/issues/4>`_

    .. versionchanged:: 0.18
        Add support for recipes with multiple outputs.

    See also
    --------
    https://conda.io/docs/user-guide/tasks/build-packages/define-metadata.html#outputs-section

    .. versionchanged:: 0.19
        Use :func:`render` to render recipe.

    .. versionchanged:: 0.20
        Uninstall packages corresponding to paths added with `conda develop` to
        ensure development versions are used instead.
    '''
    verbose = kwargs.pop('verbose', True)
    recipe_dir = ph.path(recipe_dir).realpath()
    # Extract list of build and run dependencies from Conda build recipe.
    logger.info('Extract build dependencies from Conda recipe: %s', recipe_dir)
    # Render recipe for the Python version of the active Conda environment.
    recipe = render(recipe_dir)
    # Decode one or more outputs from the recipe yaml.
    recipe_objs_ = recipe_objs(recipe)
    # Find all `build` and `run` requirements across all outputs.
    requirements = list(it.chain(*map(find_requirements, recipe_objs_)))
    # If listed as both a `build` and `run` requirement use the `run`
    # requirement specification only.
    requirements = [list(requirements_i)[-1] for group_i, requirements_i in
                    it.groupby(sorted(requirements,
                                      key=lambda x: (x[0], x[-1])),
                               key=lambda x: x[0])]
    # Extract package name and version (if specified) from each requirement.
    required_packages = [dict(zip(('package', 'version'), r[1].split(' ')[:2]))
                         for r in requirements]
    # XXX Do not include dependencies with wildcard version specifiers, since
    # they are not supported by `conda install`.
    required_packages = [v for v in required_packages
                         if '*' not in v.get('version', '')]
    # Prepend explicit version numbers with '=='.
    # BUG FIX: use a raw string for the regex (`'^\d'` is an invalid escape
    # sequence and a `DeprecationWarning`/`SyntaxWarning` on modern Python).
    for req_i in required_packages:
        if 'version' in req_i and re.search(r'^\d', req_i['version']):
            req_i['version'] = '==' + req_i['version']
    # Dump sorted list of required packages.
    required_strs = sorted(' {}{}'.format(r['package'],
                                          ' {}'.format(r['version']
                                                       if 'version' in r
                                                       else ''))
                           for r in required_packages)
    logger.info('Install build and run-time dependencies:\n%s',
                '\n'.join(required_strs))
    # Dump list of Conda requirements to a file and install dependencies using
    # `conda install ...`.
    # BUG FIX: `tempfile.TemporaryFile` accepts no `delete` keyword and does
    # not guarantee a usable `.name`; `NamedTemporaryFile` is required here
    # because the file name is passed to `conda install --file`.
    required_packages_file = tmp.NamedTemporaryFile(mode='w',
                                                    prefix='%s-dev-req-' %
                                                    recipe_dir.name,
                                                    delete=False)
    required_packages_lines = ['{} {}'.format(req_i['package'],
                                              req_i.get('version', '')).strip()
                               for req_i in required_packages]
    try:
        # Create string containing one package descriptor per line.
        required_packages_str = '\n'.join(required_packages_lines)
        required_packages_file.file.write(required_packages_str)
        required_packages_file.file.close()

        conda_exec('install', '-y', '--file', required_packages_file.name,
                   *args, verbose=verbose)
    finally:
        # Remove temporary file containing list of Conda requirements.
        ph.path(required_packages_file.name).remove()

    # Uninstall packages corresponding to paths added with `conda develop` so
    # development versions are used.
    dev_packages = find_dev_packages(verbose=None if verbose is None or verbose
                                     else False)
    if dev_packages:
        # BUG FIX: the original concatenated two adjacent string literals,
        # turning the header text into the `'\n'.join(...)` *separator*.
        # Pass the joined package list as a lazy `%s` logging argument.
        logger.info('Uninstall packages linked with `conda develop`:\n%s',
                    '\n'.join(dev_packages))
        conda_exec('uninstall', '-y', '--force', *dev_packages,
                   verbose=verbose)
def install_info(install_response, split_version=False):
    '''
    Normalize the JSON output of a ``conda install ...`` invocation (dry-run
    or real) into sorted lists of unlinked and linked packages.

    Parameters
    ----------
    install_response : dict
        JSON decoded response from ``conda install ...`` command.
    split_version : bool, optional
        When ``True``, split each package specifier into separate name and
        version fields.  Defaults to ``False`` for backwards compatibility.

    Returns
    -------
    unlinked_packages, linked_packages : list, list
        Both are ``None`` when the response contains no ``'actions'`` entry
        (nothing was installed or removed).  Otherwise each is a sorted list
        of per-package entries: ``[<name>, <version>, <channel>]`` when
        :data:`split_version` is set, ``[<name and version>, <channel>]``
        otherwise.

    Raises
    ------
    RuntimeError
        If install response does not include item with key ``'success'``.
    '''
    if not install_response.get('success'):
        raise RuntimeError('Install operation failed.')
    if 'actions' not in install_response:
        # No changes were made by the install command.
        return None, None

    raw_actions = install_response['actions']
    # A *list* of actions indicates a dry run, which uses a different format.
    dry_run = isinstance(raw_actions, list)
    actions = raw_actions[0] if dry_run else raw_actions

    if dry_run:
        def as_pair(entry):
            # Dry-run entries are dicts with name/version/channel keys.
            return ['{}=={}'.format(entry['name'], entry['version']),
                    entry['channel']]
    else:
        def as_pair(entry):
            # Real-run entries look like `<channel>::<name-version-build>`.
            return entry.split('::')[::-1]

    # Sort both lists to make output deterministic.
    sorted_unlinked = sorted(as_pair(v) for v in actions.get('UNLINK', []))
    sorted_linked = sorted(as_pair(v) for v in actions.get('LINK', []))

    if not split_version:
        return sorted_unlinked, sorted_linked

    def explode(pairs):
        '''Split `[<spec>, <channel>]` pairs into `[<name>, <version>, <channel>]`.'''
        exploded = []
        for spec, channel in pairs:
            if '==' in spec:
                fields = spec.split('==')
            else:
                pieces = spec.split('-')
                # `<name>-<version>-<build>`: name may itself contain dashes.
                fields = ['-'.join(pieces[:-2]), pieces[-2]]
            exploded.append(fields + [channel])
        return exploded

    return [explode(sorted_unlinked), explode(sorted_linked)]
def format_install_info(unlinked, linked):
    '''
    Format output of :func:`install_info` into human-readable form.

    For example:

        Uninstalled:
         - `foo==3.2` (from `conda-forge`)

        Installed:
         - `foobar==1.7` (from `sci-bots`)
         - `bar==1.7` (from `conda-forge`)

    .. versionadded:: 0.9

    .. versionchanged:: 0.12.1
        Implement handling :func:`install_info` output where
        :data:`split_version` set to ``True``.

    Parameters
    ----------
    unlinked : list or None
        List of package information tuples for removed packages (or ``None``
        /empty if nothing was removed).
    linked : list or None
        List of package information tuples for installed packages, either of
        the form ``(<package name>, <version>, <channel>)`` or
        ``(<package name and version>, <channel>)``.

    Returns
    -------
    str
        Formatted output of :func:`install_info`.
    '''
    # BUG FIX: use a text buffer; `print(..., file=io.BytesIO())` raises
    # `TypeError` on Python 3 because `print` writes `str`, not `bytes`.
    output = io.StringIO()

    def _format_package_tuple(package_tuple):
        '''
        Render one package tuple — ``(<spec>, <channel>)`` or
        ``(<name>, <version>, <channel>)`` — as a bullet line.
        '''
        if len(package_tuple) == 2:
            package_i, channel_i = package_tuple
            return ' - `{}` (from `{}`)'.format(package_i, channel_i)
        elif len(package_tuple) == 3:
            package_i, version_i, channel_i = package_tuple
            return ' - `{}=={}` (from `{}`)'.format(package_i, version_i,
                                                    channel_i)

    if unlinked:
        print('Uninstalled:', file=output)
        # BUG FIX: this loop previously iterated `linked`, so the
        # "Uninstalled:" section listed the *installed* packages.
        for package_tuple_i in unlinked:
            print(_format_package_tuple(package_tuple_i), file=output)

    if unlinked and linked:
        print('', file=output)

    if linked:
        print('Installed:', file=output)
        for package_tuple_i in linked:
            print(_format_package_tuple(package_tuple_i), file=output)
    return output.getvalue()
def render(recipe_dir, **kwargs):
    '''
    Render specified Conda build recipe.

    Parameters
    ----------
    recipe_dir : str
        Path to Conda build recipe.
    verbose : bool, optional
        If ``True``, display output of ``conda render`` command.

        If ``False``, do not display output of ``conda render`` command.

        If ``None``, display waiting indicator ``conda render`` command.

    Returns
    -------
    str
        Render recipe text.

    .. versionadded:: 0.19

    .. versionchanged:: 0.21
        Use first ``python`` executable found on the system path.
    '''
    recipe_dir = ph.path(recipe_dir).realpath()
    # Render recipe for the Python version of the active Conda environment.
    # Note that `conda render` is part of the `conda-build` package, which is
    # installed in the `root` Conda environment, which may have a different
    # version of Python installed.
    PY = '{0.major}.{0.minor}'.format(sys.version_info)
    # Delegate to this package's own CLI (`python -m conda_helpers render`).
    command = ['python', '-m', 'conda_helpers', 'render', '-v', '--',
               recipe_dir, '--python=' + PY]
    returncode, stdout, stderr = with_loop(run_command)(command, shell=True,
                                                        **kwargs)
    # NOTE(review): a non-zero `returncode` is not checked here (unlike
    # `conda_exec`) — confirm failures are intentionally silent.
    # Strip extraneous output from activate script:
    #  - `"Found VS2014 at C:\Program Files (x86)\Microsoft Visual Studio 14.0\Common7\Tools\"`
    stdout = re.sub('^"Found VS.*$', '', stdout, flags=re.MULTILINE)
    #  - `ERROR: The system was unable to find the specified registry key or value.`
    stdout = re.sub('^ERROR: The system.*$', '', stdout, flags=re.MULTILINE)
    return stdout
def find_dev_packages(**kwargs):
    '''
    Find package names corresponding to paths added with ``conda develop``.

    To do this, for each path listed in ``.../site-packages/conda.pth``:

     1. If ``.conda-recipe`` directory exists within the path, render the
        corresponding recipe.
     2. Get the name(s) of the output package(s) from the rendered recipe.

    Parameters
    ----------
    **kwargs
        Keyword arguments to pass to :func:`conda_helpers.render`.

    Returns
    -------
    list
        Flat list of package name strings — one entry per output of each
        rendered recipe.  (An earlier revision of this docstring described a
        list of ``(source_path, packages)`` tuples, which does not match the
        implementation.)

    .. versionadded:: 0.20
    '''
    # NOTE(review): the `'Lib', 'site-packages'` layout is Windows-specific —
    # confirm this is never run on POSIX, where it would be `lib/pythonX.Y/...`.
    conda_pth = conda_prefix().joinpath('Lib', 'site-packages', 'conda.pth')
    dev_package_names = []
    # Each line of `conda.pth` is a source directory linked by `conda develop`.
    for dev_path_i in [ph.path(str.strip(p)) for p in conda_pth.lines()]:
        recipe_dir_i = dev_path_i.joinpath('.conda-recipe')
        if not recipe_dir_i.isdir():
            # No recipe alongside the linked sources; nothing to render.
            if kwargs.get('verbose'):
                print(co.Fore.RED + 'skipping:', co.Fore.WHITE + dev_path_i,
                      file=sys.stderr)
            continue
        if kwargs.get('verbose'):
            print(co.Fore.MAGENTA + 'processing:', co.Fore.WHITE + dev_path_i,
                  file=sys.stderr)
        try:
            recipe_i = render(recipe_dir_i, **kwargs)
            recipe_objs_i = recipe_objs(recipe_i)
            # A multi-output recipe yields several package names.
            for recipe_obj_ij in recipe_objs_i:
                dev_package_names += [recipe_obj_ij['package']['name']]
        except Exception as exception:
            # NOTE(review): render failures are swallowed (printed, not
            # raised/logged) so one broken recipe does not abort the scan —
            # confirm this best-effort behaviour is intended.
            print('error:', exception)
    return dev_package_names
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,469
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/py_api.py
|
# coding: utf-8
'''
.. versionadded:: 0.21
'''
from __future__ import absolute_import, print_function, unicode_literals
import json
import logging
import re
import sys
import path_helpers as ph
import six
logger = logging.getLogger(__name__)
class PackageNotFound(Exception):
    '''
    Raised when one or more requested Conda packages cannot be found.

    Attributes
    ----------
    missing : list
        Names of the missing Conda packages.
    available : list
        Package information dictionaries for the packages that *were* found,
        so callers may continue processing them.
    '''
    def __init__(self, missing, available=None):
        '''
        Parameters
        ----------
        missing : str or list
            Name(s) of missing Conda packages.
        available : str or list, optional
            List of package information dictionaries of a set of available
            Conda packages.
        '''
        # Normalize both arguments to lists.
        self.missing = ([missing] if isinstance(missing, six.string_types)
                        else missing)
        if available is None:
            self.available = []
        elif isinstance(available, six.string_types):
            self.available = [available]
        else:
            self.available = available

    def __str__(self):
        if len(self.missing) > 1:
            names = ', '.join('`{}`'.format(package_i)
                              for package_i in self.missing)
            return ('The following package(s) could not be found: {}'
                    .format(names))
        if self.missing:
            return ('Package `{}` could not be found.'
                    .format(self.missing[0]))
        return 'Package not found.'
def conda_prefix():
    '''
    Returns
    -------
    path_helpers.path
        Prefix of the environment the running interpreter belongs to, i.e.,
        :attr:`sys.prefix` wrapped as a :class:`path_helpers.path`.

        (Historically returned ``None`` outside a Conda environment; since
        0.12.4 it simply reflects :attr:`sys.prefix`.)
    '''
    prefix = sys.prefix
    return ph.path(prefix)
def package_version(name, *args, **kwargs):
    '''
    Look up version information for one or more installed Conda packages.

    Reads the ``<prefix>/conda-meta`` index via :func:`conda_list`, so no
    ``conda`` executable is required.

    Parameters
    ----------
    name : str or list
        Name(s) of installed Conda package.
    *args
        Additional args (accepted for backwards compatibility; unused here).
    **kwargs
        Additional keyword args (accepted for backwards compatibility).

    Returns
    -------
    dict or list
        For a single name, one dictionary containing (at least) ``'name'``,
        ``'version'``, and ``'build'``.  For a list of names, a list of such
        dictionaries in the same order as the requested names.

    Raises
    ------
    NameError
        If no requested package is installed at all.
    PackageNotFound
        If some (but not all) specified packages could not be found.  The
        exception's ``available`` attribute holds info for the packages that
        *were* found.
    '''
    singleton = isinstance(name, six.string_types)
    names = [name] if singleton else name
    # One regex alternation query resolves all requested names at once.
    found = list(conda_list('|'.join(names), full_name=True).values())
    if not found:
        raise NameError('Package `{}` not installed.'.format(names))
    if singleton:
        return found[0]
    # Re-order results to match the order names were requested in.
    by_name = {info_i['name']: info_i for info_i in found}
    missing = [name_i for name_i in names if name_i not in by_name]
    available = [by_name[name_i] for name_i in names if name_i in by_name]
    if missing:
        raise PackageNotFound(missing, available=available)
    return available
def conda_list(regex, full_name=False):
    '''
    Emulate ``conda list`` command by scanning ``<prefix>/conda-meta``.

    .. note::
        This function **does not** require the ``conda`` executable to be
        available on the system path, which matters for environments created
        with ``conda>=4.4`` (no per-environment ``conda`` link).

    Parameters
    ----------
    regex : str
        Regular expression or package name.
    full_name : bool, optional
        If ``True``, only search for full names, i.e., ``^<regex>$``.

    Returns
    -------
    dict
        Mapping of each matched package name to its package info (including
        ``'name'``, ``'version'``, and ``'build'``).

    .. versionadded:: 0.21
    '''
    # Conda metadata file names follow `<name>-<version>-<build>` (see
    # https://conda.io/docs/user-guide/tasks/build-packages/package-naming-conv.html).
    cre_package = re.compile(r'^(?P<package_name>.*)-(?P<version>[^\-]+)'
                             r'-(?P<build_string>[^\-])+$')
    pattern = '^{}$'.format(regex) if full_name else regex
    matches = {}
    for meta_file_i in conda_prefix().joinpath('conda-meta').files('*.json'):
        name_match_i = cre_package.match(meta_file_i.namebase)
        if name_match_i is None:
            # Unrecognized file name format.
            continue
        if not re.match(pattern, name_match_i.group('package_name')):
            # Package name does not match specified regular expression.
            continue
        package_info_i = json.loads(meta_file_i.text())
        matches[package_info_i['name']] = package_info_i
    return matches
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,470
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/_async_py35.py
|
from __future__ import absolute_import, print_function, unicode_literals
from functools import partial
from shutil import get_terminal_size
import asyncio
import io
import itertools as it
import subprocess as sp
import sys
import colorama as co
async def _read_stream(stream, callback=None, buffer_size=None):
while True:
data = await stream.read(buffer_size or 1)
if data:
if callback is not None:
callback(data)
else:
break
async def run_command(cmd, *args, **kwargs):
    '''
    Run *cmd* in a subprocess, streaming its ``stdout``/``stderr`` while
    capturing both for the caller.

    .. versionchanged:: 0.18
        Display wait indicator if ``verbose`` is set to ``None`` (default).

    Parameters
    ----------
    cmd : str or list
        Command to execute (a list is joined with
        :func:`subprocess.list2cmdline`).
    shell : bool, optional
        Execute through the shell (default ``True``).
    verbose : bool or None, optional
        ``True``: echo output live; ``False``: silent; ``None``: show an
        animated waiting indicator instead of the output.
    buffer_size : int, optional
        Read chunk size for the output pipes.

    Returns
    -------
    tuple
        ``(return_code, stdout, stderr)`` where the last two are decoded
        (UTF-8) strings.
    '''
    shell = kwargs.pop('shell', True)
    verbose = kwargs.pop('verbose', True)
    buffer_size = kwargs.pop('buffer_size', io.DEFAULT_BUFFER_SIZE)

    if isinstance(cmd, list):
        cmd = sp.list2cmdline(cmd)
    _exec_func = (asyncio.subprocess.create_subprocess_shell
                  if shell else asyncio.subprocess.create_subprocess_exec)
    process = await _exec_func(cmd, *args, stdout=asyncio.subprocess.PIPE,
                               stderr=asyncio.subprocess.PIPE)
    # Accumulate decoded output for the return value.
    stdout_ = io.StringIO()
    stderr_ = io.StringIO()

    # Truncate the status message so it fits on one terminal line.
    terminal_size = get_terminal_size()
    message = [co.Fore.MAGENTA + 'Executing:', co.Fore.WHITE + cmd]
    if sum(map(len, message)) + 2 > terminal_size.columns:
        cmd_len = terminal_size.columns - 2 - sum(map(len, ('...',
                                                            message[0])))
        message[1] = co.Fore.WHITE + cmd[:cmd_len] + '...'

    # Spinner characters for the waiting indicator.
    waiting_indicator = it.cycle(r'\|/-')
    cmd_finished = asyncio.Event()

    async def display_status():
        '''
        Display status while executing command.
        '''
        # Update no faster than `stderr` flush interval (if set).
        update_interval = 2 * getattr(sys.stderr, 'flush_interval', .2)

        while not cmd_finished.is_set():
            print('\r' + co.Fore.WHITE + next(waiting_indicator), *message,
                  end='', file=sys.stderr)
            await asyncio.sleep(update_interval)
        print('\r' + co.Fore.GREEN + 'Finished:', co.Fore.WHITE + cmd,
              file=sys.stderr)

    def dump(output, data):
        # Decode each chunk, optionally echo it, and buffer it for return.
        text = data.decode('utf8')
        if verbose:
            print(text, end='')
        output.write(text)

    if verbose is None:
        # Display status while executing command.
        status_future = asyncio.ensure_future(display_status())
    # NOTE(review): passing bare coroutines to `asyncio.wait` is deprecated
    # since Python 3.8 and a TypeError from 3.11 — wrap in
    # `asyncio.ensure_future(...)` if this must run on modern interpreters.
    await asyncio.wait([_read_stream(process.stdout, partial(dump, stdout_),
                                     buffer_size=buffer_size),
                        _read_stream(process.stderr, partial(dump, stderr_),
                                     buffer_size=buffer_size)])
    # Notify that command has completed execution.
    cmd_finished.set()
    if verbose is None:
        # Wait for status to display "Finished: ..."
        await status_future
    return_code = await process.wait()
    return return_code, stdout_.getvalue(), stderr_.getvalue()
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,471
|
mkdryden/conda-helpers
|
refs/heads/master
|
/conda_helpers/asyncio_util.py
|
# coding: utf-8
'''
.. versionadded:: 0.21
'''
from __future__ import absolute_import, print_function, unicode_literals
from functools import wraps
import logging
import platform
import sys
import threading
if sys.version_info <= (3, 4):
import trollius as asyncio
from ._async_py27 import run_command
else:
import asyncio
from ._async_py35 import run_command
__all__ = ['new_file_event_loop', 'ensure_event_loop', 'with_loop', 'asyncio',
'run_command']
logger = logging.getLogger(__name__)
def new_file_event_loop():
    '''
    .. versionadded:: 0.15

    Returns
    -------
    asyncio.BaseEventLoop
        A fresh event loop suitable for file IO events (including ``stdout``
        and ``stderr`` pipes).  **On Windows the default loop _does not_
        support file or stream events, so a :class:`ProactorEventLoop` is
        created explicitly there.**
    '''
    if platform.system() == 'Windows':
        # Only the proactor loop supports pipe/file events on Windows.
        return asyncio.ProactorEventLoop()
    return asyncio.new_event_loop()
def ensure_event_loop():
    '''
    .. versionadded:: 0.15

    Return the event loop bound to the current thread, creating and binding a
    new one (via :func:`new_file_event_loop`) if none exists yet.

    Returns
    -------
    asyncio.BaseEventLoop
    '''
    try:
        return asyncio.get_event_loop()
    except RuntimeError as exception:
        # Any other RuntimeError is not ours to handle.
        if 'There is no current event loop' not in str(exception):
            raise
    loop = new_file_event_loop()
    asyncio.set_event_loop(loop)
    return loop
def with_loop(func):
    '''
    .. versionadded:: 0.15

    Decorator to run function within an asyncio event loop.

    .. notes::
        Uses :class:`asyncio.ProactorEventLoop` on Windows to support file I/O
        events, e.g., serial device events.

        If an event loop is already bound to the thread, but is either a)
        currently running, or b) *not a :class:`asyncio.ProactorEventLoop`
        instance*, execute function in a new thread running a new
        :class:`asyncio.ProactorEventLoop` instance.

    Parameters
    ----------
    func : callable
        Coroutine function; calling the wrapper runs the coroutine to
        completion and returns its result (re-raising any exception).
    '''
    @wraps(func)
    def wrapped(*args, **kwargs):
        loop = ensure_event_loop()

        # Decide whether the coroutine must run on a dedicated background
        # thread with its own loop (current loop busy, or wrong loop type on
        # Windows).
        thread_required = False
        if loop.is_running():
            logger.debug('Event loop is already running.')
            thread_required = True
        elif all([platform.system() == 'Windows',
                  not isinstance(loop, asyncio.ProactorEventLoop)]):
            logger.debug('`ProactorEventLoop` required, not `%s`'
                         'loop in background thread.', type(loop))
            thread_required = True

        if thread_required:
            logger.debug('Execute new loop in background thread.')
            finished = threading.Event()

            def _run(generator):
                # Runs on the background thread: bind a fresh loop there,
                # drive the coroutine, and hand the outcome back through
                # attributes on the `finished` event.
                loop = ensure_event_loop()
                try:
                    result = loop.run_until_complete(asyncio
                                                     .ensure_future(generator))
                except Exception as e:
                    finished.result = None
                    finished.error = e
                else:
                    finished.result = result
                    finished.error = None
                finished.set()
            thread = threading.Thread(target=_run,
                                      args=(func(*args, **kwargs), ))
            thread.daemon = True
            thread.start()
            finished.wait()
            if finished.error is not None:
                raise finished.error
            return finished.result

        logger.debug('Execute in exiting event loop in main thread')
        # BUG FIX: positional arguments were previously dropped on this path
        # (`func(**kwargs)`), so decorated coroutines called with positional
        # args raised TypeError whenever no background thread was needed.
        return loop.run_until_complete(func(*args, **kwargs))
    return wrapped
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,472
|
mkdryden/conda-helpers
|
refs/heads/master
|
/setup.py
|
# Packaging script for the `conda-helpers` distribution.
import sys
import setuptools as st

# Make the project root importable so the vendored `versioneer` module (which
# derives the version from source-control metadata) can be found.
sys.path.insert(0, '.')
import versioneer


st.setup(name='conda-helpers',
         version=versioneer.get_version(),
         cmdclass=versioneer.get_cmdclass(),
         # NOTE(review): placeholder description — fill in before release.
         description='Add description here.',
         keywords='',
         author='Christian Fobel',
         author_email='christian@fobel.net',
         url='https://github.com/sci-bots/conda-helpers',
         license='BSD',
         packages=['conda_helpers'],
         # Runtime dependencies (mirrors the imports in `conda_helpers/`).
         install_requires=['colorama', 'joblib', 'path-helpers', 'six'],
         # Install data listed in `MANIFEST.in`
         include_package_data=True)
|
{"/conda_helpers/__init__.py": ["/conda_helpers/exe_api.py", "/conda_helpers/py_api.py"], "/conda_helpers/__main__.py": ["/conda_helpers/__init__.py", "/conda_helpers/exe_api.py"], "/conda_helpers/exe_api.py": ["/conda_helpers/asyncio_util.py", "/conda_helpers/py_api.py", "/conda_helpers/recipes.py"], "/conda_helpers/asyncio_util.py": ["/conda_helpers/_async_py27.py", "/conda_helpers/_async_py35.py"]}
|
14,477
|
rahulkris1/SSW810
|
refs/heads/master
|
/HW09_ReSub_Rahul_Kampati.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 26 November 2019, 08:00
@author: Kampati Rahul
Creation of a data repository for students and instructors to keep track of data
"""
import os
from collections import defaultdict
from prettytable import PrettyTable
class Student:
    """Record for one student: identity plus grades earned per course."""

    def __init__(self, cwid, name, major):
        """Store the student's CWID, name, and declared major."""
        self.cwid = cwid
        self.name = name
        self.major = major
        # Maps course name -> letter grade (missing courses default to '').
        self.course_grade_dict = defaultdict(str)

    def course_grade_student(self, course, grade):
        """Record *grade* for *course* (overwriting any previous grade)."""
        self.course_grade_dict[course] = grade

    def prettyTable_student(self):
        """Return a PrettyTable row: [cwid, name, sorted course names]."""
        completed_courses = sorted(self.course_grade_dict)
        return [self.cwid, self.name, completed_courses]
class Instructor:
    """Record for one instructor: identity plus enrolment counts per course."""

    def __init__(self, cwid, name, dept):
        """Store the instructor's CWID, name, and department."""
        self.cwid = cwid
        self.name = name
        self.dept = dept
        # Maps course name -> number of enrolled students.
        self.course_inst_dict = defaultdict(int)

    def num_course_students(self, course):
        """Count one more student enrolled in *course*."""
        self.course_inst_dict[course] += 1

    def prettyTable_instructor(self):
        """Yield one PrettyTable row per taught course:
        [cwid, name, dept, course, student count]."""
        for course, student_count in self.course_inst_dict.items():
            yield [self.cwid, self.name, self.dept, course, student_count]
class Repository:
    """Load students, instructors, and grades from *directory* and print
    summary tables.

    Expects three tab-separated files in the directory: ``students.txt``,
    ``instructors.txt``, and ``grades.txt``.  Summaries are printed as a side
    effect of construction.
    """

    def __init__(self, directory):
        """Read all data files from *directory* and print both summaries."""
        self.directory = directory
        self.student_dict = {}      # cwid -> Student
        self.instructor_dict = {}   # cwid -> Instructor
        self.student_analyser()
        self.instructor_analyser()
        self.grades_analyser()
        self.students_summary()
        self.instructors_summary()

    def student_analyser(self):
        """Populate ``student_dict`` from ``students.txt`` (cwid, name, major)."""
        if not os.path.exists(self.directory):
            raise FileNotFoundError("Directory not found")
        file_students = os.path.join(self.directory, 'students.txt')
        for cwid, name, major in self.file_reading_gen(file_students, 3, "\t", False):
            self.student_dict[cwid] = Student(cwid, name, major)

    def instructor_analyser(self):
        """Populate ``instructor_dict`` from ``instructors.txt`` (cwid, name, dept)."""
        if not os.path.exists(self.directory):
            raise FileNotFoundError("Directory not found")
        file_instructors = os.path.join(self.directory, 'instructors.txt')
        for cwid, name, dept in self.file_reading_gen(file_instructors, 3, "\t", False):
            self.instructor_dict[cwid] = Instructor(cwid, name, dept)

    def grades_analyser(self):
        """Cross-link ``grades.txt`` rows into the student/instructor records.

        Unknown CWIDs are reported on stdout and skipped rather than raising.
        """
        if not os.path.exists(self.directory):
            raise FileNotFoundError("Directory not found")
        file_grades = os.path.join(self.directory, 'grades.txt')
        for studentCwid, course, grade, instructorCwid in self.file_reading_gen(file_grades, 4, "\t", False):
            if studentCwid in self.student_dict.keys():
                self.student_dict[studentCwid].course_grade_student(course, grade)
            else:
                print(f"Invalid student cwid {studentCwid}")
            if instructorCwid in self.instructor_dict.keys():
                self.instructor_dict[instructorCwid].num_course_students(course)
            else:
                print(f"Invalid Instructor id {instructorCwid}")

    def file_reading_gen(self, path, fields, sep, header=False):
        """Yield one tuple per line of *path*, split on *sep*.

        Raises ValueError when a line (or the header, if *header* is true)
        does not contain exactly *fields* fields.
        """
        try:
            fp = open(path, 'r')
        except FileNotFoundError:
            raise FileNotFoundError("Unable to open the file path provided")
        else:
            with fp:
                if header:
                    header_info = next(fp)
                    if len(header_info.split(sep)) != fields:
                        raise ValueError(f"File path has {len(header_info.split(sep))} invalid number of fields instead of {fields}")
                for line in fp:
                    field_count = len(line.split(sep))
                    if field_count != fields:
                        # BUG FIX: the original message used
                        # ``len(next(fp.split(sep)))`` — file objects have no
                        # ``split``, so a malformed line raised AttributeError
                        # instead of the intended ValueError.
                        raise ValueError(f" file has {field_count} fields instead of {fields} ")
                    else:
                        line = line.strip().split(sep)
                        yield tuple(line)

    def students_summary(self):
        """Print a PrettyTable of every student and their completed courses."""
        tb_student = PrettyTable(field_names = ["CWID", "Name", "Completed Courses"])
        for inst_student in self.student_dict.values():
            tb_student.add_row(inst_student.prettyTable_student())
        print("Student Summary")
        print(tb_student)

    def instructors_summary(self):
        """Print a PrettyTable row per (instructor, course) with enrolment counts."""
        tb_instructor = PrettyTable(field_names = ["CWID", "Name", "Dept", "Course", "Students"])
        for inst_instructor in self.instructor_dict.values():
            for instructor_data in inst_instructor.prettyTable_instructor():
                tb_instructor.add_row(instructor_data)
        print("Instructor Summary")
        print(tb_instructor)
def main():
    """Build the repository and print any load error instead of crashing."""
    try:
        # NOTE(review): machine-specific hard-coded path — consider a CLI argument.
        Repository("C:/Users/HP/Desktop/redo/file_09")
    except Exception as e:
        print(e)
if __name__ == "__main__":
    main()
|
{"/HW09_ReSub_Test_Rahul_Kampati.py": ["/HW09_ReSub_Rahul_Kampati.py"]}
|
14,478
|
rahulkris1/SSW810
|
refs/heads/master
|
/HW09_ReSub_Test_Rahul_Kampati.py
|
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on 26 November 2019, 09:30:00
@author: Kampati Rahul
Testing the Creation of data repository of courses, students, faculty members.
"""
import unittest
from HW09_ReSub_Rahul_Kampati import Repository
fp = "C:/Users/HP/Desktop/redo/file_09/test"
class TestRepository(unittest.TestCase):
    """Unit tests for the Repository data loader."""

    def test_student_dict(self):
        """Repository should load exactly the expected student CWID."""
        test = Repository(fp)
        self.assertEqual(list(test.student_dict.keys()), ["10103"])

    def test_instructor_info_dict(self):
        """Repository should load exactly the expected instructor CWID."""
        test = Repository(fp)
        # BUG FIX: attribute was misspelled "insructor_dict", which made this
        # test always fail with AttributeError rather than checking the data.
        self.assertEqual(list(test.instructor_dict.keys()), ["98765"])
if __name__ == "__main__":
unittest.main(exit=False, verbosity=2)
|
{"/HW09_ReSub_Test_Rahul_Kampati.py": ["/HW09_ReSub_Rahul_Kampati.py"]}
|
14,490
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day8.py
|
class Node:
    """One license-tree node: a two-int header, children, then metadata."""
    def __init__(self, license_iter) -> None:
        super().__init__()
        # Header: number of child nodes, then number of metadata entries.
        self.child_count = next(license_iter)
        self.metadata_count = next(license_iter)
        # Children are serialized depth-first, so recursing consumes them in order.
        self.child_nodes = [Node(license_iter) for _ in range(self.child_count)]
        self.metadata = [next(license_iter) for _ in range(self.metadata_count)]
class Tree:
    """License tree parsed from a space-separated string of integers."""
    def __init__(self, license_file) -> None:
        super().__init__()
        # The license is one flat int stream; Node consumes it recursively.
        self.root = Node(iter(int(d) for d in license_file.split(' ')))
def node_value(node):
    """Value of a node (AoC 2018 day 8 part 2).

    A leaf's value is the sum of its metadata. Otherwise each metadata entry
    is a 1-based child index; entries of 0 or past the last child count as 0.
    """
    if node.child_count == 0:
        return sum(node.metadata)
    # BUG FIX: an entry of 0 previously evaluated child_nodes[-1] (the last
    # child) because only the upper bound was checked; per the puzzle a 0
    # entry refers to no child and must contribute nothing.
    return sum(node_value(node.child_nodes[index - 1])
               if 1 <= index <= len(node.child_nodes) else 0
               for index in node.metadata)
def checksum(node):
    """Sum of every metadata entry in the subtree rooted at node (part 1)."""
    total = sum(node.metadata)
    for child in node.child_nodes:
        total += checksum(child)
    return total
def part1(license_file):
    """Part 1 answer: total of all metadata entries in the tree."""
    return checksum(Tree(license_file).root)
def part2(license_file):
    """Part 2 answer: the value of the root node."""
    return node_value(Tree(license_file).root)
def tests():
    """Verify both parts against the puzzle's worked example tree."""
    test_license_file = '2 3 0 3 10 11 12 1 1 0 1 99 2 1 1 2'
    assert part1(test_license_file) == 138
    assert part2(test_license_file) == 66
if __name__ == '__main__':
    tests()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,491
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day1.py
|
from itertools import cycle
resulting_frequency = sum
def calibration_frequency(frequencies):
    """First running total reached twice while summing frequencies.

    Per AoC 2018 day 1 part 2 the starting frequency 0 already counts as
    seen (the example "+1, -1" must answer 0), so 0 is pre-seeded below.
    Returns None if the (finite) input is exhausted without a repeat.
    """
    seen = {0}  # BUG FIX: the starting frequency was never marked as seen
    running_sum = 0
    for frequency in frequencies:
        running_sum += frequency
        if running_sum in seen:
            return running_sum
        seen.add(running_sum)
def part1(day1input):
    """Sum of all newline-separated frequency changes."""
    frequencies = [int(num) for num in day1input.splitlines()]
    return resulting_frequency(frequencies)
def part2(day1input):
    """First repeated running total, cycling the change list forever."""
    frequencies = [int(num) for num in day1input.splitlines()]
    return calibration_frequency(cycle(frequencies))
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,492
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day4.py
|
import datetime
from collections import defaultdict
def parse_date(log_line):
    """Parse the bracketed [YYYY-MM-DD HH:MM] timestamp of a log line."""
    opening = log_line.index('[') + 1
    closing = log_line.index(']')
    stamp = log_line[opening:closing]
    return datetime.datetime.strptime(stamp, '%Y-%m-%d %H:%M')
def get_guard_number(log_line):
    """Return the integer guard id following '#' in a shift-start line."""
    hash_pos = log_line.index('#')
    space_pos = log_line.index(' ', hash_pos)  # first space after the id digits
    return int(log_line[hash_pos + 1:space_pos])
def get_guards(schedule_log):
    """Build guard_id -> list of asleep minute-ranges from a sorted log.

    Relies on the log lines being chronologically sorted so every
    sleep/wake pair follows the "Guard #N begins shift" line it belongs to.
    """
    guards = {}
    current_guard = None
    sleep_start = None
    for log_line in schedule_log:
        if "Guard" in log_line:
            guard_number = get_guard_number(log_line)
            if guard_number not in guards:
                guards[guard_number] = []
            current_guard = guards[guard_number]
        if 'sleep' in log_line:
            sleep_start = parse_date(log_line).minute
        if 'wake' in log_line:
            # Asleep from sleep_start up to (excluding) the wake minute.
            current_guard.append(range(sleep_start, parse_date(log_line).minute))
    return guards
def get_total_sleep(sleep_ranges):
    """Total number of minutes covered by all sleep ranges."""
    total = 0
    for nap in sleep_ranges:
        total += len(nap)
    return total
def get_sleepiest_guard(guards):
    """Guard id with the most total minutes asleep.

    BUG FIX: the original used max(guards, key=sum), which applies sum() to
    each guard id (an int) and raises TypeError; rank by total sleep instead.
    """
    return max(guards, key=lambda guard: sum(len(nap) for nap in guards[guard]))
def get_minutes_slept(sleep_ranges):
    """Map minute-of-hour -> how many naps covered that minute."""
    tally = defaultdict(int)
    for minute in (m for nap in sleep_ranges for m in nap):
        tally[minute] += 1
    return tally
def get_sleepiest_minute(sleep_ranges):
    """Return (minute most often slept through, number of times it was slept)."""
    minutes_slept = get_minutes_slept(sleep_ranges)
    minute = max(minutes_slept, key=minutes_slept.get)
    return minute, minutes_slept[minute]
def get_guard_most_frequently_asleep_on_same_minute(guards):
    """Guard id whose single sleepiest minute has the highest count (strategy 2)."""
    # Guards that never slept are skipped: get_sleepiest_minute needs data.
    sleepiest_minute_counts = {guard: get_sleepiest_minute(sleep_ranges)[1] for guard, sleep_ranges in guards.items() if
                               len(sleep_ranges) > 0}
    return max(sleepiest_minute_counts, key=sleepiest_minute_counts.get)
def part1(day4_input):
    """Strategy 1: sleepiest guard's id * his most-slept minute."""
    schedule_log = sorted(day4_input.splitlines())
    guards = get_guards(schedule_log)
    sleepiest_guard = max(guards, key=lambda x: get_total_sleep(guards[x]))
    minute_asleep_most, count = get_sleepiest_minute(guards[sleepiest_guard])
    return sleepiest_guard * minute_asleep_most
def part2(day4_input):
    """Strategy 2: most-frequently-asleep-on-one-minute guard id * that minute."""
    schedule_log = sorted(day4_input.splitlines())
    guards = get_guards(schedule_log)
    guard_most_frequently_asleep_on_same_minute = get_guard_most_frequently_asleep_on_same_minute(guards)
    sleepiest_minute_of_guard = get_sleepiest_minute(guards[guard_most_frequently_asleep_on_same_minute])[0]
    return guard_most_frequently_asleep_on_same_minute * sleepiest_minute_of_guard
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,493
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day3.py
|
import re
from collections import defaultdict
def claim_coordinates(claim):
    """Yield every (x, y) square inch covered by a claim dict of strings."""
    left = int(claim['left_edge'])
    top = int(claim['top_edge'])
    for x in range(left, left + int(claim['width'])):
        for y in range(top, top + int(claim['height'])):
            yield x, y
def get_claimed_fabric(claims):
    """Map (x, y) -> how many claims cover that square inch of fabric."""
    fabric = defaultdict(int)
    for claim in claims:
        for x, y in claim_coordinates(claim):
            fabric[(x, y)] += 1
    return fabric
def get_overlapping_area_of_claims(fabric):
    """Number of square inches claimed by two or more claims."""
    return sum(count > 1 for count in fabric.values())
def get_claims(day3input):
    """Parse every "#id @ left,top: WxH" claim line into a dict of strings.

    FIX: the pattern is now a raw string — '\\d' in a plain literal is an
    invalid escape sequence (DeprecationWarning, SyntaxWarning on 3.12+).
    """
    r = re.compile(r'#(?P<ID>\d+) @ (?P<left_edge>\d+),(?P<top_edge>\d+): (?P<width>\d+)x(?P<height>\d+)')
    return [m.groupdict() for m in r.finditer(day3input)]
def claim_overlaps(claim, fabric):
    """True if any square inch of claim is covered by more than one claim."""
    for x, y in claim_coordinates(claim):
        if fabric[(x, y)] > 1:
            return True
    return False
def get_perfect_claim_id(claims, fabric):
    """ID string of the first claim overlapping no other (None if absent)."""
    for claim in claims:
        if not claim_overlaps(claim, fabric):
            return claim['ID']
def part1(day3input):
    """Total overlapping square inches across all claims."""
    day3_claims = get_claims(day3input)
    claimed_fabric = get_claimed_fabric(day3_claims)
    return get_overlapping_area_of_claims(claimed_fabric)
def part2(day3input):
    """ID of the single claim that overlaps no other claim."""
    day3_claims = get_claims(day3input)
    claimed_fabric = get_claimed_fabric(day3_claims)
    return get_perfect_claim_id(day3_claims, claimed_fabric)
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,494
|
MRSharff/adventofcode
|
refs/heads/master
|
/advent_of_code.py
|
import os
import requests
try:
session = os.environ['advent_session_cookie']
except KeyError:
session = None
class AdventOfCodeException(Exception):
pass
def get_cached_input(input_file_path):
with open(input_file_path) as advent_input_file:
advent_input = advent_input_file.read()
return advent_input
def write_response(input_path, response):
os.makedirs(os.path.dirname(input_path), exist_ok=True)
with open(input_path, 'w') as advent_input:
advent_input.write(response.text)
def fetch_day_input(day):
response = requests.get('https://adventofcode.com/2018/day/{}/input'.format(day), cookies={'session': session})
if not response.ok:
raise AdventOfCodeException('Could not get day input: bad response code {}'.format(response.status_code))
return response
def get_day_input(day):
input_file_path = 'inputs/day{}.txt'.format(day)
try:
return get_cached_input(input_file_path).strip()
except FileNotFoundError:
if session is None:
raise AdventOfCodeException('No session defined')
response = fetch_day_input(day)
write_response(input_file_path, response)
return response.text.strip()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,495
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day7.py
|
import re
from collections import defaultdict
seconds_per_step = 60
worker_count = 5
def get_available_steps(steps):
counts = defaultdict(int)
for step, dependencies in steps.items():
for dependency in dependencies:
counts[dependency] += 1
return [dependency for dependency in steps if counts[dependency] == 0]
def part1(steps_input):
steps = get_steps(steps_input.strip().splitlines())
step_order = []
while len(steps) > 0:
available_steps = sorted(get_available_steps(steps))
selected = min(available_steps)
step_order.append(selected)
steps.pop(selected)
return ''.join(step_order)
def get_total_time(step):
return ord(step) - 64 + seconds_per_step
def get_steps(step_instructions):
steps = {}
for instruction in step_instructions:
prereq, step = re.search('Step ([A-Z]) must be finished before step ([A-Z]) can begin.', instruction).groups()
if prereq not in steps:
steps[prereq] = []
if step not in steps:
steps[step] = []
steps[prereq].append(step)
return steps
def part2(steps_input):
# steps is a graph implemented with a simple dictionary used for a directed acyclic graph vertex: edge list
steps = get_steps(steps_input.strip().splitlines())
current_jobs = {}
available_steps = get_available_steps(steps)
total_time = 0
while len(available_steps) > 0:
# assign jobs
for step in sorted(available_steps):
if len(current_jobs) < worker_count and step not in current_jobs:
current_jobs[step] = get_total_time(step)
# do work
time_worked = min(current_jobs.values())
for job, time_left in current_jobs.copy().items():
if time_left == time_worked:
steps.pop(job)
current_jobs.pop(job)
else:
current_jobs[job] -= time_worked
total_time += time_worked
available_steps = get_available_steps(steps)
return total_time
def test():
test_input = """Step C must be finished before step A can begin.
Step C must be finished before step F can begin.
Step A must be finished before step B can begin.
Step A must be finished before step D can begin.
Step B must be finished before step E can begin.
Step D must be finished before step E can begin.
Step F must be finished before step E can begin."""
global seconds_per_step, worker_count
twc = worker_count
worker_count = 2
tsps = seconds_per_step
seconds_per_step = 0
assert part1(test_input) == 'CABDFE'
assert part2(test_input) == 15
seconds_per_step = tsps
worker_count = twc
if __name__ == '__main__':
test()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,496
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day5.py
|
import string
def annihilates(unit1, unit2):
    """True when the two units react (ASCII codes exactly 32 apart)."""
    low, high = sorted((ord(unit1), ord(unit2)))
    return high - low == 32
def react(polymer):
    """Fully react the polymer with a single stack-based scan.

    Whenever the incoming unit annihilates the top of the stack, both
    disappear; otherwise the unit is pushed. The stack is the result.
    """
    stack = []
    for unit in polymer:
        if stack and annihilates(stack[-1], unit):
            stack.pop()
        else:
            stack.append(unit)
    return ''.join(stack)
def remove_all(unit, polymer):
    """Strip every occurrence of unit and its upper-case form from polymer."""
    targets = {unit, unit.upper()}
    return ''.join(ch for ch in polymer if ch not in targets)
def tests():
test_polymer = 'dabAcCaCBAcCcaDA'
assert react(test_polymer) == 'dabCBAcaDA'
assert react('aA') == ''
assert react('abBA') == ''
assert react('abAB') == 'abAB'
assert react('aabAAB') == 'aabAAB'
assert part1(test_polymer) == 10
assert remove_all('a', test_polymer) == 'dbcCCBcCcD'
assert remove_all('b', test_polymer) == 'daAcCaCAcCcaDA'
assert remove_all('c', test_polymer) == 'dabAaBAaDA'
assert remove_all('d', test_polymer) == 'abAcCaCBAcCcaA'
assert react(remove_all('a', test_polymer)) == 'dbCBcD'
assert react(remove_all('b', test_polymer)) == 'daCAcaDA'
assert react(remove_all('c', test_polymer)) == 'daDA'
assert react(remove_all('d', test_polymer)) == 'abCBAc'
assert part2(test_polymer) == 4
print("tests successful")
def part1(polymer):
    """Length of the fully reacted polymer."""
    return len(react(polymer))
def part2(polymer):
    """Shortest reacted length achievable by first removing one unit type."""
    unit_types = string.ascii_lowercase
    polymer_sizes = [len(react(remove_all(unit, polymer))) for unit in unit_types]
    return min(polymer_sizes)
if __name__ == '__main__':
tests()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,497
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day2.py
|
from collections import defaultdict
from itertools import combinations
def get_letter_counts(box_id):
    """Map each character of box_id to its occurrence count.

    Uses collections.Counter (a dict subclass, like the defaultdict it
    replaces: missing keys read as 0), instead of a hand-rolled loop.
    """
    from collections import Counter  # stdlib; local import keeps the block self-contained
    return Counter(box_id)
def checksum(box_ids):
    """(# ids with a letter exactly twice) * (# ids with a letter exactly thrice)."""
    twos = 0
    threes = 0
    for box_id in box_ids:
        counts = set(get_letter_counts(box_id).values())
        twos += 2 in counts    # bool adds as 0/1
        threes += 3 in counts
    return twos * threes
def get_differences(str1, str2):
    """Map position -> (char1, char2) wherever the two strings disagree.

    Comparison stops at the shorter string's length (zip truncation).
    """
    return {pos: pair
            for pos, pair in enumerate(zip(str1, str2))
            if pair[0] != pair[1]}
def get_prototype_boxes(candidate_ids):
    """First pair of ids that differ in exactly one aligned position."""
    for id1, id2 in combinations(candidate_ids, 2):
        if len(get_differences(id1, id2)) == 1:
            return id1, id2
def common_characters(str1, str2):
    """Concatenate the characters that match position-wise in both strings."""
    kept = []
    for a, b in zip(str1, str2):
        if a == b:
            kept.append(a)
    return ''.join(kept)
def part1(day2input):
    """Checksum of the box-id list: twos-count times threes-count."""
    likely_candidates = day2input.splitlines()
    return checksum(likely_candidates)
def part2(day2input):
    """Common letters of the two prototype box ids (differ by one char)."""
    likely_candidates = day2input.splitlines()
    return common_characters(*get_prototype_boxes(likely_candidates))
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,498
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day6.py
|
from collections import defaultdict
def manhattan_distance(a, b):
return abs(a[0] - b[0]) + abs(a[1] - b[1])
def get_closest(coordinate, coordinates):
distances = defaultdict(list)
for other_coordinate in coordinates:
distances[manhattan_distance(coordinate, other_coordinate)].append(other_coordinate)
closest = min(distances)
if len(distances[closest]) > 1:
return None
return distances[closest][0]
def largest_noninfinite_area(coordinates):
non_edge_coordinates = coordinates[:]
areas = defaultdict(int)
ys = sorted(c[1] for c in coordinates)
xs = sorted(c[0] for c in coordinates)
minx, maxx = (xs[0], xs[-1])
miny, maxy = (ys[0], ys[-1])
for y in range(miny, maxy + 1):
is_y_edge = (y == miny or y == maxy)
for x in range(minx, maxx + 1):
is_x_edge = (x == minx or x == maxx)
closest = get_closest((x, y), coordinates)
if closest in non_edge_coordinates and (is_y_edge or is_x_edge):
non_edge_coordinates.remove(closest)
else:
areas[closest] += 1
return areas[max(non_edge_coordinates, key=areas.get)]
def get_region_size_of_coords_within_distance(distance, coordinates):
ys = sorted(c[1] for c in coordinates)
xs = sorted(c[0] for c in coordinates)
minx, maxx = (xs[0], xs[-1])
miny, maxy = (ys[0], ys[-1])
return sum(1 for x in range(minx, maxx + 1)
for y in range(miny, maxy + 1)
if sum(manhattan_distance((x, y), c) for c in coordinates) < distance)
def part1(coordinates_input):
clines = [cline.split(', ') for cline in coordinates_input.strip().splitlines()]
coordinates = [(int(c[0]), int(c[1])) for c in clines]
return largest_noninfinite_area(coordinates)
def part2(coordinates_input):
clines = [cline.split(', ') for cline in coordinates_input.strip().splitlines()]
coordinates = [(int(c[0]), int(c[1])) for c in clines]
return get_region_size_of_coords_within_distance(10000, coordinates)
def tests():
coordinate_list = [(1, 1),
(1, 6),
(8, 3),
(3, 4),
(5, 5),
(8, 9)]
assert largest_noninfinite_area(coordinate_list) == 17
assert get_region_size_of_coords_within_distance(32, coordinate_list) == 16
if __name__ == '__main__':
tests()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,499
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day9.py
|
""" --- Day 9: Marble Mania --- """
import re
from collections import deque, defaultdict
def marble_game(player_count, marble_count):
    """Play the marble game and return the winning elf's score.

    The deque is rotated so the current marble always sits at index 0.
    Every 23rd marble scores: the player keeps it plus the marble seven
    positions counter-clockwise; all other marbles are inserted two
    positions clockwise of the current one.
    """
    scores = defaultdict(int)
    circle = deque([0])
    for marble in range(1, marble_count + 1):
        if marble % 23:
            circle.rotate(-2)
            circle.appendleft(marble)
        else:
            circle.rotate(7)
            scores[marble % player_count] += circle.popleft() + marble
    return max(scores.values())
def tests():
assert marble_game(9, 25) == 32
assert marble_game(10, 1618) == 8317
assert marble_game(13, 7999) == 146373
assert marble_game(17, 1104) == 2764
assert marble_game(21, 6111) == 54718
assert marble_game(30, 5807) == 37305
assert part1('10 players; last marble is worth 1618 points') == 8317
assert part1('13 players; last marble is worth 7999 points') == 146373
assert part1('17 players; last marble is worth 1104 points') == 2764
assert part1('21 players; last marble is worth 6111 points') == 54718
assert part1('30 players; last marble is worth 5807 points') == 37305
def part1(game_settings):
    """High score for the player/marble counts parsed from the settings line."""
    players, marbles = map(int, re.findall(r'\d+', game_settings))
    return marble_game(players, marbles)
def part2(game_settings):
    """High score when the last marble is worth 100x the stated value."""
    players, marbles = map(int, re.findall(r'\d+', game_settings))
    return marble_game(players, marbles * 100)
if __name__ == '__main__':
tests()
print(part1('466 players; last marble is worth 71436 points'))
import time
start = time.time()
print(part2('466 players; last marble is worth 71436 points'))
print('time: {}'.format(time.time() - start))
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,500
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day11.py
|
from functools import lru_cache
def hundreds_digit_or_zero(power_level):
    """Hundreds digit of power_level, or 0 when it has fewer than 3 digits.

    Uses integer floor division: the original int(power_level / 100) went
    through a float, which loses precision for very large integers.
    Assumes power_level >= 0 (all call sites pass non-negative products).
    """
    return power_level // 100 % 10
@lru_cache(maxsize=pow(2, 17))
def cell_power_level(x, y, grid_serial_number):
    """Power level of the fuel cell at (x, y) for the given grid serial.

    rack_id = x + 10; the level is the hundreds digit of
    (rack_id * y + serial) * rack_id, minus 5.
    """
    rack_id = x + 10
    power = (rack_id * y + grid_serial_number) * rack_id
    return hundreds_digit_or_zero(power) - 5
class PowerGrid:
"""
Power Grid implemented as an Integral Image (or a Summed Area Table)
The summed area is cached (or memoized) with an LRU cache from functools
(thanks Python standard library for not making me reinvent the wheel)
"""
def __init__(self, serial_number, grid_size=300) -> None:
super().__init__()
self.serial_number = serial_number
self.grid_size = grid_size + 1
@lru_cache(maxsize=pow(2, 17))
def summed_area(self, x, y):
"""
Summed area above and to the left of x, y
"""
if x < 0 or y < 0:
return 0
else:
return (self.summed_area(x - 1, y)
+ self.summed_area(x, y - 1)
- self.summed_area(x - 1, y - 1)
+ cell_power_level(x, y, self.serial_number))
def power_level(self, x, y, cell_pack_size):
a = self.summed_area(x - 1, y - 1)
b = self.summed_area(x - 1, y + cell_pack_size - 1)
c = self.summed_area(x + cell_pack_size - 1, y - 1)
d = self.summed_area(x + cell_pack_size - 1, y + cell_pack_size - 1)
return d - b - c + a
def best_cell_pack_location(self, cell_pack_size):
cell_pack_power_levels = {(x, y): self.power_level(x, y, cell_pack_size)
for y in range(1, self.grid_size - cell_pack_size)
for x in range(1, self.grid_size - cell_pack_size)}
return max(cell_pack_power_levels, key=cell_pack_power_levels.get)
def best_cell_pack(self):
best_cell_pack_location = (1, 1)
best_cell_pack_size = 3
best_cell_pack_power_level = self.power_level(1, 1, 3)
for cell_pack_size in range(2, self.grid_size):
for y in range(self.grid_size - cell_pack_size):
for x in range(self.grid_size - cell_pack_size):
cell_pack_power_level = self.power_level(x, y, cell_pack_size)
if cell_pack_power_level > best_cell_pack_power_level:
best_cell_pack_location = (x, y)
best_cell_pack_size = cell_pack_size
best_cell_pack_power_level = cell_pack_power_level
return best_cell_pack_location, best_cell_pack_size
def part1(grid_serial):
power_grid = PowerGrid(int(grid_serial))
return power_grid.best_cell_pack_location(3)
def part2(grid_serial):
power_grid = PowerGrid(int(grid_serial))
return power_grid.best_cell_pack()
def tests():
assert cell_power_level(3, 5, 8) == 4
assert cell_power_level(122, 79, 57) == -5
assert cell_power_level(217, 196, 39) == 0
assert cell_power_level(101, 153, 71) == 4
assert part1('18') == (33, 45)
assert part1('42') == (21, 61)
assert part2('18') == ((90, 269), 16)
assert part2('42') == ((232, 251), 12)
print('Tests successful')
if __name__ == '__main__':
tests()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,501
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/runner.py
|
import advent_of_code
import importlib
import glob
days = {day[:-3]: importlib.import_module(day[:-3]) for day in glob.glob('day*.py')}
def print_solutions_for_day(day):
print('Day {}:'.format(day))
dayinput = advent_of_code.get_day_input(day)
solver = days['day{}'.format(day)]
for part in range(1, 3):
part_solver = 'part{}'.format(part)
if hasattr(solver, part_solver):
print(getattr(solver, part_solver)(dayinput))
else:
print('No Solution for Day {} Part {}'.format(day, part))
print()
def main():
for day in range(1, len(days) + 1):
print_solutions_for_day(day)
if __name__ == '__main__':
main()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,502
|
MRSharff/adventofcode
|
refs/heads/master
|
/2018/day10.py
|
""" --- Day 10: The Stars Align --- """
import re
point_regex = re.compile('position=<(?P<position>.+)> velocity=<(?P<velocity>.+)>')
def get_yrange_at_time(points, t):
y_values = [point[1] + t * velocity[1] for point, velocity in points.items()]
return max(y_values) - min(y_values)
def get_smallest_y_range_in_time_range(range_, points):
times = {t: get_yrange_at_time(points, t) for t in range_}
return min(times, key=times.get)
def point_view(points_in_time):
max_x, max_y = max(points_in_time, key=lambda p: p[0])[0], max(points_in_time, key=lambda p: p[1])[1]
min_x, min_y = min(points_in_time, key=lambda p: p[0])[0], min(points_in_time, key=lambda p: p[1])[1]
board = []
for y in range(min_y - 1, max_y + 2):
for x in range(min_x - 1, max_x + 2):
if (x, y) in points_in_time:
board.append('#')
else:
board.append('.')
board.append('\n')
return ''.join(board)
def get_optimal_time(points):
t = 0
previous = get_yrange_at_time(points, 0)
current = get_yrange_at_time(points, t + 1)
while previous > current:
t += 1
previous = current
current = get_yrange_at_time(points, t + 1)
return t
def get_points(point_data):
return {tuple(int(c) for c in d['position'].strip().split(', ')):
tuple(int(c) for c in d['velocity'].strip().split(', '))
for d in point_regex.finditer(point_data)}
def part1(point_data):
points = get_points(point_data)
t = get_optimal_time(points)
points_in_time = [tuple(c + t * v for c, v in zip(point, velocity)) for point, velocity in points.items()]
return point_view(points_in_time)
def part2(point_data):
return get_optimal_time(get_points(point_data))
def tests():
test_points = """position=< 9, 1> velocity=< 0, 2>
position=< 7, 0> velocity=<-1, 0>
position=< 3, -2> velocity=<-1, 1>
position=< 6, 10> velocity=<-2, -1>
position=< 2, -4> velocity=< 2, 2>
position=<-6, 10> velocity=< 2, -2>
position=< 1, 8> velocity=< 1, -1>
position=< 1, 7> velocity=< 1, 0>
position=<-3, 11> velocity=< 1, -2>
position=< 7, 6> velocity=<-1, -1>
position=<-2, 3> velocity=< 1, 0>
position=<-4, 3> velocity=< 2, 0>
position=<10, -3> velocity=<-1, 1>
position=< 5, 11> velocity=< 1, -2>
position=< 4, 7> velocity=< 0, -1>
position=< 8, -2> velocity=< 0, 1>
position=<15, 0> velocity=<-2, 0>
position=< 1, 6> velocity=< 1, 0>
position=< 8, 9> velocity=< 0, -1>
position=< 3, 3> velocity=<-1, 1>
position=< 0, 5> velocity=< 0, -1>
position=<-2, 2> velocity=< 2, 0>
position=< 5, -2> velocity=< 1, 2>
position=< 1, 4> velocity=< 2, 1>
position=<-2, 7> velocity=< 2, -2>
position=< 3, 6> velocity=<-1, -1>
position=< 5, 0> velocity=< 1, 0>
position=<-6, 0> velocity=< 2, 0>
position=< 5, 9> velocity=< 1, -2>
position=<14, 7> velocity=<-2, 0>
position=<-3, 6> velocity=< 2, -1>"""
test_word = """\
............
.#...#..###.
.#...#...#..
.#...#...#..
.#####...#..
.#...#...#..
.#...#...#..
.#...#...#..
.#...#..###.
............
"""
assert part1(test_points) == test_word
assert part2(test_points) == 3
if __name__ == '__main__':
tests()
|
{"/2018/runner.py": ["/advent_of_code.py"]}
|
14,503
|
Cydt/Pareidolia
|
refs/heads/master
|
/pareidolia/types.py
|
from collections import namedtuple
Size = namedtuple('Size', 'width height')
Dimensions = namedtuple('Dimensions', 'rows columns')
|
{"/pareidolia/combiner.py": ["/pareidolia/types.py"], "/pareidolia/__init__.py": ["/pareidolia/combiner.py"]}
|
14,504
|
Cydt/Pareidolia
|
refs/heads/master
|
/pareidolia/combiner.py
|
import random
import math
from PIL import Image, ImageFilter
from PIL.ImageOps import autocontrast
from .types import Size, Dimensions
# PIL wrappers
def image(filename):
return Image.open(filename)
def make_grayscale(image):
return image.convert("L")
def combine(filenames, size=None, number=None, dimensions=None):
"""
Create a random image from the passed files
images: list
size: (x, y)
"""
# some guards
if filenames is None or len(filenames) == 0:
print('Not enough files provided')
return
if number is None:
number = 1
# dimensions overrules number
if dimensions is None:
dimensions = Dimensions(1, number)
else:
number = dimensions.rows * dimensions.columns
if size is None:
size = Size(400, 200)
# copy and shuffle
shuffled = filenames[:]
random.shuffle(shuffled)
# pick one base image to fill the canvas
base = shuffled[0]
rest = shuffled[1:]
# create grayscale versions
images = map(image, shuffled)
grayscales = list(map(make_grayscale, images))
# create a new image and paste the grayscales
combined = list()
for _ in range(number):
combined.append(combine_images(grayscales, size=size))
show_collage(combined, dimensions)
def show_collage(images, dimensions):
width, height = images[0].size
rows, columns = dimensions
padding = 10
collage_size = (
width * columns + padding * (columns-1),
height * rows + padding * (rows-1)
)
collage = Image.new('L', collage_size)
for row in range(rows):
top = row * (height + padding)
for col in range(columns):
left = col * (width + padding)
idx = row*columns + col
collage.paste(images[idx], ((left, top)))
collage.show()
def crop_square(image, size):
"""
crop a square from a random location in image
"""
width, height = image.size
top = random.randint(0, max(0, height-size))
left = random.randint(0, max(0, width-size))
bottom = min(top + size, height)
right = min(left + size, width)
return image.crop((left, top, right, bottom))
def pythagoras(width, height):
    """Diagonal length of a width x height rectangle, rounded up to an int."""
    return math.ceil(math.sqrt(width ** 2 + height ** 2))
def combine_images(images, size=None):
width, height = size
# size for the crop
radius = pythagoras(*size)
# locations for the paste
left = int((width - radius) / 2)
top = int((height - radius) / 2)
# reusable mask (because opacity is fixed)
opacity = 100 # out of 255
mask = Image.new('L', (radius, radius), opacity)
combined = Image.new('L', size, 'gray')
for img in images:
rotation = random.randint(0, 359)
cropped = crop_square(img, radius)
rotated = cropped.rotate(rotation, resample=Image.BICUBIC)
rotated_mask = mask.rotate(rotation)
combined.paste(rotated, (left, top), rotated_mask)
combined = autocontrast(combined)
return combined
|
{"/pareidolia/combiner.py": ["/pareidolia/types.py"], "/pareidolia/__init__.py": ["/pareidolia/combiner.py"]}
|
14,505
|
Cydt/Pareidolia
|
refs/heads/master
|
/pareidolia/__init__.py
|
from .combiner import combine
|
{"/pareidolia/combiner.py": ["/pareidolia/types.py"], "/pareidolia/__init__.py": ["/pareidolia/combiner.py"]}
|
14,507
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/processing/gazetteer/stringgazetteer.py
|
"""
This module provides Gazetteer classes which allow matching the text or the tokens of documents against
gazetteer lists, lists of interesting texts or token sequences and annotate the matches with features from the
gazetteer lists.
"""
from recordclass import structclass
from gatenlp.utils import init_logger
from gatenlp.processing.gazetteer.base import GazetteerAnnotator
# TODO: Implement the StringGazetteer!!!!!!!!!!!!!!!!!!!!!!
# NOTE: Match was a dataclass originally
Match = structclass("Match", ("start", "end", "match", "entrydata", "matcherdata"))
_NOVALUE = object()
import sys
class _Node:
    """
    Trie Node: represents the value and the children.
    """

    __slots__ = ("children", "value")

    def __init__(self):
        self.children = dict()  # char -> child _Node
        self.value = _NOVALUE   # payload; _NOVALUE marks "no entry ends here"

    # Will get removed or replaced with a proper pretty-printer!
    def debug_print_node(self, file=sys.stderr):
        """Recursively dump this node and its children to *file*."""
        if self.value == _NOVALUE:
            print("Node(val=,children=[", end="", file=file)
        else:
            print(f"Node(val={self.value},children=[", end="", file=file)
        for c, n in self.children.items():
            print(f"{c}:", end="", file=file)
            # BUG FIX: recursion called the non-existent print_node() (an
            # AttributeError) and dropped the file argument; recurse properly.
            n.debug_print_node(file=file)
        print("])", end="", file=file)
class StringGazetteer(GazetteerAnnotator):
    # NOTE(review): incomplete port from the Matcher package -- the constructor
    # deliberately raises, so this class is currently unusable as-is.
    def __init__(
        self, ignorefunc=None, mapfunc=None, matcherdata=None, defaultdata=None
    ):
        """
        NOTE: NOT YET IMPLEMENTED! (code copied from Matcher package, mostly unchanged)

        Create a String Gazetteer.

        Args:
            ignorefunc: a predicate that returns True for any token that should be ignored.
            mapfunc: a function that returns the string to use for each token.
            matcherdata: data to add to all matches in the matcherdata field
            defaultdata: data to add to matches when the entry data is None
        """
        # TODO: need to figure out how to handle word boundaries
        # TODO: need to figure out how to handle matching spaces vs. different spaces / no spaces!
        # self.nodes = defaultdict(Node)
        self.ignorefunc = ignorefunc
        self.mapfunc = mapfunc
        self.defaultdata = defaultdata
        self.matcherdata = matcherdata
        # root node of the character trie holding all entries
        self._root = _Node()
        self.logger = init_logger(__name__)
        raise Exception("Not yet implemented")

    def add(self, entry, data=None, listdata=None, append=False):
        """
        Add a gazetteer entry, or several entries if "entry" is iterable and not a string, and store its data.

        Note that data has to be a non-None value to indicate that this entry is in the tree (e.g. True).
        If an entry already exists, the data is replaced with the new data unless append is True,
        in which case the data is appended to the list of data already there.

        If all elements of the entry are ignored, nothing is done.

        :param entry: a string
        :param data: the data to add for that gazetteer entry.
        :param listdata: the list data to add for that gazetteer entry.
        :param append: if true and data is not None, store data in a list and append any new data
        :return:
        """
        if isinstance(entry, str):
            entry = [entry]
        for e in entry:
            node = self._get_node(e, create=True)
            if node == self._root:
                # empty string not allowed
                continue
            if node.value == _NOVALUE:
                # first value stored for this entry
                if append:
                    node.value = [data]
                else:
                    node.value = data
            else:
                if append:
                    node.value.append(data)
                else:
                    node.value = data

    def find(
        self, text, all=False, skip=True, fromidx=None, toidx=None, matchmaker=None
    ):
        """
        Find gazetteer entries in text.

        :param text: string to search
        :param all: return all matches, if False only return longest match
        :param skip: skip forward over longest match (do not return contained/overlapping matches)
        :param fromidx: index where to start finding in tokens
        :param toidx: index where to stop finding in tokens (this is the last index actually used)
        :param matchmaker: optional factory used instead of Match to build result objects
        :return: an iterable of Match. The start/end fields of each Match are the character offsets if
            text is a string, otherwise are the token offsets.
        """
        matches = []
        lentext = len(text)
        # normalize the search window to [fromidx, toidx] within the text
        if fromidx is None:
            fromidx = 0
        if toidx is None:
            toidx = lentext - 1
        if fromidx >= lentext:
            return matches
        if toidx >= lentext:
            toidx = lentext - 1
        if fromidx > toidx:
            return matches
        i = fromidx
        self.logger.debug(f"From index {i} to index {toidx} for {text}")
        # NOTE(review): `i < toidx` never attempts a match starting at toidx
        # itself, although the docstring calls toidx "the last index actually
        # used" -- suspected off-by-one; confirm before changing.
        while i < toidx:
            chr = text[i]
            if self.ignorefunc and self.ignorefunc(chr):
                i += 1
                continue
            if self.mapfunc:
                chr = self.mapfunc(chr)
            longest_len = 0
            longest_match = None
            # walk the trie from the root as long as characters keep matching
            node = self._root
            node = node.children.get(chr)
            k = 0
            while node is not None:
                if node.value != _NOVALUE:
                    # we found a match
                    cur_len = k + 1
                    if matchmaker:
                        match = matchmaker(
                            i,
                            i + k + 1,
                            text[i: i + k + 1],
                            node.value,
                            self.matcherdata,
                        )
                    else:
                        match = Match(
                            i,
                            i + k + 1,
                            text[i: i + k + 1],
                            node.value,
                            self.matcherdata,
                        )
                    if all:
                        matches.append(match)
                    else:
                        # NOTE: only one longest match is possible, but it can have a list of data if append=True
                        if cur_len > longest_len:
                            longest_len = cur_len
                            longest_match = match
                # advance k to the next non-ignored character and descend
                while True:
                    k += 1
                    if i + k >= len(text):
                        break
                    chr = text[i + k]
                    if self.ignorefunc and self.ignorefunc(chr):
                        continue
                    if self.mapfunc:
                        chr = self.mapfunc(chr)
                    node = node.children.get(chr)
                    break
                if i + k >= len(text):
                    break
            if not all and longest_match is not None:
                matches.append(longest_match)
            # either jump over the scanned span or move one character forward
            if skip:
                i += max(k, 1)
            else:
                i += 1
        return matches

    def __setitem__(self, key, value):
        # dict-style assignment: gazetteer[key] = value
        node = self._get_node(key, create=True)
        node.value = value

    def __getitem__(self, item):
        # dict-style lookup; KeyError when the node exists but holds no value
        node = self._get_node(item, create=False, raise_error=True)
        if node.value == _NOVALUE:
            raise KeyError(item)
        return node.value

    def get(self, item, default=None):
        # dict-style get() with a default for missing or value-less entries
        node = self._get_node(item, create=False, raise_error=False)
        if node is None:
            return default
        if node.value == _NOVALUE:
            return default
        return node.value

    def _get_node(self, item, create=False, raise_error=True):
        """
        Returns the node corresponding to the last character in key or raises a KeyError if create is False
        and the node does not exist. If create is True, inserts the node.

        :param item: the key for which to find a node
        :param create: if True, insert all necessary nodes
        :param raise_error: if True and create is False, raises an error if not found, if False, returns None
        :return: the node corresponding to the key or None if no node found and raise_error is False
        """
        node = self._root
        for el in item:
            if self.ignorefunc and self.ignorefunc(el):
                continue
            if self.mapfunc:
                el = self.mapfunc(el)
            if create:
                node = node.children.setdefault(el, _Node())
            else:
                node = node.children.get(el)
                if not node:
                    if raise_error:
                        raise KeyError(item)
                    else:
                        return None
        return node
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,508
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/setup.py
|
#!/usr/bin/env python
# encoding: utf-8
"""Packaging script for the gatenlp library."""
import os
import re
import sys

from setuptools import setup, find_packages

if sys.version_info < (3, 6):
    sys.exit("ERROR: gatenlp requires Python 3.6+")

JARFILE = "gatetools-gatenlpworker-1.0.jar"
# where the jar should live, relative to the installed gatenlp package
JARFILE_DEST = os.path.join("_jars", JARFILE)

here = os.path.abspath(os.path.dirname(__file__))

# FIX: pin the encoding so reading README.md does not depend on the build
# machine's locale (previously used the platform default).
with open(os.path.join(here, "README.md"), encoding="utf-8") as f:
    readme = f.read()
def versionfromfile(*filepath):
    """Return the __version__ string found in the file at ``here``/*filepath.

    Args:
        *filepath: path components relative to the setup.py directory.

    Raises:
        RuntimeError: if no ``__version__ = "..."`` assignment is found.
    """
    infile = os.path.join(here, *filepath)
    # FIX: read with an explicit encoding instead of the locale default
    with open(infile, encoding="utf-8") as fp:
        contents = fp.read()
    version_match = re.search(
        r"^__version__\s*=\s*['\"]([^'\"]*)['\"]", contents, re.M
    )
    if version_match:
        return version_match.group(1)
    raise RuntimeError("Unable to find version string in {}.".format(infile))


version = versionfromfile("gatenlp/version.py")
def get_install_extras_require():
    """Build the extras_require mapping for setup().

    Returns:
        dict: extras name -> list of requirement strings, including two
        aggregate targets: ``all`` (every extra except ``dev`` and
        ``notebook``) and ``alldev`` (every extra).
    """
    extras_require = {
        "formats": ["msgpack", "pyyaml>=5.2", "beautifulsoup4>=4.9.3", "requests", "conllu"],
        "basic": ["recordclass"],
        "java": ["py4j"],
        "stanza": ["stanza>=1.3.0"],
        "spacy": ["spacy>=2.2"],
        "nltk": ["nltk>=3.5"],
        "gazetteers": ["matchtext", "recordclass"],
        # the following are not included in all but in alldev
        "notebook": [
            "ipython",
            "ipykernel",
            "jupyterlab",
            "notebook",
            "voila",
            "RISE",
            "ipywidgets",
        ],
        "dev": [
            "pytest",
            "pytest-pep8",
            "pytest-cov",
            "pytest-runner",
            "sphinx",
            "pdoc3",
            "tox",
            "mypy",
            "bandit",
            "prospector[with_pyroma,with_vulture,with_mypy,with_bandid,with_frosted]",
            # TODO: have to figure out why we need this? Maybe because we added jupyterlab,notebook,voila
            "pytest-tornasync",
            "black[d]",  # for automatic code formatting
        ],
    }
    # Add the aggregate 'all' and 'alldev' targets.
    # BUG FIX: the original filtered *package names* against ["dev", "notebook"],
    # which pulled the dev/notebook requirements into "all"; filter by the
    # extras *key* instead.
    add_all = [
        pck
        for key, lst in extras_require.items()
        if key not in ("dev", "notebook")
        for pck in lst
    ]
    add_alldev = [pck for lst in extras_require.values() for pck in lst]
    extras_require.update({"all": add_all, "alldev": add_alldev})
    return extras_require
# Package metadata. NOTE: the license field says Apache 2.0; the stray
# "MIT License" trove classifier present earlier contradicted it and has
# been removed.
setup(
    name="gatenlp",
    version=version,
    author="Johann Petrak",
    author_email="johann.petrak@gmail.com",
    url="https://github.com/GateNLP/python-gatenlp",
    keywords=["nlp", "text processing"],
    description="GATE NLP implementation in Python.",
    long_description=readme,
    long_description_content_type="text/markdown",
    setup_requires=[
        # deliberately not used, since it installs packages without pip, use the "dev" extras instead
    ],
    install_requires=[
        "sortedcontainers>=2.0.0",
    ],
    extras_require=get_install_extras_require(),
    # NOTE: this is not actually used since it will not work with gatenlp version reporting
    # from the gateplugin-Python plugin (since _version.py is not/should not get committed, only distributed)
    # (this would also not work if we deploy after committing)
    python_requires=">=3.6",
    tests_require=["pytest", "pytest-cov"],
    platforms="any",
    license="Apache License 2.0",
    packages=find_packages(),
    # ship the worker jar and the html viewer assets inside the wheel
    package_data={
        "gatenlp": [
            JARFILE_DEST,
            os.path.join("serialization", "_htmlviewer", "gatenlp-ann-viewer.html"),
            os.path.join(
                "serialization", "_htmlviewer", "gatenlp-ann-viewer-merged.js"
            ),
        ]
    },
    # include_package_data=True,
    # data_files=[("share/gatenlp", [JARFILE_PATH])],
    test_suite="tests",
    entry_points={"console_scripts": ["gatenlp-gate-worker=gatenlp.gateworker:run_gate_worker"]},
    classifiers=[
        # "Development Status :: 6 - Mature",
        # "Development Status :: 5 - Production/Stable",
        "Development Status :: 4 - Beta",
        # "Development Status :: 3 - Alpha",
        # "Development Status :: 2 - Pre-Alpha",
        # "Development Status :: 1 - Planning",
        "Programming Language :: Python :: 3.6",
        "Programming Language :: Python :: 3.7",
        "Programming Language :: Python :: 3.8",
        "Programming Language :: Python :: 3.9",
        "Programming Language :: Python :: 3",
        "Programming Language :: Python :: 3 :: Only",
        "Intended Audience :: Developers",
        "Intended Audience :: Science/Research",
        "Topic :: Scientific/Engineering",
        "License :: OSI Approved :: Apache Software License",
    ],
)
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,509
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/version.py
|
# Single source of truth for the package version; setup.py extracts this
# value via a regex (versionfromfile).
__version__ = "1.0.6-dev0"
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,510
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/annotation_set.py
|
"""
Module for AnnotationSet class which represents a named collection of
annotations which can arbitrarily overlap.
"""
# TODO: when should two sets be equal? Currently object identity is requried!
from typing import Any, List, Union, Dict, Set, KeysView, Iterator, Generator
# TODO: prior to Python 3.9 we need different Iterable definitions for typing and type checking
from collections.abc import Iterable as abc_Iterable
from typing import Iterable
from collections import defaultdict
import copy
from gatenlp.span import Span
from gatenlp.annotation import Annotation
from gatenlp.impl import SortedIntvls
from gatenlp.utils import support_annotation_or_set, allowspan
# Tell pdoc3 to include these dunder methods in the generated API docs
# (they are excluded by default).
__pdoc__ = {
    "AnnotationSet.__iter__": True,
    "AnnotationSet.__contains__": True,
    "AnnotationSet.__getitem__": True,
    "AnnotationSet.__len__": True,
}


class InvalidOffsetError(KeyError):
    """Raised when an annotation span lies outside the owning document's text.

    NOTE(review): subclasses KeyError, presumably so existing callers that
    catch KeyError keep working -- confirm before changing the base class.
    """

    pass
class AnnotationSet:
def __init__(self, name: str = "", owner_doc=None):
    """Create an annotation set.

    Not normally called directly; use `Document.annset(name)` to obtain the
    set with a given name from a document. A set holds any number of
    (possibly overlapping) annotations.

    Args:
        name: name of the set; the empty string denotes the default set.
        owner_doc: the owning document, if any. When set, offsets are
            validated against the document text (if it has text) and
            changes are recorded in the document's changelog.
    """
    self._name = name
    self._owner_doc = owner_doc
    # annotations are stored as a map: annotation id (int) -> Annotation
    self._annotations = {}
    # indices are built lazily on first use
    self._index_by_offset = None
    self._index_by_ol = None
    self._index_by_type = None
    self._is_immutable = False
    self._next_annid = 0
@property
def name(self):
    """The name of this annotation set (cannot be changed after creation)."""
    return self._name
@property
def changelog(self):
    """The changelog of the owning document, or None if there is no owner."""
    doc = self._owner_doc
    return None if doc is None else doc.changelog
def __setattr__(self, key, value):
    """
    Prevent immutable fields from getting overridden, once they have been
    set.

    NOTE(review): the guard checks the attribute names "name" and
    "owner_doc", but the constructor only ever assigns "_name" and
    "_owner_doc", so this branch appears never to fire in normal use --
    confirm the intent before relying on (or changing) it.
    """
    if key == "name" or key == "owner_doc":
        if self.__dict__.get(key, None) is None:
            super().__setattr__(key, value)
        else:
            raise Exception(
                "AnnotationSet attribute cannot get changed after being set"
            )
    else:
        super().__setattr__(key, value)
def detach(self, restrict_to=None):
    """Return an immutable, detached copy of this set.

    A detached set has no owning document: adding/removing annotations does
    not affect the document. The annotations themselves are shared, so
    feature changes still affect the originals.

    Args:
        restrict_to: iterable of annotation ids to include; None means all.

    Returns:
        an immutable AnnotationSet
    """
    result = AnnotationSet(name="detached-from:" + self.name)
    result._is_immutable = True
    ids = self._annotations.keys() if restrict_to is None else restrict_to
    result._annotations = {annid: self._annotations[annid] for annid in ids}
    result._next_annid = self._next_annid
    return result
def detach_from(self, anns: Iterable):
    """
    Create an immutable detached annotation set from `anns`, which may
    contain Annotation objects or annotation ids (ints) referring to this
    set. The new set's next annotation id is the highest id seen plus one.

    Args:
        anns: an iterable of annotations or annotation ids

    Returns:
        an immutable detached annotation set
    """
    annset = AnnotationSet(name="detached-from:" + self.name)
    annset._is_immutable = True
    annset._annotations = {}
    nextid = -1
    for ann in anns:
        if isinstance(ann, int):
            annset._annotations[ann] = self._annotations[ann]
            annid = ann
        else:
            # BUG FIX: the original stored the annotation under the builtin
            # `id` function object instead of the annotation's id.
            annset._annotations[ann.id] = ann
            annid = ann.id
        if annid > nextid:
            nextid = annid
    annset._next_annid = nextid + 1
    return annset
@staticmethod
def create_from(anns: Union[Iterable[Annotation], Annotation], name=None) -> "AnnotationSet":
    """
    Create an immutable detached annotation set from the given
    annotation(s). The set contains shallow copies; annotation ids are
    preserved unless duplicated, in which case the next free id is used.

    FIX: the return annotation previously said ``-> None`` although the
    method returns the new set.

    Args:
        anns: an iterable of annotations or a single annotation
        name: the name of the new set

    Returns:
        An immutable detached annotation set
    """
    annset = AnnotationSet(name=name)
    annset._is_immutable = True
    annset._annotations = {}
    annset._next_annid = 0
    if isinstance(anns, Annotation):
        anns = [anns]
    for ann in anns:
        # shallow copy so the stored annotation is independent of the source
        ann = ann.copy()
        if ann.id in annset._annotations:
            # duplicate id: assign the next available id instead
            ann._id = annset._next_annid
            annset._annotations[annset._next_annid] = ann
            annset._next_annid += 1
        else:
            # keep the id and make sure the id counter moves past it
            annset._annotations[ann.id] = ann
            if ann.id >= annset._next_annid:
                annset._next_annid = ann.id + 1
    return annset
@property
def immutable(self) -> bool:
    """Whether the set rejects adding/removing annotations.

    Detached sets start out immutable but may be made mutable; annotation
    features can be modified regardless.
    """
    return self._is_immutable


@immutable.setter
def immutable(self, value: bool) -> None:
    self._is_immutable = value
def isdetached(self) -> bool:
    """Return True if this set has no owning document."""
    return self._owner_doc is None
def _create_index_by_offset(self) -> None:
    """Build the (start, end) offset index lazily, if not built yet."""
    if self._index_by_offset is not None:
        return
    self._index_by_offset = SortedIntvls()
    for ann in self._annotations.values():
        self._index_by_offset.add(ann.start, ann.end, ann.id)
def _create_index_by_ol(self) -> None:
    """Build the (start offset, end offset, id) ordered index lazily."""
    if self._index_by_ol is not None:
        return
    self._index_by_ol = SortedIntvls(by_ol=True)
    for ann in self._annotations.values():
        self._index_by_ol.add(ann.start, ann.end, ann.id)
def _create_index_by_type(self) -> None:
    """Build the type index (type name -> set of annotation ids) lazily."""
    if self._index_by_type is not None:
        return
    self._index_by_type = defaultdict(set)
    for ann in self._annotations.values():
        self._index_by_type[ann.type].add(ann.id)
def _add_to_indices(self, annotation: Annotation) -> None:
    """Register the annotation with whichever indices already exist.

    Args:
        annotation: the annotation to register.
    """
    by_type = self._index_by_type
    if by_type is not None:
        by_type[annotation.type].add(annotation.id)
    by_offset = self._index_by_offset
    if by_offset is not None:
        by_offset.add(annotation.start, annotation.end, annotation.id)
def _remove_from_indices(self, annotation: Annotation) -> None:
    """Deregister the annotation from whichever indices already exist.

    Args:
        annotation: the annotation to deregister.
    """
    by_offset = self._index_by_offset
    if by_offset is not None:
        by_offset.remove(annotation.start, annotation.end, annotation.id)
    by_type = self._index_by_type
    if by_type is not None:
        by_type[annotation.type].remove(annotation.id)
@staticmethod
def _intvs2idlist(intvs, ignore=None) -> List[int]:
    """Extract the annotation ids from (start, end, id) triples.

    Args:
        intvs: iterable of (start, end, annid) tuples
        ignore: an optional annotation id to leave out of the result

    Returns:
        list of annotation ids
    """
    return [iv[2] for iv in intvs if ignore is None or iv[2] != ignore]
@staticmethod
def _intvs2idset(intvs, ignore=None) -> Set[int]:
    """Extract the annotation ids from (start, end, id) triples as a set.

    Args:
        intvs: iterable of (start, end, annid) tuples
        ignore: an optional annotation id to leave out of the result

    Returns:
        set of annotation ids
    """
    return {iv[2] for iv in intvs if ignore is None or iv[2] != ignore}
def _restrict_intvs(self, intvs, ignore=None):
    """Return a detached set restricted to the ids occurring in intvs.

    Args:
        intvs: iterable of (start, end, annid) tuples
        ignore: an optional annotation id to exclude
    """
    ids = AnnotationSet._intvs2idlist(intvs, ignore=ignore)
    return self.detach(restrict_to=ids)
def __len__(self) -> int:
    """Number of annotations in the set."""
    return len(self._annotations)
@property
def size(self) -> int:
    """Number of annotations in the set (same as len(set))."""
    return len(self._annotations)
@property
def document(self):
    """The owning document, or None for a detached set."""
    return self._owner_doc
@support_annotation_or_set
def _check_offsets(self, start: int, end: int, annid=None) -> None:
    """Validate a span against the owning document's text.

    Does nothing for detached sets or documents without text.

    Args:
        start: start offset
        end: end offset
        annid: ignored, present for decorator compatibility

    Raises:
        InvalidOffsetError: for negative, inverted or out-of-document spans.
    """
    doc = self._owner_doc
    if doc is None:
        return
    if doc.text is None:
        return
    doc_size = len(doc)
    if start < 0:
        raise InvalidOffsetError("Annotation starts before 0")
    if end < 0:
        raise InvalidOffsetError("Annotation ends before 0")
    if start > end:
        raise InvalidOffsetError("Annotation ends before it starts")
    if start > doc_size:
        raise InvalidOffsetError(
            "Annotation starts after document ends: start={}, docsize={}".format(
                start, doc_size
            )
        )
    if end > doc_size:
        raise InvalidOffsetError(
            "Annotation ends after document ends: end={}, docsize={}".format(
                end, doc_size
            )
        )
@property
def start(self):
    """Smallest start offset over all annotations (builds the offset index).

    Raises:
        Exception: if the set is empty.
    """
    if self.size == 0:
        raise Exception("Annotation set is empty, cannot determine start offset")
    self._create_index_by_offset()
    return self._index_by_offset.min_start()
@property
def end(self):
    """Largest end offset over all annotations (builds the offset index).

    Raises:
        Exception: if the set is empty.
    """
    if self.size == 0:
        raise Exception("Annotation set is empty, cannot determine end offset")
    self._create_index_by_offset()
    return self._index_by_offset.max_end()
@property
def length(self):
    """Length of the span covered by the set (end - start).

    Raises:
        Exception: if the set is empty.
    """
    return self.end - self.start
@allowspan
def add(
    self,
    start: int,
    end: int,
    anntype: str,
    features: Dict[str, Any] = None,
    annid: int = None,
):
    """
    Add an annotation to the set and return it.

    Once added, the start/end offsets, type, and id of the annotation are
    immutable. If an annotation id is specified that already exists in the
    set, an exception is raised.

    Args:
        start: start offset
        end: end offset
        anntype: the annotation type
        features: a map, an iterable of tuples, or an existing feature map
            used to create the new annotation's features; None or empty
            means no features are stored.
        annid: the annotation id; normally left unspecified so the next
            free id is assigned automatically.

    Returns:
        the new annotation
    """
    # guard against the caller accidentally swapping annid and features
    if annid is not None and not isinstance(annid, int):
        raise Exception("Parameter annid must be an int, mixed up with features?")
    if features is not None and isinstance(features, int):
        raise Exception(
            "Parameter features must not be an int: mixed up with annid?"
        )
    if self._is_immutable:
        raise Exception("Cannot add an annotation to an immutable annotation set")
    self._check_offsets(start, end)
    # NOTE(review): `if annid and ...` skips the duplicate check when
    # annid == 0 -- probably should be `annid is not None`; confirm.
    if annid and annid in self._annotations:
        raise Exception(
            "Cannot add annotation with id {}, already in set".format(annid)
        )
    if annid is None:
        annid = self._next_annid
        self._next_annid = self._next_annid + 1
    ann = Annotation(start, end, anntype, features=features, annid=annid)
    ann._owner_set = self
    # NOTE(review): _annotations is always initialized to a dict in
    # __init__, so this re-initialization looks redundant -- confirm.
    if not self._annotations:
        self._annotations = {}
    self._annotations[annid] = ann
    self._add_to_indices(ann)
    # record the addition in the owning document's changelog, if any
    if self.changelog is not None:
        entry = {
            "command": "annotation:add",
            "set": self.name,
            "start": ann.start,
            "end": ann.end,
            "type": ann.type,
            "features": ann._features.to_dict(),
            "id": ann.id,
        }
        self.changelog.append(entry)
    return ann
def add_ann(self, ann, annid: int = None):
    """
    Add a shallow copy of the given annotation to the set, either with a
    new annotation id or with the one given. If an annotation id that
    already exists in the set is specified, an exception is raised.

    NOTE(review): this method was defined twice, verbatim, in the original
    file; the duplicate definition has been removed.

    Args:
        ann: the annotation to copy into the set
        annid: the annotation id; normally left unspecified so the next
            free id is assigned automatically.

    Returns:
        the added annotation
    """
    return self.add(ann.start, ann.end, ann.type, ann.features, annid=annid)
# TODO/NOTE: prior to Python 3.9, Iterable[Annotation] requires Iterable
# from typing, not from collections.abc.
def add_anns(self, anns: Iterable[Annotation], annid_from_ann=False):
    """
    Add shallow copies of all annotations from the iterable to this set.

    Args:
        anns: an iterable of Annotations
        annid_from_ann: if True reuse each annotation's own id (raises if
            the id is already taken); otherwise assign fresh ids.
    """
    for ann in anns:
        reuse = ann.id if annid_from_ann else None
        self.add(ann.start, ann.end, ann.type, ann.features, annid=reuse)
def remove(
    self, annoriter: Union[int, Annotation, Iterable], raise_on_notexisting=True
) -> None:
    """
    Remove the given annotation -- specified as an id, an Annotation, or
    (recursively) an iterable of either.

    Args:
        annoriter: an id (int), an Annotation, or an iterable of those.
        raise_on_notexisting: if False, silently ignore annotations/ids not
            present in the set. Note: an immutable set still raises.

    Raises:
        Exception: if the set is immutable, or (when raise_on_notexisting
            is True) if the annotation is not in the set.
    """
    if self._is_immutable:
        raise Exception(
            "Cannot remove an annotation from an immutable annotation set"
        )
    if isinstance(annoriter, abc_Iterable):
        for a in annoriter:
            self.remove(a, raise_on_notexisting=raise_on_notexisting)
        return
    annid = None  # make pycharm happy
    if isinstance(annoriter, int):
        annid = annoriter
        if annid not in self._annotations:
            # BUG FIX: the documented raise_on_notexisting=False behavior was
            # never implemented -- the original raised unconditionally.
            if not raise_on_notexisting:
                return
            raise Exception(
                "Annotation with id {} not in annotation set, cannot remove".format(
                    annid
                )
            )
        annoriter = self._annotations[annid]
    elif isinstance(annoriter, Annotation):
        annid = annoriter.id
        if annid not in self._annotations:
            if not raise_on_notexisting:
                return
            raise Exception(
                "Annotation with id {} does not belong to this set, cannot remove".format(
                    annid
                )
            )
    # Clear the owner so that later feature changes on the removed
    # annotation are no longer logged against this set.
    annoriter._owner_set = None
    del self._annotations[annid]
    if self.changelog is not None:
        self.changelog.append(
            {"command": "annotation:remove", "set": self.name, "id": annid}
        )
    self._remove_from_indices(annoriter)
def clear(self) -> None:
    """
    Remove all annotations from the set and drop all indices.
    """
    self._annotations.clear()
    self._index_by_offset = None
    self._index_by_type = None
    # BUG FIX: the offset/length index was not reset, so iter_ol() could
    # keep yielding annotations that were already cleared.
    self._index_by_ol = None
    if self.changelog is not None:
        self.changelog.append({"command": "annotations:clear", "set": self.name})
def clone_anns(self, memo=None):
    """
    Replace the annotations in this set with deep copies of the originals.

    For a detached set this ensures modifications cannot affect the
    annotations in the attached set (and vice versa). The owner set of the
    original (cloned-from) annotations is cleared.

    Args:
        memo: for internal use by our __deepcopy__ implementation.
    """
    # Two phases: copy everything first, then swap in the copies, so all
    # deep copies are made while the set still holds the originals.
    tmpdict = {}
    for annid, ann in self._annotations.items():
        newann = copy.deepcopy(ann, memo=memo)
        ann._owner_set = None
        tmpdict[annid] = newann
    for annid, ann in tmpdict.items():
        self._annotations[annid] = ann
def __copy__(self):
    """Shallow copy: a mutable, detached set sharing the same annotations."""
    dup = self.detach()
    dup._is_immutable = False
    return dup
def copy(self):
    """Return a shallow copy (mutable and detached)."""
    return self.__copy__()
def __deepcopy__(self, memo=None):
    """Deep copy support: a mutable detached set with cloned annotations."""
    memo = {} if memo is None else memo
    dup = self.detach()
    dup._is_immutable = False
    dup.clone_anns(memo=memo)
    return dup
def deepcopy(self):
    """Return a deep copy of the annotation set."""
    return copy.deepcopy(self)
def __iter__(self) -> Iterator:
    """
    Iterate over all annotations in document order.

    Important: this builds the offset index if it does not exist yet; use
    fast_iter() for index-free iteration in insertion order.
    """
    return self.iter()
def fast_iter(self) -> Generator:
    """
    Yield annotations in insertion order. Faster than the default iterator
    and does not build (or need) the offset index.
    """
    # IDIOM FIX: iterate the values directly -- the original looped over
    # .items() and discarded the key.
    yield from self._annotations.values()
def iter(
    self,
    start_ge: Union[int, None] = None,
    start_lt: Union[None, int] = None,
    with_type: str = None,
    reverse: bool = False,
) -> Generator:
    """
    Default iterator: yield annotations ordered by increasing start offset
    and increasing annotation id, optionally restricted.

    Args:
        start_ge: smallest start offset an annotation may have
        start_lt: yield only annotations starting before this offset
        with_type: a type name, or an iterable of type names, to restrict to
        reverse: if True, iterate in reverse document order

    Yields:
        Annotations in (possibly reversed) document order
    """
    if with_type is not None:
        # BUG FIX: the original tested isinstance(type, str) against the
        # *builtin* `type` (always False), so a single string argument fell
        # into the iterable branch and was split into single characters.
        if isinstance(with_type, str):
            allowedtypes = {with_type}
        else:
            allowedtypes = set(with_type)
    else:
        allowedtypes = None
    if not self._annotations:
        return
    maxoff = None
    if start_ge is not None:
        assert start_ge >= 0
    if start_lt is not None:
        assert start_lt >= 1
        maxoff = start_lt + 1
    if start_lt is not None and start_ge is not None:
        assert start_lt > start_ge
    self._create_index_by_offset()
    for _start, _end, annid in self._index_by_offset.irange(
        minoff=start_ge, maxoff=maxoff, reverse=reverse
    ):
        ann = self._annotations[annid]
        if allowedtypes is not None and ann.type not in allowedtypes:
            continue
        yield ann
def iter_ol(
    self,
    start_ge: Union[int, None] = None,
    start_lt: Union[None, int] = None,
    with_type: str = None,
    reverse: bool = False,
) -> Generator:
    """
    Offset-length iterator: yield annotations ordered by increasing start
    offset, increasing end offset and increasing annotation id, optionally
    restricted.

    Args:
        start_ge: smallest start offset an annotation may have
        start_lt: yield only annotations starting before this offset
        with_type: a type name, or an iterable of type names, to restrict to
        reverse: if True, iterate in reverse order

    Yields:
        Annotations ordered by offset and length.
    """
    if with_type is not None:
        # BUG FIX: same defect as iter() -- the original tested
        # isinstance(type, str) against the builtin `type`, so a string
        # argument was split into single characters.
        if isinstance(with_type, str):
            allowedtypes = {with_type}
        else:
            allowedtypes = set(with_type)
    else:
        allowedtypes = None
    if not self._annotations:
        return
    maxoff = None
    if start_ge is not None:
        assert start_ge >= 0
    if start_lt is not None:
        assert start_lt >= 1
        maxoff = start_lt + 1
    if start_lt is not None and start_ge is not None:
        assert start_lt > start_ge
    self._create_index_by_ol()
    for _start, _end, annid in self._index_by_ol.irange(
        minoff=start_ge, maxoff=maxoff, reverse=reverse
    ):
        ann = self._annotations[annid]
        if allowedtypes is not None and ann.type not in allowedtypes:
            continue
        yield ann
def reverse_iter(self, **kwargs):
    """
    Iterate like iter(), but in reverse document order.

    Args:
        **kwargs: passed through to iter() (reverse is fixed to True).

    Returns:
        same kind of generator as iter()
    """
    return self.iter(reverse=True, **kwargs)
def get(
    self, annid: Union[int, Annotation], default=None
) -> Union[Annotation, None]:
    """
    Return the annotation with the given id, or `default` if absent.

    For convenience an Annotation may be passed instead of an id, in which
    case its id is used -- this supports legacy code written as
    ``annset.get(annset.add(b, e, t))``.

    Args:
        annid: an annotation id, or an Annotation whose id is used
        default: value returned when the id is not in the set

    Returns:
        the annotation, or the default value
    """
    key = annid.id if isinstance(annid, Annotation) else annid
    return self._annotations.get(key, default)
def first(self):
"""
Return the first (or only) annotation in the set by offset.
Returns:
first annotation
"""
sz = len(self._annotations)
if sz == 0:
raise Exception("Empty set, there is no first annotation")
elif sz == 1:
return next(iter(self._annotations.values()))
self._create_index_by_offset()
_, _, annid = next(self._index_by_offset.irange(reverse=False))
return self._annotations[annid]
def last(self):
"""
Return the last (or only) annotation by offset.
Returns:
last annotation
"""
sz = len(self._annotations)
if sz == 0:
raise Exception("Empty set, there is no last annotation")
elif sz == 1:
return next(iter(self._annotations.values()))
self._create_index_by_offset()
_, _, annid = next(self._index_by_offset.irange(reverse=True))
return self._annotations[annid]
def for_idx(self, idx, default=None):
"""
Return the annotation corresponding to the index idx in the set.
This returns the
annotation stored at the index, as added to the set. The order usually
depends on the insertion time.
If no annotation with the given index is specified, the value
specified for `default` is returned.
Args:
idx: index of the annotation in the set
default: default value to return if now annotation with the given index exists
Returns:
the annotation with the given index or the default value
"""
# TODO: we could make this more memory efficient (but slower) by
# iterating over values until getting idxth
tmplist = list(self._annotations.values())
if idx < len(tmplist):
return tmplist[idx]
else:
return default
    def __getitem__(self, annid):
        """
        Gets the annotation with the given annotation id or throws an exception.

        Args:
            annid: the annotation id

        Returns:
            the annotation

        Raises:
            KeyError: if no annotation with the given id is in the set
        """
        return self._annotations[annid]
    def with_type(self, *anntype: Union[str, Iterable], non_overlapping: bool = False):
        """
        Gets annotations of the specified type(s).
        Creates the type index if necessary.

        Args:
            anntype: one or more types or type lists. The union of all types
                specified that way is used to filter the annotations. If no type
                is specified, an empty detached set is returned.
            non_overlapping: if True, only return annotations of any of the
                given types which do not overlap with other annotations. If
                there are several annotations that start at
                the same offset, use the type that comes first in the
                parameters, if there are more than one of that type, use the
                one that would come first in the usual sort order.

        Returns:
            a detached immutable annotation set with the matching annotations.
        """
        # Flatten the mixed str / iterable-of-str arguments into a flat list
        # of type names (order matters for the priority logic below).
        atypes = []
        for atype in anntype:
            if isinstance(atype, str):
                atypes.append(atype)
            else:
                for t in atype:
                    atypes.append(t)
        if not atypes:
            return self.detach(restrict_to=[])
        self._create_index_by_type()
        # Collect the ids of all annotations having any of the wanted types.
        annids = set()
        for t in atypes:
            idxs = self._index_by_type.get(t)
            if idxs:
                annids.update(idxs)
        if non_overlapping:
            # need to get annotations grouped by start offset and sorted according to
            # what the Annotation class defines
            allanns = sorted(annids, key=lambda x: self._annotations[x])
            allanns = [self._annotations[x] for x in allanns]
            allannsgrouped = []
            curstart = None
            curset = None
            for ann in allanns:
                if curstart is None:
                    curset = [ann]
                    curstart = ann.start
                elif curstart == ann.start:
                    curset.append(ann)
                else:
                    allannsgrouped.append(curset)
                    curset = [ann]
                    curstart = ann.start
            if curset:
                allannsgrouped.append(curset)
            retanns = []
            # now go through all the grouped annoations and select the top priority one
            # then skip to the next group that does not overlap with the one we just selected
            # Earlier position in `atypes` means higher priority number.
            typepriority = dict()
            for i, atype in enumerate(atypes):
                typepriority[atype] = len(atypes) - i
            curminoffset = 0
            for group in allannsgrouped:
                # instead of sorting, go through the group and find the top priority one
                topann = None
                if len(group) == 1:
                    if group[0].start >= curminoffset:
                        topann = group[0]
                elif len(group) == 0:
                    raise Exception("We should never get a 0 size group here!")
                else:
                    # find the first candidate not overlapped by the previous pick
                    for i, ann in enumerate(group):
                        if ann.start >= curminoffset:
                            topann = ann
                            break
                    # compare the remaining candidates against the current best:
                    # higher type priority wins, then longer span, then larger id
                    for ann in group[i + 1:]:
                        if ann.start < curminoffset:
                            continue
                        if typepriority[ann.type] > typepriority[topann.type]:
                            topann = ann
                        elif typepriority[ann.type] == typepriority[topann.type]:
                            if ann.end > topann.end:
                                topann = ann
                            elif ann.end == topann.end:
                                if ann.id > topann.id:
                                    topann = ann
                # topann stays None when the whole group is overlapped by the
                # previously selected annotation; such groups are skipped
                if topann is not None:
                    retanns.append(topann)
                    curminoffset = topann.end
            annids = [ann.id for ann in retanns]
        return self.detach(restrict_to=annids)
def by_offset(self):
"""
Yields lists of annotations which start at the same offset.
"""
self._create_index_by_offset()
lastoff = -1
curlist = []
for ann in self.iter():
if ann.start != lastoff:
if lastoff != -1:
yield curlist
lastoff = ann.start
curlist = [ann]
else:
curlist.append(ann)
if lastoff != -1:
yield curlist
def by_span(self):
"""
Yields list of annotations with identical spans. Note: first needs
to sort all annotations!
"""
self._create_index_by_offset()
lastsoff = -1
lasteoff = -1
curlist = []
for ann in self.iter_ol():
if ann.start != lastsoff or ann.end != lasteoff:
if lastsoff != -1:
yield curlist
lastsoff = ann.start
lasteoff = ann.end
curlist = [ann]
else:
curlist.append(ann)
if lastsoff != -1:
yield curlist
    @property
    def type_names(self) -> KeysView[str]:
        """
        Gets the names of all types in this set. Creates the type index
        if necessary.

        Note: this returns a live view over the type index, not a copy.
        """
        self._create_index_by_type()
        return self._index_by_type.keys()
@support_annotation_or_set
def startingat(
self, start: int, _ignored: Any = None, annid=None, include_self=False
):
"""
Gets all annotations starting at the given offset (empty if none) and
returns them in a detached annotation set.
Note: this can be called with an annotation or annotation set instead
of the start offset. If called with an annotation, this annotation is
not included in the result set if `include_self` is `False`
Args:
start: the offset where annotations should start
_ignored: dummy parameter to allow the use of annotations and
annotation sets
annid: dummy parameter to allow the use of annotations and
annotation sets
include_self: should annotation passed be included in the result
Returns:
detached annotation set of matching annotations
"""
self._create_index_by_offset()
intvs = self._index_by_offset.starting_at(start)
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@support_annotation_or_set
def start_min_ge(
self, offset: int, _ignored: Any = None, annid=None, include_self=False
):
"""Gets all annotations starting at the first possible offset
at or after the given offset and returns them in an immutable
annotation set.
Args:
offset: The offset
_ignored: dummy parameter to allow the use of annotations and
annotation sets
annid: annotation id
include_self: should annotation passed be included in the result
Returns:
annotation set of matching annotations
"""
self._create_index_by_offset()
intvs = self._index_by_offset.starting_from(offset)
# now select only those first ones which all have the same offset
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
retids = set()
startoff = None
for intv in intvs:
if startoff is None:
startoff = intv[0]
if ignore is not None:
if ignore != intv[2]:
retids.add(intv[2])
else:
retids.add(intv[2])
elif startoff == intv[0]:
if ignore is not None:
if ignore != intv[2]:
retids.add(intv[2])
else:
retids.add(intv[2])
else:
break
return self.detach(restrict_to=retids)
@support_annotation_or_set
def start_ge(self, start: int, _ignored: Any = None, annid=None,
include_self=False):
"""
Return the annotations that start at or after the given start offset.
Args:
start: Start offset
_ignored: dummy parameter to allow the use of annotations and
annotation sets
annid: annotation id
include_self: should annotation passed be included in the result
Returns:
an immutable annotation set of the matching annotations
"""
self._create_index_by_offset()
intvs = self._index_by_offset.starting_from(start)
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@support_annotation_or_set
def start_lt(self, offset: int, _ignored: Any = None, _annid=None):
"""
Returns the annotations that start before the given offset
(or annotation). This also accepts an annotation or set.
Args:
offset: offset before which the annotations should start
_ignored: dummy parameter to allow the use of annotations and
annotation sets
_annid: annotation id
Returns:
an immutable annotation set of the matching annotations
"""
self._create_index_by_offset()
intvs = self._index_by_offset.starting_before(offset)
return self._restrict_intvs(intvs)
@support_annotation_or_set
def overlapping(self, start: int, end: int, annid=None, include_self=False):
"""
Gets annotations overlapping with the given span. Instead of the
start and end offsets,
also accepts an annotation or annotation set.
For each annotation ann in the result set, ann.overlapping(span)
is True
Args:
start: start offset of the span
end: end offset of the span
annid: the annotation id of the annotation representing the span.
(Default value = None)
include_self: if True and the annotation id for the span is given,
do not include that annotation in the result set.
(Default value = False)
Returns:
an immutable annotation set with the matching annotations
"""
self._create_index_by_offset()
intvs = self._index_by_offset.overlapping(start, end)
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@support_annotation_or_set
def covering(self, start: int, end: int, annid=None, include_self=False):
"""
Gets the annotations which contain the given offset range
(or annotation/annotation set), i.e. annotations such that the given
offset range is within the annotation.
For each annotation ann in the result set, ann.covering(span) is True.
Args:
start: the start offset of the span
end: the end offset of the span
annid: the annotation id of the annotation representing the span.
(Default value = None)
include_self: if True and the annotation id for the span is given,
do not include that
annotation in the result set. (Default value = False)
Returns:
an immutable annotation set with the matching annotations, if any
"""
self._create_index_by_offset()
intvs = self._index_by_offset.covering(start, end)
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@support_annotation_or_set
def within(self, start: int, end: int, annid=None, include_self=False):
"""
Gets annotations that fall completely within the given offset range,
i.e. annotations such that the offset range is covering each of the
annotation.
For each annotation ann in the result set, ann.within(span) is True.
Args:
start: start offset of the range
end: end offset of the range
annid: the annotation id of the annotation representing the span.
(Default value = None)
include_self: if True and the annotation id for the span is given,
do not include that
annotation in the result set. (Default value = False)
Returns:
an immutable annotation set with the matching annotations
"""
if start > end:
raise Exception("Invalid offset range: {},{}".format(start, end))
else:
self._create_index_by_offset()
intvs = self._index_by_offset.within(start, end)
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@support_annotation_or_set
def coextensive(self, start: int, end: int, annid=None, include_self=False):
"""
Returns a detached annotation set with all annotations that start and
end at the given offsets.
For each annotation ann in the result set, ann.coextensive(span) is True.
Args:
start: start offset of the span
end: end offset of the span
annid: the annotation id of the annotation representing the span.
(Default value = None)
include_self: if True and the annotation id for the span is given,
do not include that annotation in the result set.
Returns:
annotation set with all annotations that have the same start
and end offsets.
"""
self._create_index_by_offset()
intvs = self._index_by_offset.at(start, end)
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@support_annotation_or_set
def before(
self, start: int, end: int, annid=None, include_self=False, immediately=False
):
"""
Returns a detached annotation set with all annotations that end
before the given offsets.
For each annotation ann in the result set, ann.isbefore(span) is True.
Args:
start: start offset of the span
end: end offset of the span
annid: the annotation id of the annotation representing the span.
(Default value = None)
include_self: if True and the annotation id for the span is given,
do not include that annotation in the result set.
immediately: if True, the end offset of the annotations return
must coincide with the start offset of the span (default=False)
Returns:
annotation set with all annotations that end before the given span
"""
self._create_index_by_offset()
if immediately:
intvs = self._index_by_offset.ending_at(start)
else:
intvs = self._index_by_offset.ending_to(start)
# we need to filter self if self is zero-length!
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@support_annotation_or_set
def after(
self, start: int, end: int, annid=None, include_self=False, immediately=False
):
"""
Returns a detached annotation set with all annotations that start
after the given span.
For each annotation ann in the result set, ann.isafter(span) is True.
Args:
start: start offset of the span
end: end offset of the span
annid: the annotation id of the annotation representing the span.
(Default value = None)
include_self: if True and the annotation id for the span is given,
do not include that annotation in the result set.
immediately: if True, the start offset of the annotations
returned must coincide with the
end offset of the span (default=False)
Returns:
annotation set with all annotations that start after the given span
"""
self._create_index_by_offset()
if immediately:
intvs = self._index_by_offset.starting_at(end)
else:
intvs = self._index_by_offset.starting_from(end)
# we need to filter self if self is zero-length!
if not include_self and annid is not None:
ignore = annid
else:
ignore = None
return self._restrict_intvs(intvs, ignore=ignore)
@property
def span(self) -> Span:
"""
Returns a tuple with the start and end offset the corresponds to the
smallest start offset of any annotation
and the largest end offset of any annotation.
(Builds the offset index)
"""
if len(self._annotations) == 0:
return Span(0, 0)
self._create_index_by_offset()
return Span(self._index_by_offset.min_start(), self._index_by_offset.max_end())
def __contains__(self, annorannid: Union[int, Annotation]) -> bool:
"""
Provides 'annotation in annotation_set' functionality
Args:
:param annorannid: the annotation instance or annotation id to check
Returns:
`True` if the annotation exists in the set, `False` otherwise
"""
if isinstance(annorannid, Annotation):
return annorannid.id in self._annotations
return (
annorannid in self._annotations
) # On the off chance someone passed an ID in directly
contains = __contains__
def __repr__(self) -> str:
"""
Returns the string representation of the set.
"""
return "AnnotationSet({})".format(repr(list(self.iter())))
def to_dict(self, anntypes=None, **kwargs):
"""
Convert an annotation set to its dict representation.
Args:
anntypes: if not None, an iterable of annotation types to include
**kwargs: passed on to the dict creation of contained annotations.
Returns:
the dict representation of the annotation set.
"""
if anntypes is not None:
anntypesset = set(anntypes)
anns_list = list(
val.to_dict(**kwargs)
for val in self._annotations.values()
if val.type in anntypesset
)
else:
anns_list = list(
val.to_dict(**kwargs) for val in self._annotations.values()
)
return {
# NOTE: Changelog is not getting added as it is stored in the document part!
"name": self.name,
"annotations": anns_list,
"next_annid": self._next_annid,
}
@staticmethod
def from_dict(dictrepr, owner_doc=None, **kwargs):
"""
Create an AnnotationSet from its dict representation and optionally
set the owning document.
Args:
dictrepr: the dict representation of the annotation set
owner_doc: the owning document
**kwargs: passed on to the creation of annotations
Returns:
the annotation set
"""
annset = AnnotationSet(dictrepr.get("name"), owner_doc=owner_doc)
annset._next_annid = dictrepr.get("next_annid")
if dictrepr.get("annotations"):
annset._annotations = dict(
(int(a["id"]), Annotation.from_dict(a, owner_set=annset, **kwargs))
for a in dictrepr.get("annotations")
)
else:
annset._annotations = {}
return annset
@staticmethod
def from_anns(anns, deep_copy=False, **kwargs):
"""
Create a detached AnnotationSet from an iterable of annotations.
Args:
anns: an iterable of annotations
deep_copy: if the annotations should get added as copies
(default) or deep copies.
Returns:
the annotation set
"""
annset = AnnotationSet(name="", owner_doc=None)
annset._annotations = dict()
maxid = 0
for ann in anns:
if deep_copy:
addann = ann.deepcopy()
else:
addann = ann.copy()
annset._annotations[addann.id] = addann
if addann.id > maxid:
maxid = addann.id
annset._next_annid = maxid
annset._is_immutable = True
return annset
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,511
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/document.py
|
"""
Module that implements the Document class for representing gatenlp documents with
features and annotation sets.
"""
from typing import KeysView, Callable, Union, List
import logging
import importlib
import copy as lib_copy
from gatenlp.annotation_set import AnnotationSet
from gatenlp.annotation import Annotation
from gatenlp.offsetmapper import OffsetMapper, OFFSET_TYPE_PYTHON, OFFSET_TYPE_JAVA
from gatenlp.features import Features
from gatenlp.utils import in_notebook, in_colab
from gatenlp.changelog import ChangeLog
from gatenlp.changelog_consts import (
ACTION_ADD_ANN,
ACTION_ADD_ANNSET,
ACTION_CLEAR_ANNS,
ADDANN_UPDATE_FEATURES,
ACTION_CLEAR_ANN_FEATURES,
ACTION_CLEAR_DOC_FEATURES,
ACTION_DEL_ANN,
ACTION_DEL_ANN_FEATURE,
ACTION_DEL_DOC_FEATURE,
ACTION_SET_ANN_FEATURE,
ACTION_SET_DOC_FEATURE,
ADDANN_ADD_NEW_FEATURES,
ADDANN_ADD_WITH_NEW_ID,
ADDANN_IGNORE,
ADDANN_REPLACE_ANNOTATION,
ADDANN_REPLACE_FEATURES,
)
# NOTE(review): calling basicConfig() at import time configures the root
# logger as a side effect of importing this library module — confirm intent;
# libraries conventionally leave root-logger configuration to the application.
logging.basicConfig()
logger = logging.getLogger(__name__)
logger.setLevel(logging.INFO)
class Document:
"""
Represent a GATE document. This is different from the original Java GATE representation in
several ways:
* the text is not mutable and can only be set at creation time, so there is no "edit" method
* as a feature bearer, all the methods to set, get and manipulate features are part of this
class, there is
no separate "FeatureMap" to store them
* does not support listener callbacks
* there is no separate abstraction for "content", the only content possible is text which
      is a unicode string that can be accessed with the "text()" method
* Spans of text can be directly accessed using doc[from:to]
* Features may only have string keys and values which can be json-serialised
    * Annotation offsets by default are number of Unicode code points, this is different from Java
where the offsets are UTF-16 Unicode code units
* Offsets of all annotations can be changed from/to Java (from python index of unicode
codepoint to Java index of UTF-16 code unit and back)
* No part of the document has to be present, not even the text (this allows saving just
the annotations separately from the text)
* Once the text has been set, it is immutable (no support to edit text and change annotation
offsets accordingly)
Args:
text: the text of the document. The text can be None to indicate that no initial text
should be set. Once the text has been set for a document, it is immutable and cannot
be changed.
features: the initial document features to set, a sequence of key/value tuples
changelog: a ChangeLog instance to use to log changes.
"""
    def __init__(self, text: str = None, features=None, changelog: ChangeLog = None):
        # NOTE(review): assert-based validation is stripped under `python -O`;
        # consider raising explicitly if strict checking is required.
        if text is not None:
            assert isinstance(text, str)
        if changelog is not None:
            assert isinstance(changelog, ChangeLog)
        self._changelog = changelog
        # Feature changes are routed through _log_feature_change so they get
        # recorded in the changelog, if one is set.
        self._features = Features(features, _change_logger=self._log_feature_change)
        # Maps annotation set name -> AnnotationSet.
        self._annotation_sets = dict()
        self._text = text
        # Offsets start out in python (unicode codepoint) offset type.
        self.offset_type = OFFSET_TYPE_PYTHON
        self._name = ""
    @property
    def name(self):
        """Return the name of the document (the empty string if no name is set)."""
        return self._name
@name.setter
def name(self, val):
"""
Args:
val:
Returns:
"""
if val is None:
val = ""
if not isinstance(val, str):
raise Exception("Name must be a string")
self._name = val
if self._changelog is not None:
ch = {"command": "name:set"}
ch["name"] = val
self._changelog.append(ch)
def _ensure_type_python(self) -> None:
""" """
if self.offset_type != OFFSET_TYPE_PYTHON:
raise Exception(
"Document cannot be used if it is not type PYTHON, "
+ "use to_type(OFFSET_TYPE_PYTHON) first"
)
def _fixup_annotations(self, method: Callable) -> None:
"""
Args:
method: Callable:
Returns:
"""
annset_names = self._annotation_sets.keys()
for annset_name in annset_names:
annset = self._annotation_sets[annset_name]
if annset._annotations is not None:
for ann in annset._annotations.values():
ann._start = method(ann._start)
ann._end = method(ann._end)
def to_offset_type(self, offsettype: str) -> Union[OffsetMapper, None]:
"""Convert all the offsets of all the annotations in this document to the
required type, either OFFSET_TYPE_JAVA or OFFSET_TYPE_PYTHON. If the offsets
are already of that type, this does nothing.
NOTE: if the document has a ChangeLog, it is NOT also converted!
The method returns the offset mapper if anything actually was converted,
otherwise None.
Args:
offsettype: either OFFSET_TYPE_JAVA or OFFSET_TYPE_PYTHON
offsettype: str:
Returns:
offset mapper or None
"""
if offsettype == self.offset_type:
return None
if offsettype == OFFSET_TYPE_JAVA and self.offset_type == OFFSET_TYPE_PYTHON:
# convert from currently python to java
om = OffsetMapper(self._text)
self._fixup_annotations(om.convert_to_java)
self.offset_type = OFFSET_TYPE_JAVA
elif offsettype == OFFSET_TYPE_PYTHON and self.offset_type == OFFSET_TYPE_JAVA:
# convert from currently java to python
om = OffsetMapper(self._text)
self._fixup_annotations(om.convert_to_python)
self.offset_type = OFFSET_TYPE_PYTHON
else:
raise Exception("Odd offset type")
return om
def apply_changes(self, changes, handle_existing_anns=ADDANN_ADD_WITH_NEW_ID):
"""Apply changes from a ChangeLog to this document. `changes` can be a ChangeLog instance,
a sequence of change objects (dicts) as stored in a ChangeLog instance, or a single
change object.
The document is modified in-place.
Args:
changes: one or more changes
handle_existing_anns: what to do if the change from the changelog tries to
add an annotation with an annotation id that already exists in the target set.
(Default value = ADDANN_ADD_WITH_NEW_ID)
"""
if isinstance(changes, dict):
changes = [changes]
elif isinstance(changes, ChangeLog):
changes = changes.changes
for change in changes:
cmd = change.get("command")
fname = change.get("feature")
fvalue = change.get("value")
features = change.get("features")
sname = change.get("set")
annid = change.get("id")
if cmd is None:
raise Exception("Change without field 'command'")
if cmd == ACTION_ADD_ANNSET:
assert sname is not None
self.annset(sname)
elif cmd == ACTION_ADD_ANN:
assert sname is not None
assert annid is not None
anns = self.annset(sname)
ann = anns.get(annid)
start = change.get("start")
end = change.get("end")
anntype = change.get("type")
if ann is None:
anns.add(start, end, anntype, annid=annid, features=features)
else:
if handle_existing_anns == ADDANN_IGNORE:
pass
elif handle_existing_anns == ADDANN_ADD_WITH_NEW_ID:
anns.add(start, end, anntype)
elif handle_existing_anns == ADDANN_REPLACE_ANNOTATION:
anns.remove(annid)
anns.add(start, end, anntype, annid)
elif handle_existing_anns == ADDANN_UPDATE_FEATURES:
ann.features.update(features)
elif handle_existing_anns == ADDANN_REPLACE_FEATURES:
ann.features.clear()
ann.features.update(features)
elif handle_existing_anns == ADDANN_ADD_NEW_FEATURES:
fns = ann.features.names()
for f in features.keys():
if f not in fns:
ann.features[f] = features[f]
elif handle_existing_anns == ADDANN_IGNORE:
pass
elif cmd == ACTION_CLEAR_ANNS:
assert sname is not None
anns = self.annset(sname)
anns.clear()
elif cmd == ACTION_CLEAR_ANN_FEATURES:
assert sname is not None
assert annid is not None
anns = self.annset(sname)
ann = anns.get(annid)
if ann is not None:
ann.features.clear()
else:
pass # ignore, could happen with a detached annotation
elif cmd == ACTION_CLEAR_DOC_FEATURES:
self.features.clear()
elif cmd == ACTION_SET_ANN_FEATURE:
assert fname is not None
assert sname is not None
assert annid is not None
ann = self.annset(sname).get(annid)
ann.features[fname] = fvalue
elif cmd == ACTION_DEL_ANN_FEATURE:
assert sname is not None
assert annid is not None
anns = self.annset(sname)
ann = anns.get(annid)
if ann is not None:
if fname is not None:
ann.features.pop(fname, None)
else:
pass # ignore, could happen with a detached annotation
elif cmd == ACTION_DEL_DOC_FEATURE:
assert fname is not None
self.features.pop(fname, None)
elif cmd == ACTION_DEL_ANN:
assert sname is not None
assert annid is not None
anns = self.annset(sname)
anns.remove(annid)
elif cmd == ACTION_SET_DOC_FEATURE:
assert fname is not None
self.features[fname] = fvalue
elif cmd == ACTION_CLEAR_DOC_FEATURES:
self._features.clear()
elif cmd == ACTION_DEL_DOC_FEATURE:
assert fname is not None
del self._features[fname]
else:
raise Exception("Unknown ChangeLog action: ", cmd)
    @property
    def features(self):
        """
        Return the document features as a Features instance.

        Changes made through the returned object are reflected in the
        document and recorded in the change log, if one is set.
        """
        return self._features
    @property
    def changelog(self):
        """Return the ChangeLog, or None if no ChangeLog has been set."""
        return self._changelog
    @changelog.setter
    def changelog(self, chlog):
        """Make the document use the given changelog to record all changes
        from this moment on.

        Args:
            chlog: the new changelog to use, or None to stop recording changes
        """
        self._changelog = chlog
    @property
    def text(self) -> str:
        """
        Return the text of the document; for a partial document the text may
        be None.

        Raises an exception if the document offsets are not in python offset
        type (convert with to_offset_type first).
        """
        self._ensure_type_python()
        return self._text
@text.setter
def text(self, value: str) -> None:
"""
Set the text of the document. This is only possible as long as it has not been set
yet, after that, the text is immutable.
IMPORTANT: it is possible to add arbitrary annotations to a document which does not have any
text. This is meant to allow handling of annotation-only representations.
However, if the text is set after annotations have been added, annotation offsets are not
checked and it is possible to thus create an invalid document where annotations refer to
text ranges that do not exist!
Args:
value: the text for the document
value: str:
Returns:
"""
if self._text is None:
self._text = value
else:
raise NotImplementedError("Text cannot be modified")
def _log_feature_change(
self, command: str, feature: str = None, value=None
) -> None:
"""
Args:
command: str:
feature: str: (Default value = None)
value: (Default value = None)
Returns:
"""
if self._changelog is None:
return
command = "doc-" + command
ch = {"command": command}
if command == "doc-feature:set":
ch["feature"] = feature
ch["value"] = value
self._changelog.append(ch)
def __len__(self) -> int:
"""
Return the length of the text.
Note: this will convert the type of the document to python!
:return: the length of the document text
"""
self._ensure_type_python()
if self._text is None:
return 0
else:
return len(self._text)
    def __getitem__(self, span) -> str:
        """
        Get the text for the given span.

        :param span: a single number, an offset range of the form from:to, an
            annotation, an annotation set, or any object with `start` and `end`
            attributes; for annotations and sets their offset span is used.
        :return: the text of the span
        """
        self._ensure_type_python()
        if isinstance(span, Annotation):
            return self.text[span._start:span._end]
        if isinstance(span, AnnotationSet):
            # AnnotationSet exposes start/end as callables
            return self.text[span.start():span.end()]
        if hasattr(span, "start") and hasattr(span, "end"):
            # duck-typed span-like object (e.g. Span) with attribute access
            return self.text[span.start:span.end]
        return self.text[span]
def annset(self, name: str = "") -> AnnotationSet:
"""
Get the named annotation set, if name is not given or the empty string,
the default annotation set.
If the annotation set does not already exist, it is created.
Args:
name: the annotation set name, the empty string is used for the
"default annotation set".
name: str: (Default value = "")
Returns:
the specified annotation set.
"""
self._ensure_type_python()
if name not in self._annotation_sets:
annset = AnnotationSet(owner_doc=self, name=name)
self._annotation_sets[name] = annset
if self._changelog:
self._changelog.append({"command": "annotations:add", "set": name})
return annset
else:
return self._annotation_sets[name]
def annset_names(self) -> List[str]:
"""
Args:
Returns:
:return: annotation set names
"""
self._ensure_type_python()
return list(self._annotation_sets.keys())
def remove_annset(self, name: str):
"""Completely remove the annotation set.
Args:
name: name of the annotation set to remove
name: str:
Returns:
"""
if name not in self._annotation_sets:
raise Exception(f"AnnotationSet with name {name} does not exist")
del self._annotation_sets[name]
if self._changelog:
self._changelog.append({"command": "annotations:remove", "set": name})
    def anns(self, ann_spec):
        """
        Return a detached annotation set with all annotations which match the annotation specification.

        Args:
            ann_spec: either a single string which is interpreted as an annotation set name, or a list where
                each element is either a string (annotation set name) or a tuple. If an element is a tuple, the
                first element of the tuple must be the annotation set name and the second element either a type
                name or a list of type names.

        Returns:
            a detached, immutable set with all the annotations matching the annotation specification
        """
        # NOTE(review): AnnotationSet.create_from is not defined in the visible
        # part of annotation_set.py (the visible factory is from_anns) —
        # confirm create_from exists elsewhere in that module.
        return AnnotationSet.create_from(self.yield_anns(ann_spec))
def yield_anns(self, ann_spec):
"""
Yield all annotations which match the annotation specification.
The order of the annotations is unespecified.
Args:
annset_spec: either a single string which is interpreted as an annotation set name, or a list where
each element is either a string (annotation set name) or a tuple. If an element is a tuple, the
first element of the tuple must be the annotation set name and the second element either a type
name or a list of type names.
Yields:
all the annotations matching the annotation specification
"""
if isinstance(ann_spec, str):
tmpset = self._annotation_sets.get(ann_spec)
if tmpset is not None:
for ann in tmpset._annotations.values():
yield ann
return
for spec in ann_spec:
if isinstance(spec, str):
tmpset = self._annotation_sets.get(spec)
if tmpset is not None:
for ann in tmpset._annotations.values():
yield ann
else:
setname, types = spec
if isinstance(types, str):
types = [types]
tmpset = self._annotation_sets.get(setname)
if tmpset is not None:
for ann in tmpset._annotations.values():
if ann.type in types:
yield ann
    def __repr__(self) -> str:
        """
        Return the full string representation of the document, including the whole text,
        all features and the repr of every annotation set (can be very large).

        Returns:
            string representation
        """
        return "Document({},features={},anns={})".format(
            self.text, self._features, self._annotation_sets.__repr__()
        )
def __str__(self) -> str:
asets = (
"["
+ ",".join([f"'{k}':{len(v)}" for k, v in self._annotation_sets.items()])
+ "]"
)
return "Document({},features={},anns={})".format(
self.text, self._features, asets
)
    def to_dict(self, offset_type=None, annsets=None, **kwargs):
        """Convert this instance to a dictionary that can be used to re-create the instance
        with from_dict.

        NOTE: if there is an active changelog, it is not included in the output as this
        field is considered a transient field!

        Args:
            offset_type: convert to the given offset type on the fly (Default value = None)
            annsets: if not None, a list of annotation set/type specifications: each element
                is either a string, the name of the annotation set to include, or a tuple where the
                first element is the annotation set name and the second element is either a
                type name or a list of type names. The same annotation set name should not be used
                in more than one specification.
            **kwargs: get passed on to the to_dict methods of included objects.

        Returns:
            the dictionary representation of this instance
        """
        # if the specified offset type is equal to what we have, do nothing, otherwise
        # create an offset mapper and pass it down to where we actually convert the annotations
        if offset_type is not None:
            assert offset_type == OFFSET_TYPE_JAVA or offset_type == OFFSET_TYPE_PYTHON
            if offset_type != self.offset_type:
                if self._text is not None:
                    om = OffsetMapper(self._text)
                    kwargs["offset_mapper"] = om
                kwargs["offset_type"] = offset_type
        else:
            # no conversion requested: store the current offset type in the output
            offset_type = self.offset_type
        # create the annotation sets map, restricted by annsets if given
        if annsets is not None:
            annsets_dict = {}
            for spec in annsets:
                if isinstance(spec, str):
                    # plain set name: include the whole set (if it exists)
                    tmpset = self._annotation_sets.get(spec)
                    if tmpset is not None:
                        annsets_dict[spec] = tmpset.to_dict(**kwargs)
                else:
                    # (setname, type-or-types) tuple: include only the given types
                    setname, types = spec
                    if isinstance(types, str):
                        types = [types]
                    tmpset = self._annotation_sets.get(setname)
                    if tmpset is not None:
                        annsets_dict[setname] = self._annotation_sets[setname].to_dict(
                            anntypes=types, **kwargs
                        )
        else:
            annsets_dict = {
                name: aset.to_dict(**kwargs)
                for name, aset in self._annotation_sets.items()
            }
        return {
            "annotation_sets": annsets_dict,
            "text": self._text,
            "features": self._features.to_dict(),
            "offset_type": offset_type,
            "name": self.name,
        }
@staticmethod
def from_dict(dictrepr, **_kwargs):
"""Return a Document instance as represented by the dictionary dictrepr.
Args:
dictrepr: return: the initialized Document instance
**_kwargs: not used, ignored
Returns:
the initialized Document instance
"""
feats = dictrepr.get("features")
doc = Document(dictrepr.get("text"), features=feats)
doc.name = dictrepr.get("name")
doc.offset_type = dictrepr.get("offset_type")
if (
doc.offset_type != OFFSET_TYPE_JAVA
and doc.offset_type != OFFSET_TYPE_PYTHON
):
raise Exception("Invalid offset type, cannot load: ", doc.offset_type)
annsets = {
name: AnnotationSet.from_dict(adict, owner_doc=doc)
for name, adict in dictrepr.get("annotation_sets").items()
}
doc._annotation_sets = annsets
return doc
    def save(
        self,
        destination,
        fmt=None,
        offset_type=None,
        mod="gatenlp.serialization.default",
        annsets=None,
        **kwargs,
    ):
        """Save the document to the destination file.

        Args:
            destination: either a file name or something that has a write(string) method.
            fmt: serialization format; by default the format is inferred from the file
                extension. May also be a callable which is used as the saver directly.
            offset_type: store using the given offset type or keep the current if None
                (Default value = None)
            mod: module where the document saver is implemented.
                (Default value = "gatenlp.serialization.default")
            annsets: if not None, a list of annotation set names or tuples of set name and a
                list of annotation types to include in the serialized document.
            kwargs: additional parameters for the document saver.
        """
        if annsets is not None:
            kwargs["annsets"] = annsets
        if fmt is None or isinstance(fmt, str):
            # look up the saver in the serialization module by format name / extension
            m = importlib.import_module(mod)
            saver = m.get_document_saver(destination, fmt)
            saver(Document, self, to_ext=destination, offset_type=offset_type, **kwargs)
        else:
            # assume fmt is a callable to get used directly
            fmt(Document, self, to_ext=destination, offset_type=offset_type, **kwargs)
def save_mem(
self,
fmt="json",
offset_type=None,
mod="gatenlp.serialization.default",
**kwargs,
):
"""Serialize to a string or bytes in the given format.
Args:
fmt: serialization format to use. (Default value = "json")
offset_type: store using the given offset type or keep the current if None
(Default value = None)
mod: module where the document saver is implemented.
(Default value = "gatenlp.serialization.default")
kwargs: additional parameters for the format.
"""
if not fmt:
raise Exception("Format required.")
if isinstance(fmt, str):
m = importlib.import_module(mod)
saver = m.get_document_saver(None, fmt)
return saver(Document, self, to_mem=True, offset_type=offset_type, **kwargs)
else:
fmt(Document, self, to_mem=True, offset_type=offset_type, **kwargs)
    @staticmethod
    def load(source, fmt=None, mod="gatenlp.serialization.default", **kwargs):
        """
        Load or import a document from the given source. The source can be a file path or
        file name or a URL. If the type of the source is str, then if it starts with
        "http[s]://" it will get treated as a URL. In order to deliberately use a file instead
        of a URL, create a pathlib Path; in order to deliberately use a URL instead of a file,
        parse the URL using urllib.

        Example: `Document.load(urllib.parse.urlparse(someurl), fmt=theformat)`
        Example: `Document.load(pathlib.Path(somepath), fmt=theformat)`

        NOTE: the offset type of the document is always converted to PYTHON when loading!

        Args:
            source: the URL or file path to load from.
            fmt: the format of the source. By default the format is inferred by the file
                extension. The format can be a format memnonic like "json", "html", or a known
                mime type like "text/bdocjs". May also be a callable used as loader directly.
            mod: the name of a module where the document loader is implemented.
                (Default value = "gatenlp.serialization.default")
            kwargs: additional format specific keyword arguments to pass to the loader

        Returns:
            the loaded document
        """
        if fmt is None or isinstance(fmt, str):
            m = importlib.import_module(mod)
            loader = m.get_document_loader(source, fmt)
            doc = loader(Document, from_ext=source, **kwargs)
        else:
            # assume fmt is a callable to get used directly
            doc = fmt(Document, from_ext=source, **kwargs)
        # normalize offsets: in-memory documents always use python offsets
        if doc.offset_type == OFFSET_TYPE_JAVA:
            doc.to_offset_type(OFFSET_TYPE_PYTHON)
        return doc
    @staticmethod
    def load_mem(source, fmt="json", mod="gatenlp.serialization.default", **kwargs):
        """
        Create a document from the in-memory serialization in source. Source can be a string
        or bytes, depending on the format.

        Note: the offset type is always converted to PYTHON when loading!

        Args:
            source: the string/bytes to deserialize
            fmt: if string, the format identifier or mime type (Default value = "json"),
                otherwise assumed to be a callable that retrieves and returns the document
            mod: the name of the module where the loader is implemented
                (Default value = "gatenlp.serialization.default")
            kwargs: additional arguments to pass to the loader

        Returns:
            the loaded document

        Raises:
            Exception: if fmt is empty/falsy
        """
        if not fmt:
            raise Exception("Format required.")
        if isinstance(fmt, str):
            m = importlib.import_module(mod)
            loader = m.get_document_loader(None, fmt)
            doc = loader(Document, from_mem=source, **kwargs)
        else:
            # assume fmt is a callable to get used directly
            doc = fmt(Document, from_mem=source, **kwargs)
        # normalize offsets: in-memory documents always use python offsets
        if doc.offset_type == OFFSET_TYPE_JAVA:
            doc.to_offset_type(OFFSET_TYPE_PYTHON)
        return doc
def __copy__(self):
"""
Creates a shallow copy except the changelog which is set to None. The document feature map is
a new instance, so features added in one copy will not show up in the other. However if
feature values of copied features are objects, they are shared between the copies.
Annotation sets are separate but the features of shared annotations are shared.
Returns:
shallow copy of the document
"""
doc = Document(self._text)
doc._annotation_sets = dict()
for name, aset in self._annotation_sets.items():
doc._annotation_sets[name] = aset.copy()
doc._annotation_sets[name]._owner_doc = doc
doc.offset_type = self.offset_type
doc._features = self._features.copy()
return doc
    def copy(self, annsets=None):
        """
        Creates a shallow copy except the changelog which is set to None. If annsets is
        specified, creates a shallow copy but also limits the annotations to the ones
        specified.

        Args:
            annsets: if not None, a list of annotation set/type specifications: each element
                is either a string, the name of the annotation set to include, or a tuple where
                the first element is the annotation set name and the second element is either a
                type name or a list of type names. The same annotation set name should not be
                used in more than one specification.

        Returns:
            shallow copy of the document, optionally with some annotations removed
        """
        if annsets is None:
            # no restriction: plain shallow copy
            return self.__copy__()
        doc = Document(self._text)
        doc.offset_type = self.offset_type
        doc._features = self._features.copy()
        doc._annotation_sets = dict()
        for spec in annsets:
            if isinstance(spec, str):
                # whole-set spec: copy the set if it exists
                tmpset = self._annotation_sets.get(spec)
                if tmpset is not None:
                    doc._annotation_sets[spec] = self._annotation_sets[spec].copy()
                    doc._annotation_sets[spec]._owner_doc = doc
            else:
                # (setname, type-or-types) spec: copy only annotations of those types
                setname, types = spec
                if isinstance(types, str):
                    types = [types]
                tmpset = self._annotation_sets.get(setname)
                if tmpset is not None:
                    annset = AnnotationSet(owner_doc=doc, name=setname)
                    anns = self.annset(setname).with_type(types)
                    for ann in anns:
                        annset.add_ann(ann)
                    doc._annotation_sets[setname] = annset
        return doc
    def deepcopy(self, annsets=None, memo=None):
        """
        Creates a deep copy, except the changelog which is set to None. If annsets is not
        None, the annotations in the copy are restricted to the given specification.

        Args:
            memo: the memoization dictionary to use.
            annsets: which annsets and types to include; same format as for copy()

        Returns:
            a deep copy of the document.
        """
        # deep-copy the features via their dict representation
        if self._features is not None:
            fts = lib_copy.deepcopy(self._features.to_dict(), memo)
        else:
            fts = None
        doc = Document(self._text, features=fts)
        doc._changelog = None
        doc.offset_type = self.offset_type
        if annsets is None:
            doc._annotation_sets = lib_copy.deepcopy(self._annotation_sets, memo)
        else:
            doc._annotation_sets = dict()
            for spec in annsets:
                if isinstance(spec, str):
                    # whole-set spec: deep-copy the set if it exists
                    tmpset = self._annotation_sets.get(spec)
                    if tmpset is not None:
                        doc._annotation_sets[spec] = lib_copy.deepcopy(tmpset, memo)
                        doc._annotation_sets[spec]._owner_doc = doc
                else:
                    # (setname, type-or-types) spec: deep-copy only annotations of those types
                    setname, types = spec
                    if isinstance(types, str):
                        types = [types]
                    tmpset = self._annotation_sets.get(setname)
                    if tmpset is not None:
                        annset = AnnotationSet(owner_doc=doc, name=setname)
                        anns = tmpset.with_type(types)
                        for ann in anns:
                            annset.add_ann(lib_copy.deepcopy(ann, memo))
                        doc._annotation_sets[setname] = annset
        return doc
def __deepcopy__(self, memo=None):
"""
Creates a deep copy, except the changelog which is set to None.
Args:
memo: the memoization dictionary to use.
Returns:
a deep copy of the document.
"""
return lib_copy.deepcopy(self, memo=memo)
def _repr_html_(self):
"""
Render function for Jupyter notebooks. Returns the html-ann-viewer HTML.
This renders the HTML for notebook, for offline mode, but does not add the JS
but instead initializes the JS in the notebook unless gatenlp.init_notebook()
has bee called already.
"""
if in_colab():
return self._show_colab(display=False)
else:
return self._show_notebook(display=False)
    # TODO: maybe allow manual selection of how to show the document, e.g. also by
    # writing to a tmp file and browsing in a browser, or pprint etc.
    def show(self, to=None, htmlid=None, annsets=None, doc_style=None):
        """
        Show the document, possibly in a Jupyter notebook. This allows to assign a specific
        htmlid so the generated HTML can be directly styled afterwards.

        This directly sends the rendered document to the cell (no display/HTML necessary) if
        the destination is a notebook.

        Args:
            to: if None, try to guess if this is called from within a notebook and if yes,
                which kind. Otherwise, explicitly specify where to show the document to, one
                of "console", "jupyter", "colab".
            htmlid: the HTML id prefix to use for classes and element ids.
            annsets: if not None, a list of annotation set/type specifications.
                Each element is either the name of a set to fully include, or a tuple with
                the name of the set as the first element and with a single type name or a
                list of type names as the second element
            doc_style: if not None, use this as the style for the document text box

        Returns:
            the string representation when showing to the console, otherwise None

        Raises:
            Exception: if `to` is not one of the supported destinations
        """
        if to == "colab":
            self._show_colab(htmlid=htmlid, display=True, annsets=annsets, doc_style=doc_style)
            return
        elif to == "jupyter":
            self._show_notebook(htmlid=htmlid, display=True, annsets=annsets, doc_style=doc_style)
            return
        elif to == "console":
            return self.__str__()
        elif to is not None:
            raise Exception(f"Not a valid value for parameter to: {to}. Use one of console, jupyter, colab")
        # to is None: auto-detect the environment
        if in_notebook():
            if in_colab():
                self._show_colab(htmlid=htmlid, display=True, annsets=annsets, doc_style=doc_style)
                return
            else:
                self._show_notebook(htmlid=htmlid, display=True, annsets=annsets, doc_style=doc_style)
                return
        else:
            return self.__str__()
    def _show_colab(self, htmlid=None, display=False, annsets=None, doc_style=None):
        """
        Render the document with the html-ann-viewer for Google Colab.

        Injects the jquery and gatenlp JS into the notebook, then either displays the
        rendered HTML (display=True) or returns it.

        Args:
            htmlid: the HTML id prefix to use for classes and element ids.
            display: if True, display the HTML in the cell, otherwise return it.
            annsets: annotation set/type specifications to restrict what is shown.
            doc_style: if not None, use this as the style for the document text box.

        Returns:
            the rendered HTML string if display is False, otherwise None
        """
        # imports are local so IPython is only required when actually rendering
        from gatenlp.serialization.default import JS_GATENLP_URL, JS_JQUERY_URL
        from IPython.display import display_html, Javascript
        from IPython.display import display as i_display
        i_display(Javascript(url=JS_JQUERY_URL))
        i_display(Javascript(url=JS_GATENLP_URL))
        html = self.save_mem(
            fmt="html-ann-viewer",
            notebook=True,
            add_js=False,
            offline=True,
            htmlid=htmlid,
            annsets=annsets,
            doc_style=doc_style,
        )
        if display:
            display_html(html, raw=True)
        else:
            return html
    def _show_notebook(self, htmlid=None, display=False, annsets=None, doc_style=None):
        """
        Render the document with the html-ann-viewer for a (non-Colab) Jupyter notebook.

        Initializes the viewer JS in the notebook once per session, then either displays
        the rendered HTML (display=True) or returns it.

        Args:
            htmlid: the HTML id prefix to use for classes and element ids.
            display: if True, display the HTML in the cell, otherwise return it.
            annsets: annotation set/type specifications to restrict what is shown.
            doc_style: if not None, use this as the style for the document text box.

        Returns:
            the rendered HTML string if display is False, otherwise None
        """
        from gatenlp.gatenlpconfig import gatenlpconfig
        from gatenlp.serialization.default import HtmlAnnViewerSerializer
        from IPython.display import display_html
        if not gatenlpconfig.notebook_js_initialized:
            # NOTE: "init_javscript" [sic] is the name as defined in the serialization module
            HtmlAnnViewerSerializer.init_javscript()
            gatenlpconfig.notebook_js_initialized = True
        html = self.save_mem(
            fmt="html-ann-viewer",
            notebook=True,
            add_js=False,
            offline=True,
            htmlid=htmlid,
            annsets=annsets,
            doc_style=doc_style,
        )
        if display:
            display_html(html, raw=True)
        else:
            return html
def attach(self, annset, name, check=True):
"""
Attach a detached set to the document. This should get used with caution and is mainly
intended for use inside the gatenlp library to allow for fast incremental creation of
new documents and document sets. The set can only be added if a set with the given name
does not yet exist at all.
Args:
annset: the annotation set to attach
name: the name for the annotation set
check: if False, prevent any checking. WARNING: this may create an inconsistent/illegal document!
"""
if name in self._annotation_sets:
raise Exception(f"Cannot attach set, a set with the name {name} already exists")
if check:
# check if the offsets are consistent with the document
mylen = len(self)
for ann in annset._annotations.values():
if ann.end > mylen:
raise Exception(f"Cannot attach set, annotation beyond text end: {ann}")
self._annotation_sets[name] = annset
annset._owner_doc = self
# class MultiDocument(Document):
# """
# NOTE: This is just experimental for now, DO NOT USE!
#
# A MultiDocument can store more than one document, each identified by their ids. One of those
# documents is always the "active" one and the MultiDocument can be used just like a Document
# with that content. In addition, there are methods to make each of the other documents active
# and to create mappings between annotations of pairs of documents.
#
# An AnnotationMapping is something that maps annotations to annotations, either for the same
# document, from the same or different sets, of for different documents. Once an annotation
# becomes part of a mapping, that annotation is becoming immutable. Even if the original
# annotation in the document changes or gets removed, the mapping retains the original copy of
# the annotation until the mapping is modified or removed.
# """
#
# # TODO: ALL necessary fields of the document must be references of mutable objects so that
# # if something is changed for the active document the one stored in the documents map is
# # really updated as well, or we must override the updating method to change both!
# # A better way could be to override all methods to always directly change the document in the
# # documents map, and simply pass on all calls to the activated document.
# # In that case, to_dict and from_dict would actually generate the fields for normal document
# # readers and ignore them on restore
# def __init__(
# self, text: str = None, features=None, changelog: ChangeLog = None, docid=0
# ):
# logger.warning("Experimental feature, DO NOT USE")
# self.documents = {} # map from document id to document
# self._mappings = None # TODO: we need to implement this
# self._docid = None
# doc = Document(text, features=features, changelog=changelog)
# self.documents[docid] = doc
# self.activate(docid)
#
# @property
# def docid(self):
# return self._docid
#
# def activate(self, docid=0):
# if docid not in self.documents:
# raise Exception(f"Cannot activate id {docid}, not in MultiDocument")
# doc = self.documents[docid]
# self._changelog = doc._changelog
# self._features = doc._features
# self._annotation_sets = doc._annotation_sets
# self._text = doc._text
# self.offset_type = OFFSET_TYPE_PYTHON
# self._name = doc._name
# self._docid = docid
#
# def add_document(self, doc, docid=None, activate=False):
# if docid is None:
# docid = len(self.documents)
# elif docid in self.documents:
# raise Exception(
# f"Cannot add document to MultiDocument, id {docid} already exists"
# )
# self.documents[docid] = doc
# if activate:
# self.activate(docid)
# return docid
#
# def to_dict(self, offset_type=None, **kwargs):
# # TODO: check what to do with the offset type parameter!
# # The basic strategy is that we simply create the dictionary for the active document plus
# # the entries for the documents map and the annotation mappings. That way, any reader of the
# # dict representation which just ignored unknown fields can still read this in as a normal
# # document from the active document.
# # The drawback is that the active document is represented twice, but OK
# thedict = {
# "annotation_sets": {
# name: aset.to_dict() for name, aset in self._annotation_sets.items()
# },
# "text": self._text,
# "features": self._features.to_dict(),
# "offset_type": self.offset_type,
# "name": self.name,
# }
# thedict["documents"] = {
# docid: doc.to_dict() for docid, doc in self.documents.items()
# }
# thedict["docid"] = self._docid
# thedict["mappings"] = self._mappings
# return thedict
#
# @staticmethod
# def from_dict(dictrepr, **kwargs):
# """
# Create a MultiDocument from the dictionary representation.
#
# Args:
# dictrepr: the dictionary representation
# **kwargs: additional kwargs to pass on
#
# Returns:
#
# """
# feats = dictrepr.get("features")
# docid = dictrepr.get("docid")
# doc = MultiDocument(dictrepr.get("text"), features=feats, docid=docid)
# doc.name = dictrepr.get("name")
# doc.offset_type = dictrepr.get("offset_type")
# if (
# doc.offset_type != OFFSET_TYPE_JAVA
# and doc.offset_type != OFFSET_TYPE_PYTHON
# ):
# raise Exception("Invalid offset type, cannot load: ", doc.offset_type)
# annsets = {
# name: AnnotationSet.from_dict(adict, owner_doc=doc)
# for name, adict in dictrepr.get("annotation_sets").items()
# }
# doc._annotation_sets = annsets
# doc.documents = {
# did: Document.from_dict(d)
# for did, d in dictrepr.get("documents", {}).items()
# }
# # TODO: get the mappings back!
# return doc
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,512
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/processing/gazetteer/base.py
|
"""
Base class for all gazetteer annotators
"""
from gatenlp.processing.annotator import Annotator
class GazetteerAnnotator(Annotator):
    """
    Abstract base class of all gazetteer annotators.

    Concrete gazetteers must override __call__.
    """

    def __call__(self, *args, **kwargs):
        """Always raise: this base class must not be used directly.

        Raises:
            NotImplementedError: always; subclasses must override this method.
        """
        # NotImplementedError is the idiomatic exception for abstract stubs and
        # subclasses RuntimeError, so existing callers catching RuntimeError
        # keep working.
        raise NotImplementedError("Not implemented in Gazetteer base class")
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,513
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/processing/gazetteer/__init__.py
|
"""
Module for various gazetteer annotator implementations.
"""
from gatenlp.processing.annotator import Annotator
from gatenlp.processing.gazetteer.tokengazetteer import TokenGazetteer
from gatenlp.processing.gazetteer.stringgazetteer import StringGazetteer
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,514
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/urlfileutils.py
|
"""
Module for functions that help reading binary and textual data from either URLs or local files.
"""
from io import TextIOWrapper
from pathlib import Path
from urllib.parse import ParseResult
from urllib.request import urlopen
import requests
def is_url(ext):
    """
    Decide whether ext denotes a (HTTP(S)) URL or a local file path.

    Returns (True, urlstring) if ext should be interpreted as a URL, otherwise
    (False, pathstring). If ext is None, returns (None, None).

    Args:
        ext: something that represents an external resource: string, url parse result,
            pathlib path object ...

    Returns:
        a tuple (True, urlstring) or (False, pathstring), or (None, None) for None input

    Raises:
        Exception: if ext is of an unsupported type
    """
    if ext is None:
        return None, None
    if isinstance(ext, Path):
        return False, str(ext)
    if isinstance(ext, ParseResult):
        return True, ext.geturl()
    if isinstance(ext, str):
        if ext.startswith(("http://", "https://")):
            return True, ext
        # for now, if we have ext starting with file:// we just remove that part and
        # assume the rest is supposed to be a proper file path
        if ext.startswith("file://"):
            ext = ext[7:]
        return False, ext
    raise Exception(f"Odd type: {ext}")
def get_str_from_url(url, encoding=None):  # pragma: no cover
    """Fetch the content of the URL and return it as a string.

    Args:
        url: some URL
        encoding: override the encoding that would have been determined automatically
            (Default value = None)

    Returns:
        the string content
    """
    response = requests.get(url)
    if encoding is not None:
        response.encoding = encoding
    return response.text
def get_bytes_from_url(url):  # pragma: no cover
    """
    Fetch the content of the URL and return it as bytes.

    Args:
        url: the URL

    Returns:
        the raw bytes
    """
    return requests.get(url).content
def yield_lines_from(url_or_file, encoding="utf-8"):  # pragma: no cover
    """
    Yield lines of text from either a file or a URL.

    Args:
        url_or_file: either a file path or URL. If this is a string, then it is
            interpreted as a URL only if it starts with http:// or https://; otherwise
            it can be a parsed urllib url or a pathlib path
        encoding: the encoding to use

    Yields:
        the decoded lines of the resource
    """
    isurl, location = is_url(url_or_file)
    if isurl is None:
        return
    if isurl:
        for raw in urlopen(location):
            yield raw.decode(encoding)
    else:
        with open(location, "rt", encoding=encoding) as infp:
            yield from infp
def stream_from(url_or_file, encoding="utf-8"):  # pragma: no cover
    """
    Return an open stream for either the URL or the file; if encoding is None, in binary
    mode, otherwise in text mode with the given encoding.

    Args:
        url_or_file: URL or file
        encoding: if None, open in binary mode, otherwise in text mode with this encoding

    Returns:
        an open stream, or None if we cannot determine whether it is a URL or file
    """
    isurl, location = is_url(url_or_file)
    if isurl is None:
        return
    if isurl:
        raw = urlopen(location)
        return raw if encoding is None else TextIOWrapper(raw, encoding=encoding)
    if encoding is None:
        return open(location, "rb")
    return open(location, "rt", encoding=encoding)
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,515
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/corpora/files.py
|
"""
Module that defines Corpus and DocumentSource/DocumentDestination classes which access documents
as lines or parts in a file.
"""
import json
from gatenlp.urlfileutils import yield_lines_from
from gatenlp.document import Document
from gatenlp.corpora.base import DocumentSource, DocumentDestination
from gatenlp.corpora.base import MultiProcessingAble
class BdocjsLinesFileSource(DocumentSource, MultiProcessingAble):
    """
    A document source which reads one bdoc json serialization of a document from each
    line of the given file.
    """

    def __init__(self, file):
        """
        Create a BdocjsLinesFileSource.

        Args:
            file: the file path (a string) or an open file handle.
        """
        self.file = file

    def __iter__(self):
        """Yield one Document per line of the file."""
        with open(self.file, "rt", encoding="utf-8") as reader:
            for serialized in reader:
                yield Document.load_mem(serialized, fmt="json")
class BdocjsLinesFileDestination(DocumentDestination):
    """
    Writes one line of bdoc JSON per document to a single output file.
    """

    def __init__(self, file):
        """
        Args:
            file: the file to write to. If it exists, it gets overwritten without warning.
                Expected to be a string or an open file handle.
        """
        self.fh = open(file, "wt", encoding="utf-8") if isinstance(file, str) else file
        self.n = 0

    def __enter__(self):
        return self

    def __exit__(self, extype, value, traceback):
        self.fh.close()

    def append(self, doc):
        """
        Append a document to the destination.

        Args:
            doc: the document; if None, no action is performed.
        """
        if doc is None:
            return
        assert isinstance(doc, Document)
        self.fh.write(doc.save_mem(fmt="json") + "\n")
        self.n += 1

    def close(self):
        """Close the underlying file handle."""
        self.fh.close()
class JsonLinesFileSource(DocumentSource, MultiProcessingAble):
    """
    A document source which reads one json serialization per line, creates a document from
    one field in the json and optionally stores all or a selection of remaining fields as
    a document feature (default name "__data").
    """

    def __init__(self, file, text_field="text", data_fields=None, data_feature="__data"):
        """
        Create a JsonLinesFileSource.

        Args:
            file: the file path (a string) or an open file handle.
            text_field: the field name where to get the document text from.
            data_fields: if a list of names, store those fields in the data feature;
                if True, store all fields.
            data_feature: the name of the data feature, default is "__data"
        """
        # feature_fields: NOT YET IMPLEMENTED -- a mapping from original json fields
        # to document features
        self.file = file
        self.text_field = text_field
        self.data_fields = data_fields
        self.data_feature = data_feature

    def __iter__(self):
        """Yield one Document per JSON line of the file."""
        with open(self.file, "rt", encoding="utf-8") as reader:
            for line in reader:
                record = json.loads(line)
                # TODO: what if the field does not exist? should we use get(text_field, "") instead?
                doc = Document(record[self.text_field])
                if self.data_fields:
                    if isinstance(self.data_fields, list):
                        # TODO: what if a field does not exist?
                        extracted = {fname: record[fname] for fname in self.data_fields}
                    else:
                        extracted = record
                    doc.features[self.data_feature] = extracted
                yield doc
class JsonLinesFileDestination(DocumentDestination):
    """
    Writes one line of JSON per document to a single output file. This will either write
    the document json as nested data or the document text to the field designated for the
    document, and will write other json fields from the document's data feature.
    """

    def __init__(self, file, document_field="text", document_bdocjs=False, data_fields=True, data_feature="__data"):
        """
        Args:
            file: the file to write to. If it exists, it gets overwritten without warning.
                Expected to be a string or an open file handle.
            document_field: the name of the json field that will contain the document,
                either just the text or the bdocjs representation if document_bdocjs is True.
            document_bdocjs: if True store the bdocjs serialization into the document_field
                instead of just the text
            data_fields: if a list, only store these fields in the json; if False, do not
                store any additional fields. Default is True: store all fields as is.
            data_feature: the name of the data feature, default is "__data"
        """
        self.fh = open(file, "wt", encoding="utf-8") if isinstance(file, str) else file
        self.n = 0
        self.document_field = document_field
        self.document_bdocjs = document_bdocjs
        self.data_fields = data_fields
        self.data_feature = data_feature

    def __enter__(self):
        return self

    def __exit__(self, _extype, _value, _traceback):
        self.fh.close()

    def append(self, doc):
        """
        Append a document to the destination.

        Args:
            doc: the document; if None, no action is performed.
        """
        if doc is None:
            return
        assert isinstance(doc, Document)
        record = {}
        if self.data_fields:
            feature_data = doc.features[self.data_feature]
            if isinstance(self.data_fields, list):
                for fieldname in self.data_fields:
                    record[fieldname] = feature_data[fieldname]
            else:
                record.update(feature_data)
        # assign the document field last so it overwrites anything that comes from the data feature!
        if self.document_bdocjs:
            record[self.document_field] = doc.save_mem(fmt="json")
        else:
            record[self.document_field] = doc.text
        self.fh.write(json.dumps(record) + "\n")
        self.n += 1

    def close(self):
        """Close the underlying file handle."""
        self.fh.close()
class TsvFileSource(DocumentSource, MultiProcessingAble):
    """
    A TsvFileSource is a DocumentSource which is a single TSV file with a fixed number of tab-separated
    values per row. Each document in sequence is created from the text in one of the columns and
    document features can be set from arbitrary columns as well.
    """

    def __init__(self, source, hdr=True, text_col=None, feature_cols=None, data_cols=None, data_feature="__data"):
        """
        Creates the TsvFileSource.

        Args:
            source: a file path or URL
            hdr: if True (default), expects a header line with the column names; if a list, it is used
                as the list of column names (no header line is expected in the file);
                if False/None, no header line is expected.
            text_col: the column which contains the text for creating the document. Either the column number,
                or the name of the column (only possible if there is a header line) or a function that should
                take the list of fields and arbitrary kwargs and return the text. Also passes "cols" and "n"
                as keyword arguments.
            feature_cols: if not None, must be either a dictionary mapping document feature names to the
                column numbers or column names of where to get the feature value from;
                or a function that should take the list of fields and arbitrary kwargs and return a dictionary
                with the features. Also passes "cols" (dict mapping column names to column indices, or None) and
                "n" (current line number) as keyword arguments.
            data_cols: if not None, either an iterable of the names of columns to store in the special document
                feature "__data" or if "True", stores all columns. At the moment this only works if the tsv file
                has a header line. The values are stored as a list in the order of the names given or the original
                order of the values in the TSV file.
            data_feature: the name of the document feature where to store the data, default is "__data"

        Raises:
            Exception: if text_col is not given, or data_cols is used without a header.
        """
        # explicit raise instead of assert: asserts are stripped under -O
        if text_col is None:
            raise Exception("text_col must be specified")
        self.hdr = hdr
        self.text_col = text_col
        self.feature_cols = feature_cols
        self.data_cols = data_cols
        self.source = source
        self.n = 0
        self.hdr2col = {}
        if data_cols and not hdr:
            raise Exception("Header must be present if data_cols should be used")
        self.data_feature = data_feature
        # True iff the file itself contains a header row which must be skipped on
        # EVERY pass over the file (not only the first); False when hdr is a
        # user-supplied list of column names or no header is expected at all.
        self._file_has_hdr = hdr is True

    def __iter__(self):
        reader = yield_lines_from(self.source)
        if self._file_has_hdr:
            # Consume the header row on every iteration. Previously the header was
            # only skipped on the very first pass, so re-iterating the source
            # yielded the header row as a document.
            try:
                hdrline = next(reader)
            except StopIteration:
                # empty file: nothing to yield (avoid PEP 479 RuntimeError)
                return
            if self.n == 0:
                self.n += 1
                self.hdr = hdrline.rstrip("\n\r").split("\t")
        if self.hdr:
            # Also covers a user-supplied list of column names, which was
            # previously overwritten by the first row read from the file.
            self.hdr2col = {name: idx for idx, name in enumerate(self.hdr)}
        for line in reader:
            line = line.rstrip("\n\r")
            fields = line.split("\t")
            if isinstance(self.text_col, int):
                text = fields[self.text_col]
            elif callable(self.text_col):
                text = self.text_col(fields, cols=self.hdr2col, n=self.n)
            else:
                text = fields[self.hdr2col[self.text_col]]
            doc = Document(text)
            if self.feature_cols:
                if callable(self.feature_cols):
                    doc.features.update(
                        self.feature_cols(fields, cols=self.hdr2col, n=self.n)
                    )
                else:
                    for fname, colid in self.feature_cols.items():
                        if isinstance(colid, int):
                            value = fields[colid]
                        else:
                            value = fields[self.hdr2col[colid]]
                        doc.features[fname] = value
            if self.data_cols:
                if isinstance(self.data_cols, list):
                    data = {}
                    for cname in self.data_cols:
                        if isinstance(cname, str):
                            data[cname] = fields[self.hdr2col[cname]]
                        else:
                            # assume it is the column index!
                            data[cname] = fields[cname]
                else:
                    data = fields
                doc.features[self.data_feature] = data
            self.n += 1
            yield doc
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,516
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/processing/__init__.py
|
"""
Package for annotators, and other things related to processing documents.
"""
from gatenlp.processing.gazetteer.tokengazetteer import TokenGazetteer
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,517
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/processing/matcher.py
|
"""
Module that defines classes for matchers other than gazetteers which match e.g. regular expressions
of strings or annotations.
"""
class StringRegexMatcher:
    """
    Placeholder for a matcher of string regular expressions.

    NOT YET IMPLEMENTED
    """
# class AnnotationRegexMatcher:
# """ """
# pass
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,518
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/tests/test_gateworker.py
|
"""
Module to test the GateWorker and GateWorkerAnnotator
"""
import os
from gatenlp import Document
from gatenlp.utils import init_logger
from gatenlp.gateworker import GateWorker
logger = init_logger("test_gateworker")

# These tests need a local GATE installation; when GATE_HOME is not set every
# test body returns early (see TestGateWorker below).
should_exit = not os.environ.get("GATE_HOME")
if should_exit:
    logger.warning("Environment variable GATE_HOME not set, skipping tests in TestGateWorker")
def make_doc1():
    """Build and return a small fixed document used by the tests."""
    return Document("This is just some test document. It mentions New York.")
class TestGateWorker:
    def test_gateworker01(self):
        """
        Unit test method (make linter happy)
        """
        # Skip silently when no GATE installation is configured.
        if should_exit:
            return
        text = "some text"
        with GateWorker() as worker:
            gate_doc = worker.createDocument(text)
            py_doc = worker.gdoc2pdoc(gate_doc)
            assert py_doc.text == text
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,519
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/processing/normalizer.py
|
"""
Module that provides classes for normalizers. Normalizers are annotators which change the text of selected
annotations or the entire text of the document.
Since the text of a document is immutable, such Normalizers return a new document with the modified text.
Since the text is modified any annotations present in the document may be invalid, therefore all annotations
are removed when the new document is returned. Document features are preserved. Any changelog is preserved but
the normalization is not logged.
"""
from gatenlp.processing.annotator import Annotator
class Normalizer(Annotator):
    """Common base class for all normalizers."""
class TextNormalizer(Normalizer):
    """
    Placeholder for a whole-document text normalizer.

    NOT YET IMPLEMENTED
    """
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,520
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/__init__.py
|
# NOTE: do not place a comment at the end of the version assignment
# line since we parse that line in a shell script!
# __version__ = "0.9.9"
from gatenlp.version import __version__
try:
import sortedcontainers
except Exception:
import sys
print(
"ERROR: required package sortedcontainers cannot be imported!", file=sys.stderr
)
print(
"Please install it, using e.g. 'pip install -U sortedcontainers'",
file=sys.stderr,
)
sys.exit(1)
# TODO: check version of sortedcontainers (we have 2.1.0)
from gatenlp.utils import init_logger
logger = init_logger("gatenlp")
from gatenlp.span import Span
from gatenlp.annotation import Annotation
from gatenlp.annotation_set import AnnotationSet
from gatenlp.changelog import ChangeLog
from gatenlp.document import Document
from gatenlp.gate_interaction import _pr_decorator as GateNlpPr
from gatenlp.gate_interaction import interact
# Importing GateWorker or other classes which depend on any package other than sortedcontains will
# break the Python plugin!
# from gatenlp.gateworker import GateWorker, GateWorkerAnnotator
def init_notebook():  # pragma: no cover
    """
    Prepare a Jupyter notebook for showing documents: inject the gatenlp HTML
    annotation viewer Javascript and record in the config that this was done.
    """
    # Imported lazily so plain (non-notebook) use of gatenlp does not pull in
    # the serialization/visualization machinery at package import time.
    from gatenlp.serialization.default import HtmlAnnViewerSerializer
    from gatenlp.gatenlpconfig import gatenlpconfig

    HtmlAnnViewerSerializer.init_javscript()
    gatenlpconfig.notebook_js_initialized = True
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,521
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/serialization/default.py
|
"""
Module that implements the various ways of how to save and load documents and change logs.
"""
import io
import os
import sys
import yaml
from collections import defaultdict
# import ruyaml as yaml
try:
from yaml import CFullLoader as Loader, CDumper as Dumper
except ImportError:
from yaml import FullLoader as Loader, Dumper
yaml_loader = yaml.Loader
yaml_dumper = yaml.Dumper
from random import choice
from string import ascii_uppercase
from msgpack import pack, Unpacker
from gatenlp.document import Document
from gatenlp.annotation_set import AnnotationSet
from gatenlp.annotation import Annotation
from gatenlp.changelog import ChangeLog
from gatenlp.features import Features
from gatenlp.utils import get_nested
from gatenlp.urlfileutils import is_url, get_str_from_url, get_bytes_from_url
from gzip import open as gopen, compress, decompress
from bs4 import BeautifulSoup
from gatenlp.gatenlpconfig import gatenlpconfig
import bs4
import warnings
import pickle
try:
from bs4 import GuessedAtParserWarning
warnings.filterwarnings("ignore", category=GuessedAtParserWarning)
except ImportError as ex:
pass
# import orjson as usejson
# import json as usejson
# import rapidjson as usejson
# import ujson as usejson
# import hyperjson as usejson
import json
JSON_WRITE = "wt"
JSON_READ = "rt"
# # for replacing json by orjson
# class json:
# @staticmethod
# def load(fp):
# data = fp.read()
# return usejson.loads(data)
# @staticmethod
# def loads(data):
# return usejson.loads(data)
# @staticmethod
# def dump(obj, fp):
# buf = usejson.dumps(obj)
# fp.write(buf)
# @staticmethod
# def dumps(obj):
# return usejson.dumps(obj)
# # for replacing json with one of the other implementations
# class json:
# @staticmethod
# def load(fp):
# return usejson.load(fp)
# @staticmethod
# def loads(data):
# return usejson.loads(data)
# @staticmethod
# def dump(obj, fp):
# buf = usejson.dump(obj, fp)
# @staticmethod
# def dumps(obj):
# return usejson.dumps(obj)
# TODO: for ALL save options, allow to filter the annotations that get saved!
# TODO: then use this show only limited set of annotations in the viewer
# TODO: create Document.display(....) to show document in various ways in the current
# environment, e.g. Jupyter notebook, select anns, configure colour palette, size etc.
# TODO: when loading from a URL, allow for deciding on the format based on the mime type!
# So if we do not have the format, we should get the header for the file, check the mime type and see
# if we have a loder registered for that and then let the loader do the rest of the work. This may
# need loaders to be able to use an already open stream.
# Default set of tweet fields to copy over when loading Twitter JSON.
# BUGFIX: a missing comma caused implicit string concatenation of
# "user.name" and "created_at" into the bogus field "user.namecreated_at",
# losing both intended fields.
TWITTER_DEFAULT_INCLUDE_FIELDS = [
    "id_str",
    "user.id_str",
    "user.screen_name",
    "user.name",
    "created_at",
    "is_quote_status",
    "quote_count",
    "retweet_count",
    "favourite_count",
    "favourited",
    "retweeted",
    "lang",
    "$is_retweet_status",
    "retweeted_status.user.screen_name",
]
class JsonSerializer:
    """
    This class performs the saving and loading of Documents and ChangeLog instances to and from the
    BDOC JSON format files, optionally with gzip compression.
    """

    @staticmethod
    def save(
        clazz,
        inst,
        to_ext=None,
        to_mem=None,
        offset_type=None,
        offset_mapper=None,
        gzip=False,
        annsets=None,
        **kwargs,
    ):
        """
        Save an instance to a file, or return its serialization.

        Args:
            clazz: the class of the object that gets saved (unused; kept for registry compatibility)
            inst: the object to get saved; must provide a ``to_dict`` method
            to_ext: where to save to, this should be a file path, only one of to_ext and to_mem should be specified
            to_mem: if True, return the serialization (a str, or bytes when gzip is True)
            offset_type: the offset type to use for saving, if None (default) use "p" (Python)
            offset_mapper: the offset mapper to use, only needed if the type needs to get converted
            gzip: if True, the JSON gets gzip compressed
            annsets: which annotation sets and types to include, list of set names or (setnames, types) tuples
            **kwargs: passed through to ``to_dict``

        Returns:
            the serialization if to_mem is True, otherwise None
        """
        d = inst.to_dict(offset_type=offset_type, offset_mapper=offset_mapper, annsets=annsets, **kwargs)
        if to_mem:
            if gzip:
                # BUGFIX: the compressed bytes were previously computed but never returned
                return compress(json.dumps(d).encode("UTF-8"))
            return json.dumps(d)
        if gzip:
            with gopen(to_ext, JSON_WRITE) as outfp:
                json.dump(d, outfp)
        else:
            with open(to_ext, JSON_WRITE) as outfp:
                json.dump(d, outfp)

    @staticmethod
    def save_gzip(clazz, inst, **kwargs):
        """
        Invokes the save method with gzip=True; returns the compressed bytes when to_mem is True.
        """
        return JsonSerializer.save(clazz, inst, gzip=True, **kwargs)

    @staticmethod
    def load(
        clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
    ):
        """
        Load an instance from a file, URL, or in-memory serialization.

        Args:
            clazz: the class to instantiate via its ``from_dict`` method
            from_ext: a file path or URL to load from
            from_mem: a str (or bytes when gzip is True) to load from
            offset_mapper: the offset mapper to use if offsets need to get converted
            gzip: if True, the data is gzip compressed
            **kwargs: passed through to ``from_dict``

        Returns:
            the loaded instance

        Raises:
            Exception: if not exactly one of from_ext and from_mem is specified
        """
        if from_ext is not None and from_mem is not None:
            raise Exception("Exactly one of from_ext and from_mem must be specified ")
        if from_ext is None and from_mem is None:
            raise Exception("Exactly one of from_ext and from_mem must be specified ")
        isurl, extstr = is_url(from_ext)
        if from_ext is not None and isurl:
            # fetch URL content into memory: bytes when compressed, str otherwise
            if gzip:
                from_mem = get_bytes_from_url(extstr)
            else:
                from_mem = get_str_from_url(extstr, encoding="utf-8")
        if from_mem is not None:
            if gzip:
                d = json.loads(decompress(from_mem).decode("UTF-8"))
            else:
                d = json.loads(from_mem)
        else:  # from_ext must have been not None and a local path
            if gzip:
                with gopen(extstr, JSON_READ) as infp:
                    d = json.load(infp)
            else:
                with open(extstr, JSON_READ) as infp:
                    d = json.load(infp)
        return clazz.from_dict(d, offset_mapper=offset_mapper, **kwargs)

    @staticmethod
    def load_gzip(clazz, **kwargs):
        """
        Invokes the load method with gzip=True.
        """
        return JsonSerializer.load(clazz, gzip=True, **kwargs)
class PickleSerializer:
    """
    This class performs the saving and load of Documents and ChangeLog instances to and from pickle format.
    """

    @staticmethod
    def save(
        clazz,
        inst,
        to_ext=None,
        to_mem=None,
        offset_type=None,
        offset_mapper=None,
        gzip=False,
        **kwargs,
    ):
        """
        Pickle an instance to a file, or return the pickled bytes.

        Args:
            clazz: the class of the object that gets saved (unused)
            inst: the object to get saved
            to_ext: the file path to save to, unless to_mem is given
            to_mem: if True, return the pickled bytes instead of writing a file
            offset_type: unused
            offset_mapper: unused
            gzip: must be False, gzip is not supported

        Returns:
            the pickled bytes when to_mem is True, otherwise None

        Raises:
            Exception: if gzip is requested
        """
        if gzip:
            raise Exception("Gzip not supported for pickle")
        if to_mem:
            return pickle.dumps(inst, protocol=-1)
        # protocol -1: always use the highest protocol available
        with open(to_ext, "wb") as outfp:
            pickle.dump(inst, outfp, protocol=-1)

    @staticmethod
    def load(
        clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
    ):
        """
        Unpickle an instance from a file, URL or bytes.

        Args:
            clazz: unused
            from_ext: a file path or URL to load from
            from_mem: pickled bytes to load from
            offset_mapper: unused
            gzip: must be False, gzip is not supported

        Returns:
            the unpickled object

        Raises:
            Exception: if not exactly one of from_ext and from_mem is specified
        """
        given_ext = from_ext is not None
        given_mem = from_mem is not None
        if given_ext == given_mem:
            raise Exception("Exactly one of from_ext and from_mem must be specified ")
        isurl, extstr = is_url(from_ext)
        if given_ext and isurl:
            from_mem = get_bytes_from_url(extstr)
        if from_mem is not None:
            return pickle.loads(from_mem)
        # from_ext must have been a local path
        with open(extstr, "rb") as infp:
            return pickle.load(infp)
class PlainTextSerializer:
    """
    Save/load only the plain text of a document; annotations and features are not preserved.
    """

    @staticmethod
    def save(
        clazz,
        inst,
        to_ext=None,
        to_mem=None,
        offset_type=None,
        offset_mapper=None,
        encoding="UTF-8",
        gzip=False,
        **kwargs,
    ):
        """
        Save the document text to a file, or return it.

        Args:
            clazz: the class of the object (unused; kept for registry compatibility)
            inst: the document whose text gets saved; a None text is saved as ""
            to_ext: the file path to save to, unless to_mem is given
            to_mem: if True, return the text (bytes when gzip is True)
            offset_type: unused
            offset_mapper: unused
            encoding: the text encoding to use (default "UTF-8")
            gzip: if True, compress with gzip

        Returns:
            the text (or compressed bytes) when to_mem is True, otherwise None
        """
        txt = inst.text
        if txt is None:
            txt = ""
        if to_mem:
            if gzip:
                # BUGFIX: the compressed bytes were previously computed but never returned
                return compress(txt.encode(encoding))
            return txt
        if gzip:
            with gopen(to_ext, "wt", encoding=encoding) as outfp:
                outfp.write(txt)
        else:
            with open(to_ext, "wt", encoding=encoding) as outfp:
                outfp.write(txt)

    @staticmethod
    def save_gzip(clazz, inst, **kwargs):
        """
        Invokes the save method with gzip=True; returns the compressed bytes when to_mem is True.
        """
        # BUGFIX: the return value of save was previously dropped
        return PlainTextSerializer.save(clazz, inst, gzip=True, **kwargs)

    @staticmethod
    def load(
        clazz,
        from_ext=None,
        from_mem=None,
        offset_mapper=None,
        encoding="UTF-8",
        gzip=False,
        **kwargs,
    ):
        """
        Create a Document from a plain-text file, URL or string.

        Args:
            clazz: unused
            from_ext: a file path or URL to load from
            from_mem: a str (or compressed bytes when gzip is True) to load from
            offset_mapper: unused
            encoding: the text encoding to use (default "UTF-8")
            gzip: if True, the data is gzip compressed

        Returns:
            a new Document containing the loaded text
        """
        isurl, extstr = is_url(from_ext)
        if from_ext is not None and isurl:
            if gzip:
                from_mem = get_bytes_from_url(extstr)
            else:
                from_mem = get_str_from_url(extstr, encoding=encoding)
        if from_mem is not None:
            if gzip:
                txt = decompress(from_mem).decode(encoding)
            else:
                txt = from_mem
        else:
            if gzip:
                with gopen(extstr, "rt", encoding=encoding) as infp:
                    txt = infp.read()
            else:
                with open(extstr, "rt", encoding=encoding) as infp:
                    txt = infp.read()
        return Document(txt)

    @staticmethod
    def load_gzip(clazz, **kwargs):
        """
        Invokes the load method with gzip=True.
        """
        return PlainTextSerializer.load(clazz, gzip=True, **kwargs)
class YamlSerializer:
    """
    Saving and loading of Documents and ChangeLog instances to and from YAML, optionally gzip compressed.
    """

    @staticmethod
    def save(
        clazz,
        inst,
        to_ext=None,
        to_mem=None,
        offset_type=None,
        offset_mapper=None,
        gzip=False,
        annsets=None,
        **kwargs,
    ):
        """
        Save an instance to a file, or return its YAML serialization.

        Args:
            clazz: the class of the object that gets saved (unused; kept for registry compatibility)
            inst: the object to get saved; must provide a ``to_dict`` method
            to_ext: the file path to save to, unless to_mem is given
            to_mem: if True, return the serialization (a str, or bytes when gzip is True)
            offset_type: the offset type to use for saving
            offset_mapper: the offset mapper to use, only needed if the type needs to get converted
            gzip: if True, the YAML gets gzip compressed
            annsets: which annotation sets and types to include, list of set names or (setnames, types) tuples
            **kwargs: passed through to ``to_dict``

        Returns:
            the serialization when to_mem is True, otherwise None
        """
        d = inst.to_dict(offset_type=offset_type, offset_mapper=offset_mapper, annsets=annsets, **kwargs)
        if to_mem:
            if gzip:
                # BUGFIX: the compressed bytes were previously computed but never returned
                return compress(yaml.dump(d, Dumper=yaml_dumper).encode("UTF-8"))
            return yaml.dump(d, Dumper=yaml_dumper)
        if gzip:
            with gopen(to_ext, "wt") as outfp:
                yaml.dump(d, outfp, Dumper=yaml_dumper)
        else:
            with open(to_ext, "wt") as outfp:
                yaml.dump(d, outfp, Dumper=yaml_dumper)

    @staticmethod
    def save_gzip(clazz, inst, **kwargs):
        """
        Invokes the save method with gzip=True; returns the compressed bytes when to_mem is True.
        """
        # BUGFIX: the return value of save was previously dropped
        return YamlSerializer.save(clazz, inst, gzip=True, **kwargs)

    @staticmethod
    def load(
        clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
    ):
        """
        Load an instance from a YAML file, URL or string/bytes.

        Args:
            clazz: the class to instantiate via its ``from_dict`` method
            from_ext: a file path or URL to load from
            from_mem: a str (or bytes when gzip is True) to load from
            offset_mapper: the offset mapper to use if offsets need to get converted
            gzip: if True, the data is gzip compressed
            **kwargs: passed through to ``from_dict``

        Returns:
            the loaded instance
        """
        isurl, extstr = is_url(from_ext)
        if from_ext is not None and isurl:
            if gzip:
                from_mem = get_bytes_from_url(extstr)
            else:
                from_mem = get_str_from_url(extstr, encoding="utf-8")
        if from_mem is not None:
            if gzip:
                d = yaml.load(decompress(from_mem).decode("UTF-8"), Loader=yaml_loader)
            else:
                d = yaml.load(from_mem, Loader=yaml_loader)
        else:
            if gzip:
                with gopen(extstr, "rt") as infp:
                    d = yaml.load(infp, Loader=yaml_loader)
            else:
                with open(extstr, "rt") as infp:
                    d = yaml.load(infp, Loader=yaml_loader)
        return clazz.from_dict(d, offset_mapper=offset_mapper, **kwargs)

    @staticmethod
    def load_gzip(clazz, **kwargs):
        """
        Invokes the load method with gzip=True.
        """
        return YamlSerializer.load(clazz, gzip=True, **kwargs)
MSGPACK_VERSION_HDR = "sm2"
class MsgPackSerializer:
    """
    Saving and loading of Documents to and from the msgpack binary format.

    NOTE(review): the stream layout (version header, offset type, text, name,
    features, then per-set data) is a fixed wire format -- the pack/unpack
    order in the two methods below must stay exactly in sync.
    """

    @staticmethod
    def document2stream(doc: Document, stream):
        """
        Serialize a single Document to an open binary stream.

        Args:
            doc: the Document to write
            stream: a writable binary stream
        """
        pack(MSGPACK_VERSION_HDR, stream)
        pack(doc.offset_type, stream)
        pack(doc.text, stream)
        pack(doc.name, stream)
        pack(doc._features.to_dict(), stream)
        # number of annotation sets, then per set: name, next free annotation id,
        # number of annotations, then per annotation: type, start, end, id, features
        pack(len(doc._annotation_sets), stream)
        for name, annset in doc._annotation_sets.items():
            pack(name, stream)
            pack(annset._next_annid, stream)
            pack(len(annset), stream)
            for ann in annset.fast_iter():
                pack(ann.type, stream)
                pack(ann.start, stream)
                pack(ann.end, stream)
                pack(ann.id, stream)
                pack(ann.features.to_dict(), stream)

    @staticmethod
    def stream2document(stream):
        """
        Read one Document from a binary stream written by document2stream.

        Args:
            stream: a readable binary stream

        Returns:
            the deserialized Document

        Raises:
            Exception: if the stream does not start with the expected version header
        """
        u = Unpacker(stream)
        version = u.unpack()
        if version != MSGPACK_VERSION_HDR:
            raise Exception("MsgPack data starts with wrong version")
        doc = Document()
        doc.offset_type = u.unpack()
        # private attributes are assigned directly, presumably to bypass any
        # conversion/validation of the public setters -- TODO confirm
        doc._text = u.unpack()
        doc.name = u.unpack()
        doc._features = Features(u.unpack())
        nsets = u.unpack()
        setsdict = dict()
        # NOTE(review): assigning the public attribute here AND the private
        # _annotation_sets at the end looks redundant -- verify which one
        # Document actually uses
        doc.annotation_sets = setsdict
        for iset in range(nsets):
            sname = u.unpack()
            if sname is None:
                # msgpack may deliver the default set name as None; normalize to ""
                sname = ""
            annset = AnnotationSet(name=sname, owner_doc=doc)
            annset._next_annid = u.unpack()
            nanns = u.unpack()
            for iann in range(nanns):
                atype = u.unpack()
                astart = u.unpack()
                aend = u.unpack()
                aid = u.unpack()
                afeatures = u.unpack()
                ann = Annotation(astart, aend, atype, annid=aid, features=afeatures)
                # inserted directly to preserve the stored annotation ids
                annset._annotations[aid] = ann
            setsdict[sname] = annset
        doc._annotation_sets = setsdict
        return doc

    @staticmethod
    def save(
        clazz,
        inst,
        to_ext=None,
        to_mem=None,
        offset_type=None,
        offset_mapper=None,
        **kwargs,
    ):
        """
        Save a Document to a file or return the msgpack bytes.

        Args:
            clazz: the class of the object (unused)
            inst: the Document to save; ChangeLog is not implemented yet
            to_ext: the file path to save to, unless to_mem is given
            to_mem: if True, return the msgpack bytes
            offset_type: unused here -- TODO confirm offsets are not converted
            offset_mapper: unused here
            **kwargs: ignored

        Returns:
            the msgpack bytes when to_mem is True, otherwise None

        Raises:
            Exception: for ChangeLog (not implemented) or unsupported objects
        """
        if isinstance(inst, Document):
            writer = MsgPackSerializer.document2stream
        elif isinstance(inst, ChangeLog):
            raise Exception("Not implemented yet")
        else:
            raise Exception("Object not supported")
        if to_mem:
            f = io.BytesIO()
        else:
            f = open(to_ext, "wb")
        writer(inst, f)
        if to_mem:
            return f.getvalue()
        else:
            f.close()

    @staticmethod
    def load(clazz, from_ext=None, from_mem=None, offset_mapper=None, **kwargs):
        """
        Load a Document from a msgpack file, URL or bytes.

        Args:
            clazz: the class to load; must be Document (ChangeLog not implemented)
            from_ext: a file path or URL to load from
            from_mem: msgpack bytes to load from
            offset_mapper: unused here
            **kwargs: ignored

        Returns:
            the loaded Document

        Raises:
            Exception: for ChangeLog (not implemented) or unsupported classes
        """
        if clazz == Document:
            reader = MsgPackSerializer.stream2document
        elif clazz == ChangeLog:
            raise Exception("Not implemented yet")
        else:
            raise Exception("Object not supported")
        isurl, extstr = is_url(from_ext)
        if from_ext is not None:
            if isurl:
                from_mem = get_bytes_from_url(extstr)
        if from_mem:
            f = io.BytesIO(from_mem)
        else:
            # NOTE(review): this file handle is never explicitly closed
            f = open(extstr, "rb")
        doc = reader(f)
        return doc
JS_JQUERY_URL = "https://ajax.googleapis.com/ajax/libs/jquery/3.5.1/jquery.min.js"
JS_GATENLP_URL = "https://unpkg.com/gatenlp-ann-viewer@1.0.14/gatenlp-ann-viewer.js"
JS_JQUERY = f"<script src=\"{JS_JQUERY_URL}\"></script>"
JS_GATENLP = f"<script src=\"{JS_GATENLP_URL}\"></script>"
HTML_TEMPLATE_FILE_NAME = "gatenlp-ann-viewer.html"
JS_GATENLP_FILE_NAME = "gatenlp-ann-viewer-merged.js"
html_ann_viewer_serializer_js_loaded = False
class HtmlAnnViewerSerializer:
    """Render a Document as interactive HTML using the gatenlp annotation viewer."""

    @staticmethod
    def javascript():
        """
        Return the Javascript needed for the HTML Annotation viewer.

        Returns: Javascript string.
        """
        jsloc = os.path.join(
            os.path.dirname(__file__), "_htmlviewer", JS_GATENLP_FILE_NAME
        )
        if not os.path.exists(jsloc):
            raise Exception(
                "Could not find JavsScript file, {} does not exist".format(jsloc)
            )
        with open(jsloc, "rt", encoding="utf-8") as infp:
            js = infp.read()
        # wrap the library in a script tag so it can be injected verbatim
        js = """<script type="text/javascript">""" + js + "</script>"
        return js

    @staticmethod
    def init_javscript():
        # Inject the viewer Javascript into the current notebook output.
        # Imported locally: IPython is only needed in a notebook environment.
        import IPython

        IPython.display.display_html(HtmlAnnViewerSerializer.javascript(), raw=True)

    @staticmethod
    def save(
        clazz,
        inst,
        to_ext=None,
        to_mem=None,
        notebook=False,
        offline=False,
        add_js=True,
        htmlid=None,
        stretch_height=False,
        annsets=None,
        doc_style=None,
        **kwargs,
    ):
        """Convert a document to HTML for visualizing it.

        Args:
            clazz: the class of the object to save
            inst: the instance/object to save; must be a Document
            to_ext: the destination where to save to unless to_mem is given
            to_mem: if true, ignores to_ext and returns the representation
            notebook: if True only create a div which can be injected into a notebook or other HTML, otherwise
                generate a full HTML document
            offline: if true, include all the Javascript needed in the generated HTML, otherwise load library
                from the internet.
            add_js: if true (default), add the necessary Javascript either directly or by loading a library from
                the internet. If false, assume that the Javascript is already there (only makes sense with
                notebook=True).
            htmlid: the id to use for HTML ids so it is possible to have several independent viewers in the
                same HTML page and to style the output from a separate notebook cell
            stretch_height: if False, rows 1 and 2 of the viewer will not have the height set, but only
                min and max height; if True, the height is set to a percentage instead. The values used
                are taken from gatenlpconfig.
            annsets: if None, include all annotation sets and types, otherwise this should be a list of either
                set names, or tuples, where the first entry is a set name and the second entry is either a type
                name or list of type names to include.
            doc_style: if not None, any additional styling for the document text box, if None, use whatever
                is defined in gatenlpconfig or do not use.
            kwargs: swallow any other kwargs.

        Returns: if to_mem is True, returns the representation, otherwise None.
        """
        if not isinstance(inst, Document):
            raise Exception("Not a document!")
        # TODO: why are we doing a deepcopy here?
        doccopy = inst.deepcopy(annsets=annsets)
        # the viewer Javascript expects Java ("j") offsets
        doccopy.to_offset_type("j")
        json = doccopy.save_mem(fmt="json", **kwargs)  # NOTE: shadows the module-level json import locally
        htmlloc = os.path.join(
            os.path.dirname(__file__), "_htmlviewer", HTML_TEMPLATE_FILE_NAME
        )
        if not os.path.exists(htmlloc):
            raise Exception(
                "Could not find HTML template, {} does not exist".format(htmlloc)
            )
        with open(htmlloc, "rt", encoding="utf-8") as infp:
            html = infp.read()
        txtcolor = gatenlpconfig.doc_html_repr_txtcolor
        if notebook:
            # cut the embeddable div out of the full-page template
            str_start = "<!--STARTDIV-->"
            str_end = "<!--ENDDIV-->"
            idx1 = html.find(str_start) + len(str_start)
            idx2 = html.find(str_end)
            if htmlid:
                rndpref = str(htmlid)
            else:
                # random id prefix so several viewers can coexist on one page
                rndpref = "".join(choice(ascii_uppercase) for i in range(10))
            html = html[idx1:idx2]
            html = f"""<div><style>#{rndpref}-wrapper {{ color: {txtcolor} !important; }}</style>
<div id="{rndpref}-wrapper">
{html}
</div></div>"""
            # replace the prefix with a random one
            html = html.replace("GATENLPID", rndpref)
        if offline:
            # global html_ann_viewer_serializer_js_loaded
            # if not html_ann_viewer_serializer_js_loaded:
            if add_js:
                jsloc = os.path.join(
                    os.path.dirname(__file__), "_htmlviewer", JS_GATENLP_FILE_NAME
                )
                if not os.path.exists(jsloc):
                    raise Exception(
                        "Could not find JavsScript file, {} does not exist".format(
                            jsloc
                        )
                    )
                with open(jsloc, "rt", encoding="utf-8") as infp:
                    js = infp.read()
                js = """<script type="text/javascript">""" + js + "</script>"
                # html_ann_viewer_serializer_js_loaded = True
            else:
                js = ""
        else:
            # online mode: reference jquery and the viewer library by URL
            js = JS_JQUERY + JS_GATENLP
        if stretch_height:
            height1 = gatenlpconfig.doc_html_repr_height1_stretch
            height2 = gatenlpconfig.doc_html_repr_height2_stretch
        else:
            height1 = gatenlpconfig.doc_html_repr_height1_nostretch
            height2 = gatenlpconfig.doc_html_repr_height2_nostretch
        # fill in the template placeholders (each occurs once)
        html = html.replace("$$JAVASCRIPT$$", js, 1).replace("$$JSONDATA$$", json, 1)
        html = html.replace("$$HEIGHT1$$", height1, 1).replace(
            "$$HEIGHT2$$", height2, 1
        )
        if doc_style is None:
            doc_style = gatenlpconfig.doc_html_repr_doc_style
        if doc_style is None:
            doc_style = ""
        html = html.replace("$$DOCTEXTSTYLE$$", doc_style, 1)
        if to_mem:
            return html
        else:
            with open(to_ext, "wt", encoding="utf-8") as outfp:
                outfp.write(html)
class HtmlLoader:
""" """
@staticmethod
def load_rendered(
    clazz,
    from_ext=None,
    from_mem=None,
    parser=None,
    markup_set_name="Original markups",
    process_soup=None,
    offset_mapper=None,
    **kwargs,
):
    """
    Load HTML as it would be rendered by a browser.

    Args:
        clazz: the class to create (unused)
        from_ext: file or URL source (unused)
        from_mem: string source (unused)
        parser: the HTML parser to use (unused)
        markup_set_name: annotation set for the HTML markup (unused)
        process_soup: optional soup post-processing hook (unused)
        offset_mapper: offset mapper to use (unused)

    Raises:
        Exception: always -- this feature is not implemented.
    """
    raise Exception("Rendered html parser not yet implemented")
@staticmethod
def load(
clazz,
from_ext=None,
from_mem=None,
parser="html.parser",
markup_set_name="Original markups",
encoding=None,
**kwargs,
):
"""Load a HTML file.
Args:
clazz: param from_ext:
from_ext: file our URL source
from_mem: string source
parser: one of "html.parser", "lxml", "lxml-xml", "html5lib" (default is "html.parser")
markup_set_name: the annotation set name for the set to contain the HTML
annotations (Default value = "Original markups")
encoding: the encoding to use for reading the file
"""
# NOTE: for now we have a simple heuristic for adding newlines to the text:
# before and after a block element, a newline is added unless there is already one
# NOTE: for now we use multi_valued_attributes=None which prevents attributes of the
# form "class='val1 val2'" to get converted into features with a list of values.
isurl, extstr = is_url(from_ext)
if from_ext is not None:
if isurl:
from_mem = get_str_from_url(extstr, encoding=encoding)
if from_mem:
bs = BeautifulSoup(from_mem, features=parser, multi_valued_attributes=None)
else:
with open(extstr, encoding=encoding) as infp:
bs = BeautifulSoup(infp, features=parser, multi_valued_attributes=None)
# we recursively iterate the tree depth first, going through the children
# and adding to a list that either contains the text or a dict with the information
# about annotations we want to add
nlels = {
"pre",
"br",
"p",
"div",
"tr",
"h1",
"h2",
"h3",
"h4",
"h5",
"h6",
"li",
"address",
"article",
"aside",
"blockquote",
"del",
"figure",
"figcaption",
"footer",
"header",
"hr",
"ins",
"main",
"nav",
"section",
"summary",
"input",
"legend",
"option",
"textarea",
"bdi",
"bdo",
"center",
"code",
"dfn",
"menu",
"dir",
"caption",
}
ignoreels = {"script", "style"}
docinfo = {"anninfos": [], "curoffset": 0, "curid": 0, "text": ""}
def walktree(el):
"""
Args:
el:
Returns:
"""
# print("DEBUG: type=", type(el))
if isinstance(el, bs4.element.Doctype):
# print("DEBUG: got doctype", type(el))
pass
elif isinstance(el, bs4.element.Comment):
# print("DEBUG: got Comment", type(el))
pass
elif isinstance(el, bs4.element.Script):
# print("DEBUG: got Script", type(el))
pass
elif isinstance(el, bs4.element.Tag):
# print("DEBUG: got tag: ", type(el), " name=",el.name)
# some tags we ignore completely:
if el.name in ignoreels:
return
# for some tags we insert a new line before, but only if we do not already have one
if not docinfo["text"].endswith("\n") and el.name in nlels:
docinfo["text"] += "\n"
# print("DEBUG: adding newline before at ", docinfo["curoffset"])
docinfo["curoffset"] += 1
ann = {
"type": el.name,
"features": el.attrs,
"id": docinfo["curid"],
"event": "start",
"start": docinfo["curoffset"],
}
thisid = docinfo["curid"]
docinfo["anninfos"].append(ann)
docinfo["curid"] += 1
for child in el.children:
walktree(child)
# for some tags we insert a new line after
if not docinfo["text"].endswith("\n") and el.name in nlels:
docinfo["text"] += "\n"
# print("DEBUG: adding newline after at ", docinfo["curoffset"])
docinfo["curoffset"] += 1
docinfo["anninfos"].append(
{"event": "end", "id": thisid, "end": docinfo["curoffset"]}
)
elif isinstance(el, bs4.element.NavigableString):
# print("DEBUG: got text: ", el)
text = str(el)
if text == "\n" and docinfo["text"].endswith("\n"):
return
docinfo["text"] += text
docinfo["curoffset"] += len(el)
else:
print("WARNING: odd element type", type(el))
walktree(bs)
# need to add the end corresponding to bs
# print("DEBUG: got docinfo:\n",docinfo)
id2anninfo = {} # from id to anninfo
nstart = 0
for anninfo in docinfo["anninfos"]:
if anninfo["event"] == "start":
nstart += 1
id2anninfo[anninfo["id"]] = anninfo
nend = 0
for anninfo in docinfo["anninfos"]:
if anninfo["event"] == "end":
nend += 1
end = anninfo["end"]
annid = anninfo["id"]
anninfo = id2anninfo[annid]
anninfo["end"] = end
# print("DEBUG: got nstart/nend", nstart, nend)
assert nstart == nend
# print("DEBUG: got id2anninfo:\n", id2anninfo)
doc = Document(docinfo["text"])
annset = doc.annset(markup_set_name)
for i in range(nstart):
anninfo = id2anninfo[i]
annset.add(
anninfo["start"],
anninfo["end"],
anntype=anninfo["type"],
features=anninfo["features"],
)
return doc
class TweetV1Serializer:
    """Serialize/deserialize a document to/from the Twitter API v1 JSON format."""
    @staticmethod
    def doc2twitterv1dict(doc, annsets=None, prefix_sep=None):
        """Convert a document to a dict resembling the Twitter JSON v1 format.

        Args:
            doc: the document to convert
            annsets: which annotation sets and types to include (same format as Document.to_dict)
            prefix_sep: if not None, prefix each entity type with the name of the annotation set
                it comes from, joined by this separator; types from the default set stay unchanged

        Returns:
            a dict with the document text under "full_text" and per-type entity lists under "entities"
        """
        d = doc.to_dict(annsets=annsets)
        ret = {"full_text": doc.text}
        ents = defaultdict(list)
        for setname, annset in d.get("annotation_sets", {}).items():
            for ann in annset.get("annotations", []):
                anntype = ann["type"]
                if prefix_sep is not None and setname != "":
                    anntype = setname + prefix_sep + anntype
                annlist = ents[anntype]
                # Twitter entities carry the offsets as an "indices" pair; annotation
                # features become additional entity fields
                twitterann = {
                    "indices": [ann["start"], ann["end"]]
                }
                twitterann.update(ann["features"])
                annlist.append(twitterann)
        ret["entities"] = ents
        return ret
    @staticmethod
    def save(
        clazz,
        inst,
        to_ext=None,
        to_mem=None,
        annsets=None,
        prefix_sep=None,
        **kwargs,
    ):
        """Save the document in Twitter JSON v1 format.

        Args:
            clazz: the class of the object that gets saved
            inst: the object to get saved
            to_ext: where to save to, this should be a file path, only one of to_ext and to_mem should be specified
            to_mem: if True, return a String serialization
            annsets: which annotation sets and types to include, list of set names or (setnames, types) tuples
            prefix_sep: if not None, prefix all types with the name of the annotation set the annotation comes from
                and use the given string as the separator (can be the empty string for no separator).
                For annotations from the default set the type stays unchanged.
            **kwargs: ignored
        """
        d = TweetV1Serializer.doc2twitterv1dict(inst, annsets=annsets, prefix_sep=prefix_sep)
        if to_mem:
            return json.dumps(d)
        else:
            with open(to_ext, JSON_WRITE) as outfp:
                json.dump(d, outfp)
    @staticmethod
    def load(
        clazz,
        from_ext=None,
        from_mem=None,
        include_fields=None,
        include_entities=True,
        include_quote=False,
        outsetname="Original markups",
        tweet_ann="Tweet",
    ):
        """
        Load a tweet from Twitter JSON format.

        IMPORTANT: this is still very experimental, will change in the future!

        Args:
            clazz: internal use
            from_ext: the file/url to load from
            from_mem: string to load from
            include_fields: a list of fields to include where nested field names are dot-separated, e.g.
                "user.location". All these fields are included using the nested field name in either the
                features of the tweet annotation with the Type specified, or the features of the document
                if `tweet_ann` is None.
            include_entities: create annotations for the tweet entities in the set with outsetname
            include_quote: if True, add the quoted tweet after an empty line and treat it as a separate
                tweet just like the original tweet.
            outsetname: the annotation set where to put entity annotations and the tweet annotation(s)
            tweet_ann: the annotation type to use to span the tweet and contain all the features.

        Returns:
            document representing the tweet
        """
        if from_ext is not None:
            isurl, extstr = is_url(from_ext)
            if isurl:
                jsonstr = get_str_from_url(extstr, encoding="utf-8")
                tweet = json.loads(jsonstr)
            else:
                with open(extstr, "rt", encoding="utf-8") as infp:
                    tweet = json.load(infp)
        elif from_mem is not None:
            tweet = json.loads(from_mem)
        else:
            raise Exception("Cannot load from None")
        if tweet is None:
            raise Exception("Could not decode Tweet JSON")
        # truncated tweets carry the full text under extended_tweet.full_text
        if tweet.get("truncated"):
            text = get_nested(tweet, "extended_tweet.full_text")
        else:
            text = get_nested(tweet, "text")
        if text is None:
            raise Exception("No text field found")
        quoted_status = None
        if include_quote:
            quoted_status = tweet.get("quoted_status")
            if quoted_status is not None:
                qtext = quoted_status.get("text", "")
                text += "\n" + qtext
        doc = Document(text)
        anns = doc.annset(outsetname)
        # features either go on a document-spanning tweet annotation or on the document itself
        if tweet_ann:
            ann = anns.add(0, len(text), tweet_ann)
            features = ann.features
        else:
            features = doc.features
        if include_fields is None:
            include_fields = TWITTER_DEFAULT_INCLUDE_FIELDS
        for field in include_fields:
            # "$"-prefixed names are computed pseudo-fields, not paths into the JSON
            if field.startswith("$"):
                if field == "$is_retweet_status":
                    rs = get_nested(tweet, "retweeted_status", silent=True)
                    if rs is not None:
                        features[field] = True
                continue
            val = get_nested(tweet, field, silent=True)
            if val is not None:
                features[field] = val
        if include_entities:
            if tweet.get("truncated"):
                entities = get_nested(tweet, "extended_tweet.entities", default={})
            else:
                entities = get_nested(tweet, "entities", default={})
        for etype, elist in entities.items():
            for ent in elist:
                start, end = ent["indices"]
                anns.add(start, end, etype)
        # TODO: if we have a quoted_status, add features and entities from there:
        # Essentially the same processing as for the original tweet, but at document offset
        # len(tweet)+1 (2?)
        return doc
class GateXmlLoader:
    """Loader for documents in the GATE XML (GateDocument version 3) format."""

    @staticmethod
    def value4objectwrapper(text):
        """This may one day convert things like lists, maps, shared objects to Python, but for
        now we always throw an exception.

        Args:
            text: the serialized value text

        Raises:
            Exception: always, ObjectWrapper values are not supported yet
        """
        raise Exception(
            "Cannot load GATE XML which contains gate.corpora.ObjectWrapper data"
        )

    @staticmethod
    def load(clazz, from_ext=None, ignore_unknown_types=False):
        """Load a document from a GATE XML file or URL.

        Args:
            clazz: the class to create (Document)
            from_ext: the file path or URL to load from (Default value = None)
            ignore_unknown_types: if True, features with an unsupported Java
                serialization type are skipped with a warning instead of raising
                (Default value = False)

        Returns:
            the loaded Document

        Raises:
            Exception: if the XML is not a version 3 GateDocument or an
                unsupported feature type is encountered
        """
        # TODO: the code below is just an outline and needs work!
        # TODO: make use of the test document created in repo project-python-gatenlp
        import xml.etree.ElementTree as ET

        isurl, extstr = is_url(from_ext)
        if isurl:
            xmlstring = get_str_from_url(extstr, encoding="utf-8")
            root = ET.fromstring(xmlstring)
        else:
            tree = ET.parse(extstr)
            root = tree.getroot()
        # check we do have a version 3 GATE document; use explicit raises instead of
        # assert so the validation is not stripped when Python runs with -O
        if root.tag != "GateDocument":
            raise Exception("Not a GATE XML document, root element is: " + root.tag)
        if root.attrib != {"version": "3"}:
            raise Exception(
                "Unsupported GATE XML document version: " + str(root.attrib)
            )

        def parsefeatures(feats):
            """Convert a list of <Feature> elements into a plain features dict.

            Args:
                feats: iterable of Feature XML elements

            Returns:
                dict mapping feature names to converted Python values
            """
            features = {}
            for feat in list(feats):
                name = None
                value = None
                for el in list(feat):
                    if el.tag == "Name":
                        if el.get("className") == "java.lang.String":
                            name = el.text
                        else:
                            raise Exception(
                                "Odd Feature Name type: " + el.get("className")
                            )
                    elif el.tag == "Value":
                        # map the Java serialization type to a Python type
                        cls_name = el.get("className")
                        if cls_name == "java.lang.String":
                            value = el.text
                        elif cls_name == "java.lang.Integer":
                            value = int(el.text)
                        elif cls_name == "java.lang.Long":
                            value = int(el.text)
                        elif cls_name == "java.math.BigDecimal":
                            value = float(el.text)
                        elif cls_name == "java.lang.Boolean":
                            value = bool(el.text)
                        # elif cls_name == "gate.corpora.ObjectWrapper":
                        #     value = GateXmlLoader.value4objectwrapper(el.text)
                        else:
                            if ignore_unknown_types:
                                print(
                                    f"Warning: ignoring feature with serialization type: {cls_name}",
                                    file=sys.stderr,
                                )
                            else:
                                raise Exception(
                                    "Unsupported serialization type: "
                                    + el.get("className")
                                )
                if name is not None and value is not None:
                    features[name] = value
            return features

        # get the document features
        feats = root.findall("./GateDocumentFeatures/Feature")
        docfeatures = parsefeatures(feats)
        # reconstruct the text and remember the offset of every <Node> marker,
        # since annotations reference node ids rather than offsets
        textwithnodes = root.findall("./TextWithNodes")
        text = ""
        node2offset = {}
        curoff = 0
        for item in textwithnodes:
            if item.text:
                # TODO HTML unescape item text
                text += item.text
                curoff += len(item.text)
            for node in item:
                nodeid = node.get("id")
                node2offset[nodeid] = curoff
                if node.tail:
                    # TODO: unescape node.tail?
                    text += node.tail
                    curoff += len(node.tail)
        annsets = root.findall("./AnnotationSet")
        annotation_sets = {}  # map name - set
        for annset in annsets:
            if annset.get("Name"):
                setname = annset.get("Name")
            else:
                setname = ""
            annots = annset.findall("./Annotation")
            annotations = []
            maxannid = 0
            for ann in annots:
                annid = int(ann.attrib["Id"])
                maxannid = max(maxannid, annid)
                anntype = ann.attrib["Type"]
                startnode = ann.attrib["StartNode"]
                endnode = ann.attrib["EndNode"]
                startoff = node2offset[startnode]
                endoff = node2offset[endnode]
                feats = ann.findall("./Feature")
                features = parsefeatures(feats)
                if len(features) == 0:
                    features = None
                annotation = {
                    "id": annid,
                    "type": anntype,
                    "start": startoff,
                    "end": endoff,
                    "features": features,
                }
                annotations.append(annotation)
            annset = {
                "name": setname,
                "annotations": annotations,
                "next_annid": maxannid + 1,
            }
            annotation_sets[setname] = annset
        docmap = {
            "text": text,
            "features": docfeatures,
            "offset_type": "p",
            "annotation_sets": annotation_sets,
        }
        doc = Document.from_dict(docmap)
        return doc
def determine_loader(
    clazz, from_ext=None, from_mem=None, offset_mapper=None, gzip=False, **kwargs
):
    """Sniff whether the source is JSON or msgpack and delegate to the right loader.

    A bdoc file may contain either JSON or msgpack data; JSON content always
    starts with "{", so the first character decides which serializer to use.

    Args:
        clazz: the class to create (passed through to the serializer)
        from_ext: file path to load from (Default value = None)
        from_mem: in-memory string to load from (Default value = None)
        offset_mapper: passed through to the serializer (Default value = None)
        gzip: passed through to the serializer (Default value = False)
        **kwargs: additional arguments passed through to the serializer

    Returns:
        whatever the chosen serializer's load method returns
    """
    if from_mem:
        first_char = from_mem[0]
    else:
        with open(from_ext, "rt") as infp:
            first_char = infp.read(1)
    serializer = JsonSerializer if first_char == "{" else MsgPackSerializer
    return serializer.load(
        clazz,
        from_ext=from_ext,
        from_mem=from_mem,
        offset_mapper=offset_mapper,
        gzip=gzip,
        **kwargs,
    )
# Registry mapping format names / mime-type-like identifiers to document saver callables.
DOCUMENT_SAVERS = {
    "text/plain": PlainTextSerializer.save,
    "text/plain+gzip": PlainTextSerializer.save_gzip,
    "text": PlainTextSerializer.save,
    "json": JsonSerializer.save,
    "jsongz": JsonSerializer.save_gzip,
    "bdocjs": JsonSerializer.save,
    "pickle": PickleSerializer.save,
    "bdocjsgz": JsonSerializer.save_gzip,
    "text/bdocjs": JsonSerializer.save,
    "text/bdocjs+gzip": JsonSerializer.save_gzip,
    "yaml": YamlSerializer.save,
    "bdocym": YamlSerializer.save,
    "yamlgz": YamlSerializer.save_gzip,
    "text/bdocym": YamlSerializer.save,
    "text/bdocym+gzip": YamlSerializer.save_gzip,
    # NOTE: malformed legacy key (trailing "+") kept for backward compatibility
    "text/bdocym+gzip+": YamlSerializer.save_gzip,
    "msgpack": MsgPackSerializer.save,
    "bdocmp": MsgPackSerializer.save,
    "tweet-v1": TweetV1Serializer.save,
    "text/bdocmp": MsgPackSerializer.save,
    "application/msgpack": MsgPackSerializer.save,
    "html-ann-viewer": HtmlAnnViewerSerializer.save,
}
# Registry mapping format names / mime-type-like identifiers to document loader callables.
DOCUMENT_LOADERS = {
    "json": JsonSerializer.load,
    "jsongz": JsonSerializer.load_gzip,
    "bdocjs": JsonSerializer.load,
    "bdocjsgz": JsonSerializer.load_gzip,
    "text/bdocjs": JsonSerializer.load,
    "text/bdocjs+gzip": JsonSerializer.load_gzip,
    "yaml": YamlSerializer.load,
    "yamlgz": YamlSerializer.load_gzip,
    "bdocym": YamlSerializer.load,
    "bdocymgz": YamlSerializer.load_gzip,
    # NOTE: malformed legacy key (transposed letters, stray ": ") kept for backward compatibility
    "bdocymzg: ": YamlSerializer.load_gzip,
    "text/bdocym": YamlSerializer.load,
    "text/bdocym+gzip": YamlSerializer.load_gzip,
    "msgpack": MsgPackSerializer.load,
    "bdocmp": MsgPackSerializer.load,
    "application/msgpack": MsgPackSerializer.load,
    "text/bdocmp": MsgPackSerializer.load,
    "jsonormsgpack": determine_loader,
    "text/plain": PlainTextSerializer.load,
    "text/plain+gzip": PlainTextSerializer.load_gzip,
    "text": PlainTextSerializer.load,
    "text/html": HtmlLoader.load,
    "html": HtmlLoader.load,
    "html-rendered": HtmlLoader.load_rendered,
    "gatexml": GateXmlLoader.load,
    "tweet-v1": TweetV1Serializer.load,
    "pickle": PickleSerializer.load,
}
# Registries for ChangeLog saving/loading (JSON only).
CHANGELOG_SAVERS = {
    "json": JsonSerializer.save,
    "text/bdocjs+gzip": JsonSerializer.save_gzip,
    "text/bdocjs": JsonSerializer.save,
}
CHANGELOG_LOADERS = {
    "json": JsonSerializer.load,
    "text/bdocjs+gzip": JsonSerializer.load_gzip,
    "text/bdocjs": JsonSerializer.load,
}
# map file extensions (including double extensions like "bdocjs.gz") to format names
EXTENSIONS = {
    "bdocjs": "json",
    "bdocym": "yaml",
    "bdocym.gz": "text/bdocym+gzip",
    "bdoc.gz": "text/bdocjs+gzip",  # lets assume it is compressed json
    "bdoc": "jsonormsgpack",
    "bdocjs.gz": "text/bdocjs+gzip",
    "bdocjson": "json",
    "bdocmp": "msgpack",
    "txt": "text/plain",
    "txt.gz": "text/plain+gzip",
    "html": "text/html",
    "htm": "text/html",
    "pickle": "pickle",
}
def get_handler(filespec, fmt, handlers, saveload, what):
    """Find the save/load handler for a format or file extension.

    If fmt is given it is looked up in handlers directly; otherwise the format
    is derived from the extension of filespec (treating "x.gz" as a double
    extension, e.g. "bdocjs.gz") and then looked up.

    Args:
        filespec: a file path or os.PathLike; used to guess the format from the
            extension when fmt is not given; may be None if fmt is given
        fmt: the format identifier, or None/falsy to derive it from filespec
        handlers: dict mapping format identifiers to handler callables
        saveload: "save" or "load", used in error messages only
        what: "document" or "changelog", used in error messages only

    Returns:
        the handler callable

    Raises:
        Exception: if no handler can be determined
    """
    msg = f"Could not determine how to {saveload} {what} for format {fmt} in module gatenlp.serialization.default"
    if fmt:
        handler = handlers.get(fmt)
        if not handler:
            raise Exception(msg)
        return handler
    else:
        if not filespec:  # in case of save_mem
            raise Exception(msg)
        if isinstance(filespec, os.PathLike):
            wf = os.fspath(filespec)
        elif isinstance(filespec, str):
            wf = filespec
        else:
            raise Exception(msg)
        name, ext = os.path.splitext(wf)
        if ext == ".gz":
            # gzipped files use a double extension, e.g. "doc.bdocjs.gz" -> "bdocjs.gz"
            ext2 = os.path.splitext(name)[1]
            if ext2:
                ext2 = ext2[1:]
            ext = ext2 + ext
        elif ext:
            ext = ext[1:]
        fmt = EXTENSIONS.get(ext)
        # BUGFIX: the second part of this message previously lacked the f prefix,
        # so "{ext}" was emitted literally instead of the actual extension
        msg = (
            f"Could not determine how to {saveload} {what} for format {fmt} and with "
            f"extension {ext} in module gatenlp.serialization.default"
        )
        if not fmt:
            raise Exception(msg)
        handler = handlers.get(fmt)
        if not handler:
            raise Exception(msg)
        return handler
def get_document_saver(filespec, fmt):
    """Return the saver callable for saving a document with the given format or file spec.

    Args:
        filespec: file path used to guess the format from the extension when fmt is not given
        fmt: the format identifier, or None to derive it from filespec

    Returns:
        the saver callable registered in DOCUMENT_SAVERS

    Raises:
        Exception: if no saver can be determined
    """
    return get_handler(filespec, fmt, DOCUMENT_SAVERS, "save", "document")
def get_document_loader(filespec, fmt):
    """Return the loader callable for loading a document with the given format or file spec.

    Args:
        filespec: file path used to guess the format from the extension when fmt is not given
        fmt: the format identifier, or None to derive it from filespec

    Returns:
        the loader callable registered in DOCUMENT_LOADERS

    Raises:
        Exception: if no loader can be determined
    """
    return get_handler(filespec, fmt, DOCUMENT_LOADERS, "load", "document")
def get_changelog_saver(filespec, fmt):
    """Return the saver callable for saving a changelog with the given format or file spec.

    Args:
        filespec: file path used to guess the format from the extension when fmt is not given
        fmt: the format identifier, or None to derive it from filespec

    Returns:
        the saver callable registered in CHANGELOG_SAVERS

    Raises:
        Exception: if no saver can be determined
    """
    return get_handler(filespec, fmt, CHANGELOG_SAVERS, "save", "changelog")
def get_changelog_loader(filespec, fmt):
    """Return the loader callable for loading a changelog with the given format or file spec.

    Args:
        filespec: file path used to guess the format from the extension when fmt is not given
        fmt: the format identifier, or None to derive it from filespec

    Returns:
        the loader callable registered in CHANGELOG_LOADERS

    Raises:
        Exception: if no loader can be determined
    """
    return get_handler(filespec, fmt, CHANGELOG_LOADERS, "load", "changelog")
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,522
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/gatenlp/processing/client.py
|
"""
Module that provides various Annotators which act as clients to REST annotation services.
"""
import logging
import json
from gatenlp.processing.annotator import Annotator
import requests
from requests.auth import HTTPBasicAuth
from gatenlp.utils import init_logger
import time
from gatenlp.offsetmapper import OffsetMapper
# TODO:
# * support compression send/receive
# * send GATE XML for existing annotations (requires GATE XML serialization writer)
# * send raw HTML or other formats support by the endpoint instead "doc" (which so far is just text)
# * maybe support the HTTP 100-continue protocol; so far we don't
# * ERROR HANDLING: raise exception vs return None?
class GateCloudAnnotator(Annotator):
    """
    This annotator sends the text of a document to a GATE Cloud (https://cloud.gate.ac.uk/) endpoint and uses the
    returned result to create annotations.
    """

    def __init__(
        self,
        api_key=None,
        api_password=None,
        url=None,
        ann_types=None,
        map_types=None,
        out_annset="",
        min_delay_ms=501,
    ):
        """
        Create a GateCloudAnnotator.

        Args:
            api_key: API key needed to authenticate. Some services can be used in a limited way without
               authentication.
            api_password: API password needed to authenticate.
            url:  the URL of the annotation service endpoint, shown on the GATE Cloud page for the service
            ann_types: this can be used to let the service annotate fewer or more than the default list of annotation
               types. The default list and all possible annotations are shown on the GATE Cloud page for the service.
               Either a string with comma separated annotation types preceded by a colon (e.g. ":Person,:Location")
               or a python list with those type names (e.g. [":Person", ":Location"]). If the list contains type names
               without a leading colon, the colon is added.
            map_types: a dict which maps the annotation types from the service to arbitrary new annotation types,
               any type name not in the map will remain unchanged.
            out_annset: the annotation set in which to store the annotations
            min_delay_ms: minimum time in milliseconds between two subsequent requests to the server
        """
        self.api_key = api_key
        self.api_password = api_password
        self.url = url
        self.map_types = map_types
        self.min_delay_s = min_delay_ms / 1000.0
        self.out_annset = out_annset
        if ann_types:
            if isinstance(ann_types, str):
                self.ann_types = ann_types
            elif isinstance(ann_types, list):
                # normalize: every type name sent to the service must start with ":"
                self.ann_types = ",".join(
                    [at if at.startswith(":") else ":" + at for at in ann_types]
                )
            else:
                raise Exception(
                    "ann_types must be a string of types like ':Person,:Location' or a list of types"
                )
        else:
            self.ann_types = None
        self.logger = init_logger()
        self.logger.setLevel(logging.DEBUG)
        # time.time() of the most recent request; used to throttle request frequency
        self._last_call_time = 0

    def __call__(self, doc, **kwargs):
        """
        Send the document text to the service and add the returned entities as annotations.

        Args:
            doc: the document to annotate
            **kwargs: "url" can be given to override the endpoint URL set at construction time

        Returns:
            the annotated document

        Raises:
            Exception: if the service does not answer with status code 200
        """
        # throttle: make sure at least min_delay_s passes between subsequent requests
        delay = time.time() - self._last_call_time
        if delay < self.min_delay_s:
            time.sleep(self.min_delay_s - delay)
        if "url" in kwargs:
            url = kwargs["url"]
        else:
            url = self.url
        text = doc.text
        hdrs = {
            "Content-Type": "text/plain; charset=UTF-8",
            "Accept": "application/gate+json",
        }
        params = {}
        if self.ann_types:
            params["annotations"] = self.ann_types
        # NOTE: not sure when this is needed, for now, disabled
        # next_annid = doc.annset(self.out_annset)._next_annid
        # params["nextAnnotationId"] = str(next_annid)
        if self.api_key:
            response = requests.post(
                url,
                data=text.encode("utf-8"),
                headers=hdrs,
                params=params,
                auth=HTTPBasicAuth(self.api_key, self.api_password),
            )
        else:
            response = requests.post(
                url, data=text.encode("utf-8"), headers=hdrs, params=params
            )
        # BUGFIX: record the request time, otherwise the min_delay throttling above never fires
        self._last_call_time = time.time()
        scode = response.status_code
        if scode != 200:
            raise Exception(f"Something went wrong, received status code {scode}")
        # renamed from "json" to avoid shadowing the json module imported at file level
        response_data = response.json()
        ents = response_data.get("entities", {})
        annset = doc.annset(self.out_annset)
        for typename, anns in ents.items():
            for anndata in anns:
                feats = {}
                start, end = (
                    None,
                    None,
                )  # cause an exception if the return data does not have indices
                for fname, fval in anndata.items():
                    if fname == "indices":
                        start, end = fval[0], fval[1]
                    else:
                        feats[fname] = fval
                if self.map_types:
                    typename = self.map_types.get(typename, typename)
                annset.add(start, end, typename, features=feats)
        return doc
class TagMeAnnotator(Annotator):
    """
    An annotator that sends text to the TagMe Annotation service (https://sobigdata.d4science.org/group/tagme/tagme)
    and uses the result to annotate the document.
    """

    def __init__(
        self,
        url=None,
        auth_token=None,
        lang="en",
        ann_type="Mention",
        task="tag",  # or spot
        out_annset="",
        min_delay_ms=501,
        tweet=False,
        include_all_spots=False,
        long_text=None,
        epsilon=None,
        link_pattern="https://{0}.wikipedia.org/wiki/{1}",
    ):
        """
        Create a TagMeAnnotator.

        Args:
            lang: the language of the text, one of 'de', 'en' (default), 'it'
            ann_type: the annotation type for the new annotations, default is "Mention"
            auth_token: the authentication token needed to use the service
            url: the annotation service endpoint, is None, the default endpoint for the task (spot or tag) is used
            task: one of "spot" (only find mentions) or "tag" (find mentions and link), default is "tag"
            out_annset: the annotationset to put the new annotations in
            min_delay_ms: minimum time in ms to wait between requests to the server
            tweet: if True, TagMe expects a Tweet (default is False)
            include_all_spots: if True, include spots that cannot be linked (default is False)
            long_text: if not None, the context length to use (default: None)
            epsilon: if not None, the epsilon value (float) to use (default: None)
            link_pattern: the URL pattern to use to turn the "title" returned from TagMe into an actual link. The
                default is "https://{0}.wikipedia.org/wiki/{1}" where {0} gets replaced with the language code and
                {1} gets replaced with the title.
        """
        if url is None:
            if task == "tag":
                url = "https://tagme.d4science.org/tagme/tag"
            elif task == "spot":
                url = "https://tagme.d4science.org/tagme/spot"
            else:
                raise Exception("task must be 'tag' or 'spot'")
        # validate with explicit raises instead of assert, so the checks survive python -O
        if lang not in ["en", "de", "it"]:
            raise Exception("lang must be one of 'en', 'de', 'it'")
        if long_text is not None and not isinstance(long_text, int):
            raise Exception("long_text must be an int")
        if epsilon is not None and not isinstance(epsilon, float):
            raise Exception("epsilon must be a float")
        self.long_text = long_text
        self.epsilon = epsilon
        self.lang = lang
        self.auth_token = auth_token
        self.url = url
        self.tweet = tweet
        self.include_all_spots = include_all_spots
        self.out_annset = out_annset
        self.min_delay_s = min_delay_ms / 1000.0
        self.logger = init_logger()
        # self.logger.setLevel(logging.DEBUG)
        # time.time() of the most recent request; used to throttle request frequency
        self._last_call_time = 0
        self.ann_type = ann_type
        self.link_pattern = link_pattern

    def __call__(self, doc, **kwargs):
        """
        Send the document text to TagMe and add the returned spots/links as annotations.

        Args:
            doc: the document to annotate
            **kwargs: "tweet" can be given to override the tweet flag set at construction time

        Returns:
            the annotated document

        Raises:
            Exception: if the service does not answer with status code 200
        """
        if "tweet" in kwargs:
            tweet = kwargs["tweet"]
        else:
            tweet = self.tweet
        # throttle: make sure at least min_delay_s passes between subsequent requests
        delay = time.time() - self._last_call_time
        if delay < self.min_delay_s:
            time.sleep(self.min_delay_s - delay)
        text = doc.text
        hdrs = {
            "Content-Type": "text/plain; charset=UTF-8",
            "Accept": "application/gate+json",
        }
        params = {
            "text": text,
            "gcube-token": self.auth_token,
            "lang": self.lang,
        }
        if self.include_all_spots:
            params["include_all_spots"] = "true"
        if tweet:
            params["tweet"] = "true"
        if self.long_text is not None:
            params["long_text"] = self.long_text
        if self.epsilon is not None:
            params["epsilon"] = self.epsilon
        response = requests.post(self.url, params=params, headers=hdrs)
        # BUGFIX: record the request time, otherwise the min_delay throttling above never fires
        self._last_call_time = time.time()
        scode = response.status_code
        if scode != 200:
            raise Exception(f"Something went wrong, received status code {scode}")
        # renamed from "json" to avoid shadowing the json module imported at file level
        response_data = response.json()
        # self.logger.debug(f"Response JSON: {response_data}")
        ents = response_data.get("annotations", {})
        annset = doc.annset(self.out_annset)
        # TagMe returns Java-style (UTF-16) offsets; convert them to python offsets
        om = OffsetMapper(text)
        for ent in ents:
            start = ent["start"]
            end = ent["end"]
            start, end = om.convert_to_python([start, end])
            feats = {}
            title = ent.get("title")
            if title is not None:
                if self.link_pattern:
                    feats["url"] = self.link_pattern.format(self.lang, title)
                else:
                    feats["title"] = title
            for fname in ["id", "rho", "link_probability", "lp"]:
                fval = ent.get(fname)
                if fval is not None:
                    feats[fname] = fval
            # self.logger.debug(f"Adding annotation {start},{end},{feats}")
            annset.add(start, end, self.ann_type, features=feats)
        return doc
class TextRazorTextAnnotator(Annotator):
    """
    An annotator that sends document text to the TextRazor Annotation service (https://www.textrazor.com/)
    and uses the result to annotate the document.

    NOTE: this annotator and how it can get parametrized will still change!
    """

    def __init__(
        self,
        url=None,  # use default
        auth_token=None,
        lang=None,  # if None/not specified, TextRazor auto-detects
        extractors=None,
        out_annset="",
        min_delay_ms=501,
    ):
        """
        Create a TextRazorTextAnnotator.

        Args:
            lang: if specified, override the auto-detected language of the text
            auth_token: the authentication token needed to use the service
            url: the annotation service endpoint, is None, the default endpoint https://api.textrazor.com is used
            extractors: a list of extractor names or a string with comma-separated extractor names to add to the
                minimum extractors (words, sentences). If None uses words, sentences, entities.
                NOTE: currently only words, sentences, entities is supported.!
            out_annset: the annotationset to put the new annotations in
            min_delay_ms: minimum time in ms to wait between requests to the server
        """
        if url is None:
            url = "https://api.textrazor.com"
        self.url = url
        self.lang = lang
        self.out_annset = out_annset
        self.auth_token = auth_token
        self.min_delay_s = min_delay_ms / 1000.0
        self.logger = init_logger()
        self.logger.setLevel(logging.DEBUG)
        # time.time() of the most recent request; used to throttle request frequency
        self._last_call_time = 0
        if extractors is not None:
            if isinstance(extractors, str):
                extractors = extractors.split(",")
            if isinstance(extractors, list):
                # always include the minimum extractors needed for Token/Sentence annotations
                allextrs = set()
                allextrs.update(extractors)
                allextrs.update(["words", "sentences"])
                self.extractors = ",".join(list(allextrs))
            else:
                raise Exception("Odd extractors, must be list of strings or string")
        else:
            self.extractors = "words,sentences,entities"

    def __call__(self, doc, **kwargs):
        """
        Send the document text to TextRazor and add Token, Sentence and Entity annotations.

        Args:
            doc: the document to annotate
            **kwargs: ignored

        Returns:
            the annotated document

        Raises:
            Exception: if the service does not answer with status code 200 or reports not-ok
        """
        # throttle: make sure at least min_delay_s passes between subsequent requests
        delay = time.time() - self._last_call_time
        if delay < self.min_delay_s:
            time.sleep(self.min_delay_s - delay)
        text = doc.text
        hdrs = {
            # 'Content-Type': 'text/plain; charset=UTF-8',
            # 'Accept-encoding': 'gzip'  # TODO: to enable compressed responses
            # 'Content-encoding': 'gzip'  # TODO: to enable compressed requests
            "X-TextRazor-Key": self.auth_token
        }
        data = {"text": text.encode("UTF-8")}
        if self.extractors:
            data["extractors"] = self.extractors
        if self.lang:
            data["languageOverride"] = self.lang
        self.logger.debug(f"Sending request to {self.url}, data={data}, headers={hdrs}")
        response = requests.post(
            self.url,
            # params=params,
            data=data,
            headers=hdrs,
        )
        # BUGFIX: record the request time, otherwise the min_delay throttling above never fires
        self._last_call_time = time.time()
        scode = response.status_code
        if scode != 200:
            raise Exception(f"Something went wrong, received status code {scode}")
        # renamed from "json" to avoid shadowing the json module imported at file level
        response_data = response.json()
        ok = response_data.get("ok", False)
        if not ok:
            raise Exception(f"Something went wrong, did not get OK, json: {response_data}")
        self.logger.debug(f"Response JSON: {response_data}")
        resp = response_data.get("response", {})
        # NOTE: the response also contains categories, topics, entailments, relations,
        # properties, nounPhrases, language and languageIsReliable sections; these are
        # currently not used and therefore not extracted here.
        entities = resp.get("entities", [])
        sentences = resp.get("sentences", [])
        tok2off = {}  # maps token idxs to tuples (start,end); currently only populated, not read
        annset = doc.annset(self.out_annset)
        for s in sentences:
            sentstart = None
            sentend = None
            words = s.get("words", [])
            end = None
            for word in words:
                start = word["startingPos"]
                end = word["endingPos"]
                if sentstart is None:
                    sentstart = start
                tokidx = word["position"]
                feats = {}
                feats["partOfSpeech"] = word["partOfSpeech"]
                feats["lemma"] = word["lemma"]
                if word.get("stem"):
                    feats["stem"] = word["stem"]
                annset.add(start, end, "Token", features=feats)
                tok2off[tokidx] = (start, end)
            # the sentence span runs from the first word's start to the last word's end
            if end is not None:
                sentend = end
            if sentstart is not None and sentend is not None:
                annset.add(sentstart, sentend, "Sentence")
        for ent in entities:
            feats = {}
            for fname in [
                "wikiLink",
                "entityEnglishId",
                "wikidataId",
                "relevanceScore",
                "confidenceScore",
                "type",
                "freebaseId",
                "entityId",
                "freebaseTypes",
            ]:
                if fname in ent:
                    feats[fname] = ent[fname]
            annset.add(ent["startingPos"], ent["endingPos"], "Entity", feats)
        return doc
class ElgTextAnnotator(Annotator):
    """
    An annotator that sends text to one of the services registered with the European Language Grid
    (https://live.european-language-grid.eu/) and uses the result to create annotations.
    NOTE: This is maybe not properly implemented and not properly tested yet!
    """
    # Fix: the docstring above originally sat *after* the class attributes, where it is a
    # no-op string statement and never becomes __doc__; moved to the top of the class body.

    # TODO: maybe we should eventually always use the elg package and the elg Service class!
    # TODO: however, currently their way how handling auth is done is too limiting see issues #8, #9
    # TODO: use template and return the URL from a method or use elg.utils
    # URLs a user can open in a browser to obtain an ELG "success code"
    # (live/dev realm, offline_access or openid scope).
    ELG_SC_LIVE_URL_PREFIX = "https://live.european-language-grid.eu/auth/realms/ELG/protocol/openid-connect/auth?"
    ELG_SC_LIVE_URL_PREFIX += (
        "client_id=python-sdk&redirect_uri=urn:ietf:wg:oauth:2.0:oob&response_type=code"
    )
    ELG_SC_LIVE_URL_OFFLINE = ELG_SC_LIVE_URL_PREFIX + "&scope=offline_access"
    ELG_SC_LIVE_URL_OPENID = ELG_SC_LIVE_URL_PREFIX + "&scope=openid"
    ELG_SC_DEV_URL_PREFIX = "https://dev.european-language-grid.eu/auth/realms/ELG/protocol/openid-connect/auth?"
    ELG_SC_DEV_URL_PREFIX += (
        "client_id=python-sdk&redirect_uri=urn:ietf:wg:oauth:2.0:oob&response_type=code"
    )
    ELG_SC_DEV_URL_OFFLINE = ELG_SC_DEV_URL_PREFIX + "&scope=offline_access"
    ELG_SC_DEV_URL_OPENID = ELG_SC_DEV_URL_PREFIX + "&scope=openid"

    def __init__(
        self,
        url=None,
        service=None,
        auth=None,
        success_code=None,
        access_token=None,
        refresh_access=False,
        out_annset="",
        min_delay_ms=501,
        anntypes_map=None,
    ):
        """
        Create an ElgTextAnnotator.
        NOTE: error handling is not properly implemented yet since we do not know yet how exactly the various
        error conditions are represented in the result returned from the ELG services. For now, any error will
        throw an exception when `__call__` is invoked.
        NOTE: initialization can fail with an exception if success_code is specified and retrieving the
        authentification information fails.
        Args:
            url: the annotation service URL to use. If not specified, the service parameter must be specified.
            service: the ELG service number or a tuple (servicenumber, domain). This requires the elg package.
                This may raise an exception. If successful, the url and service_meta attributes are set.
            auth: a pre-initialized ELG Authentication object. Requires the elg package. If not specified, the
                success_code or access_token parameter must be specified.
            success_code: the success code returned from the ELG web page for one of the URLs to obtain
                success codes. This will try to obtain the authentication information and store it in the
                `auth` attribute. Requires the elg package.
                To obtain a success code, go the the ELG_SC_LIVE_URL_OPENID or ELG_SC_LIVE_URL_OFFLINE url
                and log in with your ELG user id, this will show the success code that can be copy-pasted.
            access_token: the access token token for the ELG service. Only used if auth or success_code are not
                specified. The access token is probably only valid for a limited amount of time. No refresh
                will be done and once the access token is invalid, calling `__call__` will fail with an exception.
                The access token can be obtained using the elg package or copied from the "Code samples" tab
                on the web page for a service after logging in.
            refresh_access: if True, will try to refresh the access token if auth or success_code was specified and
                refreshing is possible. Ignored if only access_token was specified
            out_annset: the name of the annotation set where to create the annotations (default: "")
            min_delay_ms: the minimum delay time between requests in milliseconds (default: 501 ms)
            anntypes_map: a map for renaming the annotation type names from the service to the ones to use in
                the annotated document.
        """
        # Exactly one way to locate the service, and exactly one way to authenticate.
        if [x is not None for x in [url, service]].count(True) != 1:
            raise Exception("Exactly one of service or url must be specified")
        if [x is not None for x in [auth, success_code, access_token]].count(True) != 1:
            raise Exception(
                "Exactly one of auth, success_code, or access_token must be specified"
            )
        self.access_token = access_token
        self.success_code = success_code
        self.auth = auth
        self.url = url
        self.service = service
        self.service_meta = None
        self.refresh_access = refresh_access
        # Import the elg package lazily, and only if it is actually needed.
        import_elg = False
        if access_token:
            # A bare access token cannot be refreshed (no Authentication object).
            self.refresh_access = False
        if service is not None:
            import_elg = True
        if auth or success_code:
            import_elg = True
        if import_elg:
            try:
                from elg import Authentication
                from elg.utils import get_domain, get_metadatarecord
            except Exception as ex:
                raise Exception(
                    "For this gatenlp must be installed with extra elg or extra all, e.g. gatenlp[elg]",
                    ex,
                )
        if service is not None:
            # update this to use the new method:
            # https://gitlab.com/european-language-grid/platform/python-client/-/issues/9
            if isinstance(service, tuple):
                service_id, domain = service
            else:
                service_id = service
                domain = get_domain("live")
            self.service_meta = get_metadatarecord(service_id, domain)
            # NOTE: there is also elg_execution_location for async requests!
            self.url = self.service_meta["service_info"]["elg_execution_location_sync"]
        if success_code is not None:
            self.auth = Authentication.from_success_code(success_code, domain="live")
        if self.auth:
            self.access_token = self.auth.access_token
        self.min_delay_s = min_delay_ms / 1000.0
        self.anntypes_map = anntypes_map
        self.out_annset = out_annset
        self.logger = init_logger(__name__)
        # self.logger.setLevel(logging.DEBUG)
        self._last_call_time = 0

    def __call__(self, doc, **kwargs):
        # if necessary and possible, refresh the access token
        if self.refresh_access and self.auth:
            self.auth.refresh_if_needed()
        # Rate limiting: make sure at least min_delay_s passed since the previous request.
        delay = time.time() - self._last_call_time
        if delay < self.min_delay_s:
            time.sleep(self.min_delay_s - delay)
        om = OffsetMapper(doc.text)
        request_json = json.dumps(
            {"type": "text", "content": doc.text, "mimeType": "text/plain"}
        )
        hdrs = {"Content-Type": "application/json"}
        if self.access_token:
            hdrs["Authorization"] = f"Bearer {self.access_token}"
        response = requests.post(self.url, data=request_json, headers=hdrs)
        scode = response.status_code
        if scode != 200:
            raise Exception(
                f"Something went wrong, received status code/text {scode} / {response.text}"
            )
        response_json = response.json()
        # self.logger.debug(f"Response JSON: {json}")
        # TODO: check that we have got
        # - a map
        # - which has the "response" key
        # - response value is a map which has "type"= "annotations" and
        # - "annotations" is a map with keys being the annotation types and values arrays of annoations
        ents = response_json.get("response", {}).get("annotations", {})
        annset = doc.annset(self.out_annset)
        for ret_anntype, ret_anns in ents.items():
            # Optionally rename the service's annotation types.
            if self.anntypes_map:
                anntype = self.anntypes_map.get(ret_anntype, ret_anntype)
            else:
                anntype = ret_anntype
            for ret_ann in ret_anns:
                start = ret_ann["start"]
                end = ret_ann["end"]
                feats = ret_ann.get("features", {})
                # ELG offsets are converted to Python string offsets before adding.
                start, end = om.convert_to_python([start, end])
                annset.add(start, end, anntype, features=feats)
        return doc
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,523
|
davidwilby/python-gatenlp
|
refs/heads/main
|
/html-ann-viewer/create-debug-html.py
|
from nltk import word_tokenize
from gatenlp import Document

txt = "This is a document "
#      0123456789012345678901
#      0000000000111111111122

# First debug document: tokens, sentence/document spans, plus overlapping and
# zero-length annotations to exercise the HTML annotation viewer.
doc = Document("This is a document ")
annset = doc.annset()
for begin, stop, ann_type in [
    (0, 4, "Token"),
    (4, 5, "SpaceToken"),
    (5, 7, "Token"),
    (7, 8, "SpaceToken"),
    (8, 9, "Token"),
    (9, 10, "SpaceToken"),
    (10, 18, "Token"),
    (18, 21, "SpaceToken"),
    (0, 21, "Document"),
    (0, 18, "Sentence"),
    (2, 3, "Ann1"),
    (2, 2, "Zero1"),
    (20, 20, "Zero2"),
]:
    annset.add(begin, stop, ann_type)
doc.save("debug-html.html", fmt="html-ann-viewer", offline=True)

# Second debug document: alternating Space/Token annotations covering the text.
doc = Document(" x y ")
for begin, stop, ann_type in [
    (0, 1, "Space"),
    (1, 2, "Space"),
    (2, 3, "Space"),
    (3, 4, "Token"),
    (4, 5, "Space"),
    (5, 6, "Space"),
    (6, 7, "Space"),
    (7, 8, "Token"),
    (8, 10, "Space"),
    (10, 11, "Space"),
]:
    doc.annset().add(begin, stop, ann_type)
doc.save("debug-html2.html", fmt="html-ann-viewer", offline=True)
|
{"/gatenlp/processing/gazetteer/stringgazetteer.py": ["/gatenlp/processing/gazetteer/base.py"], "/gatenlp/document.py": ["/gatenlp/annotation_set.py", "/gatenlp/serialization/default.py"], "/gatenlp/processing/gazetteer/__init__.py": ["/gatenlp/processing/gazetteer/stringgazetteer.py"], "/gatenlp/corpora/files.py": ["/gatenlp/urlfileutils.py", "/gatenlp/document.py"], "/tests/test_gateworker.py": ["/gatenlp/__init__.py"], "/gatenlp/__init__.py": ["/gatenlp/version.py", "/gatenlp/annotation_set.py", "/gatenlp/document.py", "/gatenlp/serialization/default.py"], "/gatenlp/serialization/default.py": ["/gatenlp/document.py", "/gatenlp/annotation_set.py", "/gatenlp/urlfileutils.py"], "/html-ann-viewer/create-debug-html.py": ["/gatenlp/__init__.py"]}
|
14,524
|
tommywenjiezhang/flask_auth_starter
|
refs/heads/master
|
/flaskr/__init__.py
|
from flask import Flask
from flask_sqlalchemy import SQLAlchemy
db = SQLAlchemy()
from flask_login import LoginManager
def create_app():
    """Application factory: build and configure the Flask app.

    Configures flask-login, the SQLite database and registers the
    home/auth blueprints. Returns the configured Flask application.
    """
    app = Flask(__name__, instance_relative_config=False)
    login_manager = LoginManager()
    # Bug fix: login_view must be the *endpoint* of the login route
    # (blueprint name 'auth_bp' + view function 'login'), not "auth.auth_bp".
    # flask-login redirects unauthenticated users to this endpoint.
    login_manager.login_view = "auth_bp.login"
    login_manager.init_app(app)
    # NOTE(review): hard-coded SECRET_KEY should be loaded from the environment
    # in production deployments.
    app.config['SECRET_KEY'] = '9OLWxND4o83j4K4iuopO'
    app.config['SQLALCHEMY_DATABASE_URI'] = 'sqlite:///db.sqlite'
    db.init_app(app)
    from .model import User

    @login_manager.user_loader
    def load_user(user_id):
        # flask-login stores the user id as a string; the primary key is an int.
        return User.query.get(int(user_id))

    with app.app_context():
        from .home import home
        from .auth import auth
        app.register_blueprint(home.home_bp, url_prefix='/home')
        app.register_blueprint(auth.auth_bp)
    return app
|
{"/flaskr/auth/auth.py": ["/flaskr/__init__.py"]}
|
14,525
|
tommywenjiezhang/flask_auth_starter
|
refs/heads/master
|
/flaskr/auth/auth.py
|
from flask import Blueprint, render_template, url_for, request, redirect, flash
import os
from flask import current_app as app
from ..model import User
from .. import db
from werkzeug.security import generate_password_hash, check_password_hash
from flask_login import login_user
auth_bp = Blueprint('auth_bp', __name__,template_folder='templates',static_folder='static')
@auth_bp.context_processor
def override_url_for():
return dict(url_for=dated_url_for)
def dated_url_for(endpoint, **values):
    """url_for wrapper that appends the file's mtime as a `q` query parameter
    for static assets, so browsers re-fetch them after a deploy."""
    if endpoint != 'static':
        return url_for(endpoint, **values)
    filename = values.get('filename', None)
    if filename:
        asset_path = os.path.join(app.root_path,
                                  endpoint, filename)
        values['q'] = int(os.stat(asset_path).st_mtime)
    return url_for(endpoint, **values)
@auth_bp.route('/login')
def login():
    """Render the login form (GET)."""
    return render_template("login.html")
@auth_bp.route('/login',methods=['POST'])
def login_post():
    """Validate submitted credentials; log the user in on success.

    Flashes an error and redirects back to the login page on failure,
    otherwise logs in via flask-login and redirects to the profile page.
    """
    # Assumes both fields are present in the posted form
    # (request.form['...'] raises for missing keys).
    email = request.form['email']
    password = request.form['password']
    user = User.query.filter_by(email=email).first()
    # Compare against the salted hash created at signup.
    if not user or not check_password_hash(user.password, password):
        flash('Please check your login details and try again.', 'errors')
        return redirect(url_for('auth_bp.login'))
    login_user(user)
    flash('Login Successfully', 'success')
    return redirect(url_for('home_bp.profile'))
@auth_bp.route('/signup')
def signup():
    """Render the signup form (GET)."""
    return render_template('signup.html')
@auth_bp.route('/signup', methods=['POST'])
def signup_post():
    """Create a new user from the signup form and redirect to the login page.

    Rejects the request (with a flashed error) when the email is already taken.
    """
    email = request.form.get('email')
    name = request.form.get('name')
    password = request.form.get('password')
    user = User.query.filter_by(email=email).first() # if this returns a user, then the email already exists in database
    if user: # if a user is found, we want to redirect back to signup page so user can try again
        flash("Email address already exist", 'errors')
        return redirect(url_for('auth_bp.signup'))
    # create new user with the form data. Hash the password so plaintext version isn't saved.
    # NOTE(review): werkzeug >= 2.3 dropped the plain 'sha256' hash method —
    # confirm the pinned werkzeug version still accepts it.
    new_user = User(email=email, name=name, password=generate_password_hash(password, method='sha256'))
    # add the new user to the database
    db.session.add(new_user)
    db.session.commit()
    return redirect(url_for('auth_bp.login'))
@auth_bp.route('/logout')
def logout():
    # Stub only: does not call flask_login.logout_user(), so the session
    # remains authenticated.
    return 'Logout'
|
{"/flaskr/auth/auth.py": ["/flaskr/__init__.py"]}
|
14,526
|
tommywenjiezhang/flask_auth_starter
|
refs/heads/master
|
/flaskr/home/home.py
|
from flask import Blueprint , render_template
home_bp = Blueprint('home_bp',__name__,template_folder='templates')
@home_bp.route('/',methods=['GET'])
def index():
    """Render the landing page."""
    return render_template("body.html")
@home_bp.route('/profile')
def profile():
    # Stub: the auth blueprint's login_post redirects here after a successful login.
    return 'Profile'
|
{"/flaskr/auth/auth.py": ["/flaskr/__init__.py"]}
|
14,532
|
izaansohail/Django_Testing
|
refs/heads/main
|
/home/models.py
|
from django.db import models
# Create your models here.
class Contact(models.Model):
    """Visitor check-in record captured by the contact form.

    All columns are optional free-text strings; check-in/out times and the
    image location are stored as plain CharFields rather than typed columns.
    """
    name = models.CharField(max_length=200,default=None,null=True)
    cnic = models.CharField(max_length=200,default=None,null=True)
    address = models.CharField(max_length=200,default=None,null=True)
    organization = models.CharField(max_length=200,default=None,null=True)
    contact_number = models.CharField(max_length=200,default=None,null=True)
    checkin = models.CharField(max_length=200,default=None,null=True)
    checkout = models.CharField(max_length=200,default=None,null=True)
    contact_person = models.CharField(max_length=200,default=None,null=True)
    purpose = models.CharField(max_length=200,default=None,null=True)
    img_location = models.CharField(max_length=200,default=None,null=True)

    def __str__(self):
        # Display label in the admin; name may be None, hence the str() wrap.
        return str(self.name)
|
{"/home/admin.py": ["/home/models.py"], "/home/views.py": ["/home/models.py"]}
|
14,533
|
izaansohail/Django_Testing
|
refs/heads/main
|
/home/migrations/0003_contact_cnic.py
|
# Generated by Django 3.2.5 on 2021-07-09 04:52
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: adds the 'cnic' column to Contact.
    # Do not hand-edit; create a new migration for schema changes.
    dependencies = [
        ('home', '0002_auto_20210708_1316'),
    ]
    operations = [
        migrations.AddField(
            model_name='contact',
            name='cnic',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
    ]
|
{"/home/admin.py": ["/home/models.py"], "/home/views.py": ["/home/models.py"]}
|
14,534
|
izaansohail/Django_Testing
|
refs/heads/main
|
/home/migrations/0002_auto_20210708_1316.py
|
# Generated by Django 3.2.5 on 2021-07-08 08:16
from django.db import migrations, models
class Migration(migrations.Migration):
    # Auto-generated migration: replaces the initial Contact fields
    # (date/email/password) with the visitor check-in columns.
    # Do not hand-edit; create a new migration for schema changes.
    dependencies = [
        ('home', '0001_initial'),
    ]
    operations = [
        migrations.RemoveField(
            model_name='contact',
            name='date',
        ),
        migrations.RemoveField(
            model_name='contact',
            name='email',
        ),
        migrations.RemoveField(
            model_name='contact',
            name='password',
        ),
        migrations.AddField(
            model_name='contact',
            name='address',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='checkin',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='checkout',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='contact_number',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='contact_person',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='img_location',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='name',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='organization',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
        migrations.AddField(
            model_name='contact',
            name='purpose',
            field=models.CharField(default=None, max_length=200, null=True),
        ),
    ]
|
{"/home/admin.py": ["/home/models.py"], "/home/views.py": ["/home/models.py"]}
|
14,535
|
izaansohail/Django_Testing
|
refs/heads/main
|
/home/admin.py
|
from django.contrib import admin
from home.models import Contact
# Register your models here.
class ContactView(admin.ModelAdmin):
    """Admin list configuration for Contact records."""
    # Bug fix: entries must match the model's field names exactly —
    # the model declares 'name' (lowercase); 'Name' fails Django's
    # admin checks (admin.E108).
    list_display = ['name', 'cnic', 'address',
                    'organization', 'contact_number', 'checkin', 'checkout',
                    'contact_person', 'purpose', 'img_location']


# Register the model together with its ModelAdmin so list_display takes effect;
# originally Contact was registered bare and ContactView was never attached.
admin.site.register(Contact, ContactView)
|
{"/home/admin.py": ["/home/models.py"], "/home/views.py": ["/home/models.py"]}
|
14,536
|
izaansohail/Django_Testing
|
refs/heads/main
|
/home/views.py
|
from django.shortcuts import render
from django.http import HttpResponse
from home.models import Contact
from django.contrib import messages
# Create your views here.
def index(request):
    """Render the home screen."""
    return render_template = render(request, 'homescreen.html') if False else render(request, 'homescreen.html')
def about(request):
    """Render the about page."""
    return render(request, 'about.html')
def services(request):
    """Render the services page."""
    return render(request, 'services.html')
def contact(request):
    """Render the contact form; on POST, persist the submitted visitor details."""
    if request.method == "POST":
        name = request.POST.get('Name')
        cnic = request.POST.get('cnic')
        address = request.POST.get('Address')
        organization = request.POST.get('Organization')
        contact_number = request.POST.get('Contact_Number')
        checkin = request.POST.get('Check-In')
        checkout = request.POST.get('Check-Out')
        contact_person = request.POST.get('Contact_Person')
        purpose = request.POST.get('Purpose')
        # NOTE(review): 'Img_lLocation' looks like a typo for 'Img_Location' —
        # confirm against the form template's input name before changing.
        img_location = request.POST.get('Img_lLocation')
        contact = Contact(name=name,cnic = cnic, address=address,
        organization=organization,contact_number=contact_number,checkin=checkin,checkout=checkout,
        contact_person=contact_person,purpose=purpose,img_location=img_location)
        contact.save()
        messages.success(request, 'Your Details have been saved in database.')
    return render(request, 'contact.html')
|
{"/home/admin.py": ["/home/models.py"], "/home/views.py": ["/home/models.py"]}
|
14,537
|
fyabc/DBLab02
|
refs/heads/master
|
/DBLab02.py
|
from flask import Flask
from flask_bootstrap import Bootstrap
# Main Application
# NOTE(review): Flask and Bootstrap are imported here but unused — the app
# object is created in createApp and re-exported via views.
from views import app

if __name__ == '__main__':
    # Entry point: run the development server.
    app.run(debug=False)
|
{"/DBLab02.py": ["/views.py"], "/forms.py": ["/config.py"], "/dbOperations.py": ["/createApp.py", "/config.py"], "/views.py": ["/createApp.py", "/config.py", "/forms.py", "/dbOperations.py"]}
|
14,538
|
fyabc/DBLab02
|
refs/heads/master
|
/forms.py
|
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
from flask.ext.wtf import Form
from wtforms import StringField, SubmitField, SelectField, PasswordField, IntegerField
from wtforms.validators import DataRequired, NumberRange, Length
# Local modules.
from config import TableNames
class SignInForm(Form):
    """Registration form: user id, user name, and a password of >= 6 characters."""
    userID = StringField('用户ID', validators=[DataRequired()])
    userName = StringField('用户名', validators=[DataRequired()])
    password = PasswordField(
        '密码', validators=[DataRequired(), Length(min=6, message='密码长度不得少于6个字符。')])
    submit = SubmitField('注册')
class QueryForm(Form):
    """Generic table query: pick a table type and an optional primary-key value."""
    # TableNames entries are presumably (value, label) pairs — see the
    # name[0] filter used in InsertForm; confirm in config.
    type = SelectField('查询类型', coerce=str, choices=TableNames)
    queryName = StringField('查询主键名称', default='')
    submit = SubmitField('查询')
class LoginForm(Form):
    """Login form; the admin credentials are hard-coded as class attributes."""
    userName = StringField('账号', validators=[DataRequired()])
    password = PasswordField('密码', validators=[DataRequired()])
    submit = SubmitField('登录')
    # NOTE(review): hard-coded plaintext credentials are a security risk —
    # move them to configuration/secrets management.
    myUserName = 'fyabc'
    myPassword = 'fy95102'
class ReserveForm(Form):
    """Reservation form: customer id, resource type (1/2/3) and resource key."""
    customerID = StringField('用户编号', validators=[DataRequired()])
    # The integer codes match the dispatch in dbOperations.addReserve.
    reserveType = SelectField('预订类型', coerce=int,
                              choices=[
                                  (1, '航班'),
                                  (2, '宾馆'),
                                  (3, '出租车')
                              ])
    reserveKey = StringField('预订名称', validators=[DataRequired()])
    submit = SubmitField('预订')
class UnsubscribeForm(Form):
    """Cancellation form: delete a reservation by its numeric id."""
    reservationID = IntegerField('预订编号', validators=[DataRequired()])
    submit = SubmitField('退订')
class InsertForm(Form):
    """Admin form for inserting a record into any table except Reservations.

    Reservations are created through the reservation flow, hence the
    name[0] filter below.
    """
    type = SelectField('插入类型', coerce=str, choices=[name for name in TableNames if name[0] != 'Reservations'])
    primaryKey = StringField('主键名称', validators=[DataRequired()])
    # NOTE(review): these client-side bounds (524287 / 1023) differ from the
    # server-side checks in dbOperations.insertRecord (1048576 / 1024) —
    # confirm which limits are intended.
    price = IntegerField('价格', validators=[NumberRange(min=1, max=524287)])
    numTotal = IntegerField('数量', validators=[NumberRange(min=1, max=1023)])
    password = StringField('密码')
    fromCity = StringField('出发城市')
    toCity = StringField('目的城市')
    customerName = StringField('用户名称')
    submit = SubmitField('插入记录')
class DeleteForm(Form):
    """Admin form for deleting a record from any table (including Reservations)."""
    # Idiom fix: `[name for name in TableNames]` was a no-op copy comprehension
    # (ruff C416); list() expresses the copy directly.
    type = SelectField('删除类型', coerce=str, choices=list(TableNames))
    primaryKey = StringField('主键名称', validators=[DataRequired()])
    submit = SubmitField('删除记录')
class RouteQueryForm(Form):
    """Route search: flights between two cities plus lodging/cars at the destination."""
    fromCity = StringField('出发城市', validators=[DataRequired()])
    toCity = StringField('目的城市', validators=[DataRequired()])
    submit = SubmitField('查询线路')
class CustomerQueryForm(Form):
    """Look up a customer's reservations by id and/or name (both optional)."""
    IDNumber = StringField('用户ID')
    customerName = StringField('用户名称')
    submit = SubmitField('查询用户')
|
{"/DBLab02.py": ["/views.py"], "/forms.py": ["/config.py"], "/dbOperations.py": ["/createApp.py", "/config.py"], "/views.py": ["/createApp.py", "/config.py", "/forms.py", "/dbOperations.py"]}
|
14,539
|
fyabc/DBLab02
|
refs/heads/master
|
/dbOperations.py
|
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
# These 2 statements are important.
import pymysql
pymysql.install_as_MySQLdb()
from flask import request, session
from flask.ext.sqlalchemy import SQLAlchemy
from sqlalchemy.exc import IntegrityError, InvalidRequestError
# Local modules.
from createApp import app
import config
db = SQLAlchemy(app)
class DBErrorMessage:
    """Result/status container returned by the DB operation helpers.

    Attributes:
        code: integer status — 0 means success, non-zero identifies the error kind.
        message: tuple of human-readable message strings (variadic).
    """

    def __init__(self, code, *message):
        self.code = code
        self.message = message

    def __repr__(self):
        # Added for debuggability; does not change any existing behavior.
        return '%s(code=%r, message=%r)' % (type(self).__name__, self.code, self.message)
class Flights(db.Model):
    """Flight inventory row.

    numAvail is kept consistent with the Reservations table by the stored
    procedure/triggers installed in createTriggers, and guarded by the
    CHECK(numAvail >= 0) constraint below.
    """
    __tablename__ = 'Flights'
    flightNum = db.Column(name='flightNum', type_=db.String(20), primary_key=True, nullable=False)
    price = db.Column(name='price', type_=db.BigInteger)
    numSeats = db.Column(name='numSeats', type_=db.Integer)
    numAvail = db.Column(name='numAvail', type_=db.Integer)
    fromCity = db.Column(name='fromCity', type_=db.String(20))
    toCity = db.Column(name='toCity', type_=db.String(20))
    constraint = db.CheckConstraint('numAvail >= 0')
    # (column name, Chinese display label) pairs used by the result views.
    columns = [('flightNum', '航班号'), ('price', '价格'), ('numSeats', '座位数量'), ('numAvail', '可用数量'),
               ('fromCity', '出发城市'), ('toCity', '目的城市')]

    def __init__(self, flightNum, price, numSeats, fromCity, toCity):
        # A new flight starts with every seat available.
        self.flightNum = flightNum
        self.price = price
        self.numSeats = numSeats
        self.numAvail = numSeats
        self.fromCity = fromCity
        self.toCity = toCity

    def __repr__(self):
        return 'Flights(flightNum=%s, price=%d, numSeats=%d, numAvail=%d, fromCity=%s, toCity=%s)' % \
               (self.flightNum, self.price, self.numSeats, self.numAvail, self.fromCity, self.toCity)
class Hotels(db.Model):
    """Hotel inventory row; numAvail is maintained by the DB triggers
    (see createTriggers) and guarded by CHECK(numAvail >= 0)."""
    __tablename__ = 'Hotels'
    location = db.Column(name='location', type_=db.String(20), primary_key=True, nullable=False)
    price = db.Column(name='price', type_=db.BigInteger)
    numRooms = db.Column(name='numRooms', type_=db.Integer)
    numAvail = db.Column(name='numAvail', type_=db.Integer)
    constraint = db.CheckConstraint('numAvail >= 0')
    # (column name, Chinese display label) pairs used by the result views.
    columns = [('location', '地点'), ('price', '价格'), ('numRooms', '房间数量'), ('numAvail', '可用数量')]

    def __init__(self, location, price, numRooms):
        # A new hotel starts with every room available.
        self.location = location
        self.price = price
        self.numRooms = numRooms
        self.numAvail = numRooms

    def __repr__(self):
        return 'Hotels(location=%s, price=%d, numRooms=%d, numAvail=%d)' % \
               (self.location, self.price, self.numRooms, self.numAvail)
class Cars(db.Model):
    """Rental-car inventory row; numAvail is maintained by the DB triggers
    (see createTriggers) and guarded by CHECK(numAvail >= 0)."""
    __tablename__ = 'Cars'
    location = db.Column(name='location', type_=db.String(20), primary_key=True, nullable=False)
    price = db.Column(name='price', type_=db.BigInteger)
    numCars = db.Column(name='numCars', type_=db.Integer)
    numAvail = db.Column(name='numAvail', type_=db.Integer)
    constraint = db.CheckConstraint('numAvail >= 0')
    # (column name, Chinese display label) pairs used by the result views.
    columns = [('location', '地点'), ('price', '价格'), ('numCars', '车辆数量'), ('numAvail', '可用数量')]

    def __init__(self, location, price, numCars):
        # A new location starts with every car available.
        self.location = location
        self.price = price
        self.numCars = numCars
        self.numAvail = numCars

    def __repr__(self):
        return 'Cars(location=%s, price=%d, numCars=%d, numAvail=%d)' % \
               (self.location, self.price, self.numCars, self.numAvail)
class Customers(db.Model):
    """Registered customer; also serves as the flask-login user object."""
    __tablename__ = 'Customers'
    IDNumber = db.Column(name='IDNumber', type_=db.String(20), primary_key=True, nullable=False)
    customerName = db.Column(name='customerName', type_=db.String(20))
    # NOTE(review): passwords are stored in plaintext — they should be hashed.
    password = db.Column(name='password', type_=db.String(20))
    # (column name, Chinese display label) pairs used by the result views.
    columns = [('IDNumber', '用户ID'), ('customerName', '用户名'), ('password', '密码')]

    def __init__(self, IDNumber, customerName, password):
        self.IDNumber = IDNumber
        self.customerName = customerName
        self.password = password

    # Used by flask_login.
    # The four methods below implement the flask-login "user" protocol.
    def is_authenticated(self):
        # return Customers.query.get(self.IDNumber).password == self.password
        return True

    def is_active(self):
        return True

    def is_anonymous(self):
        return False

    def get_id(self):
        # flask-login requires a string id.
        return str(self.IDNumber)

    def __repr__(self):
        return 'Customers(IDNumber=%s, customerName=%s, password=%s)' %\
               (self.IDNumber, self.customerName, self.password)
class Reservations(db.Model):
    """A customer's reservation of one flight / hotel / car resource."""
    __tablename__ = 'Reservations'
    reservationID = db.Column(name='reservationID', type_=db.Integer, primary_key=True, nullable=False,
                              autoincrement=True)
    customerID = db.Column(db.ForeignKey('Customers.IDNumber'), name='customerID', type_=db.String(20), nullable=False)
    # 1 = flight, 2 = hotel, 3 = car (see the dispatch in addReserve).
    reserveType = db.Column(name='reserveType', type_=db.SmallInteger)
    reserveKey = db.Column(name='reserveKey', type_=db.String(20), nullable=False)
    # (column name, Chinese display label) pairs used by the result views.
    columns = [('reservationID', '预订ID'), ('customerID', '顾客ID'),
               ('reserveType', '预订类型'), ('reserveKey', '预订名称')]

    def __init__(self, customerID, reserveType, reserveKey):
        self.customerID = customerID
        self.reserveType = reserveType
        self.reserveKey = reserveKey

    def __repr__(self):
        # Bug fix: reservationID is formatted with %s, not %d — before the row
        # is flushed the autoincrement key is still None and '%d' % None raises
        # TypeError, making repr() on a fresh instance crash.
        return 'Reservations(customerID=%s, reserveType=%d, reserveKey=%s) # reservationID=%s' % \
               (self.customerID, self.reserveType, self.reserveKey, self.reservationID)
# Map from table name (e.g. 'Flights') to its model class; used to dispatch
# the generic query/insert/delete operations on the request's 'type' field.
Tables = {
    table.__tablename__: table
    for table in [Flights, Cars, Hotels, Customers, Reservations]
}
def queryOneColumn(result, nameCol=0):
    """Collect a single column from a DB result set into a list.

    Args:
        result: object exposing fetchall() that yields indexable rows.
        nameCol: index of the column to extract (default: first column).
    """
    values = []
    for record in result.fetchall():
        values.append(record[nameCol])
    return values
def createTriggers():
    """Install the stored procedure and triggers that keep numAvail in
    Flights/Hotels/Cars consistent with the Reservations table.

    Idempotent: each database object is created only if it does not
    already exist.
    """
    allProcedures = queryOneColumn(db.session.execute("""Show Procedure Status Where Db = 'DBLab02';"""), 1)
    if 'changeReserve' not in allProcedures:
        # You needn't to change delimiter in mysql APIs, because ';' will not split the query in API.
        # changeReserve(resType, resKey, ins_or_del): adjust numAvail of the
        # matching Flights/Hotels/Cars row by -1 (insert) or +1 (delete).
        db.session.execute(
            """\
CREATE PROCEDURE DBLab02.changeReserve(resType INT(11), resKey CHAR(20), ins_or_del BOOLEAN)
BEGIN
IF resType = 1 THEN
UPDATE Flights
SET numAvail = numAvail + If(ins_or_del, -1, 1)
WHERE flightNum = resKey
;
ELSEIF resType = 2 THEN
UPDATE Hotels
SET numAvail = numAvail + If(ins_or_del, -1, 1)
WHERE location = resKey
;
ELSEIF resType = 3 THEN
UPDATE Cars
SET numAvail = numAvail + If(ins_or_del, -1, 1)
WHERE location = resKey
;
END IF;
END;
"""
        )
    # In table FLIGHTS, numAvail is the number of seats on a flight that can
    # still be reserved. For a given flight (flightNum), one of the database
    # consistency conditions is: the number of RESERVATIONS rows reserving that
    # flight plus the flight's remaining seats must equal its total number of
    # seats. The same condition applies to the CARS and HOTELS tables.
    allTriggers = queryOneColumn(db.session.execute("""Show Triggers;"""))
    if 'T_AvailableNum_Ins' not in allTriggers:
        db.session.execute(
            """
CREATE TRIGGER T_AvailableNum_Ins
AFTER INSERT ON Reservations
FOR EACH ROW
CALL changeReserve(new.reserveType, new.reserveKey, TRUE)
;
"""
        )
    if 'T_AvailableNum_Del' not in allTriggers:
        db.session.execute(
            """
CREATE TRIGGER T_AvailableNum_Del
AFTER DELETE ON Reservations
FOR EACH ROW
CALL changeReserve(old.reserveType, old.reserveKey, FALSE)
;
"""
        )
    if 'T_AvailableNum_Update' not in allTriggers:
        # An update is treated as delete-old + insert-new.
        db.session.execute(
            """
CREATE TRIGGER T_AvailableNum_Update
AFTER UPDATE ON Reservations
FOR EACH ROW
BEGIN
CALL changeReserve(old.reserveType, old.reserveKey, FALSE);
CALL changeReserve(new.reserveType, new.reserveKey, TRUE);
END
;
"""
        )
    db.session.commit()
def query():
    """Run a generic query driven by the posted 'type' and 'queryName' fields.

    Returns (table_class, rows). Admins — and anyone for the public
    Flights/Hotels/Cars tables — may query everything; a regular user only
    sees their own Customers row or their own Reservations.
    """
    table = Tables.get(request.form['type'])
    queryName = request.form['queryName']
    if config.adminLoggedIn or table in (Flights, Hotels, Cars):
        if queryName == '':
            return table, table.query.all()
        else:
            # Primary-key lookup; yields [None] when the key does not exist.
            return table, [table.query.get(queryName)]
    else:
        if table == Reservations:
            return table, table.query.filter(Reservations.customerID == session.get('user_id')).all()
        else:
            return table, [table.query.get(session.get('user_id'))]
def addReserve():
    """Create a reservation for a flight/hotel/car for the current user
    (or, when an admin is logged in, for an arbitrary customer id).

    Returns a DBErrorMessage; code 0 means success.
    """
    if config.adminLoggedIn:
        customerID = request.form['customerID']
    else:
        customerID = session.get('user_id')
    reserveType = int(request.form['reserveType'])
    reserveKey = request.form['reserveKey']
    # Dispatch on the reservation type code: 1 = flight, 2 = hotel, 3 = car.
    table = None
    if reserveType == 1:
        table = Flights
    elif reserveType == 2:
        table = Hotels
    elif reserveType == 3:
        table = Cars
    # test if the reserveKey in the Table.
    reserveEntity = table.query.get(reserveKey)
    if reserveEntity is None:
        return DBErrorMessage(1, '没有在数据库中找到预订的名称。')
    elif reserveEntity.numAvail <= 0:
        # NOTE(review): this check-then-insert is not atomic under concurrency;
        # the AFTER INSERT trigger decrements numAvail and the
        # CHECK(numAvail >= 0) constraint is the actual guard.
        return DBErrorMessage(5, '没有多余的空位了!')
    reservation = Reservations(customerID, reserveType, reserveKey)
    try:
        db.session.add(reservation)
        db.session.commit()
    except IntegrityError as e:
        # Foreign-key violation: the customer id does not exist.
        print(e)
        db.session.rollback()
        return DBErrorMessage(2, '完整性错误:数据库中不存在该用户编号。', '详细信息:%s' % e)
    except Exception as e:
        print(e)
        db.session.rollback()
        return DBErrorMessage(3, '其他错误:%s' % e)
    # After commit the autoincrement reservationID is populated.
    return DBErrorMessage(0, '预订成功!预订编号为%d,请记得保存。' % reservation.reservationID)
def removeReserve():
    """Delete a reservation by its id.

    Admins may delete any reservation; a regular user may only delete their
    own. Returns a DBErrorMessage; code 0 means success.
    """
    # Bug fix: the original used `assert 1 <= reservationID`, but asserts are
    # stripped under `python -O`, and AssertionError was not caught by the
    # `except ValueError` below — validate explicitly instead (the sibling
    # insertRecord already treats bad bounds as input errors).
    try:
        reservationID = int(request.form['reservationID'])
    except ValueError as e:
        print(e)
        return DBErrorMessage(4, '预订编号必须为正整数')
    if reservationID < 1:
        return DBErrorMessage(4, '预订编号必须为正整数')
    if config.adminLoggedIn:
        deleteNum = Reservations.query.filter(Reservations.reservationID == reservationID).delete(False)
    else:
        # Non-admins may only delete reservations they own.
        deleteNum = Reservations.query.filter(Reservations.reservationID == reservationID,
                                              Reservations.customerID == session.get('user_id')).delete(False)
    db.session.commit()
    if deleteNum == 0:
        return DBErrorMessage(1, '没有在数据库中找到预订的编号,或这不是您的预订。')
    else:
        return DBErrorMessage(0, '退订成功!')
def insertRecord():
    """Insert a new row into the table named by the posted 'type' field.

    Customers get (IDNumber, name, password); the inventory tables get
    (primaryKey, price, numTotal) plus from/to cities for Flights.
    Returns a DBErrorMessage; code 0 means success.
    """
    table = Tables[request.form['type']]
    if table == Customers:
        IDNumber = request.form['primaryKey']
        customerName = request.form['customerName']
        password = request.form['password']
        try:
            db.session.add(Customers(IDNumber, customerName, password))
            db.session.commit()
        except IntegrityError as e:
            # Duplicate primary key: the user already exists.
            print(e)
            db.session.rollback()
            return DBErrorMessage(2, '完整性错误:数据库中已存在该用户。', '详细信息:%s' % e)
        except Exception as e:
            print(e)
            db.session.rollback()
            return DBErrorMessage(3, '其他错误:%s' % e)
        return DBErrorMessage(0, '添加用户成功!')
    else:
        # validate the price and numTotal.
        # NOTE(review): these bounds (1048576 / 1024) differ from the form-side
        # NumberRange limits in forms.InsertForm (524287 / 1023) — confirm intent.
        try:
            price = int(request.form['price'])
            numTotal = int(request.form['numTotal'])
            assert 1 <= price <= 1048576
            assert 1 <= numTotal <= 1024
        except (ValueError, AssertionError) as e:
            print(e)
            return DBErrorMessage(4, '价格必须为1~1048576的整数,可用数量必须为1~1024的整数')
        try:
            if table == Flights:
                # Flights additionally need the origin and destination cities.
                db.session.add(table(request.form['primaryKey'], price, numTotal, request.form['fromCity'],
                                     request.form['toCity']))
            else:
                db.session.add(table(request.form['primaryKey'], price, numTotal))
            db.session.commit()
        except IntegrityError as e:
            print(e)
            db.session.rollback()
            return DBErrorMessage(2, '完整性错误:数据库中已存在主键相同的记录。', '详细信息:%s' % e)
        except Exception as e:
            print(e)
            db.session.rollback()
            return DBErrorMessage(3, '其他错误:%s' % e)
        return DBErrorMessage(0, '添加记录成功!')
def deleteRecord():
    """Delete a row (and its dependent reservations) identified by the posted
    'type' and 'primaryKey' fields. Returns a DBErrorMessage; code 0 = success."""
    # NOTE(review): debug print of the raw form — remove for production.
    print(request.form)
    table = Tables[request.form['type']]
    primaryKey = request.form['primaryKey']
    if table == Reservations:
        deleteNum = Reservations.query.filter(Reservations.reservationID == primaryKey).delete(False)
        db.session.commit()
        if deleteNum == 0:
            return DBErrorMessage(1, '没有在数据库中找到预订的编号。')
        else:
            return DBErrorMessage(0, '删除预订成功!')
    elif table == Customers:
        # Before deleting a customer, you should remove all reservations it reserved.
        Reservations.query.filter(Reservations.customerID == primaryKey).delete(False)
        deleteNum = Customers.query.filter(Customers.IDNumber == primaryKey).delete(False)
        db.session.commit()
        if deleteNum == 0:
            return DBErrorMessage(1, '没有在数据库中找到用户的ID。')
        else:
            return DBErrorMessage(0, '删除用户成功!')
    else:
        try:
            # Before deleting a flight, you should remove all reservations that reserve it.
            Reservations.query.filter(Reservations.reserveKey == primaryKey).delete(False)
            # NOTE(review): if primaryKey is absent, query.get() returns None and
            # session.delete(None) raises — confirm that error lands in the
            # intended handler rather than the generic `except Exception`.
            db.session.delete(table.query.get(primaryKey))
            db.session.commit()
        except InvalidRequestError as e:
            print(e)
            db.session.rollback()
            return DBErrorMessage(1, '没有在数据库中找到对应的记录。')
        except Exception as e:
            print(e)
            db.session.rollback()
            return DBErrorMessage(3, '其他错误:%s' % e)
        return DBErrorMessage(0, '删除记录成功!')
def insertCustomer():
    """Register a new customer from the signup form fields.

    Returns a DBErrorMessage; code 0 means success, 2 means the id is taken.
    """
    userID = request.form['userID']
    userName = request.form['userName']
    password = request.form['password']
    try:
        db.session.add(Customers(userID, userName, password))
        db.session.commit()
    except IntegrityError as e:
        # Duplicate primary key: the user id is already registered.
        print(e)
        db.session.rollback()
        return DBErrorMessage(2, '完整性错误:该用户ID已被注册!')
    except Exception as e:
        print(e)
        db.session.rollback()
        return DBErrorMessage(3, '其他错误:%s' % e)
    return DBErrorMessage(0, '注册成功!')
def routeQuery():
    """Find flights between the posted cities plus hotels and cars available
    at the destination. Returns (flights, hotels, cars) result lists."""
    fromCity = request.form['fromCity']
    toCity = request.form['toCity']
    flightsResult = Flights.query.filter(Flights.fromCity == fromCity, Flights.toCity == toCity).all()
    hotelsResult = Hotels.query.filter(Hotels.location == toCity).all()
    carsResult = Cars.query.filter(Cars.location == toCity).all()
    return flightsResult, hotelsResult, carsResult
def customerQuery():
    """Return reservations for a customer.

    An administrator may search by ID number or by customer name taken from
    the form; a regular user only ever sees reservations bound to the logged-in
    session ID. With no criteria at all, every reservation is returned.
    """
    if config.adminLoggedIn:
        IDNumber = request.form['IDNumber']
        customerName = request.form['customerName']
    else:
        IDNumber, customerName = session.get('user_id'), ''

    if IDNumber != '':
        return Reservations.query.filter(Reservations.customerID == IDNumber).all()
    if customerName != '':
        return db.session.query(Reservations) \
            .join(Customers) \
            .filter(Reservations.customerID == Customers.IDNumber, Customers.customerName == customerName) \
            .all()
    return Reservations.query.all()
def dropTable():
    """Drop every table managed by SQLAlchemy — destructive; only for a clean rebuild."""
    db.drop_all()
def createTable():
    """Create all missing tables and install the database triggers."""
    # dropTable()
    db.create_all()
    createTriggers()
# result = db.engine.execute("Show databases")
# print(result.fetchall())

# Ensure the schema and triggers exist as soon as this module is imported.
createTable()
|
{"/DBLab02.py": ["/views.py"], "/forms.py": ["/config.py"], "/dbOperations.py": ["/createApp.py", "/config.py"], "/views.py": ["/createApp.py", "/config.py", "/forms.py", "/dbOperations.py"]}
|
14,540
|
fyabc/DBLab02
|
refs/heads/master
|
/createApp.py
|
# -*- coding: utf-8 -*-
__author__ = 'fyabc'

import os

from flask import Flask
from flask.ext.bootstrap import Bootstrap
from flask.ext.login import LoginManager

# Single application instance shared by the rest of the package;
# configuration is read from the top-level config module.
app = Flask(__name__)
app.config.from_object('config')

# Bind Bootstrap styling and the Flask-Login manager to the app.
Bootstrap(app)
lm = LoginManager()
lm.init_app(app)
|
{"/DBLab02.py": ["/views.py"], "/forms.py": ["/config.py"], "/dbOperations.py": ["/createApp.py", "/config.py"], "/views.py": ["/createApp.py", "/config.py", "/forms.py", "/dbOperations.py"]}
|
14,541
|
fyabc/DBLab02
|
refs/heads/master
|
/views.py
|
# -*- coding: utf-8 -*-
__author__ = 'fyabc'
from flask import render_template, request, redirect, url_for
from flask.ext.login import login_user, logout_user, login_required
# Local modules.
from createApp import app, lm
import config
from forms import QueryForm, LoginForm, ReserveForm, UnsubscribeForm, \
InsertForm, RouteQueryForm, CustomerQueryForm, SignInForm, DeleteForm
# Use this import to initialize the database connection.
from dbOperations import db, Customers, \
query, addReserve, removeReserve, insertRecord, deleteRecord, routeQuery, customerQuery, insertCustomer,\
Flights, Hotels, Cars, Reservations
@app.route('/')
@app.route('/index')
def mainPage():
    """Home page."""
    return render_template('index.html')
@app.route('/query', methods=['GET', 'POST'])
@login_required
def queryPage():
    """Generic table-query page; runs the query when the form validates."""
    form = QueryForm()
    table, results = None, None
    if form.validate_on_submit():
        table, results = query()
    return render_template('query.html', query=form, queryResult=results, table=table,
                           isAdmin=config.adminLoggedIn)
@app.route('/reserve', methods=['GET', 'POST'])
@login_required
def reservePage():
    """Handle both the reservation form and the unsubscription form on one page."""
    errorCode = None
    if request.method == 'POST':
        # Only the reservation form posts a customerID field.
        errorCode = addReserve() if 'customerID' in request.form else removeReserve()
    return render_template('reserve.html', reserveForm=ReserveForm(), unsubscribeForm=UnsubscribeForm(),
                           errorCode=errorCode, isAdmin=config.adminLoggedIn)
@app.route('/routeQuery', methods=['GET', 'POST'])
def routeQueryPage():
    """Travel-route page: list flights, hotels and taxis for a city pair."""
    form = RouteQueryForm()
    flights, hotels, cars = None, None, None
    if form.validate_on_submit():
        flights, hotels, cars = routeQuery()
    return render_template('routeQuery.html', routeQueryForm=form,
                           flightsResults=flights, hotelsResults=hotels, carsResults=cars,
                           Flights=Flights, Hotels=Hotels, Cars=Cars)
@app.route('/customerQuery', methods=['GET', 'POST'])
@login_required
def customerQueryPage():
    """Show a customer's reservations (admins may search any customer)."""
    form = CustomerQueryForm()
    results = customerQuery() if form.validate_on_submit() else None
    return render_template('customerQuery.html', customerQueryForm=form,
                           results=results, Reservations=Reservations, isAdmin=config.adminLoggedIn)
@app.route('/signIn', methods=['GET', 'POST'])
def signInPage():
    """Customer registration page."""
    form = SignInForm()
    errorCode = insertCustomer() if form.validate_on_submit() else None
    return render_template('signIn.html', signInForm=form, errorCode=errorCode,
                           isAdmin=config.adminLoggedIn)
@lm.user_loader
def load_user(idNumber):
    """Flask-Login callback: look up a user by the ID stored in the session."""
    return Customers.query.get(idNumber)
@app.route('/login', methods=['GET', 'POST'])
def loginPage():
    """Log a customer in; grants admin rights when the admin account logs in."""
    loginFailed = False
    form = LoginForm()
    if form.validate_on_submit():
        user = Customers.query.get(form.userName.data)
        # NOTE(review): plaintext password comparison — consider hashing.
        if user and user.password == form.password.data:
            login_user(user, remember=False)
            # NOTE(review): compares against form.myUserName, not form.myUserName.data.
            # If myUserName is a WTForms field (rather than a plain attribute on the
            # form class) this is never True and admin login silently fails — confirm
            # against forms.py.
            if user.IDNumber == form.myUserName:
                config.adminLoggedIn = True
        else:
            loginFailed = True
    return render_template('login.html', loginForm=form, loginFailed=loginFailed,
                           isAdmin=config.adminLoggedIn)
# When @login_required rejects an anonymous user, render the login page
# instead of Flask-Login's default 401 response.
lm.unauthorized_callback = loginPage
@app.route('/logout')
@login_required
def logoutPage():
    """Log the current user out and drop any admin privileges."""
    logout_user()
    config.adminLoggedIn = False
    return render_template('logout.html')
@app.route('/insert', methods=['GET', 'POST'])
@login_required
def insertPage():
    """Admin-only record-insertion page; non-admins are redirected to login."""
    if not config.adminLoggedIn:
        return redirect(url_for('.loginPage'))
    errorCode = insertRecord() if request.method == 'POST' else None
    return render_template('insert.html', insertForm=InsertForm(), errorCode=errorCode)
@app.route('/delete', methods=['GET', 'POST'])
@login_required
def deletePage():
    """Admin-only record-deletion page; non-admins are redirected to login."""
    if not config.adminLoggedIn:
        return redirect(url_for('.loginPage'))
    form = DeleteForm()
    errorCode = deleteRecord() if form.validate_on_submit() else None
    return render_template('delete.html', deleteForm=form, errorCode=errorCode)
|
{"/DBLab02.py": ["/views.py"], "/forms.py": ["/config.py"], "/dbOperations.py": ["/createApp.py", "/config.py"], "/views.py": ["/createApp.py", "/config.py", "/forms.py", "/dbOperations.py"]}
|
14,542
|
fyabc/DBLab02
|
refs/heads/master
|
/config.py
|
# -*- coding: utf-8 -*-
__author__ = 'fyabc'

import os

basedir = os.path.abspath(os.path.dirname(__file__))

CSRF_ENABLED = True
SECRET_KEY = 'hard-string'  # NOTE(review): hardcoded secret — move out of source control
# NOTE(review): database credentials embedded in the URI — prefer environment variables.
SQLALCHEMY_DATABASE_URI = 'mysql://root:fy95102@localhost/DBLab02'
SQLALCHEMY_TRACK_MODIFICATIONS = True

# (model name, display label) pairs used by the query UI.
TableNames = [
    ('Flights', '航班'),
    ('Hotels', '宾馆'),
    ('Cars', '出租车'),
    ('Customers', '用户'),
    ('Reservations', '预订情况'),
]

# The administrator has logged in or not.
adminLoggedIn = False
|
{"/DBLab02.py": ["/views.py"], "/forms.py": ["/config.py"], "/dbOperations.py": ["/createApp.py", "/config.py"], "/views.py": ["/createApp.py", "/config.py", "/forms.py", "/dbOperations.py"]}
|
14,566
|
Pingze-github/pyRequest
|
refs/heads/master
|
/lib/sqlite.py
|
import sqlite3


class ReqLogDB:
    """Tiny sqlite-backed store for request logs (method/url/query/body)."""

    # Column order of the LOG table, used to build result dicts.
    _COLUMNS = ('id', 'method', 'url', 'query', 'body')

    def __init__(self):
        (self.conn, self.cursor) = self.createConn()

    def createConn(self):
        """Open (or create) reqlog.db and ensure the LOG table exists.

        Returns the (connection, cursor) pair used by all other methods.
        """
        conn = sqlite3.connect('reqlog.db')
        print('sqlite 数据库连接成功')
        cursor = conn.cursor()
        cursor.execute('''CREATE TABLE IF NOT EXISTS LOG
            (ID INTEGER PRIMARY KEY AUTOINCREMENT NOT NULL,
            METHOD TEXT NOT NULL,
            URL TEXT NOT NULL,
            QUERY TEXT NOT NULL,
            BODY TEXT NOT NULL);''')
        conn.commit()
        return (conn, cursor)

    def execute(self, sql):
        """Run an arbitrary statement and commit."""
        self.cursor.execute(sql)
        self.conn.commit()

    def _rowToLog(self, row):
        # Map one LOG row tuple onto the dict shape the UI expects.
        return dict(zip(self._COLUMNS, row))

    def insert(self, log):
        """Insert one log dict and return the new row's ID.

        Uses cursor.lastrowid instead of the original extra
        SELECT MAX(ID) round-trip.
        """
        self.cursor.execute(
            'INSERT INTO LOG (METHOD,URL,QUERY,BODY) VALUES (?,?,?,?)',
            (log['method'], log['url'], log['query'], log['body']))
        self.conn.commit()
        return self.cursor.lastrowid

    def selectAll(self):
        """Return every log as a list of dicts, newest first."""
        rows = self.cursor.execute('SELECT * FROM LOG ORDER BY ID DESC')
        return [self._rowToLog(row) for row in rows]

    def selectOne(self, id):
        """Return the log with the given id.

        Fixed: the original built the WHERE clause by string concatenation
        (SQL-injectable); use a parameterized query instead.
        """
        rows = self.cursor.execute('SELECT * FROM LOG WHERE ID=?', (id,))
        return self._rowToLog(list(rows)[0])
|
{"/index.py": ["/lib/sqlite.py"]}
|
14,567
|
Pingze-github/pyRequest
|
refs/heads/master
|
/index.py
|
import os
import sys
import time
import re
import json
from urllib.parse import urlparse
from collections import OrderedDict
import requests
from PyQt5.QtWidgets import QApplication,QWidget,QVBoxLayout,QHBoxLayout,QTabWidget,QPushButton,QTextEdit,QPlainTextEdit,QLineEdit,QLabel,QComboBox,QListWidget,QListWidgetItem
from PyQt5.QtWebEngineWidgets import QWebEngineView
from PyQt5.QtCore import QUrl,QThread,pyqtSignal
from PyQt5.QtGui import QIcon,QFont,QFontDatabase
# Global exception hook: print uncaught exceptions to stderr
# (otherwise a Qt event-handler crash can vanish silently).
def printErrors(exc_type, exc_value, traceback):
    """sys.excepthook replacement.

    Fix: the original wrote the raw traceback *object* to stderr, which
    raises a TypeError (write expects str); format the exception into
    text first.
    """
    import traceback as traceback_module  # local alias: the parameter shadows the module name
    sys.stderr.write(''.join(traceback_module.format_exception(exc_type, exc_value, traceback)))
sys.excepthook = printErrors
from lib.sqlite import ReqLogDB
reqLogDB = ReqLogDB()
# 超时
# 请求详细信息
# 支持多方法
# 支持动态body/query
# TODO 支持Headers
# 编码问题
# 美化、字体
# TODO 增加记录
# TODO 增加侧边栏
# TODO debug 报错
# 成功打包icon和字体
# TODO 增加菜单
# 增加查看快捷键
# url和query联动
# TODO 尾部状态栏
# TODO Request标签页 自动切换
# Global variables
dataPath = 'data.pkl'  # NOTE(review): appears unused in this module — confirm before removing
logs = reqLogDB.selectAll()  # request history (newest first) loaded once at startup
def jsonPretty(jstr):
    """Re-serialize a JSON string with 2-space indentation.

    Fix: the original called json.dump (which serializes *to a file object*)
    instead of json.dumps, so every call raised a TypeError.
    """
    return json.dumps(json.loads(jstr), indent=2)
class RequestThread(QThread):
    """Worker thread that performs one HTTP request off the UI thread.

    Emits finishSignal with a dict of {'url', 'text', 'stats'} when done.
    """
    finishSignal = pyqtSignal(dict)

    def __init__(self, window):
        super().__init__()
        self._window = window
        # Pretend to be a desktop Chrome browser.
        self.headers = {
            'user-agent': 'Mozilla/5.0 (Windows NT 6.1; WOW64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/60.0.3112.113 Safari/537.36'
        }

    def __request(self, options):
        # NOTE(review): reads the module-level `window` global for body/query
        # instead of self._window — same object in practice, but inconsistent.
        response = requests.request(options['method'], options['url'], headers=options['headers'], data=window.body, params=window.query)
        return response

    def run(self):
        """Thread entry point: send the request and emit the result."""
        start = time.time()
        url = self._window.reqUrlInput.text()
        text = ''
        try:
            print('Request Sending:', url)
            method = window.reqMethodCombo.currentText()
            print(method)
            response = self.__request({
                'url': url,
                'method': method,
                'headers': self.headers
            })
            # If the body declares a charset (e.g. in a meta tag), honor it.
            charsetPatt = re.compile('charset=["\']{0,1}([A-Za-z0-9\-]+)["\']', re.IGNORECASE)
            matches = charsetPatt.search(str(response.text))
            if matches :
                response.encoding = matches.group(1)
            text = response.text
            print(response.__getattribute__('encoding'))
            print(response.status_code)
            print(response.headers)
            print(response.cookies)
            # Human-readable summary shown in the "res" tab.
            stats = 'Status: Success \n'
            stats += '{}: {}\n'.format('Code', response.status_code)
            stats += '{}: {:.3f}s\n'.format('ResponseTime', time.time() - start)
            stats += '{}: {}\n'.format('Encoding', response.encoding)
            stats += '{}: {}\n'.format('Headers', json.dumps(dict(response.headers), indent=2))
            print('Request Success:', response.url)
        except Exception as e:
            #print('Request Failed:', e)
            stats = 'Status: Failed \n' + 'Error: ' + str(e)
        print('请求耗时:', time.time() - start)
        # Best effort: normalize JSON bodies for display; non-JSON passes through.
        try :
            text = json.dumps(json.loads(text), ensure_ascii=False)
        except:
            pass
        sigData = {
            'url': url,
            'text': text,
            'stats': stats
        }
        self.finishSignal.emit(sigData)
def formatParamParse(paramText):
    """Parse 'key value' lines into an ordered mapping.

    Lines that do not split into exactly two whitespace-separated pieces
    are silently skipped.
    """
    param = OrderedDict()
    for line in paramText.split('\n'):
        pieces = line.split()
        if len(pieces) == 2:
            key, value = pieces
            param[str(key)] = str(value)
    return param
def formatParamStringify(param):
    """Inverse of formatParamParse: render a mapping as newline-terminated 'key value' lines."""
    return ''.join('{} {}\n'.format(key, param[key]) for key in param)
def paramParse(paramStr):
    """Parse an 'a=1&b=2' query string into an ordered mapping.

    Pieces without '=', or whose '=' is the first character, are dropped;
    the value keeps everything after the first '='.
    """
    param = OrderedDict()
    for piece in paramStr.split('&'):
        key, sep, value = piece.partition('=')
        if sep and key:
            param[key] = value
    return param
def urlencodeFromMap(m):
    """Render a mapping as 'k=v&k2=v2' (values stringified, no percent-escaping,
    matching the original behavior).

    Idiom: '&'.join over a generator instead of appending with a trailing
    separator and slicing it off.
    """
    return '&'.join('{}={}'.format(k, m[k]) for k in m)
class Window(QWidget):
    """Main application window: method/URL input, query & body editors,
    response tabs and a request-history sidebar."""

    # Class-level defaults, overwritten per instance as the user edits.
    query = {}
    reqStatsObj = {}
    zoom = 1.2

    def __init__(self):
        super().__init__()
        self.__render()
        self.show()
        self.reqUrlInput.setFocus()
        # Worker thread that performs the HTTP request off the UI thread.
        self.requestThread = RequestThread(self)
        self.requestThread.finishSignal.connect(self.__setRes)

    # Render components
    def __render(self):
        self.setWindowTitle('PyRequest')
        self.__renderSelf()
        self.__renderMain()
        self.__renderLeft()
        # Fire change signals once so the dependent panes initialize.
        self.reqUrlInput.textEdited.emit(self.reqUrlInput.text())
        self.queryEdit.textChanged.emit()
        self.bodyEdit.textChanged.emit()

    def __renderSelf(self):
        # Two-column layout: history sidebar on the left, editors on the right.
        self.leftLayout = QVBoxLayout()
        self.mainLayout = QVBoxLayout()
        layout = QHBoxLayout(self)
        layout.addLayout(self.leftLayout)
        layout.addLayout(self.mainLayout)
        self.setWindowIcon(QIcon('assets/icon.ico'))
        self.resize(900 * self.zoom, 600 * self.zoom)

    def __renderLeft(self):
        # History sidebar listing stored request logs (newest first).
        self.reqList = QListWidget()
        self.reqList.itemClicked.connect(self.__logItemClicked)
        self.reqList.setMaximumWidth(300)
        for log in logs:
            logItem = QListWidgetItem(log['method'] + ' ' + log['url'])
            logItem.setData(99, log['id'])  # item data role 99 carries the DB id
            self.reqList.addItem(logItem)
        self.leftLayout.addWidget(self.reqList)

    def __logItemClicked(self, item):
        # Restore a stored request from the sidebar into the editors.
        id = item.data(99)
        log = reqLogDB.selectOne(id)
        print(log)
        self.reqMethodCombo.setCurrentText(log['method'])
        self.reqUrlInput.setText(log['url'])
        self.queryEdit.setPlainText(log['query'])
        self.bodyEdit.setPlainText(log['body'])
        self.__clearRes()

    def __appendLog(self, log):
        # Prepend the newest request to the sidebar.
        logItem = QListWidgetItem(log['method'] + ' ' + log['url'])
        logItem.setData(99, log['id'])
        self.reqList.insertItem(0, logItem)

    def __renderMain(self):
        # input row: method selector, URL field, SEND button
        self.reqMethodCombo = QComboBox()
        self.reqMethodCombo.addItems(['GET', 'POST'])
        self.reqMethodCombo.currentTextChanged.connect(self.__methodChange)
        self.reqUrlInput = QLineEdit()
        self.reqUrlInput.setText('http://ip.taobao.com/service/getIpInfo.php?ip=59.41.95.234')
        self.reqUrlInput.textEdited.connect(self.__urlChanged)
        self.reqButton = QPushButton()
        self.reqButton.setText('SEND')
        self.reqButton.clicked.connect(self.__request)
        inputLayout = QHBoxLayout()
        inputLayout.addWidget(self.reqMethodCombo)
        inputLayout.addWidget(self.reqUrlInput)
        inputLayout.addWidget(self.reqButton)
        # body & query editors, side by side
        self.queryLabel = QLabel('Query')
        self.queryEdit = QPlainTextEdit()
        self.queryEdit.textChanged.connect(self.__queryEditChanged)
        self.bodyLabel = QLabel('Body')
        self.bodyEdit = QPlainTextEdit()
        self.bodyEdit.textChanged.connect(self.__bodyEditChanged)
        queryLayout = QVBoxLayout()
        queryLayout.addWidget(self.queryLabel)
        queryLayout.addWidget(self.queryEdit)
        bodyLayout = QVBoxLayout()
        bodyLayout.addWidget(self.bodyLabel)
        bodyLayout.addWidget(self.bodyEdit)
        # Body is hidden until a non-GET method is selected (see __methodChange).
        self.bodyEdit.hide()
        self.bodyLabel.hide()
        paramLayout = QHBoxLayout()
        paramLayout.addLayout(queryLayout)
        paramLayout.addLayout(bodyLayout)
        self.mainLayout.addLayout(inputLayout)
        self.mainLayout.addLayout(paramLayout)
        # response tabs
        self.resTab = self.__createResTab()
        self.mainLayout.addWidget(self.resTab)

    def __createResTab(self):
        # Tabs: request summary, response summary, raw text, parsed JSON, rendered HTML.
        resTab = QTabWidget()
        self.reqStats = QTextEdit()
        self.resStats = QTextEdit()
        self.resText = QTextEdit()
        self.resJSON = QTextEdit()
        self.resView = QWebEngineView()
        resTab.addTab(self.reqStats, 'req')
        resTab.addTab(self.resStats, 'res')
        resTab.addTab(self.resText, 'text')
        resTab.addTab(self.resJSON, 'json')
        resTab.addTab(self.resView, 'view')
        return resTab

    # Launch the request (runs on the worker thread)
    def __request(self):
        self.__clearRes()
        bodyRaw = self.bodyEdit.toPlainText()
        self.body = formatParamParse(bodyRaw)
        self.resView.setHtml('')
        # self.resView.setUrl(QUrl(self.reqUrlInput.text()))
        self.requestThread.start()

    # Handle the result dict emitted by RequestThread.finishSignal
    def __setRes(self, res):
        # Auto-switch from the "req" tab to the "res" tab when a response arrives.
        if (self.resTab.currentIndex() == 0):
            self.resTab.setCurrentIndex(1)
        self.resStats.setPlainText(res['stats'])
        self.resText.setPlainText(res['text'])
        try :
            jsonstr = json.dumps(json.loads(res['text']), indent=2, ensure_ascii=False)
            self.resJSON.setPlainText(jsonstr)
        except Exception as e:
            print(e)
            self.resJSON.setPlainText('Not a JSON string')
        self.resView.setHtml(res['text'])
        self.__log()

    # Persist the request into the history database and sidebar
    def __log(self):
        log = {}
        log['method'] = self.reqMethodCombo.currentText()
        log['url'] = self.reqUrlInput.text()
        log['query'] = self.queryEdit.toPlainText()
        log['body'] = self.bodyEdit.toPlainText()
        id = reqLogDB.insert(log)
        log['id'] = id
        self.__appendLog(log)

    # Clear all response panes
    def __clearRes(self):
        self.resStats.setText('')
        self.resText.setText('')
        self.resView.setHtml('')
        self.resJSON.setText('')

    # Method switch: the body editor is only relevant for POST
    def __methodChange(self, text):
        if text == 'GET':
            self.bodyEdit.hide()
            self.bodyLabel.hide()
        else:
            self.bodyEdit.show()
            self.bodyLabel.show()

    # Keyboard shortcuts
    def keyPressEvent(self, event):
        print(event.key())
        key = event.key()
        # presumably Qt.Key_F5 / Key_Return / Key_Enter — TODO confirm against Qt key codes
        if key in (16777268, 16777220, 16777221):
            self.__request()
        # keys '1'..'5' select a response tab
        if key >= 49 and key <= 53:
            self.resTab.setCurrentIndex(key - 49)
        if key == 71:  # 'G'
            self.reqMethodCombo.setCurrentText('GET')
        if key == 80:  # 'P'
            self.reqMethodCombo.setCurrentText('POST')
        if key == 87:  # 'W' toggles GET <-> POST
            if (self.reqMethodCombo.currentText() == 'GET'):
                self.reqMethodCombo.setCurrentText('POST')
            elif (self.reqMethodCombo.currentText() == 'POST'):
                self.reqMethodCombo.setCurrentText('GET')

    # Keep the query editor, body editor, URL and the "req" stats pane in sync
    def __queryEditChanged(self):
        queryRaw = self.queryEdit.toPlainText()
        self.__querySetFromFormat(queryRaw)

    def __bodyEditChanged(self):
        bodyRaw = self.bodyEdit.toPlainText()
        self.__reqStatsChanged({'body': formatParamParse(bodyRaw)})

    def __urlChanged(self, url):
        self.__querySetFromUrl(url)

    def __querySetFromFormat(self, queryRaw):
        # Query editor changed: rebuild the URL's query string from it.
        query = formatParamParse(queryRaw)
        self.query = query
        self.__reqStatsChanged({'query': query})
        queryStr = urlencodeFromMap(query)
        if queryStr:
            url = self.reqUrlInput.text()
            urlParts = urlparse(url)
            url = '{}://{}{}?{}'.format(urlParts.scheme, urlParts.netloc, urlParts.path, queryStr)
            self.reqUrlInput.setText(url)
            self.__reqStatsChanged({'url': url})

    def __querySetFromUrl(self, url):
        # URL edited by hand: reflect its query string into the query editor.
        queryStr = urlparse(url).query
        query = paramParse(queryStr)
        self.__reqStatsChanged({'url': url})
        self.__reqStatsChanged({'query': query})
        queryFormat = formatParamStringify(query)
        # Block signals to avoid a feedback loop with __queryEditChanged.
        self.queryEdit.blockSignals(True)
        self.queryEdit.setPlainText(queryFormat)
        self.queryEdit.blockSignals(False)

    def __reqStatsChanged(self, things):
        # Merge the changed fields into the accumulated request description
        # and re-render the "req" tab.
        for k in things:
            thing = things[k]
            self.reqStatsObj[k] = thing
        reqStatsStr = json.dumps(self.reqStatsObj, indent=2, ensure_ascii=False)
        self.reqStats.setPlainText(reqStatsStr)
def getfont():
    """Load the bundled monospace font.

    Returns a configured QFont, or None when the font file fails to load.
    """
    fontId = QFontDatabase.addApplicationFont('assets/MSYHMONO.ttf')
    if fontId == -1:
        return None
    family = QFontDatabase.applicationFontFamilies(fontId)[0]
    font = QFont()
    font.setFamily(family)
    font.setPixelSize(12)
    return font
# Application bootstrap: build the Qt app, apply the bundled font, show the window.
app = QApplication(sys.argv)
font = getfont()
if font:
    app.setFont(font)
window = Window()
sys.exit(app.exec_())
|
{"/index.py": ["/lib/sqlite.py"]}
|
14,585
|
sumanthm-git/mongo_pipe
|
refs/heads/master
|
/csv_json.py
|
import pandas as pd
import numpy as np
from pandas.io import json

# Read the sample CSV; keep every column as string to avoid dtype guessing.
df = pd.read_csv('sales.csv', dtype=object)
# Build the _id field (1-based row number) that MongoDB will use as the unique key.
df.index = np.arange(1, len(df)+1)
df['_id'] = df.index
copy_index = df.pop('_id')
df.insert(0,'_id',copy_index)
# Convert object columns to datetime64[ns].
df['Transaction_date'] = pd.to_datetime(df['Transaction_date'])
df['Account_Created'] = pd.to_datetime(df['Account_Created'])
df['Last_Login'] = pd.to_datetime(df['Last_Login'])
#print(df.dtypes)
#print(df)
# Encode df as records-oriented JSON using the ISO date format.
res = df.to_json(orient='records',date_format='iso')
# loads parses that JSON string into Python objects (a list of row dicts).
parsed = json.loads(res)
#print(parsed)
# dumps renders those objects back to a pretty-printed string.
str_res = json.dumps(parsed, indent=4)
#print(str_res)
|
{"/json_into_mongo.py": ["/csv_json.py"]}
|
14,586
|
sumanthm-git/mongo_pipe
|
refs/heads/master
|
/json_into_mongo.py
|
# Importing csv_json runs its whole pipeline; cj.parsed is the list of row dicts.
import csv_json as cj
import pymongo

# Insert every parsed sales row into the local mydatabase.sales collection.
myclient = pymongo.MongoClient("mongodb://localhost:27017/")
mydb = myclient["mydatabase"]
mycol = mydb["sales"]
mycol.insert_many(cj.parsed)
#print(cj.parsed)
|
{"/json_into_mongo.py": ["/csv_json.py"]}
|
14,587
|
steven1227/Drello
|
refs/heads/master
|
/config.py
|
import os

basedir = os.path.abspath(os.path.dirname(__file__))

WTF_CSRF_ENABLED = True
DATABASE = '/tmp/drello.db'
DEBUG = True  # NOTE(review): debug mode must be off in production
SECRET_KEY = 'development key'  # NOTE(review): development-only secret
USERNAME = 'admin'
PASSWORD = 'default'
# MongoEngine connection settings consumed by app/__init__.py.
MONGODB_SETTINGS = {'DB': "Drello"}
|
{"/app/__init__.py": ["/app/models.py"], "/app/models.py": ["/app/__init__.py"]}
|
14,588
|
steven1227/Drello
|
refs/heads/master
|
/app/__init__.py
|
from flask import Flask
from flask.ext.mongoengine import MongoEngine

# Application instance and MongoDB connection shared across the package.
app = Flask(__name__)
app.config.from_object('config')
db = MongoEngine(app)

# Imported last so views/models can import `app` and `db` from this module
# without a circular-import failure.
from app.views import *
from app.models import *
|
{"/app/__init__.py": ["/app/models.py"], "/app/models.py": ["/app/__init__.py"]}
|
14,589
|
steven1227/Drello
|
refs/heads/master
|
/drello.py
|
# all the imports
import sqlite3
from contextlib import closing
from flask import Flask, request, session, g, redirect, url_for, \
abort, render_template, flash, jsonify
# configuration
DATABASE = '/tmp/flaskr.db'
DEBUG = True
SECRET_KEY = 'development key'  # NOTE(review): development-only value
USERNAME = 'admin'
PASSWORD = 'default'
# create our little application :)
app = Flask(__name__)
# from_object(__name__) pulls the UPPERCASE constants above into app.config.
app.config.from_object(__name__)
# email: email, password: password
@app.route('/')
def index():
    """Serve the static single-page front end."""
    return app.send_static_file('index.html')
@app.route('/api/login', methods=['POST'])
def login():
    """JSON login endpoint; sets session['loggedin'] on success.

    NOTE(review): credentials are hardcoded, and the password is compared
    against the integer 123 — a client sending the JSON string "123" would
    not match. Confirm the intended payload type before touching this.
    """
    # todo database
    json_data = request.json
    if json_data['email'] == 'rendong_liu@hotmail.com' and json_data['password']==123:
        session['loggedin'] = True
        status = True
    else:
        status = False
    return jsonify({'result':status})
@app.route('/api/logout')
def logout():
    """Clear the session login flag; always reports success."""
    session.pop('loggedin', None)
    return jsonify({'result': 'success'})
# Run the development server only when executed directly.
if __name__ == '__main__':
    app.run()
|
{"/app/__init__.py": ["/app/models.py"], "/app/models.py": ["/app/__init__.py"]}
|
14,590
|
steven1227/Drello
|
refs/heads/master
|
/app/models.py
|
from app import db
from flask import url_for
import datetime
class Comment(db.EmbeddedDocument):
    """A comment embedded inside a Post document."""
    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
    body = db.StringField(verbose_name="Comment", required=True)
    author = db.StringField(verbose_name="Name", max_length=255, required=True)
class Post(db.Document):
    """A blog-style post with embedded comments, ordered newest first."""
    created_at = db.DateTimeField(default=datetime.datetime.now, required=True)
    title = db.StringField(max_length=255, required=True)
    slug = db.StringField(max_length=255, required=True)
    body = db.StringField(required=True)
    comments = db.ListField(db.EmbeddedDocumentField('Comment'))

    def get_absolute_url(self):
        # NOTE(review): Flask's url_for has no 'kwargs' parameter (that is
        # Django style) — this would pass kwargs as a query argument. Likely
        # should be url_for('post', slug=self.slug); confirm against the view.
        return url_for('post', kwargs={"slug": self.slug})

    def __unicode__(self):
        return self.title

    def __repr__(self):
        return '<Post %r>' % (self.body)

    meta = {
        'allow_inheritance': True,
        'indexes': ['-created_at', 'slug'],
        'ordering': ['-created_at']
    }
class User(db.DynamicDocument):
    """Base account record; account_type distinguishes the role."""
    account_type = db.StringField(max_length=255, required=True)
    email = db.EmailField(required=True,unique=True)
    password = db.StringField(max_length=255, required=True)  # NOTE(review): stored in plaintext — hash before persisting
    name = db.StringField(max_length=255, required=True)
    image = db.ImageField(size=(500,500,True))
    address = db.StringField(max_length=255, required=True)
    phone = db.StringField(max_length=255,unique=True)
# Can be integrated into User
#Todo
class Patient(db.Document):
    """A patient record tied to an owning User account."""
    # NOTE(review): ReferenceField normally takes a Document class, not
    # db.ObjectId — confirm this resolves correctly with this mongoengine version.
    user_id = db.ReferenceField(db.ObjectId,required=True)
    sex = db.StringField(max_length=255, required=True)
    birthdate = db.DateTimeField(required=True);
    name = db.StringField(max_length=255, required=True)
    blood = db.ReferenceField(db.ObjectId)
class Admin(db.Document):
    """An administrator account referencing a User."""
    user_id = db.ReferenceField(db.ObjectId)
class Department(db.Document):
    """A hospital department."""
    name = db.StringField(max_length=255, required=True)
    description = db.StringField(max_length=255, required=True)
class Appointment(db.Document):
    """A dated appointment for a patient."""
    date = db.DateTimeField(required=True);
    patient = db.EmbeddedDocumentField('Patient')
class Precription(db.Document):
    """A prescription for a patient.

    NOTE(review): name is missing an 's' ("Prescription"), but it is
    referenced by this spelling elsewhere (Doctor.precriptions) — do not
    rename without updating all references.
    """
    date = db.DateTimeField(required=True);
    patient = db.EmbeddedDocumentField('Patient')
class Bed(db.Document):
    """A hospital bed with its current allotment."""
    bed_number = db.IntField(required=True)
    bed_type = db.StringField(max_length=255, required=True)
    patient = db.EmbeddedDocumentField('Patient',required=True)
    allotment_date = db.DateTimeField(required=True)
    discharge_date = db.DateTimeField(required=True)
class BloodBankDonner(db.Document):
    """A blood donor.  NOTE(review): "Donner" is likely a typo for "Donor"."""
    name = db.StringField(max_length=255, required=True)
    sex = db.StringField(max_length=255, required=True)
    blood = db.ReferenceField(db.ObjectId)
    last_donation_date = db.DateTimeField(required=True)
class BloodBankStatus(db.Document):
    """Current stock level per blood group."""
    blood_group = db.StringField(max_length=255, required=True,unique=True)
    status = db.IntField(required=True)
class Report(db.Document):
    """A dated medical report for a patient."""
    type = db.StringField(max_length=255, required=True)
    description = db.StringField(max_length=255, required=True)
    date = db.DateTimeField(required=True)
    patient = db.EmbeddedDocumentField('Patient')
class Message(db.Document):
    """A message sent by a user."""
    from_who = db.EmbeddedDocumentField('User')
    content = db.StringField(max_length=255, required=True)
class Event(db.Document):
    """A dated calendar event."""
    date = db.DateTimeField(required=True)
    content = db.StringField(max_length=255, required=True)
# Can be integrated into User
class Doctor(db.Document):
    """A doctor: department plus the collections of work items assigned to them."""
    # Fixed typo: the keyword is `required`, not `require` (the misspelled kwarg
    # was presumably ignored), matching the Patient model above.
    user_id = db.ReferenceField(db.ObjectId, required=True)
    department = db.EmbeddedDocumentField('Department')
    appointments = db.ListField(db.EmbeddedDocumentField('Appointment'))
    precriptions = db.ListField(db.EmbeddedDocumentField('Precription'))
    patients = db.ListField(db.EmbeddedDocumentField('Patient'))
    reports = db.ListField(db.EmbeddedDocumentField('Report'))
    messages = db.ListField(db.EmbeddedDocumentField('Message'))
class Invoice(db.Document):
    """A billing invoice issued to a patient."""
    invoice_number = db.IntField(required=True)
    title = db.StringField(max_length=255, required=True)
    patient = db.EmbeddedDocumentField('Patient')
    creation_date = db.DateTimeField(required=True)
    due_date = db.DateTimeField(required=True)
    vat_per = db.IntField(required=True)  # VAT percentage
    discount_amount = db.IntField(required=True)
    status = db.StringField(max_length=255, required=True)
# Can be integrated into User
class accountant(db.Document):
    """An accountant managing invoices.

    NOTE(review): class name breaks the file's PascalCase convention; left
    unchanged because renaming would break external references.
    """
    # Fixed typo: `required`, not `require` (see Patient for the correct form).
    user_id = db.ReferenceField(db.ObjectId, required=True)
    invoice = db.ListField(db.EmbeddedDocumentField('Invoice'))
# Can be integrated into User
class Nurse(db.Document):
    """A nurse with assigned patients and reports."""
    # Fixed typo: `required`, not `require` (see Patient for the correct form).
    user_id = db.ReferenceField(db.ObjectId, required=True)
    patients = db.ListField(db.EmbeddedDocumentField('Patient'))
    reports = db.ListField(db.EmbeddedDocumentField('Report'))

# TODO: Pharmacist and Receptionist models
|
{"/app/__init__.py": ["/app/models.py"], "/app/models.py": ["/app/__init__.py"]}
|
14,603
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/copy_.py
|
import logging
import os
import sys
from argparse import ArgumentParser, Namespace
from typing import cast
import tilecloud_chain.configuration
from tilecloud_chain import Count, DropEmpty, HashDropper, TileGeneration, add_common_options
from tilecloud_chain.format import duration_format, size_format
logger = logging.getLogger(__name__)
class Copy:
    """Copy the tiles from a cache to an other."""

    # Size/count accumulator of the last run, set by _copy.
    count = None

    def copy(
        self,
        options: Namespace,
        gene: TileGeneration,
        layer: str,
        source: str,
        destination: str,
        task_name: str,
    ) -> None:
        """Public entry point: copy one layer's tiles from `source` to `destination`."""
        self._copy(options, gene, layer, source, destination, task_name)

    def _copy(
        self,
        options: Namespace,
        gene: TileGeneration,
        layer_name: str,
        source: str,
        dest: str,
        task_name: str,
    ) -> None:
        """Build and consume the tilestore pipeline that copies one layer."""
        # disable metatiles
        assert gene.config_file
        config = gene.get_config(gene.config_file)
        layer = config.config["layers"][layer_name]
        # NOTE(review): cast() returns its argument unchanged at runtime;
        # this line simply performs layer["meta"] = False.
        cast(tilecloud_chain.configuration.LayerWms, layer)["meta"] = False
        count_tiles_dropped = Count()
        gene.create_log_tiles_error(layer_name)
        source_tilestore = gene.get_tilesstore(source)
        dest_tilestore = gene.get_tilesstore(dest)
        gene.init_tilecoords(config, layer_name)
        gene.add_geom_filter()
        gene.add_logger()
        gene.get(source_tilestore, "Get the tiles")
        gene.imap(DropEmpty(gene))
        # Discard tiles with certain content
        if "empty_tile_detection" in layer:
            empty_tile = layer["empty_tile_detection"]
            gene.imap(
                HashDropper(
                    empty_tile["size"], empty_tile["hash"], store=dest_tilestore, count=count_tiles_dropped
                )
            )
        if options.process:
            gene.process(options.process)
        gene.imap(DropEmpty(gene))
        self.count = gene.counter_size()
        gene.put(dest_tilestore, "Store the tiles")
        # consume() drives the whole lazy pipeline built above.
        gene.consume()
        if not options.quiet:
            print(
                f"""The tile {task_name} of layer '{layer_name}' is finish
Nb {task_name} tiles: {self.count.nb}
Nb errored tiles: {gene.error}
Nb dropped tiles: {count_tiles_dropped.nb}
Total time: {duration_format(gene.duration)}
Total size: {size_format(self.count.size)}
Time per tile: {(gene.duration / self.count.nb * 1000).seconds if self.count.nb != 0 else 0} ms
Size per tile: {self.count.size / self.count.nb if self.count.nb != 0 else -1} o
"""
            )
def main() -> None:
    """Copy the tiles from a cache to an other (command-line entry point)."""
    try:
        parser = ArgumentParser(
            description="Used to copy the tiles from a cache to an other", prog=sys.argv[0]
        )
        add_common_options(parser, near=False, time=False, dimensions=True, cache=False)
        parser.add_argument("--process", dest="process", metavar="NAME", help="The process name to do")
        parser.add_argument("source", metavar="SOURCE", help="The source cache")
        parser.add_argument("dest", metavar="DEST", help="The destination cache")
        options = parser.parse_args()
        gene = TileGeneration(options.config, options)
        assert gene.config_file
        config = gene.get_config(gene.config_file)
        if options.layer:
            copy = Copy()
            copy.copy(options, gene, options.layer, options.source, options.dest, "copy")
        else:
            # No explicit layer: fall back to the configured default layers,
            # or every layer when none are configured.
            layers = (
                config.config["generation"]["default_layers"]
                if "default_layers" in config.config["generation"]
                else config.config["layers"].keys()
            )
            for layer in layers:
                copy = Copy()
                copy.copy(options, gene, layer, options.source, options.dest, "copy")
    except SystemExit:
        raise
    except:  # pylint: disable=bare-except
        logger.exception("Exit with exception")
        # Under tests we want the real traceback instead of a clean exit code.
        if os.environ.get("TESTS", "false").lower() == "true":
            raise
        sys.exit(1)
def process() -> None:
    """Run a named post-process over a cache's tiles, in place (source == dest)."""
    try:
        parser = ArgumentParser(
            description="Used to copy the tiles from a cache to an other", prog=sys.argv[0]
        )
        add_common_options(parser, near=False, time=False, dimensions=True)
        parser.add_argument("process", metavar="PROCESS", help="The process name to do")
        options = parser.parse_args()
        gene = TileGeneration(options.config, options, multi_thread=False)
        copy = Copy()
        if options.layer:
            copy.copy(options, gene, options.layer, options.cache, options.cache, "process")
        else:
            assert gene.config_file
            config = gene.get_config(gene.config_file)
            # Configured default layers, or every layer when none are set.
            layers_name = (
                config.config["generation"]["default_layers"]
                if "default_layers" in config.config.get("generation", {})
                else config.config["layers"].keys()
            )
            for layer in layers_name:
                copy.copy(options, gene, layer, options.cache, options.cache, "process")
    except SystemExit:
        raise
    except:  # pylint: disable=bare-except
        logger.exception("Exit with exception")
        sys.exit(1)
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,604
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/database_logger.py
|
import logging
import sys
import time
import psycopg2.sql
from prometheus_client import Summary
import tilecloud_chain.configuration
from tilecloud import Tile
_LOGGER = logging.getLogger(__name__)
_INSERT_SUMMARY = Summary("tilecloud_chain_database_logger", "Number of database inserts", ["layer"])
class DatabaseLoggerCommon:
"""Log the generated tiles in a database."""
def __init__(self, config: tilecloud_chain.configuration.Logging, daemon: bool):
db_params = config["database"]
while True:
try:
self.connection = psycopg2.connect(
dbname=db_params["dbname"],
host=db_params.get("host"),
port=db_params.get("port"),
user=db_params.get("user"),
password=db_params.get("password"),
)
break
except psycopg2.OperationalError:
_LOGGER.warning("Failed connecting to the database. Will try again in 1s", exc_info=True)
if daemon:
time.sleep(1)
else:
sys.exit(2)
if "." in db_params["table"]:
schema, table = db_params["table"].split(".")
else:
schema = "public"
table = db_params["table"]
with self.connection.cursor() as cursor:
cursor.execute(
"SELECT EXISTS(SELECT 1 FROM pg_tables WHERE schemaname=%s AND tablename=%s)", (schema, table)
)
schema = psycopg2.extensions.quote_ident(schema, self.connection)
table = psycopg2.extensions.quote_ident(table, self.connection)
if not cursor.fetchone()[0]:
try:
cursor.execute(
psycopg2.sql.SQL(
"CREATE TABLE {}.{} ("
" id BIGSERIAL PRIMARY KEY,"
" layer CHARACTER VARYING(80) NOT NULL,"
" run INTEGER NOT NULL,"
" action CHARACTER VARYING(7) NOT NULL,"
" tile TEXT NOT NULL,"
" UNIQUE (layer, run, tile))"
).format(psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table))
)
self.connection.commit()
except psycopg2.DatabaseError:
logging.exception("Unable to create table %s.%s", schema, table)
sys.exit(1)
else:
try:
cursor.execute(
psycopg2.sql.SQL(
"INSERT INTO {}.{}(layer, run, action, tile) VALUES (%s, %s, %s, %s)"
).format(psycopg2.sql.Identifier(schema), psycopg2.sql.Identifier(table)),
("test_layer", -1, "test", "-1x-1"),
)
except psycopg2.DatabaseError:
logging.exception("Unable to insert logging data into %s.%s", schema, table)
sys.exit(1)
finally:
self.connection.rollback()
self.schema = schema
self.table = table
class DatabaseLoggerInit(DatabaseLoggerCommon):
"""Log the generated tiles in a database."""
def __init__(self, config: tilecloud_chain.configuration.Logging, daemon: bool) -> None:
super().__init__(config, daemon)
with self.connection.cursor() as cursor:
cursor.execute(
psycopg2.sql.SQL("SELECT COALESCE(MAX(run), 0) + 1 FROM {}.{}").format(
psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)
)
)
(self.run,) = cursor.fetchone()
def __call__(self, tile: Tile) -> Tile:
tile.metadata["run"] = self.run
return tile
class DatabaseLogger(DatabaseLoggerCommon):
"""Log the generated tiles in a database."""
def __call__(self, tile: Tile) -> Tile:
if tile is None:
_LOGGER.warning("The tile is None")
return None
if tile.error:
action = "error"
elif tile.data:
action = "create"
else:
action = "delete"
layer = tile.metadata.get("layer", "- No layer -")
run = tile.metadata.get("run", -1)
with _INSERT_SUMMARY.labels(layer).time():
with self.connection.cursor() as cursor:
try:
cursor.execute(
psycopg2.sql.SQL(
"INSERT INTO {} (layer, run, action, tile) "
"VALUES (%(layer)s, %(run)s, %(action)s::varchar(7), %(tile)s)"
).format(psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)),
{"layer": layer, "action": action, "tile": str(tile.tilecoord), "run": run},
)
except psycopg2.IntegrityError:
self.connection.rollback()
cursor.execute(
psycopg2.sql.SQL(
"UPDATE {} SET action = %(action)s "
"WHERE layer = %(layer)s AND run = %(run)s AND tile = %(tile)s"
).format(psycopg2.sql.Identifier(self.schema), psycopg2.sql.Identifier(self.table)),
{"layer": layer, "action": action, "tile": str(tile.tilecoord), "run": run},
)
self.connection.commit()
return tile
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,605
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/controller.py
|
import logging
import math
import os
import pkgutil
import sys
from argparse import ArgumentParser
from copy import copy
from hashlib import sha1
from io import BytesIO, StringIO
from math import exp, log
from typing import IO, List, Literal, Optional, Union, cast
from urllib.parse import urlencode, urljoin
import botocore.exceptions
import requests
import ruamel.yaml
from azure.core.exceptions import ResourceNotFoundError
from azure.identity import DefaultAzureCredential
from azure.storage.blob import BlobServiceClient, ContentSettings
from bottle import jinja2_template
from PIL import Image
from prometheus_client import Summary
import tilecloud.store.redis
import tilecloud.store.s3
import tilecloud_chain.configuration
from tilecloud.lib.PIL_ import FORMAT_BY_CONTENT_TYPE
from tilecloud_chain import (
DatedConfig,
TileGeneration,
add_common_options,
get_queue_store,
get_tile_matrix_identifier,
)
_LOGGER = logging.getLogger(__name__)
_GET_STATUS_SUMMARY = Summary("tilecloud_chain_get_status", "Number of get_stats", ["type", "queue"])
def main(args: Optional[List[str]] = None, out: Optional[IO[str]] = None) -> None:
"""Generate the contextual file like the legends."""
del out
try:
parser = ArgumentParser(
description="Used to generate the contextual file like the capabilities, the legends, "
"the OpenLayers example",
prog=args[0] if args else sys.argv[0],
)
add_common_options(parser, tile_pyramid=False, no_geom=False, default_config_file=True)
parser.add_argument(
"--status", default=False, action="store_true", help="Display the SQS queue status and exit"
)
parser.add_argument(
"--legends",
"--generate-legend-images",
default=False,
action="store_true",
dest="legends",
help="Generate the legend images",
)
parser.add_argument(
"--dump-config",
default=False,
action="store_true",
help="Dump the used config with default values and exit",
)
options = parser.parse_args(args[1:] if args else sys.argv[1:])
gene = TileGeneration(options.config, options, layer_name=options.layer)
assert gene.config_file
config = gene.get_config(gene.config_file)
if options.status:
status(gene)
sys.exit(0)
if options.cache is None:
options.cache = config.config["generation"]["default_cache"]
if options.dump_config:
_validate_generate_wmts_capabilities(config.config["caches"][options.cache], options.cache, True)
yaml = ruamel.yaml.YAML()
yaml_out = StringIO()
yaml.dump(config.config, yaml_out)
print(yaml_out.getvalue())
sys.exit(0)
if options.legends:
_generate_legend_images(gene)
except SystemExit:
raise
except: # pylint: disable=bare-except
_LOGGER.exception("Exit with exception")
if os.environ.get("TESTS", "false").lower() == "true":
raise
sys.exit(1)
def get_azure_client() -> BlobServiceClient:
"""Get the Azure blog storage client."""
if "AZURE_STORAGE_CONNECTION_STRING" in os.environ and os.environ["AZURE_STORAGE_CONNECTION_STRING"]:
return BlobServiceClient.from_connection_string(os.environ["AZURE_STORAGE_CONNECTION_STRING"])
else:
return BlobServiceClient(
account_url=os.environ["AZURE_STORAGE_ACCOUNT_URL"],
credential=DefaultAzureCredential(),
)
def _send(
data: Union[bytes, str], path: str, mime_type: str, cache: tilecloud_chain.configuration.Cache
) -> None:
if cache["type"] == "s3":
cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)
client = tilecloud.store.s3.get_client(cache_s3.get("host"))
key_name = os.path.join(f"{cache['folder']}", path)
bucket = cache_s3["bucket"]
client.put_object(
ACL="public-read",
Body=data,
Key=key_name,
Bucket=bucket,
ContentEncoding="utf-8",
ContentType=mime_type,
)
if cache["type"] == "azure":
cache_azure = cast(tilecloud_chain.configuration.CacheAzure, cache)
key_name = os.path.join(f"{cache['folder']}", path)
blob = get_azure_client().get_blob_client(container=cache_azure["container"], blob=key_name)
blob.upload_blob(data, overwrite=True)
blob.upload_blob(
data,
overwrite=True,
content_settings=ContentSettings( # type: ignore
content_type=mime_type,
content_encoding="utf-8",
cache_control=cache_azure["cache_control"],
),
)
else:
if isinstance(data, str):
data = data.encode("utf-8")
folder = cache["folder"] or ""
filename = os.path.join(folder, path)
directory = os.path.dirname(filename)
if not os.path.exists(directory):
os.makedirs(directory)
with open(filename, "wb") as f:
f.write(data)
def _get(path: str, cache: tilecloud_chain.configuration.Cache) -> Optional[bytes]:
if cache["type"] == "s3":
cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)
client = tilecloud.store.s3.get_client(cache_s3.get("host"))
key_name = os.path.join(f"{cache['folder']}".format(), path)
bucket = cache_s3["bucket"]
try:
response = client.get_object(Bucket=bucket, Key=key_name)
return cast(bytes, response["Body"].read())
except botocore.exceptions.ClientError as ex:
if ex.response["Error"]["Code"] == "NoSuchKey":
return None
else:
raise
if cache["type"] == "azure":
cache_azure = cast(tilecloud_chain.configuration.CacheAzure, cache)
key_name = os.path.join(f"{cache['folder']}", path)
try:
blob = get_azure_client().get_blob_client(container=cache_azure["container"], blob=key_name)
return blob.download_blob().readall()
except ResourceNotFoundError:
return None
else:
cache_filesystem = cast(tilecloud_chain.configuration.CacheFilesystem, cache)
p = os.path.join(cache_filesystem["folder"], path)
if not os.path.isfile(p):
return None
with open(p, "rb") as file:
return file.read()
def _validate_generate_wmts_capabilities(
cache: tilecloud_chain.configuration.Cache, cache_name: str, exit_: bool
) -> bool:
if "http_url" not in cache and "http_urls" not in cache:
_LOGGER.error(
"The attribute 'http_url' or 'http_urls' is required in the object cache[%s].", cache_name
)
if exit_:
sys.exit(1)
return False
return True
def get_wmts_capabilities(
gene: TileGeneration, cache_name: str, exit_: bool = False, config: Optional[DatedConfig] = None
) -> Optional[str]:
"""Get the WMTS capabilities for a configuration file."""
if config is None:
assert gene.config_file
config = gene.get_config(gene.config_file)
cache = config.config["caches"][cache_name]
if _validate_generate_wmts_capabilities(cache, cache_name, exit_):
server = gene.get_main_config().config.get("server")
base_urls = _get_base_urls(cache)
_fill_legend(gene, cache, server, base_urls, config=config)
data = pkgutil.get_data("tilecloud_chain", "wmts_get_capabilities.jinja")
assert data
return cast(
str,
jinja2_template(
data.decode("utf-8"),
layers=config.config["layers"],
layer_legends=gene.layer_legends,
grids=config.config["grids"],
getcapabilities=urljoin( # type: ignore
base_urls[0],
(
server.get("wmts_path", "wmts") + "/1.0.0/WMTSCapabilities.xml"
if server is not None
else cache.get("wmtscapabilities_file", "1.0.0/WMTSCapabilities.xml")
),
),
base_urls=base_urls,
base_url_postfix=(server.get("wmts_path", "wmts") + "/") if server is not None else "",
get_tile_matrix_identifier=get_tile_matrix_identifier,
server=server is not None,
has_metadata="metadata" in config.config,
metadata=config.config.get("metadata"),
has_provider="provider" in config.config,
provider=config.config.get("provider"),
enumerate=enumerate,
ceil=math.ceil,
int=int,
sorted=sorted,
),
)
return None
def _get_base_urls(cache: tilecloud_chain.configuration.Cache) -> List[str]:
base_urls = []
if "http_url" in cache:
if "hosts" in cache:
cc = copy(cache)
for host in cache["hosts"]:
cc["host"] = host # type: ignore
base_urls.append(cache["http_url"] % cc)
else:
base_urls = [cache["http_url"] % cache]
if "http_urls" in cache:
base_urls = [url % cache for url in cache["http_urls"]]
base_urls = [url + "/" if url[-1] != "/" else url for url in base_urls]
return base_urls
def _fill_legend(
gene: TileGeneration,
cache: tilecloud_chain.configuration.Cache,
server: Optional[tilecloud_chain.configuration.Server],
base_urls: List[str],
config: Optional[DatedConfig] = None,
) -> None:
if config is None:
assert gene.config_file
config = gene.get_config(gene.config_file)
for layer_name, layer in config.config["layers"].items():
previous_legend: Optional[tilecloud_chain.Legend] = None
previous_resolution = None
if "legend_mime" in layer and "legend_extension" in layer and layer_name not in gene.layer_legends:
gene.layer_legends[layer_name] = []
legends = gene.layer_legends[layer_name]
for zoom, resolution in enumerate(config.config["grids"][layer["grid"]]["resolutions"]):
path = "/".join(
[
"1.0.0",
layer_name,
layer["wmts_style"],
f"legend{zoom}.{layer['legend_extension']}",
]
)
img = _get(path, cache)
if img is not None:
new_legend: tilecloud_chain.Legend = {
"mime_type": layer["legend_mime"],
"href": os.path.join(
base_urls[0], server.get("static_path", "static") + "/" if server else "", path
),
}
legends.append(new_legend)
if previous_legend is not None:
assert previous_resolution is not None
middle_res = exp((log(previous_resolution) + log(resolution)) / 2)
previous_legend["min_resolution"] = middle_res
new_legend["max_resolution"] = middle_res
try:
pil_img = Image.open(BytesIO(img))
new_legend["width"] = pil_img.size[0]
new_legend["height"] = pil_img.size[1]
except Exception: # pragma: nocover
_LOGGER.warning(
"Unable to read legend image '%s', with '%s'",
path,
repr(img),
exc_info=True,
)
previous_legend = new_legend
previous_resolution = resolution
def _generate_legend_images(gene: TileGeneration) -> None:
assert gene.config_file
config = gene.get_config(gene.config_file)
cache = config.config["caches"][gene.options.cache]
for layer_name, layer in config.config["layers"].items():
if "legend_mime" in layer and "legend_extension" in layer:
if layer["type"] == "wms":
session = requests.session()
session.headers.update(layer["headers"])
previous_hash = None
for zoom, resolution in enumerate(config.config["grids"][layer["grid"]]["resolutions"]):
legends = []
for wmslayer in layer["layers"].split(","):
response = session.get(
layer["url"]
+ "?"
+ urlencode(
{
"SERVICE": "WMS",
"VERSION": layer.get("version", "1.0.0"),
"REQUEST": "GetLegendGraphic",
"LAYER": wmslayer,
"FORMAT": layer["legend_mime"],
"TRANSPARENT": "TRUE" if layer["legend_mime"] == "image/png" else "FALSE",
"STYLE": layer["wmts_style"],
"SCALE": resolution / 0.00028,
}
)
)
try:
legends.append(Image.open(BytesIO(response.content)))
except Exception: # pragma: nocover
_LOGGER.warning(
"Unable to read legend image for layer '%s'-'%s', resolution '%s': %s",
layer_name,
wmslayer,
resolution,
response.content,
exc_info=True,
)
width = max(i.size[0] for i in legends)
height = sum(i.size[1] for i in legends)
image = Image.new("RGBA", (width, height))
y = 0
for i in legends:
image.paste(i, (0, y))
y += i.size[1]
string_io = BytesIO()
image.save(string_io, FORMAT_BY_CONTENT_TYPE[layer["legend_mime"]])
result = string_io.getvalue()
new_hash = sha1(result).hexdigest() # nosec
if new_hash != previous_hash:
previous_hash = new_hash
_send(
result,
f"1.0.0/{layer_name}/{layer['wmts_style']}/"
f"legend{zoom}.{layer['legend_extension']}",
layer["legend_mime"],
cache,
)
def _get_resource(resource: str) -> bytes:
path = os.path.join(os.path.dirname(__file__), resource)
with open(path, "rb") as f:
return f.read()
def status(gene: TileGeneration) -> None:
"""Print th tilegeneration status."""
print("\n".join(get_status(gene)))
def get_status(gene: TileGeneration) -> List[str]:
"""Get the tile generation status."""
config = gene.get_main_config()
store = get_queue_store(config, False)
type_: Union[Literal["redis"], Literal["sqs"]] = "redis" if "redis" in config.config else "sqs"
conf = config.config[type_]
with _GET_STATUS_SUMMARY.labels(type_, conf["queue"]).time():
status_ = store.get_status()
return [name + ": " + str(value) for name, value in status_.items()]
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,606
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/multitilestore.py
|
import logging
from itertools import chain, groupby, starmap
from typing import Callable, Dict, Iterable, Iterator, Optional, Tuple
from tilecloud import Tile, TileStore
logger = logging.getLogger(__name__)
class MultiTileStore(TileStore):
"""Redirect to the corresponding Tilestore for the layer and config file."""
def __init__(self, get_store: Callable[[str, str], Optional[TileStore]]) -> None:
"""Initialize."""
TileStore.__init__(self)
self.get_store = get_store
self.stores: Dict[Tuple[str, str], Optional[TileStore]] = {}
def _get_store(self, config_file: str, layer: str) -> Optional[TileStore]:
store = self.stores.get((config_file, layer))
if store is None:
store = self.get_store(config_file, layer)
self.stores[(config_file, layer)] = store
return store
def __contains__(self, tile: Tile) -> bool:
"""
Return true if this store contains ``tile``.
Arguments:
tile: Tile
"""
layer = tile.metadata["layer"]
config_file = tile.metadata["config_file"]
store = self._get_store(config_file, layer)
assert store is not None
return tile in store
def delete_one(self, tile: Tile) -> Tile:
"""
Delete ``tile`` and return ``tile``.
Arguments:
tile: Tile
"""
layer = tile.metadata["layer"]
config_file = tile.metadata["config_file"]
store = self._get_store(config_file, layer)
assert store is not None
return store.delete_one(tile)
def list(self) -> Iterator[Tile]:
"""Generate all the tiles in the store, but without their data."""
# Too dangerous to list all tiles in all stores. Return an empty iterator instead
while False:
yield
def put_one(self, tile: Tile) -> Tile:
"""
Store ``tile`` in the store.
Arguments:
tile: Tile
"""
layer = tile.metadata["layer"]
config_file = tile.metadata["config_file"]
store = self._get_store(config_file, layer)
assert store is not None
return store.put_one(tile)
def get_one(self, tile: Tile) -> Optional[Tile]:
"""
Add data to ``tile``, or return ``None`` if ``tile`` is not in the store.
Arguments:
tile: Tile
"""
layer = tile.metadata["layer"]
config_file = tile.metadata["config_file"]
store = self._get_store(config_file, layer)
assert store is not None
return store.get_one(tile)
def get(self, tiles: Iterable[Optional[Tile]]) -> Iterator[Optional[Tile]]:
"""See in superclass."""
def apply(key: Tuple[str, str], tiles: Iterator[Tile]) -> Iterable[Optional[Tile]]:
store = self._get_store(*key)
if store is None:
return tiles
return store.get(tiles)
return chain.from_iterable(starmap(apply, groupby(tiles, self._get_layer)))
def put(self, tiles: Iterable[Tile]) -> Iterator[Tile]:
"""See in superclass."""
def apply(key: Tuple[str, str], tiles: Iterator[Tile]) -> Iterator[Tile]:
store = self._get_store(*key)
assert store is not None
return store.put(tiles)
return chain.from_iterable(starmap(apply, groupby(tiles, self._get_layer)))
def delete(self, tiles: Iterable[Tile]) -> Iterator[Tile]:
"""See in superclass."""
def apply(key: Tuple[str, str], tiles: Iterator[Tile]) -> Iterator[Tile]:
store = self._get_store(*key)
assert store is not None
return store.delete(tiles)
return chain.from_iterable(starmap(apply, groupby(tiles, self._get_layer)))
@staticmethod
def _get_layer(tile: Optional[Tile]) -> Tuple[str, str]:
assert tile is not None
return (tile.metadata["config_file"], tile.metadata["layer"])
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,607
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/views/admin.py
|
# Copyright (c) 2018-2023 by Camptocamp
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Camptocamp nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import io
import json
import logging
import multiprocessing
import os
import re
import shlex
import subprocess # nosec
from typing import IO, Any, Callable, Dict, List
from urllib.parse import urljoin
import pyramid.httpexceptions
import pyramid.request
import pyramid.response
from c2cwsgiutils.auth import AuthenticationType, auth_type, auth_view
from pyramid.view import view_config
import tilecloud_chain.server
from tilecloud_chain import controller, generate
from tilecloud_chain.controller import get_status
_LOG = logging.getLogger(__name__)
class Admin:
"""The admin views."""
def __init__(self, request: pyramid.request.Request):
"""Initialize."""
self.request = request
tilecloud_chain.server.init_tilegeneration(
self.request.registry.settings["tilegeneration_configfile"]
)
self.gene = tilecloud_chain.server.tilegeneration
@view_config(route_name="admin", renderer="tilecloud_chain:templates/admin_index.html") # type: ignore
@view_config(route_name="admin_slash", renderer="tilecloud_chain:templates/admin_index.html") # type: ignore
def index(self) -> Dict[str, Any]:
"""Get the admin index page."""
assert self.gene
config = self.gene.get_host_config(self.request.host)
server_config = config.config.get("server", {})
main_config = self.gene.get_main_config()
main_server_config = main_config.config.get("server", {})
return {
"auth_type": auth_type(self.request.registry.settings),
"has_access": self.request.has_permission("admin", config.config.get("authentication", {})),
"commands": server_config.get("predefined_commands", []),
"status": get_status(self.gene),
"admin_path": main_server_config.get("admin_path", "admin"),
"AuthenticationType": AuthenticationType,
}
@view_config(route_name="admin_run", renderer="fast_json") # type: ignore
def run(self) -> pyramid.response.Response:
"""Run the command given by the user."""
assert self.gene
auth_view(self.request)
if "command" not in self.request.POST:
self.request.response.status_code = 400
return {"error": "The POST argument 'command' is required"}
commands = shlex.split(self.request.POST["command"])
command = commands[0].replace("_", "-")
allowed_commands = (
self.gene.get_main_config()
.config.get("server", {})
.get("allowed_commands", ["generate-tiles", "generate-controller", "generate-cost"])
)
if command not in allowed_commands:
return {
"error": f"The given command '{command}' is not allowed, allowed command are: "
f"{', '.join(allowed_commands)}"
}
add_role = False
arguments = {c.split("=")[0]: c.split("=")[1:] for c in commands[1:]}
if command == "generate-tiles":
add_role = "--get-hash" not in arguments and "--get-bbox" not in arguments
allowed_arguments = (
self.gene.get_main_config()
.config.get("server", {})
.get(
"allowed_arguments",
[
"--layer",
"--get-hash",
"--generate-legend-images",
"--dump-config",
"--get-bbox",
"--help",
"--ignore-errors",
"--bbox",
"--zoom",
"--test",
"--near",
"--time",
"--measure-generation-time",
"--no-geom",
"--dimensions",
"--quiet",
"--verbose",
"--debug",
"--get-hash",
"--get-bbox",
],
)
)
for arg in arguments.keys():
if arg.startswith("-") and arg not in allowed_arguments:
self.request.response.status_code = 400
return {
"error": (
f"The argument {arg} is not allowed, allowed arguments are: "
f"{', '.join(allowed_arguments)}"
)
}
final_command = [
command,
f"--host={self.request.host}",
f"--config={self.gene.get_host_config_file(self.request.host)}",
]
if add_role:
final_command += ["--role=master"]
final_command += commands[1:]
display_command = shlex.join(final_command)
_LOG.info("Run the command `%s`", display_command)
env: Dict[str, str] = {}
env.update(os.environ)
env["FRONTEND"] = "noninteractive"
main = None
if final_command[0] in ["generate-tiles", "generate_tiles"]:
main = generate.main
elif final_command[0] in ["generate-controller", "generate_controller"]:
main = controller.main
if main is not None:
return_dict: Dict[str, Any] = {}
proc = multiprocessing.Process(
target=_run_in_process, args=(final_command, env, main, return_dict)
)
proc.start()
proc.join()
return return_dict
completed_process = subprocess.run( # nosec # pylint: disable=subprocess-run-check
final_command,
capture_output=True,
env=env,
)
if completed_process.returncode != 0:
_LOG.warning(
"The command `%s` exited with an error code: %s\nstdout:\n%s\nstderr:\n%s",
display_command,
completed_process.returncode,
completed_process.stdout.decode(),
completed_process.stderr.decode(),
)
stdout_parsed = _parse_stdout(completed_process.stdout.decode())
out = _format_output(
"<br />".join(stdout_parsed),
int(os.environ.get("TILECLOUD_CHAIN_MAX_OUTPUT_LENGTH", 1000)),
)
if completed_process.stderr:
out += "<br />Error:<br />" + _format_output(
completed_process.stderr.decode().replace("\n", "<br />"),
int(os.environ.get("TILECLOUD_CHAIN_MAX_OUTPUT_LENGTH", 1000)),
)
return {
"out": out,
"error": completed_process.returncode != 0,
}
@view_config(route_name="admin_test", renderer="tilecloud_chain:templates/openlayers.html") # type: ignore
def admin_test(self) -> Dict[str, Any]:
assert self.gene
config = self.gene.get_host_config(self.request.host)
main_config = self.gene.get_main_config()
return {
"proj4js_def": re.sub(
r"\s+",
" ",
config.config["openlayers"]["proj4js_def"],
),
"srs": config.config["openlayers"]["srs"],
"center_x": config.config["openlayers"]["center_x"],
"center_y": config.config["openlayers"]["center_y"],
"zoom": config.config["openlayers"]["zoom"],
"http_url": urljoin(
self.request.current_route_url(),
"/" + main_config.config["server"].get("wmts_path", "wmts") + "/"
if "server" in config.config
else "/",
),
}
def _parse_stdout(stdout: str) -> List[str]:
stdout_parsed = []
for line in stdout.splitlines():
try:
json_message = json.loads(line)
msg = json_message["msg"]
if json_message.get("logger_name", "").startswith("tilecloud"):
if "full_message" in json_message:
full_message = json_message["full_message"].replace("\n", "<br />")
msg += f"<br />{full_message}"
stdout_parsed.append(msg)
except: # pylint: disable=bare-except
stdout_parsed.append(line)
return stdout_parsed
def _format_output(string: str, max_length: int = 1000) -> str:
result = ""
for line in string.splitlines():
if len(string) > max_length:
break
if line.startswith("{"):
try:
parsed = json.loads(line)
if "source_facility" in parsed:
if not parsed.startswith("tilecloud"):
continue
if result:
result += "\n"
if (
"level_name" in parsed
and "source_facility" in parsed
and "line" in parsed
and "msg" in parsed
):
if parsed.startswith("tilecloud"):
result += (
f"[{parsed['level_name']}] {parsed['source_facility']}:{parsed['line']} "
f"{parsed['msg']}"
)
elif "msg" in parsed:
result += parsed["msg"]
else:
result += line
except json.decoder.JSONDecodeError:
if result:
result += "\n"
result += line
else:
if result:
result += "\n"
result += line
if len(string) > max_length:
return string[: max_length - 3] + "\n..."
return string
def _run_in_process(
    final_command: List[str],
    env: Dict[str, str],
    main: Callable[[List[str], IO[str]], Any],
    return_dict: Dict[str, Any],
) -> None:
    """Execute *main* with *final_command* in this process, filling *return_dict*.

    The captured output is parsed and formatted for HTML display under
    ``return_dict["out"]``; ``return_dict["error"]`` is set on failure.
    """
    display_command = shlex.join(final_command)
    captured = io.StringIO()
    failed = False
    try:
        # Propagate the requested environment to this (child) process.
        os.environ.update(env)
        _LOG.debug("Running the command `%s` using the function directly", display_command)
        main(final_command, captured)
    except Exception:
        _LOG.exception("Error while running the command `%s`", display_command)
        failed = True
    max_length = int(os.environ.get("TILECLOUD_CHAIN_MAX_OUTPUT_LENGTH", 1000))
    return_dict["out"] = _format_output("<br />".join(_parse_stdout(captured.getvalue())), max_length)
    return_dict["error"] = failed
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,608
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/tests/test_ui.py
|
import os
import subprocess
import pytest
import skimage.io
from c2cwsgiutils.acceptance.image import check_image
REGENERATE = False
def test_should_not_commit():
    """Fail if the screenshot-regeneration switch was left enabled in a commit."""
    regenerate_flag = REGENERATE
    assert regenerate_flag is False
@pytest.mark.parametrize(
    "url,expected_file_name,height,width",
    [
        pytest.param("http://application:8080/admin/", "not-login", 250, 800, id="not-login"),
        pytest.param("http://application:8080/admin/test", "test", 800, 800, id="test-not-login"),
        pytest.param("http://app_test_user:8080/admin", "index", 500, 1000, id="index"),
        pytest.param("http://app_test_user:8080/admin/test", "test", 800, 800, id="test"),
    ],
)
def test_ui(url, expected_file_name, height, width):
    """Screenshot the admin UI at *url* and compare it with the reference image."""
    # Render the page with a headless browser (screenshot.js) at the
    # requested viewport size.
    subprocess.run(
        [
            "node",
            "screenshot.js",
            f"--url={url}",
            f"--width={width}",
            f"--height={height}",
            f"--output=/tmp/{expected_file_name}.png",
        ],
        check=True,
    )
    # Compare against the committed reference, dropping the alpha channel.
    # When REGENERATE is True the reference image is rewritten instead.
    check_image(
        "/results",
        skimage.io.imread(f"/tmp/{expected_file_name}.png")[:, :, :3],
        os.path.join(os.path.dirname(__file__), f"{expected_file_name}.expected.png"),
        generate_expected_image=REGENERATE,
    )
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,609
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/tests/__init__.py
|
import logging
import os
import re
import shutil
import sys
import traceback
from io import StringIO
from logging import config
from typing import Any, Callable, List, Tuple, Union
from unittest import TestCase
import yaml
DIFF = 200
log = logging.getLogger("tests")
config.dictConfig(
{
"version": 1,
"loggers": {
"default": {"level": "INFO"},
"tilecloud": {"level": "DEBUG"},
"tilecloud_chain": {"level": "DEBUG"},
},
}
)
class NoAliasDumper(yaml.SafeDumper):
    """YAML dumper that expands every occurrence instead of emitting anchors/aliases."""

    def ignore_aliases(self, data: Any) -> bool:
        """Always ignore aliases so repeated objects are serialized in full."""
        return True
class CompareCase(TestCase):
    """TestCase helpers to run CLI entry points and compare their output."""

    def assert_result_equals(self, result: str, expected: str, regex: bool = False) -> None:
        """Compare *result* with *expected* line by line.

        Lines equal to ``PASS...`` in *expected* match anything; when *regex*
        is true each expected line is treated as a full-line regular
        expression.  On mismatch a window of surrounding result lines is
        printed/logged to ease debugging.
        """
        expected_lines = expected.split("\n")
        # Drop carriage-return progress rewrites before splitting.
        result = re.sub("\n[^\n]*\r", "\n", result)
        result = re.sub("^[^\n]*\r", "", result)
        result_lines = result.split("\n")
        for n, test in enumerate(zip(expected_lines, result_lines)):
            if test[0] != "PASS...":
                try:
                    if regex:
                        self.assertRegex(test[1].strip(), f"^{test[0].strip()}$")
                    else:
                        self.assertEqual(test[0].strip(), test[1].strip())
                except AssertionError as e:
                    # Show DIFF lines of context around the first mismatch.
                    for i in range(max(0, n - DIFF), min(len(result_lines), n + DIFF + 1)):
                        if i == n:
                            print(f"> {i} {result_lines[i]}")
                            log.info(f"> {i} {result_lines[i]}")
                        else:
                            print(f" {i} {result_lines[i]}")
                            log.info(f" {i} {result_lines[i]}")
                    raise e
        self.assertEqual(len(expected_lines), len(result_lines), repr(result_lines))

    def run_cmd(
        self, cmd: Union[List[str], str], main_func: Callable, get_error: bool = False
    ) -> Tuple[str, str]:
        """Run *main_func* as *cmd* with stdout/stderr captured; return both captures."""
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        old_stderr = sys.stderr
        sys.stderr = mystderr = StringIO()
        try:
            self.assert_main_equals(cmd, main_func, [], get_error)
        except AssertionError:
            # On failure, show the captured output on the real streams.
            sys.stdout = old_stdout
            sys.stderr = old_stderr
            print(mystdout.getvalue())
            print(mystderr.getvalue())
            raise
        finally:
            sys.stdout = old_stdout
            sys.stderr = old_stderr
        log.info(mystdout.getvalue())
        log.info(mystderr.getvalue())
        return mystdout.getvalue(), mystderr.getvalue()

    def assert_cmd_equals(
        self, cmd: Union[List[str], str], main_func: Callable, empty_err: bool = False, **kargs: Any
    ) -> None:
        """Run the command and assert its stdout matches the expectation in *kargs*."""
        out, err = self.run_cmd(cmd, main_func)
        if empty_err:
            self.assertEqual(err, "")
        # Normalize to `str` whatever the captured stream produced.
        out = out.decode("utf-8") if isinstance(out, bytes) else str(out)
        self.assert_result_equals(result=out, **kargs)

    def assert_cmd_exit_equals(self, cmd: str, main_func: Callable) -> None:
        """Assert that running *main_func* as *cmd* calls ``exit()``."""
        sys.argv = re.sub(" +", " ", cmd).split(" ")
        try:
            main_func()
            # Fixed: the original `assert "exit() not called."` asserted a
            # non-empty string and was therefore always true.
            raise AssertionError("exit() not called.")
        except SystemExit:
            pass

    def assert_main_equals(
        self,
        cmd: Union[List[str], str],
        main_func: Callable,
        expected: Union[List[List[str]], None] = None,
        get_error: bool = False,
        **kargs: Any,
    ) -> None:
        """Run *main_func* as *cmd* and compare the produced files with *expected*.

        *expected* is a list of ``[filename, content]`` pairs; each file is
        removed beforehand so a stale result cannot make the test pass.
        With *get_error* the command is expected to fail.
        """
        if expected:
            for expect in expected:
                if os.path.exists(expect[0]):
                    os.remove(expect[0])
        # Fixed: use isinstance instead of `type(cmd) == list`.
        if isinstance(cmd, list):
            sys.argv = cmd
        else:
            sys.argv = re.sub(" +", " ", cmd).split(" ")
        try:
            main_func()
            assert get_error is False
        except SystemExit as e:
            if get_error:
                assert e.code not in (None, 0), str(e)
            else:
                assert e.code in (None, 0), str(e)
        except AssertionError:
            raise
        except Exception:
            if not get_error:
                log.exception("Unexpected error")
            assert get_error is True, traceback.format_exc()
        if expected:
            for expect in expected:
                with open(expect[0]) as f:
                    self.assert_result_equals(f.read(), expect[1], **kargs)

    def assert_main_except_equals(
        self, cmd: str, main_func: Callable, expected: List[List[str]], get_error: bool = False, **kargs: Any
    ) -> None:
        """Like assert_main_equals but without collapsing repeated spaces in *cmd*."""
        sys.argv = cmd.split(" ")
        try:
            main_func()
            assert get_error is False
        except SystemExit as e:
            if get_error:
                assert e.code not in (None, 0), str(e)
            else:
                assert e.code in (None, 0), str(e)
        except AssertionError:
            raise
        except Exception:
            # Fixed: `assert False, ...` is stripped under -O; raise instead.
            raise AssertionError(traceback.format_exc())
        if expected:
            for expect in expected:
                with open(expect[0]) as f:
                    self.assert_result_equals(f.read(), expect[1], **kargs)

    def assert_yaml_equals(self, result: str, expected: str) -> None:
        """Compare two YAML documents after normalizing them (no anchors/aliases)."""
        expected_normalized = yaml.dump(
            yaml.safe_load(expected), width=120, default_flow_style=False, Dumper=NoAliasDumper
        )
        result_normalized = yaml.dump(
            yaml.safe_load(result), width=120, default_flow_style=False, Dumper=NoAliasDumper
        )
        self.assert_result_equals(result=result_normalized, expected=expected_normalized)

    def assert_cmd_yaml_equals(self, cmd: str, main_func: Callable, **kargs: Any) -> None:
        """Run *main_func* as *cmd* and compare its stdout as YAML."""
        old_stdout = sys.stdout
        sys.stdout = mystdout = StringIO()
        try:
            self.assert_main_equals(cmd, main_func, [])
        finally:
            # Fixed: always restore stdout, also when the assertion fails.
            sys.stdout = old_stdout
        self.assert_yaml_equals(result=mystdout.getvalue(), **kargs)

    def assert_tiles_generated(self, directory: str, **kargs: Any) -> None:
        """Clear *directory* then assert the expected tiles are generated in it."""
        if os.path.exists(directory):
            shutil.rmtree(directory, ignore_errors=True)
        self.assert_tiles_generated_deleted(directory=directory, **kargs)

    def assert_tiles_generated_deleted(
        self, directory: str, tiles_pattern: str, tiles: Any, expected: str = "", **kargs: Any
    ) -> None:
        """Run the command then assert *directory* contains exactly *tiles*.

        *tiles_pattern* is a %-format applied to each element of *tiles* to
        build the expected relative file names.
        """
        self.assert_cmd_equals(expected=expected, **kargs)
        count = 0
        for path, _dirs, files in os.walk(directory):
            if files:
                log.info((path, files))
                print((path, files))
                count += len(files)
        self.assertEqual(count, len(tiles))
        for tile in tiles:
            tile_path = directory + tiles_pattern % tile
            log.info(tile_path)
            print(tile_path)
            self.assertTrue(os.path.exists(tile_path))

    def assert_files_generated(self, **kargs):
        """Shortcut for assert_tiles_generated with an identity pattern."""
        self.assert_tiles_generated(tiles_pattern="%s", **kargs)
class MatchRegex:
    """Assert that a given string meets some expectations."""

    def __init__(self, regex) -> None:
        # Compile once; every comparison reuses the compiled pattern.
        self._pattern = re.compile(regex)

    def __eq__(self, other: str) -> bool:
        # Equal to any string the pattern matches from its start.
        return self._pattern.match(other) is not None

    def match(self, other: str) -> re.Match:
        # Expose the match object so callers can inspect groups.
        return self._pattern.match(other)

    def __repr__(self):
        return self._pattern.pattern
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,610
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/server.py
|
# Copyright (c) 2013-2023 by Stéphane Brunner
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions
# are met:
# 1. Redistributions of source code must retain the above copyright
# notice, this list of conditions and the following disclaimer.
# 2. Redistributions in binary form must reproduce the above copyright
# notice, this list of conditions and the following disclaimer in the
# documentation and/or other materials provided with the distribution.
# 3. Neither the name of Camptocamp nor the names of its contributors may
# be used to endorse or promote products derived from this software
# without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND
# ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED
# WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
# DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
# (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
# LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
# ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
# (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
# SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import collections
import datetime
import json
import logging
import mimetypes
import os
import time
from typing import TYPE_CHECKING, Any, Dict, Generic, List, Optional, TypeVar, Union, cast
from urllib.parse import parse_qs, urlencode
import botocore.exceptions
import c2cwsgiutils.pyramid
import pyramid.response
import pyramid.session
import requests
from azure.core.exceptions import ResourceNotFoundError
from c2cwsgiutils import health_check
from pyramid.config import Configurator
from pyramid.httpexceptions import HTTPException, exception_response
from pyramid.request import Request
from pyramid.router import Router
from pyramid_mako import add_mako_renderer
import tilecloud.store.s3
import tilecloud_chain.configuration
import tilecloud_chain.security
from tilecloud import Tile, TileCoord
from tilecloud_chain import TileGeneration, controller, internal_mapcache
from tilecloud_chain.controller import get_azure_client
logger = logging.getLogger(__name__)
tilegeneration = None
def init_tilegeneration(config_file: Optional[str]) -> None:
    """Initialize the tile generation."""
    # Lazily build the module-level TileGeneration exactly once.
    global tilegeneration  # pylint: disable=global-statement
    if tilegeneration is None:
        if config_file is not None:
            logger.info("Use config file: '%s'", config_file)
        log_level = os.environ.get("TILE_SERVER_LOGLEVEL")
        tilegeneration = TileGeneration(
            config_file,
            # Fake argparse options; positional values map to the field names
            # below: verbose/debug/quiet from TILE_SERVER_LOGLEVEL, five
            # unused selection options as None, geom=True, ignore_error=False.
            collections.namedtuple(  # type: ignore
                "Options",
                ["verbose", "debug", "quiet", "bbox", "zoom", "test", "near", "time", "geom", "ignore_error"],
            )(
                log_level == "verbose",  # type: ignore
                log_level == "debug",
                log_level == "quiet",
                None,
                None,
                None,
                None,
                None,
                True,
                False,
            ),
            configure_logging=False,
            multi_thread=False,
            maxconsecutive_errors=False,
        )
Response = TypeVar("Response")
class DatedStore:
    """Store with timestamp to be able to invalidate it on configuration change."""

    def __init__(self, store: tilecloud.TileStore, mtime: float) -> None:
        """Remember the wrapped store with the config mtime it was built from."""
        self.store, self.mtime = store, mtime
class DatedFilter:
    """Filter with timestamp to be able to invalidate it on configuration change."""

    def __init__(self, layer_filter: Optional[tilecloud_chain.IntersectGeometryFilter], mtime: float) -> None:
        """Remember the wrapped filter with the config mtime it was built from."""
        self.filter, self.mtime = layer_filter, mtime
class Server(Generic[Response]):
"""The generic implementation of the WMTS server."""
    def __init__(self) -> None:
        """Initialize."""
        try:
            # Per-config-file caches, invalidated through the stored mtimes.
            self.filter_cache: Dict[str, Dict[str, DatedFilter]] = {}
            self.s3_client_cache: Dict[str, "botocore.client.S3"] = {}
            self.store_cache: Dict[str, Dict[str, DatedStore]] = {}
            # The module-level tile generation must already be initialized
            # (see init_tilegeneration).
            assert tilegeneration
            self.wmts_path = tilegeneration.get_main_config().config["server"]["wmts_path"]
            self.static_path = tilegeneration.get_main_config().config["server"]["static_path"].split("/")
        except Exception:
            logger.exception("Initialization error")
            raise
@staticmethod
def get_expires_hours(config: tilecloud_chain.DatedConfig) -> float:
"""Get the expiration time in hours."""
return config.config.get("server", {}).get("expires", tilecloud_chain.configuration.EXPIRES_DEFAULT)
@staticmethod
def get_static_allow_extension(config: tilecloud_chain.DatedConfig) -> List[str]:
"""Get the allowed extensions in the static view."""
return config.config["server"].get(
"static_allow_extension", ["jpeg", "png", "xml", "js", "html", "css"]
)
@staticmethod
def get_cache_name(config: tilecloud_chain.DatedConfig) -> str:
"""Get the cache name."""
return config.config["server"].get("cache", config.config["generation"]["default_cache"])
    def get_s3_client(self, config: tilecloud_chain.DatedConfig) -> "botocore.client.S3":
        """Get the AWS S3 client, cached per S3 host."""
        cache_s3 = cast(tilecloud_chain.configuration.CacheS3, self.get_cache(config))
        if cache_s3.get("host", "aws") in self.s3_client_cache:
            return self.s3_client_cache[cache_s3.get("host", "aws")]
        # Retry up to 10 times with a growing back-off (0s, 10s, ..., 90s).
        for n in range(10):
            try:
                client = tilecloud.store.s3.get_client(cache_s3.get("host"))
                self.s3_client_cache[cast(str, cache_s3.get("host", "aws"))] = client
                return client
            except KeyError as e:
                # NOTE(review): only KeyError is retried — presumably what
                # get_client raises on a transient failure; confirm.
                error = e
                time.sleep(n * 10)
        raise error
def get_cache(self, config: tilecloud_chain.DatedConfig) -> tilecloud_chain.configuration.Cache:
"""Get the cache from the config."""
return config.config["caches"][self.get_cache_name(config)]
@staticmethod
def get_layers(config: tilecloud_chain.DatedConfig) -> List[str]:
"""Get the layer from the config."""
layers: List[str] = cast(List[str], config.config["layers"].keys())
return config.config["server"].get("layers", layers)
def get_filter(
self, config: tilecloud_chain.DatedConfig, layer_name: str
) -> Optional[tilecloud_chain.IntersectGeometryFilter]:
"""Get the filter from the config."""
dated_filter = self.filter_cache.get(config.file, {}).get(layer_name)
if dated_filter is not None and dated_filter.mtime == config.mtime:
return dated_filter.filter
assert tilegeneration
layer_filter = (
tilecloud_chain.IntersectGeometryFilter(gene=tilegeneration)
if config.config["server"]["geoms_redirect"]
else None
)
self.filter_cache.setdefault(config.file, {})[layer_name] = DatedFilter(layer_filter, config.mtime)
return layer_filter
def get_store(self, config: tilecloud_chain.DatedConfig, layer_name: str) -> tilecloud.TileStore:
"""Get the store from the config."""
dated_store = self.store_cache.get(config.file, {}).get(layer_name)
if dated_store is not None and dated_store.mtime == config.mtime:
return dated_store.store
assert tilegeneration
store = tilegeneration.get_store(config, self.get_cache(config), layer_name, read_only=True)
self.store_cache.setdefault(config.file, {})[layer_name] = DatedStore(store, config.mtime)
return store
    @staticmethod
    def get_max_zoom_seed(config: tilecloud_chain.DatedConfig, layer_name: str) -> int:
        """Get the max zoom that is seeded in the stored cache for the layer."""
        layer = config.config["layers"][layer_name]
        if "min_resolution_seed" in layer:
            # Highest zoom whose resolution is still strictly above the
            # seeding threshold; -1 when even zoom 0 is at or below it.
            max_zoom_seed = -1
            for zoom, resolution in enumerate(config.config["grids"][layer["grid"]]["resolutions"]):
                if resolution > layer["min_resolution_seed"]:
                    max_zoom_seed = zoom
            return max_zoom_seed
        else:
            # No threshold configured: every zoom is considered seeded.
            return 999999
def _read(
self,
key_name: str,
headers: Dict[str, str],
config: tilecloud_chain.DatedConfig,
**kwargs: Any,
) -> Response:
cache = self.get_cache(config)
try:
cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)
bucket = cache_s3
response = self.get_s3_client(config).get_object(Bucket=bucket, Key=key_name)
body = response["Body"]
try:
headers["Content-Type"] = response.get("ContentType")
return self.response(config, body.read(), headers, **kwargs)
finally:
body.close()
except botocore.exceptions.ClientError as ex:
if ex.response["Error"]["Code"] == "NoSuchKey":
return self.error(config, 404, key_name + " not found")
else:
raise
    def _get(
        self,
        path: str,
        headers: Dict[str, str],
        config: tilecloud_chain.DatedConfig,
        **kwargs: Any,
    ) -> Response:
        """Get capabilities or other static files."""
        assert tilegeneration
        cache = self.get_cache(config)
        if cache["type"] == "s3":
            cache_s3 = cast(tilecloud_chain.configuration.CacheS3, cache)
            key_name = os.path.join(cache_s3["folder"], path)
            try:
                return self._read(key_name, headers, config, **kwargs)
            except Exception:
                # On any S3 failure, drop the cached client and retry once
                # with a freshly-created one.
                del self.s3_client_cache[cache_s3.get("host", "aws")]
                return self._read(key_name, headers, config, **kwargs)
        if cache["type"] == "azure":
            cache_azure = cast(tilecloud_chain.configuration.CacheAzure, cache)
            key_name = os.path.join(cache_azure["folder"], path)
            try:
                blob = get_azure_client().get_blob_client(container=cache_azure["container"], blob=key_name)
                properties = blob.get_blob_properties()
                data = blob.download_blob().readall()
                return self.response(
                    config,
                    data if isinstance(data, bytes) else data.encode("utf-8"),  # type: ignore
                    {
                        "Content-Encoding": cast(str, properties.content_settings.content_encoding),
                        "Content-Type": cast(str, properties.content_settings.content_type),
                    },
                    **kwargs,
                )
            except ResourceNotFoundError:
                return self.error(config, 404, path + " not found", **kwargs)
        else:
            # Filesystem cache: only whitelisted extensions may be served.
            cache_filesystem = cast(tilecloud_chain.configuration.CacheFilesystem, cache)
            folder = cache_filesystem["folder"] or ""
            if path.split(".")[-1] not in self.get_static_allow_extension(config):
                return self.error(config, 403, "Extension not allowed", **kwargs)
            p = os.path.join(folder, path)
            if not os.path.isfile(p):
                return self.error(config, 404, path + " not found", **kwargs)
            with open(p, "rb") as file:
                data = file.read()
            content_type = mimetypes.guess_type(p)[0]
            if content_type:
                headers["Content-Type"] = content_type
            return self.response(config, data, headers, **kwargs)
    def __call__(
        self,
        config: tilecloud_chain.DatedConfig,
        config_file: str,
        environ: Dict[str, str],
        # NOTE(review): this is the WSGI start_response callable forwarded to
        # serve(); the `bytes` annotation looks wrong — confirm before changing.
        start_response: bytes,
    ) -> Response:
        """Build the response on request."""
        # Uppercase query-string keys to make parameter lookup
        # case-insensitive (KVP mode).
        params = {}
        for key, value in parse_qs(environ["QUERY_STRING"], True).items():
            params[key.upper()] = value[0]
        # REST mode: only use the path when there is no query string.
        path = None if len(params) > 0 else environ["PATH_INFO"][1:].split("/")
        return self.serve(path, params, config=config, config_file=config_file, start_response=start_response)
    def serve(
        self,
        path: Optional[List[str]],
        params: Dict[str, str],
        config: tilecloud_chain.DatedConfig,
        **kwargs: Any,
    ) -> Response:
        """Serve the WMTS requests.

        Handles both REST requests (*path* set, translated into KVP
        *params*) and KVP requests (*path* is None). Dispatches to static
        files, GetCapabilities, GetFeatureInfo (forwarded to the WMS
        server) and GetTile (served from the cache or the internal map
        cache).
        """
        if not config or not config.config:
            return self.error(
                config,
                404,
                "No configuration file found for the host or the configuration has an error, see logs for details",
                **kwargs,
            )
        try:
            dimensions = []
            metadata = {}
            assert tilegeneration
            if path:
                # Static files (capabilities, legend images, ...) live under
                # the configured static path.
                if tuple(path[: len(self.static_path)]) == tuple(self.static_path):
                    return self._get(
                        "/".join(path[len(self.static_path) :]),
                        {
                            "Expires": (
                                datetime.datetime.utcnow()
                                + datetime.timedelta(hours=self.get_expires_hours(config))
                            ).isoformat(),
                            "Cache-Control": f"max-age={3600 * self.get_expires_hours(config)}",
                            "Access-Control-Allow-Origin": "*",
                            "Access-Control-Allow-Methods": "GET",
                        },
                        config=config,
                        **kwargs,
                    )
                elif len(path) >= 1 and path[0] != self.wmts_path:
                    return self.error(
                        config,
                        404,
                        f"Type '{path[0]}' don't exists, allows values: '{self.wmts_path}' or "
                        f"'{'/'.join(self.static_path)}'",
                        **kwargs,
                    )
                path = path[1:] # remove type
            # REST mode: translate the path segments into KVP parameters.
            if path:
                if len(path) == 2 and path[0] == "1.0.0" and path[1].lower() == "wmtscapabilities.xml":
                    params["SERVICE"] = "WMTS"
                    params["VERSION"] = "1.0.0"
                    params["REQUEST"] = "GetCapabilities"
                elif len(path) < 7:
                    return self.error(config, 400, "Not enough path", **kwargs)
                else:
                    params["SERVICE"] = "WMTS"
                    params["VERSION"] = path[0]
                    params["LAYER"] = path[1]
                    params["STYLE"] = path[2]
                    if params["LAYER"] in self.get_layers(config):
                        layer = cast(
                            tilecloud_chain.configuration.LayerWms,
                            config.config["layers"][params["LAYER"]],
                        )
                    else:
                        return self.error(config, 400, f"Wrong Layer '{params['LAYER']}'", **kwargs)
                    # The layer's dimensions occupy the path segments right
                    # after the style.
                    index = 3
                    dimensions = path[index : index + len(layer.get("dimensions", {}))]
                    for dimension in layer.get("dimensions", {}):
                        metadata["dimension_" + dimension["name"]] = path[index]
                        params[dimension["name"].upper()] = path[index]
                        index += 1
                    # The last segment carries the file extension.
                    last = path[-1].split(".")
                    if len(path) < index + 4:
                        return self.error(config, 400, "Not enough path", **kwargs)
                    params["TILEMATRIXSET"] = path[index]
                    params["TILEMATRIX"] = path[index + 1]
                    params["TILEROW"] = path[index + 2]
                    if len(path) == index + 4:
                        params["REQUEST"] = "GetTile"
                        params["TILECOL"] = last[0]
                        if last[1] != layer["extension"]:
                            return self.error(config, 400, f"Wrong extension '{last[1]}'", **kwargs)
                    elif len(path) == index + 6:
                        params["REQUEST"] = "GetFeatureInfo"
                        params["TILECOL"] = path[index + 3]
                        params["I"] = path[index + 4]
                        params["J"] = last[0]
                        params["INFO_FORMAT"] = layer.get("info_formats", ["application/vnd.ogc.gml"])[0]
                    else:
                        return self.error(config, 400, "Wrong path length", **kwargs)
                    params["FORMAT"] = layer["mime_type"]
            else:
                # KVP mode: validate the mandatory parameters.
                if "SERVICE" not in params or "REQUEST" not in params or "VERSION" not in params:
                    return self.error(config, 400, "Not all required parameters are present", **kwargs)
            if params["SERVICE"] != "WMTS":
                return self.error(config, 400, f"Wrong Service '{params['SERVICE']}'", **kwargs)
            if params["VERSION"] != "1.0.0":
                return self.error(config, 400, f"Wrong Version '{params['VERSION']}'", **kwargs)
            if params["REQUEST"] == "GetCapabilities":
                headers = {
                    "Content-Type": "application/xml",
                    "Expires": (
                        datetime.datetime.utcnow() + datetime.timedelta(hours=self.get_expires_hours(config))
                    ).isoformat(),
                    "Cache-Control": f"max-age={3600 * self.get_expires_hours(config)}",
                    "Access-Control-Allow-Origin": "*",
                    "Access-Control-Allow-Methods": "GET",
                }
                cache = self.get_cache(config)
                if "wmtscapabilities_file" in cache:
                    # A pre-generated capabilities document is stored in the cache.
                    wmtscapabilities_file = cache["wmtscapabilities_file"]
                    return self._get(wmtscapabilities_file, headers, config=config, **kwargs)
                else:
                    # Generate the capabilities document on the fly.
                    body = controller.get_wmts_capabilities(
                        tilegeneration, self.get_cache_name(config), config=config
                    )
                    assert body
                    headers["Content-Type"] = "application/xml"
                    return self.response(config, body.encode("utf-8"), headers=headers, **kwargs)
            if (
                "FORMAT" not in params
                or "LAYER" not in params
                or "TILEMATRIXSET" not in params
                or "TILEMATRIX" not in params
                or "TILEROW" not in params
                or "TILECOL" not in params
            ):
                return self.error(config, 400, "Not all required parameters are present", **kwargs)
            if not path:
                # KVP mode: resolve the layer and its dimension values now.
                if params["LAYER"] in self.get_layers(config):
                    layer = cast(
                        tilecloud_chain.configuration.LayerWms,
                        config.config["layers"][params["LAYER"]],
                    )
                else:
                    return self.error(config, 400, f"Wrong Layer '{params['LAYER']}'", **kwargs)
                for dimension in layer.get("dimensions", []):
                    value = (
                        params[dimension["name"].upper()]
                        if dimension["name"].upper() in params
                        else dimension["default"]
                    )
                    dimensions.append(value)
                    metadata["dimension_" + dimension["name"]] = value
            if params["STYLE"] != layer["wmts_style"]:
                return self.error(config, 400, f"Wrong Style '{params['STYLE']}'", **kwargs)
            if params["TILEMATRIXSET"] != layer["grid"]:
                return self.error(config, 400, f"Wrong TileMatrixSet '{params['TILEMATRIXSET']}'", **kwargs)
            metadata["layer"] = params["LAYER"]
            metadata["config_file"] = config.file
            tile = Tile(
                TileCoord(
                    # TODO fix for matrix_identifier = resolution
                    int(params["TILEMATRIX"]),
                    int(params["TILECOL"]),
                    int(params["TILEROW"]),
                ),
                metadata=metadata,
            )
            if params["REQUEST"] == "GetFeatureInfo":
                if "I" not in params or "J" not in params or "INFO_FORMAT" not in params:
                    return self.error(config, 400, "Not all required parameters are present", **kwargs)
                if "query_layers" in layer:
                    # Forward the GetFeatureInfo request to the backing WMS.
                    return self.forward(
                        config,
                        layer["url"]
                        + "?"
                        + urlencode(
                            {
                                "SERVICE": "WMS",
                                "VERSION": layer.get("version", "1.1.1"),
                                "REQUEST": "GetFeatureInfo",
                                "LAYERS": layer["layers"],
                                "QUERY_LAYERS": layer["query_layers"],
                                "STYLES": params["STYLE"],
                                "FORMAT": params["FORMAT"],
                                "INFO_FORMAT": params["INFO_FORMAT"],
                                "WIDTH": config.config["grids"][layer["grid"]]["tile_size"],
                                "HEIGHT": config.config["grids"][layer["grid"]]["tile_size"],
                                "SRS": config.config["grids"][layer["grid"]]["srs"],
                                "BBOX": tilegeneration.get_grid(config, layer["grid"]).extent(tile.tilecoord),
                                "X": params["I"],
                                "Y": params["J"],
                            }
                        ),
                        no_cache=True,
                        **kwargs,
                    )
                else:
                    return self.error(config, 400, f"Layer '{params['LAYER']}' not queryable", **kwargs)
            if params["REQUEST"] != "GetTile":
                return self.error(config, 400, f"Wrong Request '{params['REQUEST']}'", **kwargs)
            if params["FORMAT"] != layer["mime_type"]:
                return self.error(config, 400, f"Wrong Format '{params['FORMAT']}'", **kwargs)
            # Above the seeded zoom levels the tile is not in the static
            # cache: generate it through the internal map cache.
            if tile.tilecoord.z > self.get_max_zoom_seed(config, params["LAYER"]):
                return self._map_cache(config, layer, tile, kwargs)
            layer_filter = self.get_filter(config, params["LAYER"])
            if layer_filter:
                meta_size = layer["meta_size"]
                meta_tilecoord = (
                    TileCoord(
                        # TODO fix for matrix_identifier = resolution
                        # NOTE(review): x / meta_size * meta_size rounds back
                        # to (nearly) x — an integer floor
                        # (x // meta_size * meta_size) looks intended; confirm.
                        tile.tilecoord.z,
                        round(tile.tilecoord.x / meta_size * meta_size),
                        round(tile.tilecoord.y / meta_size * meta_size),
                        meta_size,
                    )
                    if meta_size != 1
                    else tile.tilecoord
                )
                # Tiles outside the configured geometry are not stored in the
                # cache either: generate them on the fly.
                if not layer_filter.filter_tilecoord(
                    config, meta_tilecoord, params["LAYER"], host=self.get_host(**kwargs)
                ):
                    return self._map_cache(config, layer, tile, kwargs)
            store = self.get_store(config, params["LAYER"])
            if store is None:
                return self.error(
                    config,
                    400,
                    f"No store found for layer '{params['LAYER']}'",
                    **kwargs,
                )
            tile2 = store.get_one(tile)
            if tile2:
                if tile2.error:
                    return self.error(config, 500, tile2.error, **kwargs)
                assert tile2.data
                assert tile2.content_type
                return self.response(
                    config,
                    tile2.data,
                    headers={
                        "Content-Type": tile2.content_type,
                        "Expires": (
                            datetime.datetime.utcnow()
                            + datetime.timedelta(hours=self.get_expires_hours(config))
                        ).isoformat(),
                        "Cache-Control": f"max-age={3600 * self.get_expires_hours(config)}",
                        "Access-Control-Allow-Origin": "*",
                        "Access-Control-Allow-Methods": "GET",
                        "Tile-Backend": "Cache",
                    },
                    **kwargs,
                )
            else:
                # Tile absent from the cache: empty response.
                return self.error(config, 204, **kwargs)
        except HTTPException:
            raise
        except Exception:
            logger.exception("An unknown error occurred")
            raise
def _map_cache(
self,
config: tilecloud_chain.DatedConfig,
layer: tilecloud_chain.configuration.Layer,
tile: Tile,
kwargs: Dict[str, Any],
) -> Response:
"""Get the tile on a cache of tile."""
assert tilegeneration
return internal_mapcache.fetch(config, self, tilegeneration, layer, tile, kwargs)
def forward(
self,
config: tilecloud_chain.DatedConfig,
url: str,
headers: Optional[Any] = None,
no_cache: bool = False,
**kwargs: Any,
) -> Response:
"""Forward the request on a fallback WMS server."""
if headers is None:
headers = {}
if no_cache:
headers["Cache-Control"] = "no-cache"
headers["Pragma"] = "no-cache"
response = requests.get(url, headers=headers) # nosec
if response.status_code == 200:
response_headers = dict(response.headers)
if no_cache:
response_headers["Cache-Control"] = "no-cache, no-store"
response_headers["Pragma"] = "no-cache"
else:
response_headers["Expires"] = (
datetime.datetime.utcnow() + datetime.timedelta(hours=self.get_expires_hours(config))
).isoformat()
response_headers["Cache-Control"] = f"max-age={3600 * self.get_expires_hours(config)}"
response_headers["Access-Control-Allow-Origin"] = "*"
response_headers["Access-Control-Allow-Methods"] = "GET"
return self.response(config, response.content, headers=response_headers, **kwargs)
else:
message = (
f"The URL '{url}' return '{response.status_code} {response.reason}', "
f"content:\n{response.text}"
)
logger.warning(message)
return self.error(config, 502, message=message, **kwargs)
def error(
self,
config: tilecloud_chain.DatedConfig,
code: int,
message: Optional[Union[Exception, str]] = "",
**kwargs: Any,
) -> Response:
"""Build the error, should be implemented in a sub class."""
raise NotImplementedError
def response(
self,
config: tilecloud_chain.DatedConfig,
data: bytes,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> Response:
"""Build the response, should be implemented in a sub class."""
raise NotImplementedError
def get_host(self, **kwargs: Any) -> str:
"""Get the host used in Prometheus stats and in the JSON logs, should be implemented in a sub class."""
del kwargs
return "localhost"
if TYPE_CHECKING:
WsgiServerBase = Server[List[bytes]]
else:
WsgiServerBase = Server
class WsgiServer(WsgiServerBase):
"""Convert the error and response for the WSGI server."""
HTTP_MESSAGES = {
204: "204 No Content",
400: "400 Bad Request",
403: "403 Forbidden",
404: "404 Not Found",
502: "502 Bad Gateway",
}
def error(
self,
config: tilecloud_chain.DatedConfig,
code: int,
message: Optional[Union[Exception, str]] = "",
**kwargs: Any,
) -> List[bytes]:
"""Build the error."""
assert message is not None
kwargs["start_response"](self.HTTP_MESSAGES[code], [])
return [str(message).encode()]
def response(
self,
config: tilecloud_chain.DatedConfig,
data: bytes,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> List[bytes]:
"""Build the response."""
if headers is None:
headers = {}
headers["Content-Length"] = str(len(data))
kwargs["start_response"]("200 OK", headers.items())
return [data]
def app_factory(
global_config: Any,
configfile: Optional[str] = os.environ.get("TILEGENERATION_CONFIGFILE"),
**local_conf: Any,
) -> WsgiServer:
"""Create the WSGI server."""
del global_config
del local_conf
init_tilegeneration(configfile)
return WsgiServer()
if TYPE_CHECKING:
PyramidServerBase = Server[pyramid.response.Response]
else:
PyramidServerBase = Server
class PyramidServer(PyramidServerBase):
"""Convert the error and response for Pyramid."""
def error(
self,
config: tilecloud_chain.DatedConfig,
code: int,
message: Optional[Union[Exception, str]] = None,
**kwargs: Any,
) -> pyramid.response.Response:
"""Build the Pyramid response on error."""
headers = {
"Access-Control-Allow-Origin": "*",
"Access-Control-Allow-Methods": "GET",
}
if code < 300:
headers.update(
{
"Expires": (
datetime.datetime.utcnow() + datetime.timedelta(hours=self.get_expires_hours(config))
).isoformat(),
"Cache-Control": f"max-age={3600 * self.get_expires_hours(config)}",
}
)
return exception_response(code, detail=message, headers=headers)
raise exception_response(code, detail=message, headers=headers)
def response(
self,
config: tilecloud_chain.DatedConfig,
data: bytes,
headers: Optional[Dict[str, str]] = None,
**kwargs: Any,
) -> pyramid.response.Response:
"""Build the Pyramid response."""
if headers is None:
headers = {}
request: pyramid.request.Request = kwargs["request"]
request.response.headers = headers
if isinstance(data, memoryview):
request.response.body_file = data
else:
request.response.body = data
return request.response
def get_host(self, **kwargs: Any) -> str:
request: pyramid.request.Request = kwargs["request"]
assert isinstance(request.host, str)
return request.host
pyramid_server = None
class PyramidView:
"""The Pyramid view."""
def __init__(self, request: Request) -> None:
"""Init the Pyramid view."""
self.request = request
global pyramid_server # pylint: disable=global-statement
init_tilegeneration(request.registry.settings.get("tilegeneration_configfile"))
if pyramid_server is None:
pyramid_server = PyramidServer()
self.server = pyramid_server
def __call__(self) -> pyramid.response.Response:
"""Call the Pyramid view."""
params = {}
path = None
if "path" in self.request.matchdict:
path = self.request.matchdict["path"]
for param, value in self.request.params.items():
params[param.upper()] = value
assert tilegeneration
return self.server.serve(
path,
params,
host=self.request.host,
config=tilegeneration.get_host_config(self.request.host),
request=self.request,
)
def forbidden(request: pyramid.request.Request) -> pyramid.response.Response:
"""Return a 403 Forbidden response."""
is_auth = c2cwsgiutils.auth.is_auth(request)
if is_auth:
return pyramid.httpexceptions.HTTPForbidden(request.exception.message)
return pyramid.httpexceptions.HTTPFound(
location=request.route_url(
"c2c_github_login",
_query={"came_from": request.current_route_url()},
)
)
def main(global_config: Any, **settings: Any) -> Router:
"""Start the server in Pyramid."""
del global_config # unused
config = Configurator(settings=settings)
config.set_session_factory(
pyramid.session.BaseCookieSessionFactory(json)
if os.environ.get("TILECLOUD_CHAIN_DEBUG_SESSION", "false").lower() == "true"
else pyramid.session.SignedCookieSessionFactory(
os.environ["TILECLOUD_CHAIN_SESSION_SECRET"], salt=os.environ["TILECLOUD_CHAIN_SESSION_SALT"]
)
)
init_tilegeneration(settings.get("tilegeneration_configfile"))
assert tilegeneration
config.include(c2cwsgiutils.pyramid.includeme)
health_check.HealthCheck(config)
add_mako_renderer(config, ".html")
config.set_security_policy(tilecloud_chain.security.SecurityPolicy())
config.add_forbidden_view(forbidden)
config.add_route(
"admin",
f"/{tilegeneration.get_main_config().config['server']['admin_path']}",
request_method="GET",
)
config.add_route(
"admin_slash",
f"/{tilegeneration.get_main_config().config['server']['admin_path']}/",
request_method="GET",
)
config.add_route(
"admin_run",
f"/{tilegeneration.get_main_config().config['server']['admin_path']}/run",
request_method="POST",
)
config.add_route(
"admin_test",
f"/{tilegeneration.get_main_config().config['server']['admin_path']}/test",
request_method="GET",
)
config.add_static_view(
name=f"/{tilegeneration.get_main_config().config['server']['admin_path']}/static",
path="/app/tilecloud_chain/static",
)
config.add_route("tiles", "/*path", request_method="GET")
config.add_view(PyramidView, route_name="tiles")
config.scan("tilecloud_chain.views")
return config.make_wsgi_app()
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,611
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/expiretiles.py
|
import logging
import sys
from argparse import ArgumentParser
import psycopg2.sql
from shapely.geometry import MultiPolygon, Polygon
from shapely.ops import unary_union
from tilecloud.grid.quad import QuadTileGrid
from tilecloud_chain import parse_tilecoord
logger = logging.getLogger(__name__)
def main() -> None:
"""Import the osm2pgsql expire-tiles file to Postgres."""
try:
parser = ArgumentParser(
description="Used to import the osm2pgsql expire-tiles file to Postgres", prog=sys.argv[0]
)
parser.add_argument(
"--buffer",
type=float,
default=0.0,
help="Extent buffer to the tiles [m], default is 0",
)
parser.add_argument(
"--simplify",
type=float,
default=0.0,
help="Simplify the result geometry [m], default is 0",
)
parser.add_argument(
"--create",
default=False,
action="store_true",
help="create the table if not exists",
)
parser.add_argument(
"--delete",
default=False,
action="store_true",
help="empty the table",
)
parser.add_argument(
"file",
metavar="FILE",
help="The osm2pgsql expire-tiles file",
)
parser.add_argument(
"connection",
metavar="CONNECTION",
help=(
"The PostgreSQL connection string e.g. "
'"user=www-data password=www-data dbname=sig host=localhost"'
),
)
parser.add_argument(
"table",
metavar="TABLE",
help="The PostgreSQL table to fill",
)
parser.add_argument(
"--schema",
default="public",
help="The PostgreSQL schema to use (should already exists), default is public",
)
parser.add_argument(
"column",
metavar="COLUMN",
default="geom",
nargs="?",
help='The PostgreSQL column, default is "geom"',
)
parser.add_argument(
"--srid",
type=int,
default=3857,
nargs="?",
help="The stored geometry SRID, no conversion by default (3857)",
)
options = parser.parse_args()
connection = psycopg2.connect(options.connection)
cursor = connection.cursor()
if options.create:
cursor.execute(
"SELECT count(*) FROM pg_tables WHERE schemaname=%(schema)s AND tablename=%(table)s",
{"schema": options.schema, "table": options.table},
)
if cursor.fetchone()[0] == 0:
cursor.execute(
psycopg2.sql.SQL("CREATE TABLE IF NOT EXISTS {}.{} (id serial)").format(
psycopg2.sql.Identifier(options.schema), psycopg2.sql.Identifier(options.table)
)
)
cursor.execute(
"SELECT AddGeometryColumn(%(schema)s, %(table)s, %(column)s, %(srid)s, 'MULTIPOLYGON', 2)",
{
"schema": options.schema,
"table": options.table,
"column": options.column,
"srid": options.srid,
},
)
if options.delete:
cursor.execute(psycopg2.sql.SQL("DELETE FROM {}").format(psycopg2.sql.Identifier(options.table)))
geoms = []
grid = QuadTileGrid(
max_extent=(-20037508.34, -20037508.34, 20037508.34, 20037508.34),
)
with open(options.file, encoding="utf-8") as f:
for coord in f:
extent = grid.extent(parse_tilecoord(coord), options.buffer)
geoms.append(
Polygon(
(
(extent[0], extent[1]),
(extent[0], extent[3]),
(extent[2], extent[3]),
(extent[2], extent[1]),
)
)
)
if len(geoms) == 0:
print("No coords found")
connection.commit()
cursor.close()
connection.close()
sys.exit(0)
geom = unary_union(geoms)
if geom.geom_type == "Polygon":
geom = MultiPolygon((geom,))
if options.simplify > 0:
geom.simplify(options.simplify)
if options.srid <= 0:
cursor.execute(
psycopg2.sql.SQL("INSERT INTO {} ({}) VALUES (ST_GeomFromText(%(geom)s))").format(
psycopg2.sql.Identifier(options.table),
psycopg2.sql.Identifier(options.column),
),
{
"geom": geom.wkt,
},
)
elif options.srid != 3857:
cursor.execute(
psycopg2.sql.SQL(
"INSERT INTO {} ({}) VALUES (ST_Transform(ST_GeomFromText(%(geom)s, 3857), %(srid)s))"
).format(
psycopg2.sql.Identifier(options.table),
psycopg2.sql.Identifier(options.column),
),
{
"geom": geom.wkt,
"srid": options.srid,
},
)
else:
cursor.execute(
psycopg2.sql.SQL("INSERT INTO {} ({}) VALUES (ST_GeomFromText(%(geom)s, 3857))").format(
psycopg2.sql.Identifier(options.table),
psycopg2.sql.Identifier(options.column),
),
{
"geom": geom.wkt,
"srid": options.srid,
},
)
connection.commit()
cursor.close()
connection.close()
print("Import successful")
except SystemExit:
raise
except: # pylint: disable=bare-except
logger.exception("Exit with exception")
sys.exit(1)
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,612
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/tests/test_controller.py
|
import os
import shutil
from tilecloud_chain import TileGeneration, controller
from tilecloud_chain.tests import CompareCase
class TestController(CompareCase):
def setUp(self) -> None: # noqa
self.maxDiff = None
@classmethod
def setUpClass(cls): # noqa
os.chdir(os.path.dirname(__file__))
if os.path.exists("/tmp/tiles"):
shutil.rmtree("/tmp/tiles")
@classmethod
def tearDownClass(cls): # noqa
os.chdir(os.path.dirname(os.path.dirname(os.path.dirname(__file__))))
if os.path.exists("/tmp/tiles"):
shutil.rmtree("/tmp/tiles")
def test_capabilities(self) -> None:
gene = TileGeneration("tilegeneration/test-fix.yaml", configure_logging=False)
config = gene.get_config("tilegeneration/test-fix.yaml")
self.assert_result_equals(
controller.get_wmts_capabilities(gene, config.config["generation"]["default_cache"]),
r"""<\?xml version="1.0" encoding="UTF-8"\?>
<Capabilities version="1.0.0"
xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd">
<ows:ServiceIdentification>
<ows:Title>Some title</ows:Title>
<ows:Abstract>Some abstract</ows:Abstract>
<ows:Keywords>
<ows:Keyword>some</ows:Keyword>
<ows:Keyword>keywords</ows:Keyword>
</ows:Keywords>
<ows:ServiceType>OGC WMTS</ows:ServiceType>
<ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>
<ows:Fees>None</ows:Fees>
<ows:AccessConstraint>None</ows:AccessConstraint>
</ows:ServiceIdentification>
<ows:ServiceProvider>
<ows:ProviderName>The provider name</ows:ProviderName>
<ows:ProviderSite>The provider URL</ows:ProviderSite>
<ows:ServiceContact>
<ows:IndividualName>The contact name</ows:IndividualName>
<ows:PositionName>The position name</ows:PositionName>
<ows:ContactInfo>
<ows:Phone>
<ows:Voice>\+41 11 222 33 44</ows:Voice>
<ows:Facsimile>\+41 11 222 33 44</ows:Facsimile>
</ows:Phone>
<ows:Address>
<ows:DeliveryPoint>Address delivery</ows:DeliveryPoint>
<ows:City>Berne</ows:City>
<ows:AdministrativeArea>BE</ows:AdministrativeArea>
<ows:PostalCode>3000</ows:PostalCode>
<ows:Country>Switzerland</ows:Country>
<ows:ElectronicMailAddress>info@example.com</ows:ElectronicMailAddress>
</ows:Address>
</ows:ContactInfo>
</ows:ServiceContact>
</ows:ServiceProvider>
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/1.0.0/WMTSCapabilities.xml">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
<ows:Operation name="GetTile">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
<!-- <ServiceMetadataURL xlink:href="" /> -->
<Contents>
<Layer>
<ows:Title>all</ows:Title>
<ows:Identifier>all</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/all/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>line</ows:Title>
<ows:Identifier>line</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/line/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>mapnik</ows:Title>
<ows:Identifier>mapnik</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/mapnik/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>mapnik_grid</ows:Title>
<ows:Identifier>mapnik_grid</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>application/utfgrid</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts1/tiles/1.0.0/mapnik_grid/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>mapnik_grid_drop</ows:Title>
<ows:Identifier>mapnik_grid_drop</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>application/utfgrid</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts1/tiles/1.0.0/mapnik_grid_drop/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point</ows:Title>
<ows:Identifier>point</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point_hash</ows:Title>
<ows:Identifier>point_hash</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point_hash/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point_hash_no_meta</ows:Title>
<ows:Identifier>point_hash_no_meta</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point_hash_no_meta/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point_px_buffer</ows:Title>
<ows:Identifier>point_px_buffer</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point_px_buffer/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>polygon</ows:Title>
<ows:Identifier>polygon</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/polygon/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>polygon2</ows:Title>
<ows:Identifier>polygon2</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/polygon2/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_01</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<TileMatrixSet>
<ows:Identifier>swissgrid_01</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>1</ows:Identifier>
<ScaleDenominator>3571.4285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>1875</MatrixWidth>
<MatrixHeight>1250</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>0_2</ows:Identifier>
<ScaleDenominator>714.28571428[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>9375</MatrixWidth>
<MatrixHeight>6250</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>0_1</ows:Identifier>
<ScaleDenominator>357.14285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>18750</MatrixWidth>
<MatrixHeight>12500</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
<TileMatrixSet>
<ows:Identifier>swissgrid_025</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>0_25</ows:Identifier>
<ScaleDenominator>892.85714285[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>7500</MatrixWidth>
<MatrixHeight>5000</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
<TileMatrixSet>
<ows:Identifier>swissgrid_2_5</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>2_5</ows:Identifier>
<ScaleDenominator>8928.5714285[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>750</MatrixWidth>
<MatrixHeight>500</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
<TileMatrixSet>
<ows:Identifier>swissgrid_5</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>0</ows:Identifier>
<ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>19</MatrixWidth>
<MatrixHeight>13</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>1</ows:Identifier>
<ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>38</MatrixWidth>
<MatrixHeight>25</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>2</ows:Identifier>
<ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>94</MatrixWidth>
<MatrixHeight>63</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>3</ows:Identifier>
<ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>188</MatrixWidth>
<MatrixHeight>125</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>4</ows:Identifier>
<ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>375</MatrixWidth>
<MatrixHeight>250</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
</Contents>
</Capabilities>""",
True,
)
MULTIHOST_CAPABILITIES = (
r"""<\?xml version="1.0" encoding="UTF-8"\?>
<Capabilities version="1.0.0"
xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd">
<ows:ServiceIdentification>
<ows:Title>Some title</ows:Title>
<ows:Abstract>Some abstract</ows:Abstract>
<ows:Keywords>
<ows:Keyword>some</ows:Keyword>
<ows:Keyword>keywords</ows:Keyword>
</ows:Keywords>
<ows:ServiceType>OGC WMTS</ows:ServiceType>
<ows:ServiceTypeVersion>1.0.0</ows:ServiceTypeVersion>
<ows:Fees>None</ows:Fees>
<ows:AccessConstraint>None</ows:AccessConstraint>
</ows:ServiceIdentification>
<ows:ServiceProvider>
<ows:ProviderName>The provider name</ows:ProviderName>
<ows:ProviderSite>The provider URL</ows:ProviderSite>
<ows:ServiceContact>
<ows:IndividualName>The contact name</ows:IndividualName>
<ows:PositionName>The position name</ows:PositionName>
<ows:ContactInfo>
<ows:Phone>
<ows:Voice>\+41 11 222 33 44</ows:Voice>
<ows:Facsimile>\+41 11 222 33 44</ows:Facsimile>
</ows:Phone>
<ows:Address>
<ows:DeliveryPoint>Address delivery</ows:DeliveryPoint>
<ows:City>Berne</ows:City>
<ows:AdministrativeArea>BE</ows:AdministrativeArea>
<ows:PostalCode>3000</ows:PostalCode>
<ows:Country>Switzerland</ows:Country>
<ows:ElectronicMailAddress>info@example.com</ows:ElectronicMailAddress>
</ows:Address>
</ows:ContactInfo>
</ows:ServiceContact>
</ows:ServiceProvider>
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/1.0.0/WMTSCapabilities.xml">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
<ows:Operation name="GetTile">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
<ows:Get xlink:href="http://wmts2/tiles/">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
<ows:Get xlink:href="http://wmts3/tiles/">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
<!-- <ServiceMetadataURL xlink:href="" /> -->
<Contents>
<Layer>
<ows:Title>all</ows:Title>
<ows:Identifier>all</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/all/default/"""
r"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/all/default/"""
r"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/all/default/"""
r"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>line</ows:Title>
<ows:Identifier>line</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/line/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/line/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/line/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>mapnik</ows:Title>
<ows:Identifier>mapnik</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/mapnik/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/mapnik/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/mapnik/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>mapnik_grid</ows:Title>
<ows:Identifier>mapnik_grid</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>application/utfgrid</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts1/tiles/1.0.0/mapnik_grid/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts2/tiles/1.0.0/mapnik_grid/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts3/tiles/1.0.0/mapnik_grid/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>mapnik_grid_drop</ows:Title>
<ows:Identifier>mapnik_grid_drop</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>application/utfgrid</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts1/tiles/1.0.0/mapnik_grid_drop/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts2/tiles/1.0.0/mapnik_grid_drop/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<ResourceURL format="application/utfgrid" resourceType="tile"
template="http://wmts3/tiles/1.0.0/mapnik_grid_drop/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.json" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point</ows:Title>
<ows:Identifier>point</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/point/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/point/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point_hash</ows:Title>
<ows:Identifier>point_hash</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point_hash/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/point_hash/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/point_hash/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point_hash_no_meta</ows:Title>
<ows:Identifier>point_hash_no_meta</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point_hash_no_meta/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/point_hash_no_meta/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/point_hash_no_meta/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point_px_buffer</ows:Title>
<ows:Identifier>point_px_buffer</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point_px_buffer/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/point_px_buffer/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/point_px_buffer/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>polygon</ows:Title>
<ows:Identifier>polygon</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/polygon/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/polygon/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/polygon/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_5</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>polygon2</ows:Title>
<ows:Identifier>polygon2</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/polygon2/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts2/tiles/1.0.0/polygon2/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts3/tiles/1.0.0/polygon2/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid_01</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<TileMatrixSet>
<ows:Identifier>swissgrid_01</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>1</ows:Identifier>
<ScaleDenominator>3571.4285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>1875</MatrixWidth>
<MatrixHeight>1250</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>0_2</ows:Identifier>
<ScaleDenominator>714.28571428[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>9375</MatrixWidth>
<MatrixHeight>6250</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>0_1</ows:Identifier>
<ScaleDenominator>357.14285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>18750</MatrixWidth>
<MatrixHeight>12500</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
<TileMatrixSet>
<ows:Identifier>swissgrid_025</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>0_25</ows:Identifier>
<ScaleDenominator>892.85714285[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>7500</MatrixWidth>
<MatrixHeight>5000</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
<TileMatrixSet>
<ows:Identifier>swissgrid_2_5</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>2_5</ows:Identifier>
<ScaleDenominator>8928.5714285[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>750</MatrixWidth>
<MatrixHeight>500</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
<TileMatrixSet>
<ows:Identifier>swissgrid_5</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>0</ows:Identifier>
<ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>19</MatrixWidth>
<MatrixHeight>13</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>1</ows:Identifier>
<ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>38</MatrixWidth>
<MatrixHeight>25</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>2</ows:Identifier>
<ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>94</MatrixWidth>
<MatrixHeight>63</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>3</ows:Identifier>
<ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>188</MatrixWidth>
<MatrixHeight>125</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>4</ows:Identifier>
<ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>375</MatrixWidth>
<MatrixHeight>250</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
</Contents>
</Capabilities>"""
)
def test_multi_host_capabilities(self) -> None:
    """The capabilities of a cache configured with several hosts list every host URL."""
    generation = TileGeneration("tilegeneration/test-fix.yaml", configure_logging=False)
    capabilities = controller.get_wmts_capabilities(generation, "multi_host")
    self.assert_result_equals(capabilities, self.MULTIHOST_CAPABILITIES, True)
def test_capabilities_slash(self) -> None:
    """Check the WMTS capabilities generated from tilegeneration/test-capabilities.yaml.

    Covers three layers: one with no dimension (no_dim), one with a single
    DATE dimension (one) and one with two dimensions (two: DATE + LEVEL),
    all on the swissgrid tile matrix set.
    """
    gene = TileGeneration("tilegeneration/test-capabilities.yaml", configure_logging=False)
    config = gene.get_config("tilegeneration/test-capabilities.yaml")
    self.assert_result_equals(
        controller.get_wmts_capabilities(gene, config.config["generation"]["default_cache"]),
        r"""<\?xml version="1.0" encoding="UTF-8"\?>
<Capabilities version="1.0.0"
xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd">
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/1.0.0/WMTSCapabilities.xml">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
<ows:Operation name="GetTile">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
<!-- <ServiceMetadataURL xlink:href="" /> -->
<Contents>
<Layer>
<ows:Title>no_dim</ows:Title>
<ows:Identifier>no_dim</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/no_dim/default/"""
        """{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>one</ows:Title>
<ows:Identifier>one</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/one/default/"""
        """{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>two</ows:Title>
<ows:Identifier>two</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2012</Value>
</Dimension>
<Dimension>
<ows:Identifier>LEVEL</ows:Identifier>
<Default>1</Default>
<Value>1</Value>
<Value>2</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/two/default/"""
        """{DATE}/{LEVEL}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<TileMatrixSet>
<ows:Identifier>swissgrid</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>0</ows:Identifier>
<ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>19</MatrixWidth>
<MatrixHeight>13</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>1</ows:Identifier>
<ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>188</MatrixWidth>
<MatrixHeight>125</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
</Contents>
</Capabilities>""",
        True,  # NOTE(review): the expected value contains regex fragments ([0-9]*, \?), so True presumably selects regex comparison in assert_result_equals — confirm
    )
def test_multi_url_capabilities(self) -> None:
    """A cache configured with several http_urls yields the same capabilities as multi_host."""
    generation = TileGeneration("tilegeneration/test-fix.yaml", configure_logging=False)
    capabilities = controller.get_wmts_capabilities(generation, "multi_url")
    self.assert_result_equals(capabilities, self.MULTIHOST_CAPABILITIES, True)
# Expected output of "generate_controller --dump-config" for
# tilegeneration/test-fix.yaml; compared in test_config / test_config_line.
# NOTE(review): the point_px_buffer layer declares px_buffer twice
# (0 then 100) — presumably this mirrors exactly what the dumper emits;
# confirm against the fixture before "fixing" it.
CONFIG = """
caches:
local:
folder: /tmp/tiles
http_url: http://wmts1/tiles/
type: filesystem
wmtscapabilities_file: 1.0.0/WMTSCapabilities.xml
mbtiles:
folder: /tmp/tiles/mbtiles
http_url: http://wmts1/tiles/
type: mbtiles
multi_host:
folder: /tmp/tiles
hosts:
- wmts1
- wmts2
- wmts3
http_url: http://%(host)s/tiles/
type: filesystem
multi_url:
folder: /tmp/tiles
http_urls:
- http://wmts1/tiles/
- http://wmts2/tiles/
- http://wmts3/tiles/
type: filesystem
s3:
bucket: tiles
cache_control: public, max-age=14400
folder: tiles
host: s3-eu-west-1.amazonaws.com
http_url: https://%(host)s/%(bucket)s/%(folder)s/
type: s3
cost:
cloudfront:
download: 0.12
get: 0.009
request_per_layers: 10000000
s3:
download: 0.12
get: 0.01
put: 0.01
storage: 0.125
sqs:
request: 0.01
generation:
default_cache: local
default_layers:
- line
- polygon
error_file: error.list
maxconsecutive_errors: 2
number_process: 1
grids:
swissgrid_01:
bbox:
- 420000
- 30000
- 900000
- 350000
matrix_identifier: resolution
proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel
+towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs
resolution_scale: 10
resolutions:
- 1
- 0.2
- 0.1
srs: EPSG:21781
tile_size: 256
unit: m
swissgrid_025:
bbox:
- 420000
- 30000
- 900000
- 350000
matrix_identifier: resolution
proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel
+towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs
resolution_scale: 4
resolutions:
- 0.25
srs: EPSG:21781
tile_size: 256
unit: m
swissgrid_2_5:
bbox:
- 420000
- 30000
- 900000
- 350000
matrix_identifier: resolution
proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel
+towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs
resolution_scale: 2
resolutions:
- 2.5
srs: EPSG:21781
tile_size: 256
unit: m
swissgrid_5:
bbox:
- 420000
- 30000
- 900000
- 350000
matrix_identifier: zoom
proj4_literal: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=600000 +y_0=200000 +ellps=bessel
+towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs
resolution_scale: 1
resolutions:
- 100
- 50
- 20
- 10
- 5
srs: EPSG:21781
tile_size: 256
unit: m
layers:
all:
bbox:
- 550000.0
- 170000.0
- 560000.0
- 180000.0
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
extension: png
grid: swissgrid_5
headers:
Cache-Control: no-cache, no-store
Pragma: no-cache
layers: point,line,polygon
meta: false
meta_buffer: 128
meta_size: 8
mime_type: image/png
px_buffer: 0
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
line:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
empty_metatile_detection:
hash: 01062bb3b25dcead792d7824f9a7045f0dd92992
size: 20743
empty_tile_detection:
hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8
size: 334
extension: png
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.line
grid: swissgrid_5
headers:
Cache-Control: no-cache
layers: line
meta: true
meta_buffer: 128
meta_size: 8
mime_type: image/png
params:
PARAM: value
px_buffer: 0
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
mapnik:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
data_buffer: 128
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
extension: png
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.polygon
grid: swissgrid_5
layers: __all__
mapfile: mapfile/test.mapnik
meta: false
meta_buffer: 128
meta_size: 8
mime_type: image/png
output_format: png
px_buffer: 0
type: mapnik
wmts_style: default
mapnik_grid:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
data_buffer: 128
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
extension: json
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.polygon
grid: swissgrid_5
layers: __all__
layers_fields:
line:
- name
point:
- name
polygon:
- name
mapfile: mapfile/test.mapnik
meta: false
meta_buffer: 128
meta_size: 8
mime_type: application/utfgrid
output_format: grid
px_buffer: 0
resolution: 16
type: mapnik
wmts_style: default
mapnik_grid_drop:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
data_buffer: 128
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
drop_empty_utfgrid: true
extension: json
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.polygon
grid: swissgrid_5
layers: __all__
layers_fields:
point:
- name
mapfile: mapfile/test.mapnik
meta: false
meta_buffer: 0
meta_size: 8
mime_type: application/utfgrid
output_format: grid
px_buffer: 0
resolution: 16
type: mapnik
wmts_style: default
point:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
extension: png
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.point
grid: swissgrid_5
headers:
Cache-Control: no-cache, no-store
Pragma: no-cache
layers: point
meta: true
meta_buffer: 128
meta_size: 8
mime_type: image/png
min_resolution_seed: 10
px_buffer: 0
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
point_hash:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
empty_metatile_detection:
hash: 01062bb3b25dcead792d7824f9a7045f0dd92992
size: 20743
empty_tile_detection:
hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8
size: 334
extension: png
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.point
grid: swissgrid_5
headers:
Cache-Control: no-cache, no-store
Pragma: no-cache
layers: point
meta: true
meta_buffer: 128
meta_size: 8
mime_type: image/png
min_resolution_seed: 10
px_buffer: 0
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
point_hash_no_meta:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
empty_tile_detection:
hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8
size: 334
extension: png
grid: swissgrid_5
headers:
Cache-Control: no-cache, no-store
Pragma: no-cache
layers: point
meta: false
meta_buffer: 128
meta_size: 8
mime_type: image/png
px_buffer: 0
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
point_px_buffer:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
empty_metatile_detection:
hash: 01062bb3b25dcead792d7824f9a7045f0dd92992
size: 20743
empty_tile_detection:
hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8
size: 334
extension: png
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.point
grid: swissgrid_5
headers:
Cache-Control: no-cache, no-store
Pragma: no-cache
layers: point
meta: true
meta_buffer: 128
meta_size: 8
mime_type: image/png
px_buffer: 0
px_buffer: 100
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
polygon:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
empty_metatile_detection:
hash: 01062bb3b25dcead792d7824f9a7045f0dd92992
size: 20743
empty_tile_detection:
hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8
size: 334
extension: png
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.polygon
grid: swissgrid_5
headers:
Cache-Control: no-cache, no-store
Pragma: no-cache
layers: polygon
meta: false
meta_buffer: 128
meta_size: 8
mime_type: image/png
px_buffer: 0
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
polygon2:
cost:
metatile_generation_time: 30
tile_generation_time: 30
tile_size: 20
tileonly_generation_time: 60
dimensions:
- default: '2012'
generate:
- '2012'
name: DATE
values:
- '2005'
- '2010'
- '2012'
empty_metatile_detection:
hash: 01062bb3b25dcead792d7824f9a7045f0dd92992
size: 20743
empty_tile_detection:
hash: dd6cb45962bccb3ad2450ab07011ef88f766eda8
size: 334
extension: png
geoms:
- connection: user=postgres password=postgres dbname=tests host=db
sql: the_geom AS geom FROM tests.polygon
grid: swissgrid_01
headers:
Cache-Control: no-cache, no-store
Pragma: no-cache
layers: polygon
meta: true
meta_buffer: 128
meta_size: 8
mime_type: image/png
px_buffer: 0
type: wms
url: http://mapserver:8080/mapserv
wmts_style: default
metadata:
abstract: Some abstract
access_constraints: None
fees: None
keywords:
- some
- keywords
servicetype: OGC WMTS
title: Some title
openlayers:
center_x: 600000
center_y: 200000
zoom: 3
srs: EPSG:21781
proj4js_def: +proj=somerc +lat_0=46.95240555555556 +lon_0=7.439583333333333 +k_0=1 +x_0=2600000 +y_0=1200000 +ellps=bessel
+towgs84=674.374,15.056,405.346,0,0,0,0 +units=m +no_defs
provider:
contact:
info:
address:
area: BE
city: Berne
country: Switzerland
delivery: Address delivery
email: info@example.com
postal_code: 3000
phone:
fax: +41 11 222 33 44
voice: +41 11 222 33 44
name: The contact name
position: The position name
name: The provider name
url: The provider URL
sns:
region: eu-west-1
topic: arn:aws:sns:eu-west-1:your-account-id:tilecloud
sqs:
queue: sqs_point
"""
def test_config(self) -> None:
    """The dumped configuration of test-fix.yaml matches the expected YAML."""
    command = ".build/venv/bin/generate_controller --dump-config -c tilegeneration/test-fix.yaml"
    self.assert_cmd_yaml_equals(cmd=command, main_func=controller.main, expected=self.CONFIG)
def test_config_line(self) -> None:
    """Restricting to a single layer (-l line) still dumps the whole configuration."""
    command = ".build/venv/bin/generate_controller -l line --dump-config -c tilegeneration/test-fix.yaml"
    self.assert_cmd_yaml_equals(cmd=command, main_func=controller.main, expected=self.CONFIG)
def test_quote(self) -> None:
    """quote() shell-escapes strings containing spaces or quote characters."""
    from tilecloud_chain import quote

    # (raw input, expected escaped form) — checked in order, same as the
    # original assertion sequence.
    cases = [
        ("abc", "abc"),
        ("a b c", "'a b c'"),
        ("'a b c'", "\"'a b c'\""),
        ('"a b c"', "'\"a b c\"'"),
        ("a\" b' c", "'a\" b\\' c'"),
        ("a'bc", '"a\'bc"'),
        ("a'b\"c", "'a\\'b\"c'"),
        ('ab"c', "'ab\"c'"),
        ("", "''"),
    ]
    for raw, escaped in cases:
        self.assertEqual(quote(raw), escaped)
def test_legends(self) -> None:
self.assert_tiles_generated(
cmd=".build/venv/bin/generate_controler -c tilegeneration/test-legends.yaml --legends",
main_func=controller.main,
directory="/tmp/tiles/1.0.0/",
tiles_pattern="%s/default/legend%i.png",
tiles=[("point", 0), ("line", 0), ("line", 2), ("polygon", 0), ("all", 0), ("all", 2)],
)
gene = TileGeneration("tilegeneration/test-legends.yaml", configure_logging=False)
config = gene.get_config("tilegeneration/test-legends.yaml")
self.assert_result_equals(
controller.get_wmts_capabilities(gene, config.config["generation"]["default_cache"]),
r"""<\?xml version="1.0" encoding="UTF-8"\?>
<Capabilities version="1.0.0"
xmlns="http://www.opengis.net/wmts/1.0"
xmlns:ows="http://www.opengis.net/ows/1.1"
xmlns:xlink="http://www.w3.org/1999/xlink"
xmlns:xsi="http://www.w3.org/2001/XMLSchema-instance"
xmlns:gml="http://www.opengis.net/gml"
xsi:schemaLocation="http://schemas.opengis.net/wmts/1.0/wmtsGetCapabilities_response.xsd">
<ows:OperationsMetadata>
<ows:Operation name="GetCapabilities">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/1.0.0/WMTSCapabilities.xml">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
<ows:Operation name="GetTile">
<ows:DCP>
<ows:HTTP>
<ows:Get xlink:href="http://wmts1/tiles/">
<ows:Constraint name="GetEncoding">
<ows:AllowedValues>
<ows:Value>REST</ows:Value>
</ows:AllowedValues>
</ows:Constraint>
</ows:Get>
</ows:HTTP>
</ows:DCP>
</ows:Operation>
</ows:OperationsMetadata>
<!-- <ServiceMetadataURL xlink:href="" /> -->
<Contents>
<Layer>
<ows:Title>all</ows:Title>
<ows:Identifier>all</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
<LegendURL format="image/png" xlink:href="http://wmts1/tiles/1.0.0/all/default/legend0.png" """
"""width="[0-9]*" height="[0-9]*" minScaleDenominator="112938.48786[0-9]*" />
<LegendURL format="image/png" xlink:href="http://wmts1/tiles/1.0.0/all/default/legend2.png" """
"""width="[0-9]*" height="[0-9]*" maxScaleDenominator="112938.48786[0-9]*" />
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/all/default/"""
r"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>line</ows:Title>
<ows:Identifier>line</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
<LegendURL format="image/png" xlink:href="http://wmts1/tiles/1.0.0/line/default/legend0.png" """
r"""width="[0-9]*" height="[0-9]*" minScaleDenominator="112938.48786[0-9]*" />
<LegendURL format="image/png" xlink:href="http://wmts1/tiles/1.0.0/line/default/legend2.png" """
r"""width="[0-9]*" height="[0-9]*" maxScaleDenominator="112938.48786[0-9]*" />
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/line/default/"""
r"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>point</ows:Title>
<ows:Identifier>point</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
<LegendURL format="image/png" xlink:href="http://wmts1/tiles/1.0.0/point/default/legend0.png" """
"""width="[0-9]*" height="[0-9]*" />
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/point/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<Layer>
<ows:Title>polygon</ows:Title>
<ows:Identifier>polygon</ows:Identifier>
<Style isDefault="true">
<ows:Identifier>default</ows:Identifier>
<LegendURL format="image/png" xlink:href="http://wmts1/tiles/1.0.0/polygon/default/legend0.png" """
"""width="[0-9]*" height="[0-9]*" />
</Style>
<Format>image/png</Format>
<Dimension>
<ows:Identifier>DATE</ows:Identifier>
<Default>2012</Default>
<Value>2005</Value>
<Value>2010</Value>
<Value>2012</Value>
</Dimension>
<ResourceURL format="image/png" resourceType="tile"
template="http://wmts1/tiles/1.0.0/polygon/default/"""
"""{DATE}/{TileMatrixSet}/{TileMatrix}/{TileRow}/{TileCol}.png" />
<TileMatrixSetLink>
<TileMatrixSet>swissgrid</TileMatrixSet>
</TileMatrixSetLink>
</Layer>
<TileMatrixSet>
<ows:Identifier>swissgrid</ows:Identifier>
<ows:SupportedCRS>urn:ogc:def:crs:EPSG::21781</ows:SupportedCRS>
<TileMatrix>
<ows:Identifier>0</ows:Identifier>
<ScaleDenominator>357142.85714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>19</MatrixWidth>
<MatrixHeight>13</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>1</ows:Identifier>
<ScaleDenominator>178571.42857[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>38</MatrixWidth>
<MatrixHeight>25</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>2</ows:Identifier>
<ScaleDenominator>71428.571428[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>94</MatrixWidth>
<MatrixHeight>63</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>3</ows:Identifier>
<ScaleDenominator>35714.285714[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>188</MatrixWidth>
<MatrixHeight>125</MatrixHeight>
</TileMatrix>
<TileMatrix>
<ows:Identifier>4</ows:Identifier>
<ScaleDenominator>17857.142857[0-9]*</ScaleDenominator>
<TopLeftCorner>420000 350000</TopLeftCorner>
<TileWidth>256</TileWidth>
<TileHeight>256</TileHeight>
<MatrixWidth>375</MatrixWidth>
<MatrixHeight>250</MatrixHeight>
</TileMatrix>
</TileMatrixSet>
</Contents>
</Capabilities>""",
True,
)
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,613
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/gunicorn.conf.py
|
###
# app configuration
# https://docs.gunicorn.org/en/stable/settings.html
###
import os
import gunicorn.arbiter
import gunicorn.workers.base
from c2cwsgiutils import get_config_defaults, prometheus
from prometheus_client import multiprocess
bind = ":8080"  # listen on every interface, port 8080
worker_class = "gthread"  # threaded workers: one process runs several request threads
# NOTE(review): os.environ.get returns a str when the variable is set and the
# int default otherwise; presumably gunicorn coerces both forms -- confirm.
workers = os.environ.get("GUNICORN_WORKERS", 2)
threads = os.environ.get("GUNICORN_THREADS", 10)
preload = "true"  # import the application in the master before forking workers
accesslog = "-"  # write the access log to stdout
# Access-log line layout, overridable through GUNICORN_ACCESS_LOG_FORMAT.
access_log_format = os.environ.get(
    "GUNICORN_ACCESS_LOG_FORMAT",
    '%(H)s %({Host}i)s %(m)s %(U)s?%(q)s "%(f)s" "%(a)s" %(s)s %(B)s %(D)s %(p)s',
)
###
# logging configuration
# https://docs.python.org/3/library/logging.config.html#logging-config-dictschema
###
# Python dictConfig: every per-logger level and the chosen output handler are
# driven by environment variables so they can be tuned per deployment.
logconfig_dict = {
    "version": 1,
    "root": {
        "level": os.environ["OTHER_LOG_LEVEL"],
        # LOG_TYPE is expected to name one of the handlers defined below
        # ("console" or "json").
        "handlers": [os.environ["LOG_TYPE"]],
    },
    "loggers": {
        "gunicorn.error": {"level": os.environ["GUNICORN_LOG_LEVEL"]},
        # "level = INFO" logs SQL queries.
        # "level = DEBUG" logs SQL queries and results.
        # "level = WARN" logs neither. (Recommended for production systems.)
        "sqlalchemy.engine": {"level": os.environ["SQL_LOG_LEVEL"]},
        "c2cwsgiutils": {"level": os.environ["C2CWSGIUTILS_LOG_LEVEL"]},
        "tilecloud": {"level": os.environ["TILECLOUD_LOG_LEVEL"]},
        "tilecloud_chain": {"level": os.environ["TILECLOUD_CHAIN_LOG_LEVEL"]},
    },
    "handlers": {
        # Plain-text log lines on stdout.
        "console": {
            "class": "logging.StreamHandler",
            "formatter": "generic",
            "stream": "ext://sys.stdout",
        },
        # Structured (JSON) log lines on stdout.
        "json": {
            "class": "tilecloud_chain.JsonLogHandler",
            "formatter": "generic",
            "stream": "ext://sys.stdout",
        },
    },
    "formatters": {
        "generic": {
            "format": "%(asctime)s [%(process)d] [%(levelname)-5.5s] %(message)s",
            "datefmt": "[%Y-%m-%d %H:%M:%S %z]",
            "class": "logging.Formatter",
        }
    },
}
# Forward the c2cwsgiutils configuration defaults to the WSGI application as
# "key=value" paste global_conf entries.
raw_paste_global_conf = ["=".join(e) for e in get_config_defaults().items()]
def on_starting(server: gunicorn.arbiter.Arbiter) -> None:
    """
    Start the Prometheus metrics server.

    Gunicorn hook, invoked in the master process just before it initializes.
    """
    del server  # the hook signature requires the argument; it is unused here
    prometheus.start()
def post_fork(server: gunicorn.arbiter.Arbiter, worker: gunicorn.workers.base.Worker) -> None:
    """
    Clean up the configuration state inherited from the main process.

    Gunicorn hook, invoked in the worker right after it has been forked.
    """
    del server, worker  # unused; required by the hook signature
    prometheus.cleanup()
def child_exit(server: gunicorn.arbiter.Arbiter, worker: gunicorn.workers.base.Worker) -> None:
    """
    Drop the Prometheus metrics collected by a worker that died.

    Gunicorn hook, invoked in the master process just after a worker exited.
    """
    del server  # unused; required by the hook signature
    multiprocess.mark_process_dead(worker.pid)  # type: ignore [no-untyped-call]
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,614
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/tests/test_config.py
|
import os
from testfixtures import LogCapture
from tilecloud_chain import controller
from tilecloud_chain.tests import CompareCase
class TestConfig(CompareCase):
    """Check that the config file is parsed without any log message being emitted."""

    def setUp(self) -> None:  # noqa
        # Show full diffs on assertion failures.
        self.maxDiff = None

    @classmethod
    def setUpClass(cls):  # noqa
        # Test fixtures are referenced relative to this directory.
        os.chdir(os.path.dirname(__file__))

    @classmethod
    def tearDownClass(cls):  # noqa
        # Walk three levels up, back to the repository root.
        root = __file__
        for _ in range(3):
            root = os.path.dirname(root)
        os.chdir(root)

    def test_int_grid(self) -> None:
        command = ".build/venv/bin/generate_controller -c tilegeneration/test-int-grid.yaml --dump-config"
        with LogCapture("tilecloud_chain") as log_capture:
            self.run_cmd(cmd=command, main_func=controller.main)
        # Nothing should have been logged while dumping the configuration.
        log_capture.check()
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
14,615
|
camptocamp/tilecloud-chain
|
refs/heads/master
|
/tilecloud_chain/format.py
|
from datetime import timedelta
from typing import Tuple
def default_int(number_array: Tuple[float, float, float, float]) -> Tuple[int, int, int, int]:
    """Truncate each of the four floats to an int, preserving the order."""
    first, second, third, fourth = number_array
    return (int(first), int(second), int(third), int(fourth))
def size_format(number: float) -> str:
    """Return *number* (a byte count) as a human readable size.

    Binary prefixes are used ("Kio" == 1024 "o", and so on). Values below 10
    of a unit keep one decimal; larger ones are rounded to an integer.
    Anything at or above 1024 Tio stays expressed in Tio, the largest unit.
    """
    for unit in ("o", "Kio", "Mio", "Gio", "Tio"):
        if number < 1024.0:
            if number < 10:
                return f"{number:.1f} {unit}"
            return f"{number:.0f} {unit}"
        number /= 1024.0
    # Fell through the loop: the value is >= 1024 Tio. The last iteration
    # divided once too often, so scale back up before formatting. (The
    # previous code formatted the over-divided value, reporting such sizes
    # 1024x too small, e.g. "1 Tio" instead of "1024 Tio".)
    return f"{number * 1024.0:.0f} Tio"
def duration_format(duration: timedelta) -> str:
    """Render *duration* as ``[days ]H:MM:SS`` (the day count only when nonzero).

    Note: only ``duration.seconds`` (the intra-day remainder) feeds the clock
    part, which is why the day count is reported separately.
    """
    secs = duration.seconds
    clock = f"{secs // 3600}:{secs % 3600 // 60:02d}:{secs % 60:02d}"
    if duration.days > 0:
        return f"{duration.days} {clock}"
    return clock
|
{"/tilecloud_chain/copy_.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/controller.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/views/admin.py": ["/tilecloud_chain/server.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/server.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/__init__.py", "/tilecloud_chain/controller.py"], "/tilecloud_chain/expiretiles.py": ["/tilecloud_chain/__init__.py"], "/tilecloud_chain/tests/test_controller.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_config.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_expiretiles.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/__init__.py": ["/tilecloud_chain/security.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py"], "/tilecloud_chain/tests/test_generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/tests/test_serve.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/server.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/internal_mapcache.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/generate.py", "/tilecloud_chain/server.py"], "/tilecloud_chain/tests/test_cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/cost.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/format.py"], "/tilecloud_chain/tests/test_copy.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"], "/tilecloud_chain/generate.py": ["/tilecloud_chain/__init__.py", "/tilecloud_chain/database_logger.py", "/tilecloud_chain/format.py", "/tilecloud_chain/multitilestore.py", "/tilecloud_chain/timedtilestore.py", "/tilecloud_chain/mapnik_.py"], "/tilecloud_chain/tests/test_error.py": 
["/tilecloud_chain/__init__.py", "/tilecloud_chain/tests/__init__.py"]}
|
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.