text stringlengths 0 1.05M | meta dict |
|---|---|
''' Ammo producers '''
from .util import get_opener, FactoryBase
from .module_exceptions import ConfigurationError
from .guns.http2 import Http2Ammo
import logging
logger = logging.getLogger(__name__)
class LineReader(object):
    ''' One line -- one missile.

    Iterates over the ammo file forever, seeking back to the start at EOF.
    Each line is split into at most two whitespace-separated fields and
    yielded as a tuple ``(second_field_or_empty, first_field)``.
    '''

    def __init__(self, filename, **kwargs):
        # extra kwargs are accepted (and ignored) for factory compatibility
        self.filename = filename

    def __iter__(self):
        logger.info("LineReader. Using '%s' as ammo source", self.filename)
        with get_opener(self.filename)(self.filename, 'r') as ammo_file:
            while True:
                for line in ammo_file:
                    parts = line.rstrip('\r\n').split(maxsplit=1)
                    if len(parts) == 2:
                        yield (parts[1], parts[0])
                    elif len(parts) == 1:
                        yield ("", parts[0])
                    else:
                        # BUG FIX: a blank line produces an empty split and
                        # previously raised RuntimeError("Unreachable branch"),
                        # which was in fact reachable; skip blank lines instead
                        continue
                logger.debug("EOF. Restarting from the beginning")
                ammo_file.seek(0)
class Group(object):
    ''' Group missiles into batches.

    Yields tuples ("multi-<group_size>", [item, ...]) with exactly
    ``group_size`` items each; a trailing partial batch is dropped when the
    underlying iterable is exhausted.
    '''

    def __init__(self, iterable, group_size):
        self.group_size = group_size
        self.iterable = iter(iterable)

    def __iter__(self):
        while True:
            batch = []
            try:
                for _ in range(self.group_size):
                    batch.append(next(self.iterable))
            except StopIteration:
                # BUG FIX (PEP 479): since Python 3.7 a StopIteration that
                # escapes a generator body is converted to RuntimeError, so
                # the bare `next()` inside the old list comprehension crashed
                # on a finite source. Stop the generator cleanly instead.
                return
            yield ("multi-%s" % self.group_size, batch)
class Http2AmmoProducer(object):
    ''' Create HTTP/2 missiles from data.

    Wraps every item from the underlying iterable in an Http2Ammo GET
    request with empty headers and no body.
    '''

    def __init__(self, iterable):
        self.iterable = iter(iterable)

    def __iter__(self):
        # BUG FIX (PEP 479): since Python 3.7 a StopIteration escaping a
        # generator body becomes RuntimeError, so the old
        # `while True: next(self.iterable)` form crashed when the source
        # iterable ended. A plain for-loop terminates cleanly.
        for ammo in self.iterable:
            yield Http2Ammo("GET", ammo, {}, None)
class AmmoFactory(FactoryBase):
    ''' Builds ammo readers from the factory configuration. '''

    FACTORY_NAME = 'ammo'

    def get(self, key):
        '''
        Return a _new_ reader every time
        '''
        # guard clause: unknown keys are a configuration error
        if key not in self.factory_config:
            raise ConfigurationError(
                "Configuration for %s ammo not found" % key)
        ammo_config = self.factory_config.get(key)
        reader = LineReader(ammo_config.get("file"))
        # wrap in a batching reader only when batches are requested
        batch_size = ammo_config.get("batch", 1)
        if batch_size > 1:
            reader = Group(reader, batch_size)
        return reader
| {
"repo_name": "direvius/bfg",
"path": "bfg/ammo.py",
"copies": "1",
"size": "2290",
"license": "mit",
"hash": -3124352313261592000,
"line_mean": 28.358974359,
"line_max": 78,
"alpha_frac": 0.5441048035,
"autogenerated": false,
"ratio": 3.982608695652174,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5026713499152173,
"avg_score": null,
"num_lines": null
} |
# Amnesia is a structure that automatically suppresses incoming weights gradually.
# In a way, a block will tend to forget what it's learnt and throw away unnecessary inputs
# The tendency to forget will counteract with the tendency to learn, until convergence.
# Incoming weights cannot be suppressed individually, since only the weights combined has meaning.
# Blocked modules also have analytical significances.
# So, the weights should be treated in blocks and regulated together by batches.
# Some smart design is probably necessary to control the memory loss coefficient.
# It should not be kept as a constant, once there is enough reason to believe the incoming block is necessary,
# the regulation should stop. But if one dies out without "too much" sacrifice on some good criterion, then the
# network should be regulated this way.
# In a sense, amne creates branches in the development of a neural network, and if better, fine, if not, then should
# go back.
# This is not a search method. Nothing can solve the search problem.
# Let's sketch out what I'm planning to do now. This is one of more complicated projects:
# TODO blocks of neurons
# downstream data should have a handle to control the flow rates of every block upstream
# this vault should be a parameter that allows backprop training
# block of neurons can be implemented two ways:
# 1, group up output of feature vectors with a scaling factor, pipe them up a[]+b[] manner
# 2, also feature vectors, but summed up as a single vector to be passed downstream
# One might prefer the latter, but I prefer the former, mainly because vectors might need to be differentiated
# f(a,b) has more expressiveness than f(a+b)
# One could argue that this is the latter's advantage, contrarily, due to native regularization.
# we might try both.
# TODO suppressive derivatives
# add a parameter to the derivative backprop of flow rates
# have this parameter be the output of some control modules
# the vault will be trained and backpropped, but that's only if it surpasses the rate of the loss of memory.
# this need to be controlled reasonably
# TODO probing method
# some plotting method would be great
# I need a way to monitor the vault coefficient
# TODO * vertical regulation
# what if I want to add or remove a whole stratum?
# not a problem for now.
# TODO * complete block control
# the blocks can be regulated if it exists already
# well, I should be able to add modules dynamically to the graph at runtime
# or just save the weights and do everything again.
# this should not be a difficult job. In fact, I see a lot of advantages.
# I can randomly initiate a block and set the flow to be zero. Then the block must fight to make itself important.
# If not, it must be randomly initiated again. Notice the behavior of initial derivative. I do not know the mathematical
# property of the derivative.
# I want to add blocks until suppressive derivatives turn out to be effective.
# I want blocks to compete with each other, and have the similar ones to eliminate the weaker ones
# This will then yield a SVG-like vector group
# TODO ** reuse features
# blocks of neurons natively support many analytical properties that allow the features to be reused
import torch
import torchvision.models.vgg
import torchvision.transforms as transforms
import torchvision.datasets
from torch.utils.data.dataloader import DataLoader
import torch.nn as nn
from torch.nn.parameter import Parameter
import time
import math
import shutil
from torch.autograd import Variable
from torch.autograd.function import Function
from amne.modi_cifar import CIFAR10
from amne.vgg_modified import vgg_feature
import sys
class vault_mul(Function):
    # modified multiplication for vaults
    # Custom autograd op: forward scales `feature` by the scalar `vault_coef`;
    # backward rewrites the coefficient's gradient so the coefficient is
    # continually pushed toward zero (the "amnesia" suppression).
    @staticmethod
    def forward(ctx, vault_coef, feature):
        # NOTE(review): tensors are stashed directly on ctx instead of going
        # through ctx.save_for_backward — verify this is intentional, as it
        # bypasses autograd's bookkeeping for saved tensors.
        ctx.feature=feature
        ctx.vault_coef=vault_coef
        return vault_coef * feature
    # vault_coef needs to be saved in the module and initiated to be 1
    @staticmethod
    def backward(ctx,grad_outputs):
        # normal return:
        # return grad_outputs*feature, grad_outputs*vault_coef
        # coefficient should always behave as if 0 is the best choice available
        # so when it's positive, it should receive a signal for negative
        # vice versa
        # at the moment I'm suppressing the vault coefficients according to their scale
        # return Variable((0.0001*torch.sign(ctx.vault_coef)+ctx.feature)*grad_outputs.sign().data),\
        # Variable(grad_outputs.data*ctx.vault_coef)
        # per-element suppression signal: 0.0001*sign(coef) bias plus the
        # saved feature, multiplied by the sign of the incoming gradient
        val=(0.0001 * torch.sign(ctx.vault_coef) + ctx.feature) * grad_outputs.sign().data
        # the coefficient gradient is collapsed to a single scalar via .sum();
        # the feature gradient is the usual chain-rule term grad * coef.
        # NOTE(review): hard-codes .cuda(), so this op assumes a GPU run.
        return Variable(torch.Tensor([val.sum()]).cuda()), \
            Variable(grad_outputs.data * ctx.vault_coef)
# parameterize the coefficient
# custom define the backward method on the amne modules. modify the backward method based on autograd
class Amnesia_I(nn.Module):
    # connect all feature blocks by piping them up with coefficient
    # Variant I: four vgg feature extractors run in parallel, each output is
    # scaled by its own vault coefficient through vault_mul, and the scaled
    # features are concatenated before a fully connected classifier.
    def __init__(self, num_classes):
        super(Amnesia_I, self).__init__()
        # four parallel feature blocks, each producing (batch, 512)
        self.block1 = vgg_feature()
        self.block2 = vgg_feature()
        self.block3 = vgg_feature()
        self.block4 = vgg_feature()
        # one vault coefficient per block, initialized to 1
        self.v1 = Parameter(torch.FloatTensor([1]).cuda())
        self.v2 = Parameter(torch.FloatTensor([1]).cuda())
        self.v3 = Parameter(torch.FloatTensor([1]).cuda())
        self.v4 = Parameter(torch.FloatTensor([1]).cuda())
        # I will use a fully connected classifier now
        # If this does not work, I will try to put amne at convolution layer
        self.classifier = nn.Sequential(
            nn.Linear(2048, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

    def forward(self, input):
        # scale every block's features by its vault coefficient
        pairs = (
            (self.v1, self.block1),
            (self.v2, self.block2),
            (self.v3, self.block3),
            (self.v4, self.block4),
        )
        scaled = [vault_mul.apply(coef, block(input)) for coef, block in pairs]
        # concatenate along the feature dimension -> (batch, 2048)
        x = torch.cat(scaled, 1)
        x = x.view(x.size(0), -1)
        return self.classifier(x)
class Amnesia_II(nn.Module):
    """Amnesia variant II: sum the scaled feature blocks (RN manner).

    Four vgg feature extractors run in parallel; each output is multiplied
    by its own vault coefficient and the four are summed into a single
    (batch, 512) feature vector before the fully connected classifier.
    """

    def __init__(self, num_classes):
        super(Amnesia_II, self).__init__()
        self.block1 = vgg_feature()  # each produces (batch, 512)
        self.block2 = vgg_feature()
        self.block3 = vgg_feature()
        self.block4 = vgg_feature()
        # BUG FIX: torch.FloatTensor(1) allocates an *uninitialized*
        # 1-element tensor (arbitrary memory garbage). The coefficients must
        # start at 1 — matching Amnesia_I — so construct from the literal [1].
        self.v1 = Parameter(torch.FloatTensor([1]))
        self.v2 = Parameter(torch.FloatTensor([1]))
        self.v3 = Parameter(torch.FloatTensor([1]))
        self.v4 = Parameter(torch.FloatTensor([1]))
        self.classifier = nn.Sequential(
            nn.Linear(512, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, 4096),
            nn.ReLU(True),
            nn.Dropout(),
            nn.Linear(4096, num_classes),
        )

    def forward(self, input):
        # scale each feature block by its coefficient, then sum (RN-style)
        o1 = self.v1 * self.block1(input)
        o2 = self.v2 * self.block2(input)
        o3 = self.v3 * self.block3(input)
        o4 = self.v4 * self.block4(input)
        x = o1 + o2 + o3 + o4
        x = x.view(x.size(0), -1)
        x = self.classifier(x)
        return x
class AverageMeter(object):
    """Tracks the most recent value and a running (count-weighted) average."""

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val = 0
        self.avg = 0
        self.sum = 0
        self.count = 0

    def update(self, val, n=1):
        """Record `val` observed `n` times and refresh the running average."""
        self.val = val
        self.count = self.count + n
        self.sum = self.sum + val * n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k.

    Args:
        output: (batch, num_classes) tensor of class scores.
        target: (batch,) tensor of ground-truth class indices.
        topk: tuple of k values to report.

    Returns:
        A list with one tensor per k, each holding the percentage of
        samples whose target appears within the top-k predictions.
    """
    maxk = max(topk)
    batch_size = target.size(0)
    # indices of the top-maxk classes per sample, shape (batch, maxk)
    _, pred = output.topk(maxk, 1, True, True)
    pred = pred.t()  # (maxk, batch): row j holds everyone's (j+1)-th guess
    correct = pred.eq(target.view(1, -1).expand_as(pred))
    res = []
    for k in topk:
        # BUG FIX: use reshape(-1) instead of view(-1) — on newer PyTorch
        # versions the sliced boolean tensor can be non-contiguous, which
        # makes .view() raise a RuntimeError.
        correct_k = correct[:k].reshape(-1).float().sum(0)
        res.append(correct_k.mul_(100.0 / batch_size))
    return res
def train(train_loader, model, criterion, optimizer, epoch):
    """Run one training epoch over `train_loader`.

    Moves the model to the GPU, computes loss/accuracy for every batch,
    applies an optimizer step, and prints running metrics every 10 batches.
    Mutates `model` and `optimizer` in place.
    """
    batch_time = AverageMeter()
    data_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    print_freq = 10  # hoisted: loop-invariant
    # switch to train mode
    model.cuda()
    model.train()
    end = time.time()
    for i, (input, target) in enumerate(train_loader):
        # measure data loading time
        data_time.update(time.time() - end)
        # BUG FIX: `async=True` is a SyntaxError since Python 3.7 (`async`
        # became a keyword); `non_blocking=True` is the replacement.
        target = target.cuda(non_blocking=True)
        input_var = torch.autograd.Variable(input).cuda()
        target_var = torch.autograd.Variable(target).cuda()
        # compute output
        output = model(input_var)
        loss = criterion(output, target_var)
        # measure accuracy and record loss
        prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
        # .item() replaces the deprecated 0-dim indexing `loss.data[0]`
        losses.update(loss.item(), input.size(0))
        top1.update(prec1.item(), input.size(0))
        top5.update(prec5.item(), input.size(0))
        # compute gradient and do SGD step
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        # measure elapsed time
        batch_time.update(time.time() - end)
        end = time.time()
        if i % print_freq == 0:
            print('Epoch: [{0}][{1}/{2}]\t'
                  'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                  'Data {data_time.val:.3f} ({data_time.avg:.3f})\t'
                  'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                  'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                  'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                      epoch, i, len(train_loader), batch_time=batch_time,
                      data_time=data_time, loss=losses, top1=top1, top5=top5))
def validate(val_loader, model, criterion):
    """Evaluate `model` on `val_loader` and return the average Prec@1.

    Runs in eval mode without gradients, printing running metrics every
    10 batches and a final summary line.
    """
    batch_time = AverageMeter()
    losses = AverageMeter()
    top1 = AverageMeter()
    top5 = AverageMeter()
    print_freq = 10  # hoisted: loop-invariant
    # switch to evaluate mode
    model.eval()
    end = time.time()
    # BUG FIX: the `volatile=True` Variable flag was removed from PyTorch;
    # torch.no_grad() is the supported way to disable gradient tracking.
    with torch.no_grad():
        for i, (input, target) in enumerate(val_loader):
            # BUG FIX: `async=True` is a SyntaxError since Python 3.7
            target = target.cuda(non_blocking=True)
            input_var = torch.autograd.Variable(input).cuda()
            target_var = torch.autograd.Variable(target).cuda()
            # compute output
            output = model(input_var)
            loss = criterion(output, target_var)
            # measure accuracy and record loss
            prec1, prec5 = accuracy(output.data, target, topk=(1, 5))
            # .item() replaces the deprecated 0-dim indexing `loss.data[0]`
            losses.update(loss.item(), input.size(0))
            top1.update(prec1.item(), input.size(0))
            top5.update(prec5.item(), input.size(0))
            # measure elapsed time
            batch_time.update(time.time() - end)
            end = time.time()
            if i % print_freq == 0:
                print('Test: [{0}/{1}]\t'
                      'Time {batch_time.val:.3f} ({batch_time.avg:.3f})\t'
                      'Loss {loss.val:.4f} ({loss.avg:.4f})\t'
                      'Prec@1 {top1.val:.3f} ({top1.avg:.3f})\t'
                      'Prec@5 {top5.val:.3f} ({top5.avg:.3f})'.format(
                          i, len(val_loader), batch_time=batch_time, loss=losses,
                          top1=top1, top5=top5))
    print(' * Prec@1 {top1.avg:.3f} Prec@5 {top5.avg:.3f}'
          .format(top1=top1, top5=top5))
    return top1.avg
def save_checkpoint(state, is_best, filename='checkpoint.pth.tar'):
    """Serialize `state` to `filename`; mirror it to model_best.pth.tar when best."""
    torch.save(state, filename)
    if not is_best:
        return
    shutil.copyfile(filename, 'model_best.pth.tar')
best_prec1 = 0
def main():
    """Train Amnesia_I on CIFAR-10 for 20 epochs, checkpointing the best model."""
    global best_prec1
    model = Amnesia_I(10)
    # both splits live under the same local dataset root
    root = '/Users/JasonHu/datasets/cifar-10-batches-py'
    train_set = CIFAR10(root=root, train=True, download=False,
                        transform=transforms.ToTensor())
    test_set = CIFAR10(root=root, train=False, download=False,
                       transform=transforms.ToTensor())
    train_loader = DataLoader(train_set, batch_size=64, shuffle=True,
                              num_workers=1)
    test_loader = DataLoader(test_set, batch_size=64, shuffle=True,
                             num_workers=1)
    optimizer = torch.optim.Adam(model.parameters(), lr=0.0001)
    criterion = nn.CrossEntropyLoss().cuda()
    for epoch in range(20):
        train(train_loader, model, criterion, optimizer, epoch)
        prec1 = validate(test_loader, model, criterion)
        # track the best validation precision seen so far
        is_best = prec1 > best_prec1
        best_prec1 = max(prec1, best_prec1)
        save_checkpoint({
            'epoch': epoch + 1,
            'state_dict': model.state_dict(),
            'best_prec1': best_prec1,
        }, is_best)
    # final evaluation after the last epoch
    validate(test_loader, model, criterion)
if __name__=="__main__":
main()
'''
/Users/JasonHu/anaconda3/envs/condapy3/bin/python "/Users/JasonHu/Git/Philosophy Machine/amne/amnesia.py"
Epoch: [0][0/782] Time 0.702 (0.702) Data 0.034 (0.034) Loss 2.3033 (2.3033) Prec@1 10.938 (10.938) Prec@5 46.875 (46.875)
Epoch: [0][10/782] Time 0.194 (0.239) Data 0.000 (0.004) Loss 2.3018 (2.3026) Prec@1 6.250 (8.523) Prec@5 45.312 (48.153)
Epoch: [0][20/782] Time 0.194 (0.217) Data 0.000 (0.002) Loss 2.3133 (2.3029) Prec@1 9.375 (9.226) Prec@5 42.188 (48.512)
Epoch: [0][30/782] Time 0.193 (0.209) Data 0.000 (0.002) Loss 2.2674 (2.2958) Prec@1 14.062 (9.879) Prec@5 57.812 (50.353)
Epoch: [0][40/782] Time 0.193 (0.205) Data 0.001 (0.001) Loss 2.1960 (2.2779) Prec@1 12.500 (10.175) Prec@5 59.375 (51.867)
Epoch: [0][50/782] Time 0.189 (0.202) Data 0.001 (0.001) Loss 2.1884 (2.2533) Prec@1 21.875 (11.734) Prec@5 73.438 (56.373)
Epoch: [0][60/782] Time 0.192 (0.201) Data 0.001 (0.001) Loss 2.1929 (2.2405) Prec@1 17.188 (12.500) Prec@5 70.312 (58.991)
Epoch: [0][70/782] Time 0.189 (0.200) Data 0.000 (0.001) Loss 1.9588 (2.2112) Prec@1 21.875 (13.710) Prec@5 82.812 (61.906)
Epoch: [0][80/782] Time 0.196 (0.199) Data 0.000 (0.001) Loss 2.1731 (2.1912) Prec@1 18.750 (14.660) Prec@5 64.062 (63.522)
Epoch: [0][90/782] Time 0.190 (0.198) Data 0.000 (0.001) Loss 1.9284 (2.1704) Prec@1 15.625 (15.436) Prec@5 85.938 (65.144)
Epoch: [0][100/782] Time 0.197 (0.198) Data 0.001 (0.001) Loss 2.0012 (2.1460) Prec@1 21.875 (16.213) Prec@5 79.688 (66.940)
Epoch: [0][110/782] Time 0.191 (0.197) Data 0.001 (0.001) Loss 1.8836 (2.1228) Prec@1 26.562 (17.103) Prec@5 84.375 (68.497)
Epoch: [0][120/782] Time 0.191 (0.197) Data 0.000 (0.001) Loss 2.0547 (2.1062) Prec@1 23.438 (17.743) Prec@5 79.688 (69.757)
Epoch: [0][130/782] Time 0.192 (0.197) Data 0.001 (0.001) Loss 2.0534 (2.0917) Prec@1 25.000 (18.201) Prec@5 76.562 (70.778)
Epoch: [0][140/782] Time 0.192 (0.196) Data 0.000 (0.001) Loss 1.9721 (2.0725) Prec@1 17.188 (18.839) Prec@5 79.688 (71.820)
Epoch: [0][150/782] Time 0.196 (0.196) Data 0.000 (0.001) Loss 1.9191 (2.0561) Prec@1 31.250 (19.640) Prec@5 79.688 (72.724)
Epoch: [0][160/782] Time 0.197 (0.196) Data 0.001 (0.001) Loss 1.7598 (2.0396) Prec@1 31.250 (20.410) Prec@5 89.062 (73.632)
Epoch: [0][170/782] Time 0.198 (0.196) Data 0.001 (0.001) Loss 1.8220 (2.0223) Prec@1 35.938 (21.117) Prec@5 84.375 (74.351)
Epoch: [0][180/782] Time 0.192 (0.196) Data 0.001 (0.001) Loss 1.7711 (2.0109) Prec@1 25.000 (21.435) Prec@5 89.062 (74.931)
Epoch: [0][190/782] Time 0.192 (0.195) Data 0.001 (0.001) Loss 1.7623 (1.9964) Prec@1 28.125 (21.900) Prec@5 85.938 (75.695)
Epoch: [0][200/782] Time 0.191 (0.195) Data 0.000 (0.001) Loss 1.8085 (1.9847) Prec@1 26.562 (22.287) Prec@5 90.625 (76.306)
Epoch: [0][210/782] Time 0.194 (0.195) Data 0.000 (0.001) Loss 1.6871 (1.9763) Prec@1 39.062 (22.719) Prec@5 87.500 (76.740)
Epoch: [0][220/782] Time 0.192 (0.195) Data 0.001 (0.001) Loss 1.8169 (1.9651) Prec@1 25.000 (23.162) Prec@5 89.062 (77.156)
Epoch: [0][230/782] Time 0.190 (0.195) Data 0.000 (0.001) Loss 1.5795 (1.9565) Prec@1 35.938 (23.640) Prec@5 92.188 (77.631)
Epoch: [0][240/782] Time 0.194 (0.195) Data 0.001 (0.001) Loss 1.5260 (1.9458) Prec@1 35.938 (24.092) Prec@5 92.188 (78.073)
Epoch: [0][250/782] Time 0.194 (0.195) Data 0.000 (0.001) Loss 1.5958 (1.9361) Prec@1 37.500 (24.465) Prec@5 90.625 (78.430)
Epoch: [0][260/782] Time 0.195 (0.195) Data 0.001 (0.001) Loss 1.8454 (1.9285) Prec@1 29.688 (24.784) Prec@5 84.375 (78.807)
Epoch: [0][270/782] Time 0.196 (0.194) Data 0.001 (0.001) Loss 1.5604 (1.9170) Prec@1 40.625 (25.259) Prec@5 95.312 (79.215)
Epoch: [0][280/782] Time 0.192 (0.194) Data 0.000 (0.001) Loss 1.8135 (1.9072) Prec@1 31.250 (25.612) Prec@5 87.500 (79.532)
Epoch: [0][290/782] Time 0.197 (0.194) Data 0.001 (0.001) Loss 1.8459 (1.9006) Prec@1 21.875 (25.811) Prec@5 84.375 (79.832)
Epoch: [0][300/782] Time 0.191 (0.194) Data 0.001 (0.001) Loss 1.5859 (1.8918) Prec@1 39.062 (26.168) Prec@5 92.188 (80.186)
Epoch: [0][310/782] Time 0.193 (0.194) Data 0.001 (0.001) Loss 1.4643 (1.8830) Prec@1 57.812 (26.603) Prec@5 90.625 (80.461)
Epoch: [0][320/782] Time 0.190 (0.194) Data 0.000 (0.001) Loss 1.5619 (1.8741) Prec@1 34.375 (26.952) Prec@5 87.500 (80.797)
Epoch: [0][330/782] Time 0.193 (0.194) Data 0.001 (0.001) Loss 1.5192 (1.8657) Prec@1 42.188 (27.261) Prec@5 93.750 (81.132)
Epoch: [0][340/782] Time 0.195 (0.194) Data 0.000 (0.001) Loss 1.7762 (1.8565) Prec@1 37.500 (27.681) Prec@5 89.062 (81.392)
Epoch: [0][350/782] Time 0.193 (0.194) Data 0.001 (0.001) Loss 1.6098 (1.8481) Prec@1 37.500 (28.027) Prec@5 87.500 (81.686)
Epoch: [0][360/782] Time 0.190 (0.194) Data 0.001 (0.001) Loss 1.7619 (1.8415) Prec@1 35.938 (28.324) Prec@5 89.062 (81.899)
Epoch: [0][370/782] Time 0.195 (0.194) Data 0.001 (0.001) Loss 1.7476 (1.8355) Prec@1 34.375 (28.681) Prec@5 82.812 (82.088)
Epoch: [0][380/782] Time 0.193 (0.194) Data 0.001 (0.001) Loss 1.5493 (1.8306) Prec@1 40.625 (28.908) Prec@5 95.312 (82.333)
Epoch: [0][390/782] Time 0.192 (0.194) Data 0.000 (0.001) Loss 1.4643 (1.8237) Prec@1 46.875 (29.216) Prec@5 93.750 (82.557)
Epoch: [0][400/782] Time 0.198 (0.194) Data 0.000 (0.001) Loss 1.7023 (1.8165) Prec@1 34.375 (29.590) Prec@5 87.500 (82.746)
Epoch: [0][410/782] Time 0.193 (0.194) Data 0.001 (0.001) Loss 1.4273 (1.8079) Prec@1 53.125 (29.961) Prec@5 87.500 (82.961)
Epoch: [0][420/782] Time 0.194 (0.194) Data 0.001 (0.001) Loss 1.4597 (1.8016) Prec@1 48.438 (30.237) Prec@5 92.188 (83.150)
Epoch: [0][430/782] Time 0.187 (0.194) Data 0.001 (0.001) Loss 1.3678 (1.7954) Prec@1 54.688 (30.485) Prec@5 89.062 (83.342)
Epoch: [0][440/782] Time 0.194 (0.194) Data 0.001 (0.001) Loss 1.4309 (1.7887) Prec@1 46.875 (30.779) Prec@5 93.750 (83.546)
Epoch: [0][450/782] Time 0.189 (0.194) Data 0.001 (0.001) Loss 1.7235 (1.7827) Prec@1 26.562 (31.007) Prec@5 85.938 (83.731)
Epoch: [0][460/782] Time 0.196 (0.194) Data 0.001 (0.001) Loss 1.5290 (1.7783) Prec@1 45.312 (31.169) Prec@5 92.188 (83.877)
Epoch: [0][470/782] Time 0.195 (0.194) Data 0.000 (0.001) Loss 1.6132 (1.7713) Prec@1 40.625 (31.469) Prec@5 87.500 (84.090)
Epoch: [0][480/782] Time 0.192 (0.194) Data 0.000 (0.001) Loss 1.3747 (1.7641) Prec@1 48.438 (31.779) Prec@5 93.750 (84.274)
Epoch: [0][490/782] Time 0.197 (0.194) Data 0.001 (0.001) Loss 1.4825 (1.7578) Prec@1 46.875 (32.065) Prec@5 90.625 (84.439)
Epoch: [0][500/782] Time 0.190 (0.194) Data 0.001 (0.001) Loss 1.7593 (1.7515) Prec@1 35.938 (32.354) Prec@5 89.062 (84.625)
Epoch: [0][510/782] Time 0.195 (0.194) Data 0.000 (0.001) Loss 1.3651 (1.7456) Prec@1 50.000 (32.608) Prec@5 92.188 (84.788)
Epoch: [0][520/782] Time 0.192 (0.194) Data 0.001 (0.001) Loss 1.4668 (1.7390) Prec@1 53.125 (32.944) Prec@5 92.188 (84.930)
Epoch: [0][530/782] Time 0.190 (0.194) Data 0.001 (0.001) Loss 1.5676 (1.7345) Prec@1 42.188 (33.151) Prec@5 90.625 (85.046)
Epoch: [0][540/782] Time 0.191 (0.194) Data 0.001 (0.001) Loss 1.4591 (1.7299) Prec@1 42.188 (33.315) Prec@5 96.875 (85.201)
Epoch: [0][550/782] Time 0.191 (0.194) Data 0.000 (0.001) Loss 1.5862 (1.7258) Prec@1 43.750 (33.521) Prec@5 92.188 (85.325)
Epoch: [0][560/782] Time 0.196 (0.194) Data 0.001 (0.001) Loss 1.5376 (1.7208) Prec@1 43.750 (33.798) Prec@5 90.625 (85.447)
Epoch: [0][570/782] Time 0.195 (0.194) Data 0.000 (0.001) Loss 1.3889 (1.7156) Prec@1 57.812 (34.033) Prec@5 92.188 (85.585)
Epoch: [0][580/782] Time 0.192 (0.194) Data 0.001 (0.001) Loss 1.1527 (1.7107) Prec@1 51.562 (34.254) Prec@5 96.875 (85.693)
Epoch: [0][590/782] Time 0.189 (0.194) Data 0.000 (0.001) Loss 1.4220 (1.7054) Prec@1 42.188 (34.483) Prec@5 95.312 (85.837)
Epoch: [0][600/782] Time 0.191 (0.194) Data 0.001 (0.001) Loss 1.4860 (1.6997) Prec@1 42.188 (34.721) Prec@5 93.750 (85.969)
Epoch: [0][610/782] Time 0.198 (0.194) Data 0.000 (0.001) Loss 1.2939 (1.6944) Prec@1 53.125 (34.948) Prec@5 93.750 (86.104)
Epoch: [0][620/782] Time 0.191 (0.194) Data 0.001 (0.001) Loss 1.5457 (1.6897) Prec@1 40.625 (35.175) Prec@5 92.188 (86.212)
Epoch: [0][630/782] Time 0.196 (0.194) Data 0.000 (0.001) Loss 1.4939 (1.6844) Prec@1 43.750 (35.417) Prec@5 92.188 (86.339)
Epoch: [0][640/782] Time 0.193 (0.194) Data 0.001 (0.001) Loss 1.4392 (1.6814) Prec@1 42.188 (35.538) Prec@5 95.312 (86.410)
Epoch: [0][650/782] Time 0.195 (0.194) Data 0.001 (0.001) Loss 1.4039 (1.6769) Prec@1 53.125 (35.719) Prec@5 90.625 (86.502)
Epoch: [0][660/782] Time 0.198 (0.194) Data 0.001 (0.001) Loss 1.3795 (1.6724) Prec@1 50.000 (35.914) Prec@5 93.750 (86.632)
Epoch: [0][670/782] Time 0.195 (0.194) Data 0.001 (0.001) Loss 1.2962 (1.6672) Prec@1 48.438 (36.131) Prec@5 92.188 (86.748)
Epoch: [0][680/782] Time 0.193 (0.194) Data 0.001 (0.001) Loss 1.3320 (1.6626) Prec@1 46.875 (36.325) Prec@5 93.750 (86.853)
Epoch: [0][690/782] Time 0.196 (0.194) Data 0.001 (0.001) Loss 1.3146 (1.6591) Prec@1 51.562 (36.498) Prec@5 92.188 (86.941)
Epoch: [0][700/782] Time 0.195 (0.194) Data 0.001 (0.001) Loss 1.4589 (1.6540) Prec@1 39.062 (36.720) Prec@5 90.625 (87.034)
Epoch: [0][710/782] Time 0.197 (0.194) Data 0.000 (0.001) Loss 1.5341 (1.6506) Prec@1 35.938 (36.841) Prec@5 95.312 (87.126)
Epoch: [0][720/782] Time 0.193 (0.194) Data 0.000 (0.001) Loss 1.4486 (1.6477) Prec@1 43.750 (37.002) Prec@5 93.750 (87.207)
Epoch: [0][730/782] Time 0.190 (0.194) Data 0.000 (0.001) Loss 1.3757 (1.6433) Prec@1 45.312 (37.173) Prec@5 96.875 (87.312)
Epoch: [0][740/782] Time 0.195 (0.194) Data 0.000 (0.001) Loss 1.3217 (1.6401) Prec@1 45.312 (37.310) Prec@5 93.750 (87.386)
Epoch: [0][750/782] Time 0.195 (0.194) Data 0.001 (0.001) Loss 1.1799 (1.6362) Prec@1 56.250 (37.488) Prec@5 96.875 (87.467)
Epoch: [0][760/782] Time 0.191 (0.194) Data 0.001 (0.001) Loss 1.1116 (1.6324) Prec@1 65.625 (37.668) Prec@5 92.188 (87.533)
Epoch: [0][770/782] Time 0.197 (0.194) Data 0.001 (0.001) Loss 1.2361 (1.6289) Prec@1 51.562 (37.816) Prec@5 92.188 (87.620)
Epoch: [0][780/782] Time 0.190 (0.194) Data 0.001 (0.001) Loss 1.1375 (1.6251) Prec@1 59.375 (38.014) Prec@5 95.312 (87.700)
Test: [0/157] Time 0.078 (0.078) Loss 1.5470 (1.5470) Prec@1 45.312 (45.312) Prec@5 90.625 (90.625)
Test: [10/157] Time 0.028 (0.034) Loss 1.4560 (1.3493) Prec@1 46.875 (50.994) Prec@5 92.188 (93.892)
Test: [20/157] Time 0.030 (0.032) Loss 1.2065 (1.3078) Prec@1 48.438 (51.711) Prec@5 93.750 (94.568)
Test: [30/157] Time 0.029 (0.031) Loss 1.2266 (1.3040) Prec@1 64.062 (51.815) Prec@5 95.312 (94.657)
Test: [40/157] Time 0.029 (0.030) Loss 1.3829 (1.2973) Prec@1 51.562 (52.096) Prec@5 93.750 (94.360)
Test: [50/157] Time 0.029 (0.030) Loss 1.1329 (1.2991) Prec@1 59.375 (51.808) Prec@5 95.312 (94.210)
Test: [60/157] Time 0.029 (0.030) Loss 1.3574 (1.2998) Prec@1 54.688 (52.075) Prec@5 95.312 (94.109)
Test: [70/157] Time 0.029 (0.030) Loss 1.1240 (1.2960) Prec@1 57.812 (52.245) Prec@5 96.875 (94.498)
Test: [80/157] Time 0.029 (0.030) Loss 1.4008 (1.2969) Prec@1 51.562 (51.987) Prec@5 95.312 (94.637)
Test: [90/157] Time 0.029 (0.030) Loss 1.3679 (1.2964) Prec@1 51.562 (51.923) Prec@5 96.875 (94.694)
Test: [100/157] Time 0.029 (0.030) Loss 1.2416 (1.2870) Prec@1 53.125 (52.243) Prec@5 92.188 (94.725)
Test: [110/157] Time 0.029 (0.030) Loss 1.3442 (1.2901) Prec@1 60.938 (52.365) Prec@5 89.062 (94.637)
Test: [120/157] Time 0.029 (0.030) Loss 1.3414 (1.2898) Prec@1 48.438 (52.466) Prec@5 93.750 (94.615)
Test: [130/157] Time 0.029 (0.030) Loss 1.3802 (1.2943) Prec@1 46.875 (52.314) Prec@5 95.312 (94.573)
Test: [140/157] Time 0.028 (0.030) Loss 1.4862 (1.2909) Prec@1 39.062 (52.482) Prec@5 92.188 (94.648)
Test: [150/157] Time 0.029 (0.029) Loss 1.4107 (1.2938) Prec@1 54.688 (52.494) Prec@5 87.500 (94.547)
* Prec@1 52.530 Prec@5 94.570
Epoch: [1][0/782] Time 0.071 (0.071) Data 0.020 (0.020) Loss 1.1610 (1.1610) Prec@1 53.125 (53.125) Prec@5 96.875 (96.875)
Epoch: [1][10/782] Time 0.194 (0.183) Data 0.001 (0.002) Loss 1.2664 (1.2392) Prec@1 51.562 (53.977) Prec@5 95.312 (95.881)
Epoch: [1][20/782] Time 0.195 (0.188) Data 0.000 (0.001) Loss 1.2199 (1.2628) Prec@1 56.250 (53.125) Prec@5 100.000 (95.610)
Epoch: [1][30/782] Time 0.194 (0.190) Data 0.001 (0.001) Loss 1.3725 (1.2696) Prec@1 51.562 (53.075) Prec@5 95.312 (95.111)
Epoch: [1][40/782] Time 0.190 (0.191) Data 0.000 (0.001) Loss 1.5264 (1.2644) Prec@1 45.312 (53.277) Prec@5 92.188 (95.160)
Epoch: [1][50/782] Time 0.191 (0.191) Data 0.001 (0.001) Loss 1.2262 (1.2772) Prec@1 56.250 (53.094) Prec@5 92.188 (94.700)
Epoch: [1][60/782] Time 0.200 (0.191) Data 0.000 (0.001) Loss 1.2035 (1.2764) Prec@1 56.250 (52.894) Prec@5 93.750 (94.595)
Epoch: [1][70/782] Time 0.194 (0.192) Data 0.001 (0.001) Loss 1.1621 (1.2702) Prec@1 60.938 (53.125) Prec@5 95.312 (94.718)
Epoch: [1][80/782] Time 0.195 (0.192) Data 0.000 (0.001) Loss 1.1941 (1.2594) Prec@1 54.688 (53.492) Prec@5 93.750 (94.830)
Epoch: [1][90/782] Time 0.198 (0.192) Data 0.000 (0.001) Loss 1.2720 (1.2522) Prec@1 64.062 (53.829) Prec@5 96.875 (94.986)
Epoch: [1][100/782] Time 0.199 (0.192) Data 0.001 (0.001) Loss 1.4031 (1.2514) Prec@1 51.562 (53.837) Prec@5 93.750 (95.019)
Epoch: [1][110/782] Time 0.192 (0.192) Data 0.000 (0.001) Loss 1.3074 (1.2515) Prec@1 54.688 (53.913) Prec@5 92.188 (94.975)
Epoch: [1][120/782] Time 0.191 (0.192) Data 0.001 (0.001) Loss 1.3174 (1.2530) Prec@1 57.812 (53.951) Prec@5 92.188 (94.964)
Epoch: [1][130/782] Time 0.201 (0.193) Data 0.000 (0.001) Loss 1.0842 (1.2474) Prec@1 68.750 (54.210) Prec@5 93.750 (94.990)
Epoch: [1][140/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 1.0878 (1.2430) Prec@1 53.125 (54.366) Prec@5 95.312 (95.013)
Epoch: [1][150/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 1.3350 (1.2391) Prec@1 43.750 (54.594) Prec@5 96.875 (95.095)
Epoch: [1][160/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 1.1588 (1.2378) Prec@1 57.812 (54.620) Prec@5 96.875 (95.109)
Epoch: [1][170/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 1.3750 (1.2383) Prec@1 56.250 (54.541) Prec@5 98.438 (95.102)
Epoch: [1][180/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 1.0059 (1.2361) Prec@1 59.375 (54.593) Prec@5 98.438 (95.114)
Epoch: [1][190/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.9390 (1.2347) Prec@1 54.688 (54.548) Prec@5 98.438 (95.116)
Epoch: [1][200/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 1.1234 (1.2319) Prec@1 59.375 (54.618) Prec@5 93.750 (95.134)
Epoch: [1][210/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 1.0524 (1.2290) Prec@1 60.938 (54.791) Prec@5 98.438 (95.142)
Epoch: [1][220/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 1.2702 (1.2258) Prec@1 54.688 (54.963) Prec@5 92.188 (95.100)
Epoch: [1][230/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 1.0817 (1.2215) Prec@1 60.938 (55.134) Prec@5 95.312 (95.116)
Epoch: [1][240/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 1.3468 (1.2171) Prec@1 48.438 (55.316) Prec@5 95.312 (95.150)
Epoch: [1][250/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 1.2514 (1.2198) Prec@1 53.125 (55.248) Prec@5 98.438 (95.157)
Epoch: [1][260/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 1.0934 (1.2183) Prec@1 62.500 (55.370) Prec@5 95.312 (95.145)
Epoch: [1][270/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 1.2806 (1.2136) Prec@1 50.000 (55.593) Prec@5 95.312 (95.197)
Epoch: [1][280/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 1.1359 (1.2115) Prec@1 54.688 (55.750) Prec@5 95.312 (95.151)
Epoch: [1][290/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 1.4053 (1.2128) Prec@1 50.000 (55.772) Prec@5 96.875 (95.125)
Epoch: [1][300/782] Time 0.206 (0.193) Data 0.000 (0.001) Loss 1.0852 (1.2090) Prec@1 56.250 (55.881) Prec@5 96.875 (95.131)
Epoch: [1][310/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 1.0897 (1.2077) Prec@1 53.125 (55.959) Prec@5 96.875 (95.122)
Epoch: [1][320/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 1.1551 (1.2077) Prec@1 62.500 (55.963) Prec@5 96.875 (95.108)
Epoch: [1][330/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 1.0720 (1.2053) Prec@1 64.062 (56.042) Prec@5 95.312 (95.176)
Epoch: [1][340/782] Time 0.186 (0.193) Data 0.001 (0.001) Loss 0.9038 (1.2032) Prec@1 60.938 (56.094) Prec@5 95.312 (95.193)
Epoch: [1][350/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 1.3301 (1.2040) Prec@1 56.250 (56.108) Prec@5 95.312 (95.201)
Epoch: [1][360/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 1.1528 (1.2009) Prec@1 62.500 (56.159) Prec@5 93.750 (95.243)
Epoch: [1][370/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 1.4358 (1.2009) Prec@1 59.375 (56.178) Prec@5 92.188 (95.216)
Epoch: [1][380/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.9478 (1.1992) Prec@1 68.750 (56.250) Prec@5 100.000 (95.230)
Epoch: [1][390/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 1.3566 (1.1974) Prec@1 54.688 (56.338) Prec@5 90.625 (95.221)
Epoch: [1][400/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 1.0058 (1.1947) Prec@1 67.188 (56.476) Prec@5 96.875 (95.227)
Epoch: [1][410/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 1.0953 (1.1940) Prec@1 57.812 (56.520) Prec@5 98.438 (95.236)
Epoch: [1][420/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 1.2517 (1.1917) Prec@1 59.375 (56.621) Prec@5 92.188 (95.231)
Epoch: [1][430/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.8640 (1.1900) Prec@1 67.188 (56.729) Prec@5 98.438 (95.247)
Epoch: [1][440/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 1.1577 (1.1905) Prec@1 57.812 (56.721) Prec@5 95.312 (95.256)
Epoch: [1][450/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 1.0067 (1.1887) Prec@1 67.188 (56.784) Prec@5 96.875 (95.292)
Epoch: [1][460/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 1.2633 (1.1876) Prec@1 48.438 (56.823) Prec@5 96.875 (95.309)
Epoch: [1][470/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 1.1308 (1.1863) Prec@1 64.062 (56.864) Prec@5 95.312 (95.316)
Epoch: [1][480/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 1.2383 (1.1853) Prec@1 53.125 (56.861) Prec@5 95.312 (95.335)
Epoch: [1][490/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 1.3066 (1.1841) Prec@1 54.688 (56.918) Prec@5 93.750 (95.319)
Epoch: [1][500/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 1.1302 (1.1834) Prec@1 60.938 (56.974) Prec@5 95.312 (95.291)
Epoch: [1][510/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 1.0900 (1.1823) Prec@1 62.500 (57.014) Prec@5 95.312 (95.312)
Epoch: [1][520/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 1.0556 (1.1808) Prec@1 62.500 (57.069) Prec@5 98.438 (95.327)
Epoch: [1][530/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 1.2948 (1.1799) Prec@1 57.812 (57.100) Prec@5 96.875 (95.363)
Epoch: [1][540/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 1.3466 (1.1791) Prec@1 50.000 (57.131) Prec@5 92.188 (95.388)
Epoch: [1][550/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.8895 (1.1774) Prec@1 68.750 (57.225) Prec@5 100.000 (95.412)
Epoch: [1][560/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 1.1223 (1.1749) Prec@1 65.625 (57.314) Prec@5 98.438 (95.432)
Epoch: [1][570/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.9616 (1.1729) Prec@1 56.250 (57.397) Prec@5 98.438 (95.438)
Epoch: [1][580/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.7417 (1.1696) Prec@1 75.000 (57.530) Prec@5 100.000 (95.460)
Epoch: [1][590/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 1.3218 (1.1678) Prec@1 57.812 (57.627) Prec@5 89.062 (95.447)
Epoch: [1][600/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 1.1582 (1.1666) Prec@1 60.938 (57.667) Prec@5 96.875 (95.453)
Epoch: [1][610/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 1.3284 (1.1645) Prec@1 57.812 (57.749) Prec@5 95.312 (95.468)
Epoch: [1][620/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 1.0877 (1.1632) Prec@1 64.062 (57.860) Prec@5 96.875 (95.479)
Epoch: [1][630/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 1.0350 (1.1610) Prec@1 64.062 (57.981) Prec@5 96.875 (95.511)
Epoch: [1][640/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 1.0480 (1.1607) Prec@1 64.062 (57.986) Prec@5 95.312 (95.522)
Epoch: [1][650/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 1.0755 (1.1592) Prec@1 62.500 (58.053) Prec@5 96.875 (95.557)
Epoch: [1][660/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 1.0184 (1.1590) Prec@1 65.625 (58.063) Prec@5 93.750 (95.565)
Epoch: [1][670/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 1.2624 (1.1580) Prec@1 62.500 (58.136) Prec@5 95.312 (95.583)
Epoch: [1][680/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.8502 (1.1562) Prec@1 70.312 (58.219) Prec@5 98.438 (95.595)
Epoch: [1][690/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.9322 (1.1549) Prec@1 70.312 (58.269) Prec@5 100.000 (95.611)
Epoch: [1][700/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 1.0012 (1.1521) Prec@1 67.188 (58.381) Prec@5 93.750 (95.620)
Epoch: [1][710/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.8518 (1.1499) Prec@1 75.000 (58.485) Prec@5 98.438 (95.620)
Epoch: [1][720/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 1.0232 (1.1495) Prec@1 65.625 (58.502) Prec@5 96.875 (95.607)
Epoch: [1][730/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.9336 (1.1484) Prec@1 65.625 (58.558) Prec@5 98.438 (95.607)
Epoch: [1][740/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 1.0499 (1.1477) Prec@1 59.375 (58.576) Prec@5 95.312 (95.616)
Epoch: [1][750/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.9106 (1.1470) Prec@1 65.625 (58.584) Prec@5 96.875 (95.631)
Epoch: [1][760/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 1.0323 (1.1457) Prec@1 62.500 (58.615) Prec@5 96.875 (95.635)
Epoch: [1][770/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 1.0194 (1.1443) Prec@1 56.250 (58.666) Prec@5 96.875 (95.637)
Epoch: [1][780/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.9596 (1.1419) Prec@1 62.500 (58.737) Prec@5 100.000 (95.675)
Test: [0/157] Time 0.077 (0.077) Loss 1.1343 (1.1343) Prec@1 53.125 (53.125) Prec@5 96.875 (96.875)
Test: [10/157] Time 0.029 (0.033) Loss 1.0825 (1.0678) Prec@1 64.062 (61.648) Prec@5 95.312 (96.449)
Test: [20/157] Time 0.030 (0.032) Loss 1.2783 (1.1121) Prec@1 54.688 (60.714) Prec@5 95.312 (96.205)
Test: [30/157] Time 0.030 (0.031) Loss 1.2757 (1.0827) Prec@1 51.562 (61.290) Prec@5 96.875 (96.472)
Test: [40/157] Time 0.029 (0.030) Loss 0.9756 (1.0737) Prec@1 71.875 (62.309) Prec@5 95.312 (96.265)
Test: [50/157] Time 0.029 (0.030) Loss 1.1818 (1.0826) Prec@1 65.625 (62.071) Prec@5 92.188 (96.170)
Test: [60/157] Time 0.029 (0.030) Loss 0.9764 (1.0640) Prec@1 59.375 (62.398) Prec@5 100.000 (96.286)
Test: [70/157] Time 0.029 (0.030) Loss 0.9604 (1.0628) Prec@1 71.875 (62.742) Prec@5 93.750 (96.215)
Test: [80/157] Time 0.029 (0.030) Loss 1.1910 (1.0582) Prec@1 56.250 (63.002) Prec@5 93.750 (96.296)
Test: [90/157] Time 0.029 (0.030) Loss 1.2120 (1.0580) Prec@1 60.938 (63.101) Prec@5 93.750 (96.257)
Test: [100/157] Time 0.029 (0.030) Loss 0.9772 (1.0556) Prec@1 64.062 (63.103) Prec@5 96.875 (96.318)
Test: [110/157] Time 0.029 (0.030) Loss 1.0304 (1.0507) Prec@1 57.812 (63.176) Prec@5 98.438 (96.396)
Test: [120/157] Time 0.029 (0.030) Loss 0.8699 (1.0438) Prec@1 71.875 (63.494) Prec@5 96.875 (96.397)
Test: [130/157] Time 0.029 (0.030) Loss 1.1727 (1.0469) Prec@1 64.062 (63.275) Prec@5 92.188 (96.398)
Test: [140/157] Time 0.029 (0.030) Loss 0.9638 (1.0484) Prec@1 59.375 (63.121) Prec@5 98.438 (96.410)
Test: [150/157] Time 0.028 (0.030) Loss 1.0712 (1.0485) Prec@1 62.500 (63.028) Prec@5 96.875 (96.492)
* Prec@1 63.050 Prec@5 96.520
Epoch: [2][0/782] Time 0.082 (0.082) Data 0.026 (0.026) Loss 0.8687 (0.8687) Prec@1 70.312 (70.312) Prec@5 96.875 (96.875)
Epoch: [2][10/782] Time 0.196 (0.183) Data 0.001 (0.003) Loss 0.8374 (0.9149) Prec@1 76.562 (68.466) Prec@5 96.875 (97.727)
Epoch: [2][20/782] Time 0.192 (0.188) Data 0.000 (0.002) Loss 0.9938 (0.8998) Prec@1 65.625 (68.155) Prec@5 98.438 (97.917)
Epoch: [2][30/782] Time 0.195 (0.190) Data 0.001 (0.001) Loss 1.0324 (0.9121) Prec@1 71.875 (67.843) Prec@5 95.312 (97.732)
Epoch: [2][40/782] Time 0.188 (0.191) Data 0.001 (0.001) Loss 1.2299 (0.9183) Prec@1 54.688 (67.645) Prec@5 95.312 (97.561)
Epoch: [2][50/782] Time 0.198 (0.191) Data 0.001 (0.001) Loss 1.0347 (0.9384) Prec@1 60.938 (66.176) Prec@5 96.875 (97.518)
Epoch: [2][60/782] Time 0.200 (0.192) Data 0.001 (0.001) Loss 0.8061 (0.9452) Prec@1 68.750 (65.958) Prec@5 98.438 (97.336)
Epoch: [2][70/782] Time 0.191 (0.192) Data 0.001 (0.001) Loss 1.0028 (0.9450) Prec@1 62.500 (66.131) Prec@5 96.875 (97.403)
Epoch: [2][80/782] Time 0.186 (0.192) Data 0.001 (0.001) Loss 1.0727 (0.9573) Prec@1 57.812 (65.741) Prec@5 90.625 (97.184)
Epoch: [2][90/782] Time 0.196 (0.192) Data 0.001 (0.001) Loss 0.6956 (0.9456) Prec@1 73.438 (66.020) Prec@5 98.438 (97.201)
Epoch: [2][100/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.7554 (0.9480) Prec@1 64.062 (65.795) Prec@5 95.312 (97.153)
Epoch: [2][110/782] Time 0.194 (0.192) Data 0.001 (0.001) Loss 0.8961 (0.9459) Prec@1 56.250 (65.907) Prec@5 98.438 (97.044)
Epoch: [2][120/782] Time 0.198 (0.192) Data 0.001 (0.001) Loss 0.7381 (0.9462) Prec@1 76.562 (65.948) Prec@5 100.000 (97.107)
Epoch: [2][130/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.8338 (0.9428) Prec@1 75.000 (66.150) Prec@5 98.438 (97.185)
Epoch: [2][140/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.7828 (0.9438) Prec@1 75.000 (66.135) Prec@5 98.438 (97.185)
Epoch: [2][150/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.9443 (0.9440) Prec@1 67.188 (66.091) Prec@5 95.312 (97.175)
Epoch: [2][160/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.8235 (0.9419) Prec@1 64.062 (66.188) Prec@5 96.875 (97.186)
Epoch: [2][170/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.8232 (0.9422) Prec@1 68.750 (66.091) Prec@5 98.438 (97.222)
Epoch: [2][180/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 1.0577 (0.9426) Prec@1 65.625 (66.134) Prec@5 98.438 (97.263)
Epoch: [2][190/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.7970 (0.9439) Prec@1 70.312 (66.018) Prec@5 98.438 (97.276)
Epoch: [2][200/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.8354 (0.9430) Prec@1 67.188 (66.099) Prec@5 98.438 (97.256)
Epoch: [2][210/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.9542 (0.9402) Prec@1 64.062 (66.188) Prec@5 98.438 (97.267)
Epoch: [2][220/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.9642 (0.9410) Prec@1 67.188 (66.176) Prec@5 98.438 (97.229)
Epoch: [2][230/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.8175 (0.9418) Prec@1 68.750 (66.241) Prec@5 96.875 (97.173)
Epoch: [2][240/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.8371 (0.9411) Prec@1 70.312 (66.364) Prec@5 100.000 (97.186)
Epoch: [2][250/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 1.0394 (0.9433) Prec@1 62.500 (66.260) Prec@5 98.438 (97.168)
Epoch: [2][260/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 1.0043 (0.9425) Prec@1 62.500 (66.212) Prec@5 98.438 (97.192)
Epoch: [2][270/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.8972 (0.9422) Prec@1 70.312 (66.190) Prec@5 98.438 (97.221)
Epoch: [2][280/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.8052 (0.9434) Prec@1 71.875 (66.203) Prec@5 98.438 (97.186)
Epoch: [2][290/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 1.0123 (0.9402) Prec@1 62.500 (66.371) Prec@5 95.312 (97.208)
Epoch: [2][300/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 1.1000 (0.9382) Prec@1 59.375 (66.445) Prec@5 95.312 (97.207)
Epoch: [2][310/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 1.2296 (0.9389) Prec@1 59.375 (66.449) Prec@5 93.750 (97.207)
Epoch: [2][320/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.7296 (0.9386) Prec@1 73.438 (66.438) Prec@5 98.438 (97.201)
Epoch: [2][330/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.9185 (0.9395) Prec@1 71.875 (66.484) Prec@5 100.000 (97.205)
Epoch: [2][340/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 1.1639 (0.9419) Prec@1 62.500 (66.441) Prec@5 95.312 (97.182)
Epoch: [2][350/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.7857 (0.9402) Prec@1 73.438 (66.511) Prec@5 98.438 (97.191)
Epoch: [2][360/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 1.1022 (0.9397) Prec@1 59.375 (66.517) Prec@5 93.750 (97.187)
Epoch: [2][370/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.9259 (0.9396) Prec@1 70.312 (66.522) Prec@5 95.312 (97.195)
Epoch: [2][380/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 1.1255 (0.9374) Prec@1 57.812 (66.589) Prec@5 96.875 (97.228)
Epoch: [2][390/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 1.1379 (0.9358) Prec@1 59.375 (66.668) Prec@5 92.188 (97.223)
Epoch: [2][400/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.9732 (0.9362) Prec@1 60.938 (66.665) Prec@5 100.000 (97.206)
Epoch: [2][410/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.8783 (0.9349) Prec@1 70.312 (66.693) Prec@5 96.875 (97.217)
Epoch: [2][420/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 1.1637 (0.9339) Prec@1 57.812 (66.716) Prec@5 96.875 (97.235)
Epoch: [2][430/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.9645 (0.9326) Prec@1 65.625 (66.731) Prec@5 93.750 (97.248)
Epoch: [2][440/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.7824 (0.9310) Prec@1 70.312 (66.773) Prec@5 98.438 (97.258)
Epoch: [2][450/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.9740 (0.9306) Prec@1 67.188 (66.796) Prec@5 95.312 (97.260)
Epoch: [2][460/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 1.1010 (0.9298) Prec@1 57.812 (66.777) Prec@5 95.312 (97.268)
Epoch: [2][470/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.9432 (0.9290) Prec@1 62.500 (66.833) Prec@5 98.438 (97.283)
Epoch: [2][480/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.9353 (0.9294) Prec@1 64.062 (66.778) Prec@5 95.312 (97.281)
Epoch: [2][490/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.8907 (0.9292) Prec@1 73.438 (66.806) Prec@5 93.750 (97.257)
Epoch: [2][500/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.9234 (0.9279) Prec@1 70.312 (66.873) Prec@5 98.438 (97.252)
Epoch: [2][510/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 1.0546 (0.9269) Prec@1 62.500 (66.885) Prec@5 100.000 (97.269)
Epoch: [2][520/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.8516 (0.9264) Prec@1 62.500 (66.894) Prec@5 96.875 (97.253)
Epoch: [2][530/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.9161 (0.9265) Prec@1 65.625 (66.934) Prec@5 100.000 (97.252)
Epoch: [2][540/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.8283 (0.9264) Prec@1 71.875 (66.933) Prec@5 93.750 (97.242)
Epoch: [2][550/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.8696 (0.9250) Prec@1 73.438 (67.003) Prec@5 98.438 (97.252)
Epoch: [2][560/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.9619 (0.9252) Prec@1 60.938 (66.984) Prec@5 96.875 (97.245)
Epoch: [2][570/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.8154 (0.9238) Prec@1 75.000 (67.042) Prec@5 93.750 (97.239)
Epoch: [2][580/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.7728 (0.9232) Prec@1 67.188 (67.056) Prec@5 98.438 (97.249)
Epoch: [2][590/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.7998 (0.9215) Prec@1 68.750 (67.119) Prec@5 98.438 (97.256)
Epoch: [2][600/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.8852 (0.9205) Prec@1 64.062 (67.180) Prec@5 98.438 (97.260)
Epoch: [2][610/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 1.0603 (0.9217) Prec@1 59.375 (67.167) Prec@5 92.188 (97.230)
Epoch: [2][620/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.9370 (0.9218) Prec@1 64.062 (67.182) Prec@5 95.312 (97.235)
Epoch: [2][630/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.9215 (0.9204) Prec@1 60.938 (67.262) Prec@5 95.312 (97.229)
Epoch: [2][640/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.7463 (0.9204) Prec@1 70.312 (67.263) Prec@5 98.438 (97.221)
Epoch: [2][650/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.8680 (0.9194) Prec@1 81.250 (67.312) Prec@5 93.750 (97.228)
Epoch: [2][660/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.7998 (0.9185) Prec@1 68.750 (67.315) Prec@5 98.438 (97.222)
Epoch: [2][670/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.9476 (0.9168) Prec@1 67.188 (67.383) Prec@5 93.750 (97.224)
Epoch: [2][680/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 1.0223 (0.9159) Prec@1 62.500 (67.435) Prec@5 96.875 (97.228)
Epoch: [2][690/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.8879 (0.9157) Prec@1 68.750 (67.450) Prec@5 98.438 (97.239)
Epoch: [2][700/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.7978 (0.9145) Prec@1 75.000 (67.506) Prec@5 98.438 (97.252)
Epoch: [2][710/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.8729 (0.9143) Prec@1 68.750 (67.530) Prec@5 98.438 (97.257)
Epoch: [2][720/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.7391 (0.9138) Prec@1 75.000 (67.567) Prec@5 100.000 (97.259)
Epoch: [2][730/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.9302 (0.9132) Prec@1 67.188 (67.596) Prec@5 95.312 (97.264)
Epoch: [2][740/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.7707 (0.9121) Prec@1 75.000 (67.656) Prec@5 98.438 (97.267)
Epoch: [2][750/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 1.1111 (0.9112) Prec@1 56.250 (67.676) Prec@5 98.438 (97.279)
Epoch: [2][760/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.7709 (0.9118) Prec@1 75.000 (67.664) Prec@5 100.000 (97.273)
Epoch: [2][770/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.7881 (0.9112) Prec@1 70.312 (67.692) Prec@5 96.875 (97.274)
Epoch: [2][780/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.7813 (0.9100) Prec@1 71.875 (67.722) Prec@5 96.875 (97.281)
Test: [0/157] Time 0.084 (0.084) Loss 1.0571 (1.0571) Prec@1 67.188 (67.188) Prec@5 96.875 (96.875)
Test: [10/157] Time 0.029 (0.034) Loss 1.1099 (0.8905) Prec@1 60.938 (67.472) Prec@5 100.000 (97.301)
Test: [20/157] Time 0.029 (0.032) Loss 0.8160 (0.8786) Prec@1 68.750 (68.750) Prec@5 100.000 (96.949)
Test: [30/157] Time 0.029 (0.031) Loss 0.8296 (0.8742) Prec@1 75.000 (69.002) Prec@5 98.438 (97.127)
Test: [40/157] Time 0.029 (0.030) Loss 0.8326 (0.9040) Prec@1 71.875 (68.293) Prec@5 98.438 (96.989)
Test: [50/157] Time 0.029 (0.030) Loss 0.8525 (0.9065) Prec@1 67.188 (68.413) Prec@5 98.438 (97.089)
Test: [60/157] Time 0.029 (0.030) Loss 1.0802 (0.9107) Prec@1 62.500 (68.263) Prec@5 98.438 (97.106)
Test: [70/157] Time 0.030 (0.030) Loss 0.7796 (0.8994) Prec@1 75.000 (68.640) Prec@5 96.875 (97.095)
Test: [80/157] Time 0.029 (0.030) Loss 0.7825 (0.8926) Prec@1 73.438 (68.711) Prec@5 96.875 (97.164)
Test: [90/157] Time 0.029 (0.030) Loss 0.7623 (0.8917) Prec@1 73.438 (68.527) Prec@5 93.750 (97.150)
Test: [100/157] Time 0.029 (0.030) Loss 0.7263 (0.8905) Prec@1 75.000 (68.719) Prec@5 100.000 (97.169)
Test: [110/157] Time 0.029 (0.030) Loss 0.6802 (0.9008) Prec@1 75.000 (68.314) Prec@5 98.438 (97.185)
Test: [120/157] Time 0.029 (0.030) Loss 0.7839 (0.9084) Prec@1 78.125 (68.259) Prec@5 98.438 (97.017)
Test: [130/157] Time 0.030 (0.030) Loss 0.8768 (0.9124) Prec@1 75.000 (68.058) Prec@5 98.438 (97.090)
Test: [140/157] Time 0.031 (0.030) Loss 1.1609 (0.9132) Prec@1 53.125 (68.085) Prec@5 90.625 (97.019)
Test: [150/157] Time 0.029 (0.030) Loss 0.8140 (0.9042) Prec@1 71.875 (68.305) Prec@5 98.438 (97.041)
* Prec@1 68.240 Prec@5 97.020
Epoch: [3][0/782] Time 0.071 (0.071) Data 0.020 (0.020) Loss 0.8774 (0.8774) Prec@1 73.438 (73.438) Prec@5 98.438 (98.438)
Epoch: [3][10/782] Time 0.192 (0.181) Data 0.001 (0.002) Loss 0.7846 (0.7628) Prec@1 68.750 (70.312) Prec@5 100.000 (99.006)
Epoch: [3][20/782] Time 0.195 (0.188) Data 0.000 (0.001) Loss 0.6138 (0.7013) Prec@1 84.375 (73.586) Prec@5 96.875 (98.586)
Epoch: [3][30/782] Time 0.203 (0.190) Data 0.001 (0.001) Loss 0.8567 (0.7172) Prec@1 71.875 (73.286) Prec@5 98.438 (98.488)
Epoch: [3][40/782] Time 0.194 (0.191) Data 0.001 (0.001) Loss 0.7674 (0.7358) Prec@1 73.438 (73.209) Prec@5 100.000 (98.285)
Epoch: [3][50/782] Time 0.193 (0.191) Data 0.001 (0.001) Loss 0.6355 (0.7415) Prec@1 76.562 (73.284) Prec@5 100.000 (98.346)
Epoch: [3][60/782] Time 0.191 (0.192) Data 0.001 (0.001) Loss 0.5577 (0.7445) Prec@1 85.938 (73.181) Prec@5 98.438 (98.233)
Epoch: [3][70/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.8347 (0.7412) Prec@1 59.375 (73.327) Prec@5 100.000 (98.305)
Epoch: [3][80/782] Time 0.195 (0.192) Data 0.001 (0.001) Loss 0.7921 (0.7407) Prec@1 68.750 (73.245) Prec@5 96.875 (98.245)
Epoch: [3][90/782] Time 0.192 (0.192) Data 0.000 (0.001) Loss 0.6565 (0.7384) Prec@1 81.250 (73.352) Prec@5 100.000 (98.231)
Epoch: [3][100/782] Time 0.196 (0.192) Data 0.001 (0.001) Loss 0.7229 (0.7332) Prec@1 71.875 (73.561) Prec@5 98.438 (98.252)
Epoch: [3][110/782] Time 0.194 (0.192) Data 0.000 (0.001) Loss 0.7035 (0.7361) Prec@1 79.688 (73.564) Prec@5 96.875 (98.128)
Epoch: [3][120/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.7339 (0.7390) Prec@1 73.438 (73.476) Prec@5 98.438 (98.128)
Epoch: [3][130/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.6383 (0.7421) Prec@1 81.250 (73.414) Prec@5 98.438 (98.104)
Epoch: [3][140/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.6462 (0.7396) Prec@1 79.688 (73.449) Prec@5 100.000 (98.138)
Epoch: [3][150/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.6879 (0.7382) Prec@1 75.000 (73.510) Prec@5 98.438 (98.158)
Epoch: [3][160/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.6234 (0.7396) Prec@1 78.125 (73.467) Prec@5 100.000 (98.185)
Epoch: [3][170/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.9787 (0.7412) Prec@1 65.625 (73.501) Prec@5 95.312 (98.145)
Epoch: [3][180/782] Time 0.200 (0.193) Data 0.001 (0.001) Loss 0.6149 (0.7373) Prec@1 76.562 (73.610) Prec@5 100.000 (98.179)
Epoch: [3][190/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.6379 (0.7339) Prec@1 81.250 (73.814) Prec@5 98.438 (98.200)
Epoch: [3][200/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.8280 (0.7387) Prec@1 71.875 (73.663) Prec@5 98.438 (98.142)
Epoch: [3][210/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.8614 (0.7396) Prec@1 68.750 (73.637) Prec@5 98.438 (98.186)
Epoch: [3][220/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.6373 (0.7417) Prec@1 79.688 (73.536) Prec@5 100.000 (98.225)
Epoch: [3][230/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.7930 (0.7431) Prec@1 76.562 (73.519) Prec@5 100.000 (98.221)
Epoch: [3][240/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.5871 (0.7454) Prec@1 78.125 (73.457) Prec@5 98.438 (98.211)
Epoch: [3][250/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.7762 (0.7463) Prec@1 76.562 (73.444) Prec@5 96.875 (98.207)
Epoch: [3][260/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.7146 (0.7470) Prec@1 73.438 (73.420) Prec@5 98.438 (98.204)
Epoch: [3][270/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.5367 (0.7464) Prec@1 81.250 (73.461) Prec@5 98.438 (98.207)
Epoch: [3][280/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.8366 (0.7460) Prec@1 70.312 (73.471) Prec@5 98.438 (98.198)
Epoch: [3][290/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.6806 (0.7437) Prec@1 78.125 (73.556) Prec@5 96.875 (98.185)
Epoch: [3][300/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.9224 (0.7440) Prec@1 64.062 (73.572) Prec@5 96.875 (98.173)
Epoch: [3][310/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.7389 (0.7444) Prec@1 76.562 (73.578) Prec@5 98.438 (98.171)
Epoch: [3][320/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.6710 (0.7438) Prec@1 73.438 (73.627) Prec@5 100.000 (98.204)
Epoch: [3][330/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.6490 (0.7429) Prec@1 76.562 (73.636) Prec@5 98.438 (98.220)
Epoch: [3][340/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.7868 (0.7434) Prec@1 65.625 (73.575) Prec@5 100.000 (98.236)
Epoch: [3][350/782] Time 0.171 (0.193) Data 0.001 (0.001) Loss 0.5961 (0.7441) Prec@1 84.375 (73.571) Prec@5 100.000 (98.224)
Epoch: [3][360/782] Time 0.187 (0.193) Data 0.001 (0.001) Loss 0.9333 (0.7447) Prec@1 65.625 (73.528) Prec@5 100.000 (98.243)
Epoch: [3][370/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.5489 (0.7445) Prec@1 82.812 (73.509) Prec@5 100.000 (98.261)
Epoch: [3][380/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.6482 (0.7428) Prec@1 79.688 (73.585) Prec@5 98.438 (98.282)
Epoch: [3][390/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 0.8228 (0.7424) Prec@1 67.188 (73.609) Prec@5 96.875 (98.262)
Epoch: [3][400/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.7058 (0.7423) Prec@1 78.125 (73.570) Prec@5 98.438 (98.266)
Epoch: [3][410/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.9410 (0.7417) Prec@1 68.750 (73.620) Prec@5 92.188 (98.266)
Epoch: [3][420/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.8314 (0.7415) Prec@1 73.438 (73.679) Prec@5 100.000 (98.278)
Epoch: [3][430/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.8712 (0.7426) Prec@1 71.875 (73.680) Prec@5 96.875 (98.267)
Epoch: [3][440/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.7472 (0.7437) Prec@1 70.312 (73.650) Prec@5 100.000 (98.257)
Epoch: [3][450/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.6534 (0.7459) Prec@1 78.125 (73.586) Prec@5 98.438 (98.223)
Epoch: [3][460/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.8931 (0.7467) Prec@1 73.438 (73.556) Prec@5 98.438 (98.227)
Epoch: [3][470/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.6560 (0.7457) Prec@1 75.000 (73.580) Prec@5 98.438 (98.202)
Epoch: [3][480/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.8240 (0.7459) Prec@1 67.188 (73.593) Prec@5 96.875 (98.181)
Epoch: [3][490/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.8387 (0.7443) Prec@1 71.875 (73.663) Prec@5 96.875 (98.189)
Epoch: [3][500/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.7304 (0.7443) Prec@1 78.125 (73.687) Prec@5 93.750 (98.160)
Epoch: [3][510/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.8495 (0.7447) Prec@1 65.625 (73.664) Prec@5 100.000 (98.153)
Epoch: [3][520/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.6077 (0.7440) Prec@1 78.125 (73.710) Prec@5 100.000 (98.168)
Epoch: [3][530/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.5718 (0.7430) Prec@1 79.688 (73.723) Prec@5 98.438 (98.170)
Epoch: [3][540/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.7403 (0.7417) Prec@1 73.438 (73.770) Prec@5 98.438 (98.169)
Epoch: [3][550/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.7350 (0.7410) Prec@1 75.000 (73.803) Prec@5 100.000 (98.168)
Epoch: [3][560/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.8358 (0.7426) Prec@1 67.188 (73.730) Prec@5 96.875 (98.167)
Epoch: [3][570/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.7361 (0.7427) Prec@1 78.125 (73.741) Prec@5 98.438 (98.164)
Epoch: [3][580/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.8742 (0.7432) Prec@1 71.875 (73.688) Prec@5 100.000 (98.163)
Epoch: [3][590/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.7109 (0.7436) Prec@1 76.562 (73.654) Prec@5 98.438 (98.168)
Epoch: [3][600/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.5570 (0.7438) Prec@1 76.562 (73.645) Prec@5 98.438 (98.165)
Epoch: [3][610/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.8779 (0.7443) Prec@1 70.312 (73.642) Prec@5 100.000 (98.177)
Epoch: [3][620/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.8649 (0.7427) Prec@1 71.875 (73.689) Prec@5 93.750 (98.173)
Epoch: [3][630/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.7853 (0.7422) Prec@1 62.500 (73.695) Prec@5 98.438 (98.182)
Epoch: [3][640/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.6710 (0.7414) Prec@1 71.875 (73.708) Prec@5 98.438 (98.196)
Epoch: [3][650/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.7366 (0.7403) Prec@1 71.875 (73.754) Prec@5 96.875 (98.197)
Epoch: [3][660/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.6972 (0.7398) Prec@1 76.562 (73.764) Prec@5 98.438 (98.206)
Epoch: [3][670/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.8772 (0.7392) Prec@1 68.750 (73.768) Prec@5 98.438 (98.212)
Epoch: [3][680/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.6219 (0.7387) Prec@1 78.125 (73.807) Prec@5 98.438 (98.208)
Epoch: [3][690/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.7301 (0.7390) Prec@1 71.875 (73.799) Prec@5 100.000 (98.191)
Epoch: [3][700/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.5578 (0.7381) Prec@1 81.250 (73.832) Prec@5 100.000 (98.201)
Epoch: [3][710/782] Time 0.188 (0.193) Data 0.000 (0.001) Loss 0.5574 (0.7377) Prec@1 81.250 (73.862) Prec@5 100.000 (98.200)
Epoch: [3][720/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.7413 (0.7385) Prec@1 75.000 (73.864) Prec@5 100.000 (98.195)
Epoch: [3][730/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.6209 (0.7378) Prec@1 78.125 (73.908) Prec@5 98.438 (98.192)
Epoch: [3][740/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.8591 (0.7374) Prec@1 60.938 (73.914) Prec@5 98.438 (98.197)
Epoch: [3][750/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.6292 (0.7374) Prec@1 76.562 (73.901) Prec@5 96.875 (98.198)
Epoch: [3][760/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.7886 (0.7368) Prec@1 78.125 (73.936) Prec@5 100.000 (98.201)
Epoch: [3][770/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.7339 (0.7375) Prec@1 73.438 (73.904) Prec@5 96.875 (98.206)
Epoch: [3][780/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.6249 (0.7364) Prec@1 76.562 (73.932) Prec@5 100.000 (98.213)
Test: [0/157] Time 0.080 (0.080) Loss 0.9215 (0.9215) Prec@1 71.875 (71.875) Prec@5 93.750 (93.750)
Test: [10/157] Time 0.028 (0.034) Loss 1.0173 (0.9608) Prec@1 65.625 (68.182) Prec@5 96.875 (95.455)
Test: [20/157] Time 0.029 (0.032) Loss 1.0302 (0.9731) Prec@1 62.500 (67.039) Prec@5 92.188 (96.131)
Test: [30/157] Time 0.031 (0.031) Loss 0.9393 (0.9684) Prec@1 71.875 (67.641) Prec@5 95.312 (96.472)
Test: [40/157] Time 0.029 (0.030) Loss 0.7050 (0.9663) Prec@1 78.125 (67.645) Prec@5 98.438 (96.761)
Test: [50/157] Time 0.028 (0.030) Loss 1.0208 (0.9767) Prec@1 65.625 (67.494) Prec@5 98.438 (96.783)
Test: [60/157] Time 0.028 (0.030) Loss 0.9287 (0.9727) Prec@1 71.875 (67.649) Prec@5 96.875 (96.824)
Test: [70/157] Time 0.033 (0.030) Loss 0.7590 (0.9631) Prec@1 75.000 (67.738) Prec@5 96.875 (96.963)
Test: [80/157] Time 0.029 (0.030) Loss 1.0631 (0.9699) Prec@1 64.062 (67.670) Prec@5 98.438 (96.933)
Test: [90/157] Time 0.029 (0.030) Loss 0.8069 (0.9521) Prec@1 76.562 (68.252) Prec@5 96.875 (96.995)
Test: [100/157] Time 0.029 (0.029) Loss 0.9976 (0.9581) Prec@1 67.188 (68.100) Prec@5 96.875 (96.937)
Test: [110/157] Time 0.029 (0.029) Loss 1.1977 (0.9585) Prec@1 60.938 (68.018) Prec@5 92.188 (97.016)
Test: [120/157] Time 0.028 (0.029) Loss 0.9998 (0.9560) Prec@1 65.625 (68.040) Prec@5 96.875 (97.056)
Test: [130/157] Time 0.029 (0.029) Loss 0.8610 (0.9580) Prec@1 67.188 (67.927) Prec@5 98.438 (96.982)
Test: [140/157] Time 0.030 (0.029) Loss 0.7140 (0.9652) Prec@1 76.562 (67.775) Prec@5 98.438 (96.919)
Test: [150/157] Time 0.030 (0.029) Loss 1.2234 (0.9638) Prec@1 62.500 (67.870) Prec@5 95.312 (96.937)
* Prec@1 67.850 Prec@5 96.990
Epoch: [4][0/782] Time 0.072 (0.072) Data 0.021 (0.021) Loss 0.7031 (0.7031) Prec@1 71.875 (71.875) Prec@5 100.000 (100.000)
Epoch: [4][10/782] Time 0.188 (0.181) Data 0.001 (0.002) Loss 0.4536 (0.6041) Prec@1 84.375 (78.267) Prec@5 96.875 (98.438)
Epoch: [4][20/782] Time 0.197 (0.187) Data 0.000 (0.002) Loss 0.4566 (0.6177) Prec@1 84.375 (77.902) Prec@5 100.000 (98.438)
Epoch: [4][30/782] Time 0.195 (0.189) Data 0.000 (0.001) Loss 0.7092 (0.6039) Prec@1 78.125 (78.276) Prec@5 100.000 (98.538)
Epoch: [4][40/782] Time 0.192 (0.190) Data 0.001 (0.001) Loss 0.6802 (0.5989) Prec@1 75.000 (78.735) Prec@5 95.312 (98.590)
Epoch: [4][50/782] Time 0.190 (0.190) Data 0.001 (0.001) Loss 0.7332 (0.5947) Prec@1 75.000 (78.830) Prec@5 96.875 (98.621)
Epoch: [4][60/782] Time 0.189 (0.191) Data 0.001 (0.001) Loss 0.8251 (0.5901) Prec@1 71.875 (79.073) Prec@5 98.438 (98.668)
Epoch: [4][70/782] Time 0.193 (0.191) Data 0.000 (0.001) Loss 0.6440 (0.5954) Prec@1 76.562 (78.829) Prec@5 100.000 (98.768)
Epoch: [4][80/782] Time 0.192 (0.191) Data 0.001 (0.001) Loss 0.6876 (0.5946) Prec@1 76.562 (78.858) Prec@5 95.312 (98.804)
Epoch: [4][90/782] Time 0.191 (0.192) Data 0.001 (0.001) Loss 0.6188 (0.5862) Prec@1 79.688 (79.258) Prec@5 98.438 (98.832)
Epoch: [4][100/782] Time 0.201 (0.192) Data 0.000 (0.001) Loss 0.5709 (0.5822) Prec@1 81.250 (79.486) Prec@5 98.438 (98.855)
Epoch: [4][110/782] Time 0.195 (0.192) Data 0.000 (0.001) Loss 0.4361 (0.5801) Prec@1 84.375 (79.420) Prec@5 98.438 (98.888)
Epoch: [4][120/782] Time 0.198 (0.192) Data 0.001 (0.001) Loss 0.4934 (0.5803) Prec@1 79.688 (79.455) Prec@5 98.438 (98.902)
Epoch: [4][130/782] Time 0.195 (0.192) Data 0.001 (0.001) Loss 0.7851 (0.5843) Prec@1 76.562 (79.449) Prec@5 95.312 (98.867)
Epoch: [4][140/782] Time 0.190 (0.192) Data 0.000 (0.001) Loss 0.4069 (0.5838) Prec@1 84.375 (79.399) Prec@5 100.000 (98.859)
Epoch: [4][150/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.4838 (0.5809) Prec@1 81.250 (79.439) Prec@5 98.438 (98.872)
Epoch: [4][160/782] Time 0.194 (0.192) Data 0.000 (0.001) Loss 0.7359 (0.5845) Prec@1 78.125 (79.387) Prec@5 98.438 (98.855)
Epoch: [4][170/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.6780 (0.5889) Prec@1 75.000 (79.203) Prec@5 96.875 (98.849)
Epoch: [4][180/782] Time 0.193 (0.192) Data 0.000 (0.001) Loss 0.4145 (0.5877) Prec@1 85.938 (79.178) Prec@5 100.000 (98.878)
Epoch: [4][190/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.3386 (0.5873) Prec@1 84.375 (79.197) Prec@5 100.000 (98.830)
Epoch: [4][200/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.4872 (0.5866) Prec@1 81.250 (79.206) Prec@5 100.000 (98.826)
Epoch: [4][210/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.4237 (0.5861) Prec@1 84.375 (79.191) Prec@5 100.000 (98.837)
Epoch: [4][220/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.5069 (0.5852) Prec@1 82.812 (79.256) Prec@5 98.438 (98.812)
Epoch: [4][230/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.4883 (0.5849) Prec@1 85.938 (79.315) Prec@5 98.438 (98.810)
Epoch: [4][240/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.5146 (0.5852) Prec@1 81.250 (79.318) Prec@5 98.438 (98.833)
Epoch: [4][250/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.7238 (0.5868) Prec@1 75.000 (79.208) Prec@5 96.875 (98.830)
Epoch: [4][260/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.8365 (0.5884) Prec@1 65.625 (79.185) Prec@5 98.438 (98.845)
Epoch: [4][270/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.6498 (0.5885) Prec@1 84.375 (79.278) Prec@5 96.875 (98.830)
Epoch: [4][280/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.6043 (0.5870) Prec@1 76.562 (79.276) Prec@5 98.438 (98.849)
Epoch: [4][290/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.5996 (0.5888) Prec@1 76.562 (79.161) Prec@5 98.438 (98.840)
Epoch: [4][300/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.4481 (0.5872) Prec@1 84.375 (79.179) Prec@5 96.875 (98.848)
Epoch: [4][310/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.7947 (0.5896) Prec@1 70.312 (79.054) Prec@5 98.438 (98.860)
Epoch: [4][320/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.5096 (0.5899) Prec@1 84.375 (79.055) Prec@5 96.875 (98.842)
Epoch: [4][330/782] Time 0.201 (0.193) Data 0.000 (0.001) Loss 0.5830 (0.5924) Prec@1 81.250 (78.984) Prec@5 100.000 (98.810)
Epoch: [4][340/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.6067 (0.5920) Prec@1 73.438 (78.982) Prec@5 98.438 (98.822)
Epoch: [4][350/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.8392 (0.5949) Prec@1 67.188 (78.922) Prec@5 98.438 (98.829)
Epoch: [4][360/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.6110 (0.5969) Prec@1 79.688 (78.848) Prec@5 98.438 (98.801)
Epoch: [4][370/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.6025 (0.5955) Prec@1 81.250 (78.917) Prec@5 96.875 (98.821)
Epoch: [4][380/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.6541 (0.5943) Prec@1 79.688 (78.982) Prec@5 98.438 (98.827)
Epoch: [4][390/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.5660 (0.5950) Prec@1 76.562 (78.928) Prec@5 100.000 (98.829)
Epoch: [4][400/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.6957 (0.5935) Prec@1 68.750 (78.963) Prec@5 98.438 (98.835)
Epoch: [4][410/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.6391 (0.5942) Prec@1 81.250 (79.011) Prec@5 100.000 (98.840)
Epoch: [4][420/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.5863 (0.5940) Prec@1 78.125 (79.019) Prec@5 98.438 (98.842)
Epoch: [4][430/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.8292 (0.5936) Prec@1 67.188 (79.060) Prec@5 98.438 (98.851)
Epoch: [4][440/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.5260 (0.5948) Prec@1 78.125 (78.990) Prec@5 100.000 (98.856)
Epoch: [4][450/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.6723 (0.5951) Prec@1 75.000 (78.967) Prec@5 98.438 (98.853)
Epoch: [4][460/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.6571 (0.5951) Prec@1 70.312 (78.959) Prec@5 100.000 (98.861)
Epoch: [4][470/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.7208 (0.5954) Prec@1 73.438 (78.948) Prec@5 98.438 (98.862)
Epoch: [4][480/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.6583 (0.5942) Prec@1 76.562 (79.002) Prec@5 98.438 (98.860)
Epoch: [4][490/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.5787 (0.5941) Prec@1 78.125 (78.997) Prec@5 100.000 (98.867)
Epoch: [4][500/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.5553 (0.5945) Prec@1 76.562 (78.964) Prec@5 100.000 (98.880)
Epoch: [4][510/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.5691 (0.5958) Prec@1 82.812 (78.896) Prec@5 100.000 (98.856)
Epoch: [4][520/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.7058 (0.5960) Prec@1 70.312 (78.881) Prec@5 98.438 (98.848)
Epoch: [4][530/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.5199 (0.5953) Prec@1 89.062 (78.899) Prec@5 96.875 (98.849)
Epoch: [4][540/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.6058 (0.5951) Prec@1 75.000 (78.902) Prec@5 100.000 (98.856)
Epoch: [4][550/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.6323 (0.5948) Prec@1 75.000 (78.913) Prec@5 98.438 (98.852)
Epoch: [4][560/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.7480 (0.5961) Prec@1 78.125 (78.902) Prec@5 96.875 (98.839)
Epoch: [4][570/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.7797 (0.5963) Prec@1 75.000 (78.908) Prec@5 100.000 (98.845)
Epoch: [4][580/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.4795 (0.5961) Prec@1 85.938 (78.918) Prec@5 100.000 (98.849)
Epoch: [4][590/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.6427 (0.5964) Prec@1 76.562 (78.916) Prec@5 98.438 (98.842)
Epoch: [4][600/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.5328 (0.5961) Prec@1 84.375 (78.941) Prec@5 98.438 (98.840)
Epoch: [4][610/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.5386 (0.5954) Prec@1 82.812 (78.971) Prec@5 98.438 (98.844)
Epoch: [4][620/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.6037 (0.5954) Prec@1 79.688 (78.970) Prec@5 98.438 (98.845)
Epoch: [4][630/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.5140 (0.5952) Prec@1 79.688 (78.967) Prec@5 98.438 (98.839)
Epoch: [4][640/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.4864 (0.5954) Prec@1 81.250 (78.951) Prec@5 98.438 (98.842)
Epoch: [4][650/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.7486 (0.5957) Prec@1 70.312 (78.953) Prec@5 100.000 (98.843)
Epoch: [4][660/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.6848 (0.5965) Prec@1 78.125 (78.924) Prec@5 95.312 (98.830)
Epoch: [4][670/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.5271 (0.5961) Prec@1 82.812 (78.924) Prec@5 100.000 (98.833)
Epoch: [4][680/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.5792 (0.5951) Prec@1 81.250 (78.965) Prec@5 98.438 (98.830)
Epoch: [4][690/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.6623 (0.5948) Prec@1 78.125 (79.002) Prec@5 96.875 (98.822)
Epoch: [4][700/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.7616 (0.5942) Prec@1 70.312 (79.014) Prec@5 96.875 (98.830)
Epoch: [4][710/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.4095 (0.5926) Prec@1 84.375 (79.066) Prec@5 100.000 (98.840)
Epoch: [4][720/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.7171 (0.5933) Prec@1 70.312 (79.040) Prec@5 100.000 (98.834)
Epoch: [4][730/782] Time 0.204 (0.193) Data 0.000 (0.001) Loss 0.6931 (0.5943) Prec@1 75.000 (78.989) Prec@5 98.438 (98.835)
Epoch: [4][740/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.4477 (0.5939) Prec@1 84.375 (78.987) Prec@5 96.875 (98.838)
Epoch: [4][750/782] Time 0.200 (0.193) Data 0.000 (0.001) Loss 0.5157 (0.5942) Prec@1 75.000 (78.982) Prec@5 100.000 (98.833)
Epoch: [4][760/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.6251 (0.5941) Prec@1 79.688 (78.979) Prec@5 96.875 (98.832)
Epoch: [4][770/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.6151 (0.5936) Prec@1 76.562 (79.005) Prec@5 100.000 (98.831)
Epoch: [4][780/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 0.5590 (0.5932) Prec@1 82.812 (79.015) Prec@5 100.000 (98.834)
Test: [0/157] Time 0.078 (0.078) Loss 0.6734 (0.6734) Prec@1 76.562 (76.562) Prec@5 96.875 (96.875)
Test: [10/157] Time 0.033 (0.034) Loss 0.6832 (0.8552) Prec@1 78.125 (71.875) Prec@5 96.875 (97.727)
Test: [20/157] Time 0.028 (0.032) Loss 0.7421 (0.8726) Prec@1 75.000 (70.833) Prec@5 98.438 (97.619)
Test: [30/157] Time 0.030 (0.031) Loss 0.9610 (0.8633) Prec@1 68.750 (71.472) Prec@5 96.875 (97.581)
Test: [40/157] Time 0.029 (0.031) Loss 0.8122 (0.8506) Prec@1 62.500 (71.418) Prec@5 100.000 (97.752)
Test: [50/157] Time 0.029 (0.031) Loss 0.6656 (0.8192) Prec@1 76.562 (72.518) Prec@5 96.875 (97.702)
Test: [60/157] Time 0.029 (0.031) Loss 0.8534 (0.8126) Prec@1 78.125 (72.720) Prec@5 93.750 (97.746)
Test: [70/157] Time 0.029 (0.030) Loss 0.7949 (0.8148) Prec@1 73.438 (72.601) Prec@5 95.312 (97.755)
Test: [80/157] Time 0.029 (0.030) Loss 0.8358 (0.8141) Prec@1 67.188 (72.396) Prec@5 96.875 (97.685)
Test: [90/157] Time 0.029 (0.030) Loss 0.6117 (0.8100) Prec@1 78.125 (72.424) Prec@5 98.438 (97.734)
Test: [100/157] Time 0.029 (0.030) Loss 0.9083 (0.8071) Prec@1 67.188 (72.571) Prec@5 95.312 (97.633)
Test: [110/157] Time 0.030 (0.030) Loss 0.6693 (0.8112) Prec@1 73.438 (72.396) Prec@5 98.438 (97.551)
Test: [120/157] Time 0.029 (0.030) Loss 0.7871 (0.8146) Prec@1 75.000 (72.379) Prec@5 100.000 (97.534)
Test: [130/157] Time 0.029 (0.030) Loss 0.8617 (0.8152) Prec@1 71.875 (72.412) Prec@5 100.000 (97.579)
Test: [140/157] Time 0.029 (0.030) Loss 1.0295 (0.8147) Prec@1 67.188 (72.507) Prec@5 93.750 (97.584)
Test: [150/157] Time 0.029 (0.030) Loss 0.9672 (0.8178) Prec@1 65.625 (72.320) Prec@5 95.312 (97.599)
* Prec@1 72.220 Prec@5 97.590
Epoch: [5][0/782] Time 0.072 (0.072) Data 0.022 (0.022) Loss 0.5195 (0.5195) Prec@1 81.250 (81.250) Prec@5 100.000 (100.000)
Epoch: [5][10/782] Time 0.193 (0.183) Data 0.000 (0.003) Loss 0.3890 (0.4748) Prec@1 84.375 (83.807) Prec@5 100.000 (99.432)
Epoch: [5][20/782] Time 0.190 (0.188) Data 0.001 (0.002) Loss 0.2825 (0.4225) Prec@1 87.500 (85.193) Prec@5 100.000 (99.479)
Epoch: [5][30/782] Time 0.193 (0.189) Data 0.001 (0.001) Loss 0.4356 (0.4394) Prec@1 82.812 (84.980) Prec@5 98.438 (99.446)
Epoch: [5][40/782] Time 0.193 (0.191) Data 0.001 (0.001) Loss 0.2785 (0.4387) Prec@1 92.188 (85.175) Prec@5 100.000 (99.428)
Epoch: [5][50/782] Time 0.198 (0.191) Data 0.000 (0.001) Loss 0.4183 (0.4383) Prec@1 89.062 (85.172) Prec@5 98.438 (99.418)
Epoch: [5][60/782] Time 0.199 (0.192) Data 0.001 (0.001) Loss 0.4858 (0.4374) Prec@1 84.375 (85.323) Prec@5 98.438 (99.436)
Epoch: [5][70/782] Time 0.192 (0.192) Data 0.000 (0.001) Loss 0.4265 (0.4346) Prec@1 82.812 (85.409) Prec@5 100.000 (99.450)
Epoch: [5][80/782] Time 0.194 (0.192) Data 0.000 (0.001) Loss 0.3969 (0.4293) Prec@1 92.188 (85.475) Prec@5 96.875 (99.421)
Epoch: [5][90/782] Time 0.191 (0.192) Data 0.000 (0.001) Loss 0.3306 (0.4173) Prec@1 87.500 (85.852) Prec@5 100.000 (99.468)
Epoch: [5][100/782] Time 0.194 (0.192) Data 0.000 (0.001) Loss 0.4046 (0.4190) Prec@1 92.188 (85.845) Prec@5 100.000 (99.397)
Epoch: [5][110/782] Time 0.197 (0.192) Data 0.000 (0.001) Loss 0.4847 (0.4180) Prec@1 84.375 (85.909) Prec@5 100.000 (99.395)
Epoch: [5][120/782] Time 0.190 (0.192) Data 0.000 (0.001) Loss 0.3416 (0.4198) Prec@1 89.062 (85.744) Prec@5 100.000 (99.406)
Epoch: [5][130/782] Time 0.192 (0.192) Data 0.000 (0.001) Loss 0.5177 (0.4203) Prec@1 76.562 (85.568) Prec@5 98.438 (99.404)
Epoch: [5][140/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.4433 (0.4164) Prec@1 85.938 (85.738) Prec@5 98.438 (99.391)
Epoch: [5][150/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.6265 (0.4171) Prec@1 78.125 (85.710) Prec@5 100.000 (99.410)
Epoch: [5][160/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.3350 (0.4169) Prec@1 89.062 (85.753) Prec@5 100.000 (99.437)
Epoch: [5][170/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.5334 (0.4183) Prec@1 79.688 (85.700) Prec@5 100.000 (99.433)
Epoch: [5][180/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.4386 (0.4198) Prec@1 84.375 (85.618) Prec@5 100.000 (99.439)
Epoch: [5][190/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.6219 (0.4225) Prec@1 87.500 (85.586) Prec@5 98.438 (99.444)
Epoch: [5][200/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.5743 (0.4243) Prec@1 79.688 (85.494) Prec@5 98.438 (99.433)
Epoch: [5][210/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3316 (0.4244) Prec@1 90.625 (85.471) Prec@5 98.438 (99.437)
Epoch: [5][220/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.2500 (0.4244) Prec@1 92.188 (85.450) Prec@5 100.000 (99.441)
Epoch: [5][230/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.4490 (0.4268) Prec@1 85.938 (85.390) Prec@5 98.438 (99.418)
Epoch: [5][240/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.4130 (0.4272) Prec@1 90.625 (85.393) Prec@5 98.438 (99.423)
Epoch: [5][250/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.6202 (0.4283) Prec@1 76.562 (85.346) Prec@5 100.000 (99.421)
Epoch: [5][260/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.3543 (0.4282) Prec@1 89.062 (85.381) Prec@5 100.000 (99.401)
Epoch: [5][270/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.5303 (0.4278) Prec@1 84.375 (85.378) Prec@5 96.875 (99.389)
Epoch: [5][280/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.3779 (0.4294) Prec@1 89.062 (85.348) Prec@5 100.000 (99.377)
Epoch: [5][290/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.3341 (0.4288) Prec@1 85.938 (85.331) Prec@5 100.000 (99.388)
Epoch: [5][300/782] Time 0.200 (0.193) Data 0.001 (0.001) Loss 0.4382 (0.4306) Prec@1 85.938 (85.247) Prec@5 100.000 (99.377)
Epoch: [5][310/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.5745 (0.4326) Prec@1 81.250 (85.199) Prec@5 100.000 (99.357)
Epoch: [5][320/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.3321 (0.4321) Prec@1 89.062 (85.276) Prec@5 98.438 (99.348)
Epoch: [5][330/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.4212 (0.4347) Prec@1 79.688 (85.140) Prec@5 100.000 (99.344)
Epoch: [5][340/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.3766 (0.4342) Prec@1 89.062 (85.159) Prec@5 100.000 (99.359)
Epoch: [5][350/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3919 (0.4371) Prec@1 85.938 (85.065) Prec@5 98.438 (99.332)
Epoch: [5][360/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.3462 (0.4386) Prec@1 90.625 (85.033) Prec@5 100.000 (99.320)
Epoch: [5][370/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.2973 (0.4396) Prec@1 89.062 (85.011) Prec@5 100.000 (99.318)
Epoch: [5][380/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.4032 (0.4388) Prec@1 84.375 (84.982) Prec@5 100.000 (99.323)
Epoch: [5][390/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.4885 (0.4381) Prec@1 84.375 (85.014) Prec@5 98.438 (99.321)
Epoch: [5][400/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.5320 (0.4384) Prec@1 84.375 (85.002) Prec@5 98.438 (99.318)
Epoch: [5][410/782] Time 0.200 (0.193) Data 0.001 (0.001) Loss 0.4556 (0.4388) Prec@1 84.375 (84.972) Prec@5 100.000 (99.323)
Epoch: [5][420/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 0.3919 (0.4386) Prec@1 81.250 (84.980) Prec@5 100.000 (99.325)
Epoch: [5][430/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.5316 (0.4391) Prec@1 84.375 (84.959) Prec@5 100.000 (99.329)
Epoch: [5][440/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.4871 (0.4385) Prec@1 82.812 (84.967) Prec@5 98.438 (99.337)
Epoch: [5][450/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.4535 (0.4385) Prec@1 84.375 (84.936) Prec@5 100.000 (99.345)
Epoch: [5][460/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.4285 (0.4372) Prec@1 85.938 (84.988) Prec@5 100.000 (99.353)
Epoch: [5][470/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.3742 (0.4372) Prec@1 85.938 (84.992) Prec@5 98.438 (99.350)
Epoch: [5][480/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.3048 (0.4375) Prec@1 89.062 (85.012) Prec@5 100.000 (99.350)
Epoch: [5][490/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.3772 (0.4368) Prec@1 84.375 (85.046) Prec@5 100.000 (99.348)
Epoch: [5][500/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.4233 (0.4367) Prec@1 82.812 (85.046) Prec@5 100.000 (99.358)
Epoch: [5][510/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.4294 (0.4377) Prec@1 84.375 (85.002) Prec@5 100.000 (99.361)
Epoch: [5][520/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.4725 (0.4376) Prec@1 81.250 (84.963) Prec@5 98.438 (99.361)
Epoch: [5][530/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.3947 (0.4368) Prec@1 89.062 (84.996) Prec@5 98.438 (99.367)
Epoch: [5][540/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.3345 (0.4365) Prec@1 82.812 (84.993) Prec@5 100.000 (99.379)
Epoch: [5][550/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.5118 (0.4369) Prec@1 78.125 (84.968) Prec@5 98.438 (99.379)
Epoch: [5][560/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.5431 (0.4384) Prec@1 84.375 (84.918) Prec@5 98.438 (99.376)
Epoch: [5][570/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.2769 (0.4382) Prec@1 87.500 (84.914) Prec@5 100.000 (99.382)
Epoch: [5][580/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.4643 (0.4386) Prec@1 87.500 (84.913) Prec@5 100.000 (99.379)
Epoch: [5][590/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.4740 (0.4382) Prec@1 78.125 (84.922) Prec@5 100.000 (99.373)
Epoch: [5][600/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.4964 (0.4381) Prec@1 87.500 (84.908) Prec@5 100.000 (99.376)
Epoch: [5][610/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.3096 (0.4384) Prec@1 89.062 (84.874) Prec@5 100.000 (99.379)
Epoch: [5][620/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.5942 (0.4394) Prec@1 81.250 (84.856) Prec@5 98.438 (99.368)
Epoch: [5][630/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.8238 (0.4396) Prec@1 78.125 (84.855) Prec@5 96.875 (99.369)
Epoch: [5][640/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.3444 (0.4393) Prec@1 87.500 (84.853) Prec@5 100.000 (99.369)
Epoch: [5][650/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.4280 (0.4396) Prec@1 85.938 (84.857) Prec@5 100.000 (99.366)
Epoch: [5][660/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.3256 (0.4392) Prec@1 89.062 (84.876) Prec@5 100.000 (99.369)
Epoch: [5][670/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.5201 (0.4395) Prec@1 82.812 (84.873) Prec@5 100.000 (99.367)
Epoch: [5][680/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.5562 (0.4408) Prec@1 79.688 (84.822) Prec@5 96.875 (99.367)
Epoch: [5][690/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.6242 (0.4416) Prec@1 81.250 (84.800) Prec@5 96.875 (99.360)
Epoch: [5][700/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.5362 (0.4420) Prec@1 84.375 (84.792) Prec@5 98.438 (99.358)
Epoch: [5][710/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.5071 (0.4423) Prec@1 76.562 (84.762) Prec@5 100.000 (99.356)
Epoch: [5][720/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.4123 (0.4423) Prec@1 81.250 (84.765) Prec@5 100.000 (99.359)
Epoch: [5][730/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.5705 (0.4424) Prec@1 78.125 (84.755) Prec@5 100.000 (99.363)
Epoch: [5][740/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.4581 (0.4422) Prec@1 85.938 (84.767) Prec@5 98.438 (99.357)
Epoch: [5][750/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.4957 (0.4421) Prec@1 81.250 (84.760) Prec@5 98.438 (99.355)
Epoch: [5][760/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.4999 (0.4419) Prec@1 85.938 (84.755) Prec@5 100.000 (99.355)
Epoch: [5][770/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.8157 (0.4419) Prec@1 78.125 (84.752) Prec@5 100.000 (99.362)
Epoch: [5][780/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.6032 (0.4424) Prec@1 81.250 (84.743) Prec@5 96.875 (99.350)
Test: [0/157] Time 0.079 (0.079) Loss 1.0463 (1.0463) Prec@1 70.312 (70.312) Prec@5 96.875 (96.875)
Test: [10/157] Time 0.029 (0.034) Loss 0.8305 (0.9847) Prec@1 73.438 (69.744) Prec@5 96.875 (97.017)
Test: [20/157] Time 0.029 (0.032) Loss 0.8050 (0.9218) Prec@1 75.000 (71.131) Prec@5 98.438 (97.321)
Test: [30/157] Time 0.029 (0.031) Loss 1.0127 (0.9123) Prec@1 70.312 (71.119) Prec@5 100.000 (97.480)
Test: [40/157] Time 0.029 (0.031) Loss 0.8980 (0.8894) Prec@1 70.312 (71.380) Prec@5 93.750 (97.637)
Test: [50/157] Time 0.029 (0.030) Loss 0.8679 (0.8776) Prec@1 75.000 (71.661) Prec@5 96.875 (97.763)
Test: [60/157] Time 0.029 (0.030) Loss 0.6632 (0.8724) Prec@1 79.688 (71.644) Prec@5 96.875 (97.592)
Test: [70/157] Time 0.029 (0.030) Loss 0.9317 (0.8681) Prec@1 67.188 (71.721) Prec@5 96.875 (97.711)
Test: [80/157] Time 0.028 (0.030) Loss 0.7864 (0.8729) Prec@1 73.438 (71.817) Prec@5 95.312 (97.531)
Test: [90/157] Time 0.029 (0.030) Loss 1.2743 (0.8732) Prec@1 59.375 (71.961) Prec@5 96.875 (97.476)
Test: [100/157] Time 0.032 (0.030) Loss 0.7353 (0.8702) Prec@1 76.562 (72.215) Prec@5 96.875 (97.478)
Test: [110/157] Time 0.030 (0.030) Loss 0.9003 (0.8695) Prec@1 75.000 (72.269) Prec@5 93.750 (97.452)
Test: [120/157] Time 0.029 (0.030) Loss 0.9467 (0.8562) Prec@1 73.438 (72.611) Prec@5 100.000 (97.534)
Test: [130/157] Time 0.029 (0.030) Loss 1.1018 (0.8548) Prec@1 65.625 (72.650) Prec@5 98.438 (97.555)
Test: [140/157] Time 0.029 (0.030) Loss 0.6352 (0.8479) Prec@1 82.812 (72.773) Prec@5 98.438 (97.629)
Test: [150/157] Time 0.029 (0.030) Loss 0.7234 (0.8436) Prec@1 78.125 (72.806) Prec@5 100.000 (97.630)
* Prec@1 72.930 Prec@5 97.660
Epoch: [6][0/782] Time 0.074 (0.074) Data 0.022 (0.022) Loss 0.3121 (0.3121) Prec@1 92.188 (92.188) Prec@5 100.000 (100.000)
Epoch: [6][10/782] Time 0.198 (0.183) Data 0.001 (0.002) Loss 0.3096 (0.3141) Prec@1 85.938 (89.489) Prec@5 100.000 (99.858)
Epoch: [6][20/782] Time 0.195 (0.188) Data 0.001 (0.002) Loss 0.2513 (0.3388) Prec@1 92.188 (88.393) Prec@5 98.438 (99.628)
Epoch: [6][30/782] Time 0.197 (0.190) Data 0.000 (0.001) Loss 0.3014 (0.3180) Prec@1 87.500 (89.062) Prec@5 100.000 (99.647)
Epoch: [6][40/782] Time 0.198 (0.191) Data 0.000 (0.001) Loss 0.3390 (0.3150) Prec@1 90.625 (89.405) Prec@5 96.875 (99.505)
Epoch: [6][50/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.2819 (0.3054) Prec@1 92.188 (89.737) Prec@5 100.000 (99.571)
Epoch: [6][60/782] Time 0.196 (0.192) Data 0.001 (0.001) Loss 0.2742 (0.3064) Prec@1 90.625 (89.575) Prec@5 100.000 (99.616)
Epoch: [6][70/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.2852 (0.3038) Prec@1 89.062 (89.723) Prec@5 100.000 (99.604)
Epoch: [6][80/782] Time 0.203 (0.192) Data 0.001 (0.001) Loss 0.2701 (0.2974) Prec@1 93.750 (89.873) Prec@5 100.000 (99.653)
Epoch: [6][90/782] Time 0.192 (0.192) Data 0.001 (0.001) Loss 0.3281 (0.2965) Prec@1 85.938 (89.973) Prec@5 98.438 (99.657)
Epoch: [6][100/782] Time 0.191 (0.192) Data 0.000 (0.001) Loss 0.2767 (0.2926) Prec@1 92.188 (90.053) Prec@5 100.000 (99.691)
Epoch: [6][110/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.1988 (0.2906) Prec@1 93.750 (90.118) Prec@5 100.000 (99.676)
Epoch: [6][120/782] Time 0.199 (0.192) Data 0.000 (0.001) Loss 0.3487 (0.2915) Prec@1 87.500 (90.096) Prec@5 100.000 (99.677)
Epoch: [6][130/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1792 (0.2925) Prec@1 92.188 (89.993) Prec@5 100.000 (99.654)
Epoch: [6][140/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.2845 (0.2931) Prec@1 92.188 (90.027) Prec@5 100.000 (99.656)
Epoch: [6][150/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.3488 (0.2975) Prec@1 87.500 (89.849) Prec@5 100.000 (99.617)
Epoch: [6][160/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.3262 (0.2993) Prec@1 87.500 (89.810) Prec@5 100.000 (99.622)
Epoch: [6][170/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.2522 (0.2970) Prec@1 92.188 (89.857) Prec@5 100.000 (99.635)
Epoch: [6][180/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.4420 (0.2944) Prec@1 87.500 (89.874) Prec@5 96.875 (99.637)
Epoch: [6][190/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.5423 (0.2948) Prec@1 78.125 (89.897) Prec@5 98.438 (99.648)
Epoch: [6][200/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.3062 (0.2957) Prec@1 90.625 (89.809) Prec@5 100.000 (99.650)
Epoch: [6][210/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.5520 (0.2987) Prec@1 81.250 (89.685) Prec@5 100.000 (99.652)
Epoch: [6][220/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.3624 (0.2992) Prec@1 85.938 (89.649) Prec@5 100.000 (99.646)
Epoch: [6][230/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.3912 (0.3003) Prec@1 87.500 (89.631) Prec@5 100.000 (99.655)
Epoch: [6][240/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.2641 (0.3014) Prec@1 90.625 (89.640) Prec@5 100.000 (99.643)
Epoch: [6][250/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.3713 (0.3023) Prec@1 90.625 (89.654) Prec@5 100.000 (99.626)
Epoch: [6][260/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.2954 (0.3026) Prec@1 90.625 (89.607) Prec@5 100.000 (99.635)
Epoch: [6][270/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.2571 (0.3041) Prec@1 90.625 (89.524) Prec@5 98.438 (99.631)
Epoch: [6][280/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.4710 (0.3050) Prec@1 87.500 (89.513) Prec@5 100.000 (99.639)
Epoch: [6][290/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.2288 (0.3060) Prec@1 92.188 (89.417) Prec@5 100.000 (99.646)
Epoch: [6][300/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.3717 (0.3066) Prec@1 85.938 (89.317) Prec@5 98.438 (99.652)
Epoch: [6][310/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.3180 (0.3072) Prec@1 84.375 (89.258) Prec@5 100.000 (99.658)
Epoch: [6][320/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.2658 (0.3074) Prec@1 89.062 (89.218) Prec@5 100.000 (99.659)
Epoch: [6][330/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.4726 (0.3073) Prec@1 82.812 (89.214) Prec@5 100.000 (99.655)
Epoch: [6][340/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.3695 (0.3082) Prec@1 87.500 (89.205) Prec@5 100.000 (99.656)
Epoch: [6][350/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.1988 (0.3078) Prec@1 93.750 (89.214) Prec@5 100.000 (99.657)
Epoch: [6][360/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.3191 (0.3094) Prec@1 89.062 (89.184) Prec@5 100.000 (99.641)
Epoch: [6][370/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.3906 (0.3109) Prec@1 87.500 (89.126) Prec@5 100.000 (99.642)
Epoch: [6][380/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.3901 (0.3101) Prec@1 87.500 (89.177) Prec@5 100.000 (99.639)
Epoch: [6][390/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.1613 (0.3114) Prec@1 93.750 (89.142) Prec@5 100.000 (99.632)
Epoch: [6][400/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.3784 (0.3119) Prec@1 84.375 (89.125) Prec@5 100.000 (99.634)
Epoch: [6][410/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.2891 (0.3121) Prec@1 89.062 (89.112) Prec@5 100.000 (99.639)
Epoch: [6][420/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.3632 (0.3133) Prec@1 87.500 (89.088) Prec@5 100.000 (99.633)
Epoch: [6][430/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3910 (0.3139) Prec@1 89.062 (89.070) Prec@5 98.438 (99.634)
Epoch: [6][440/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.3548 (0.3138) Prec@1 82.812 (89.066) Prec@5 100.000 (99.635)
Epoch: [6][450/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.3124 (0.3136) Prec@1 87.500 (89.069) Prec@5 100.000 (99.640)
Epoch: [6][460/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.3348 (0.3135) Prec@1 89.062 (89.079) Prec@5 98.438 (99.637)
Epoch: [6][470/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1898 (0.3137) Prec@1 93.750 (89.069) Prec@5 100.000 (99.635)
Epoch: [6][480/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.3676 (0.3157) Prec@1 81.250 (88.968) Prec@5 100.000 (99.639)
Epoch: [6][490/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.4613 (0.3175) Prec@1 84.375 (88.954) Prec@5 100.000 (99.637)
Epoch: [6][500/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.3924 (0.3185) Prec@1 81.250 (88.910) Prec@5 100.000 (99.635)
Epoch: [6][510/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.4620 (0.3191) Prec@1 82.812 (88.891) Prec@5 100.000 (99.633)
Epoch: [6][520/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.2116 (0.3194) Prec@1 89.062 (88.874) Prec@5 100.000 (99.634)
Epoch: [6][530/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.2610 (0.3196) Prec@1 92.188 (88.862) Prec@5 100.000 (99.638)
Epoch: [6][540/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.1936 (0.3195) Prec@1 92.188 (88.892) Prec@5 100.000 (99.630)
Epoch: [6][550/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3638 (0.3204) Prec@1 89.062 (88.890) Prec@5 100.000 (99.626)
Epoch: [6][560/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.2797 (0.3203) Prec@1 92.188 (88.901) Prec@5 100.000 (99.632)
Epoch: [6][570/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.2864 (0.3199) Prec@1 90.625 (88.917) Prec@5 100.000 (99.631)
Epoch: [6][580/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.3566 (0.3200) Prec@1 89.062 (88.928) Prec@5 100.000 (99.634)
Epoch: [6][590/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.3269 (0.3209) Prec@1 92.188 (88.904) Prec@5 98.438 (99.630)
Epoch: [6][600/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.4144 (0.3222) Prec@1 82.812 (88.878) Prec@5 100.000 (99.636)
Epoch: [6][610/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.2564 (0.3226) Prec@1 92.188 (88.848) Prec@5 100.000 (99.637)
Epoch: [6][620/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.4805 (0.3240) Prec@1 82.812 (88.791) Prec@5 98.438 (99.630)
Epoch: [6][630/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.2286 (0.3247) Prec@1 95.312 (88.775) Prec@5 100.000 (99.626)
Epoch: [6][640/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 0.4637 (0.3262) Prec@1 79.688 (88.699) Prec@5 100.000 (99.625)
Epoch: [6][650/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.5457 (0.3269) Prec@1 79.688 (88.681) Prec@5 100.000 (99.623)
Epoch: [6][660/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.2985 (0.3281) Prec@1 87.500 (88.646) Prec@5 100.000 (99.624)
Epoch: [6][670/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.3255 (0.3277) Prec@1 87.500 (88.681) Prec@5 100.000 (99.625)
Epoch: [6][680/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.5427 (0.3287) Prec@1 82.812 (88.640) Prec@5 100.000 (99.626)
Epoch: [6][690/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.2723 (0.3281) Prec@1 85.938 (88.655) Prec@5 100.000 (99.631)
Epoch: [6][700/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.3128 (0.3281) Prec@1 89.062 (88.666) Prec@5 100.000 (99.630)
Epoch: [6][710/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.4843 (0.3287) Prec@1 87.500 (88.663) Prec@5 100.000 (99.631)
Epoch: [6][720/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.4120 (0.3287) Prec@1 81.250 (88.640) Prec@5 100.000 (99.629)
Epoch: [6][730/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.2892 (0.3293) Prec@1 90.625 (88.607) Prec@5 100.000 (99.630)
Epoch: [6][740/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.3956 (0.3298) Prec@1 89.062 (88.588) Prec@5 96.875 (99.631)
Epoch: [6][750/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1340 (0.3289) Prec@1 96.875 (88.624) Prec@5 100.000 (99.634)
Epoch: [6][760/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.3250 (0.3279) Prec@1 90.625 (88.658) Prec@5 100.000 (99.637)
Epoch: [6][770/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.4336 (0.3289) Prec@1 84.375 (88.613) Prec@5 100.000 (99.635)
Epoch: [6][780/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.4251 (0.3290) Prec@1 84.375 (88.616) Prec@5 100.000 (99.632)
Test: [0/157] Time 0.079 (0.079) Loss 0.9442 (0.9442) Prec@1 75.000 (75.000) Prec@5 98.438 (98.438)
Test: [10/157] Time 0.029 (0.034) Loss 0.9695 (0.8887) Prec@1 71.875 (74.006) Prec@5 100.000 (97.301)
Test: [20/157] Time 0.029 (0.031) Loss 1.0081 (0.9162) Prec@1 73.438 (73.438) Prec@5 100.000 (96.875)
Test: [30/157] Time 0.029 (0.031) Loss 1.2067 (0.9151) Prec@1 67.188 (73.690) Prec@5 95.312 (97.228)
Test: [40/157] Time 0.028 (0.030) Loss 0.7248 (0.9068) Prec@1 78.125 (73.933) Prec@5 100.000 (97.409)
Test: [50/157] Time 0.028 (0.030) Loss 0.3566 (0.8950) Prec@1 81.250 (73.652) Prec@5 98.438 (97.426)
Test: [60/157] Time 0.029 (0.030) Loss 0.6717 (0.9057) Prec@1 67.188 (73.181) Prec@5 100.000 (97.515)
Test: [70/157] Time 0.029 (0.030) Loss 0.6448 (0.9181) Prec@1 81.250 (72.997) Prec@5 100.000 (97.447)
Test: [80/157] Time 0.029 (0.030) Loss 0.7373 (0.9121) Prec@1 73.438 (73.110) Prec@5 98.438 (97.589)
Test: [90/157] Time 0.030 (0.029) Loss 0.9625 (0.9180) Prec@1 68.750 (73.094) Prec@5 96.875 (97.476)
Test: [100/157] Time 0.029 (0.029) Loss 0.7747 (0.9175) Prec@1 73.438 (73.113) Prec@5 100.000 (97.478)
Test: [110/157] Time 0.029 (0.029) Loss 0.6029 (0.9162) Prec@1 81.250 (73.043) Prec@5 98.438 (97.508)
Test: [120/157] Time 0.029 (0.029) Loss 1.1358 (0.9167) Prec@1 59.375 (72.818) Prec@5 98.438 (97.495)
Test: [130/157] Time 0.028 (0.029) Loss 0.9448 (0.9187) Prec@1 70.312 (72.817) Prec@5 96.875 (97.424)
Test: [140/157] Time 0.029 (0.029) Loss 0.7413 (0.9068) Prec@1 71.875 (73.005) Prec@5 100.000 (97.507)
Test: [150/157] Time 0.029 (0.029) Loss 1.0658 (0.9139) Prec@1 68.750 (72.879) Prec@5 96.875 (97.475)
* Prec@1 72.960 Prec@5 97.480
Epoch: [7][0/782] Time 0.071 (0.071) Data 0.020 (0.020) Loss 0.2002 (0.2002) Prec@1 92.188 (92.188) Prec@5 100.000 (100.000)
Epoch: [7][10/782] Time 0.191 (0.184) Data 0.000 (0.002) Loss 0.2615 (0.2338) Prec@1 93.750 (92.898) Prec@5 96.875 (99.006)
Epoch: [7][20/782] Time 0.197 (0.188) Data 0.000 (0.001) Loss 0.1279 (0.2300) Prec@1 96.875 (92.634) Prec@5 100.000 (99.405)
Epoch: [7][30/782] Time 0.193 (0.189) Data 0.001 (0.001) Loss 0.1976 (0.2293) Prec@1 93.750 (92.540) Prec@5 100.000 (99.597)
Epoch: [7][40/782] Time 0.189 (0.190) Data 0.000 (0.001) Loss 0.1950 (0.2184) Prec@1 92.188 (92.797) Prec@5 98.438 (99.581)
Epoch: [7][50/782] Time 0.198 (0.191) Data 0.001 (0.001) Loss 0.1523 (0.2120) Prec@1 96.875 (92.953) Prec@5 100.000 (99.632)
Epoch: [7][60/782] Time 0.193 (0.191) Data 0.000 (0.001) Loss 0.2972 (0.2080) Prec@1 90.625 (93.212) Prec@5 100.000 (99.693)
Epoch: [7][70/782] Time 0.192 (0.192) Data 0.001 (0.001) Loss 0.1709 (0.2090) Prec@1 93.750 (93.112) Prec@5 100.000 (99.736)
Epoch: [7][80/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.1722 (0.2082) Prec@1 92.188 (93.056) Prec@5 100.000 (99.769)
Epoch: [7][90/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.1380 (0.2041) Prec@1 92.188 (93.149) Prec@5 100.000 (99.794)
Epoch: [7][100/782] Time 0.198 (0.192) Data 0.000 (0.001) Loss 0.3322 (0.2105) Prec@1 89.062 (92.976) Prec@5 98.438 (99.768)
Epoch: [7][110/782] Time 0.192 (0.192) Data 0.001 (0.001) Loss 0.2768 (0.2125) Prec@1 93.750 (92.905) Prec@5 100.000 (99.789)
Epoch: [7][120/782] Time 0.194 (0.192) Data 0.000 (0.001) Loss 0.2605 (0.2121) Prec@1 87.500 (92.820) Prec@5 100.000 (99.793)
Epoch: [7][130/782] Time 0.192 (0.192) Data 0.001 (0.001) Loss 0.2944 (0.2161) Prec@1 90.625 (92.700) Prec@5 100.000 (99.797)
Epoch: [7][140/782] Time 0.188 (0.192) Data 0.001 (0.001) Loss 0.2206 (0.2186) Prec@1 90.625 (92.609) Prec@5 100.000 (99.801)
Epoch: [7][150/782] Time 0.195 (0.192) Data 0.001 (0.001) Loss 0.4019 (0.2183) Prec@1 89.062 (92.622) Prec@5 100.000 (99.803)
Epoch: [7][160/782] Time 0.198 (0.192) Data 0.001 (0.001) Loss 0.1491 (0.2212) Prec@1 93.750 (92.411) Prec@5 100.000 (99.806)
Epoch: [7][170/782] Time 0.192 (0.192) Data 0.000 (0.001) Loss 0.2547 (0.2290) Prec@1 92.188 (92.050) Prec@5 100.000 (99.808)
Epoch: [7][180/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.1108 (0.2305) Prec@1 96.875 (92.067) Prec@5 100.000 (99.793)
Epoch: [7][190/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1989 (0.2342) Prec@1 93.750 (91.999) Prec@5 100.000 (99.771)
Epoch: [7][200/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.2595 (0.2352) Prec@1 92.188 (91.962) Prec@5 100.000 (99.782)
Epoch: [7][210/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3086 (0.2364) Prec@1 90.625 (91.862) Prec@5 100.000 (99.785)
Epoch: [7][220/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.3065 (0.2348) Prec@1 92.188 (91.940) Prec@5 100.000 (99.781)
Epoch: [7][230/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.1806 (0.2334) Prec@1 92.188 (91.978) Prec@5 100.000 (99.790)
Epoch: [7][240/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.1427 (0.2335) Prec@1 96.875 (92.006) Prec@5 100.000 (99.799)
Epoch: [7][250/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.2093 (0.2325) Prec@1 93.750 (92.032) Prec@5 98.438 (99.795)
Epoch: [7][260/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2535 (0.2337) Prec@1 93.750 (92.026) Prec@5 100.000 (99.790)
Epoch: [7][270/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1573 (0.2322) Prec@1 93.750 (92.078) Prec@5 100.000 (99.792)
Epoch: [7][280/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.2046 (0.2319) Prec@1 92.188 (92.082) Prec@5 100.000 (99.783)
Epoch: [7][290/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.3750 (0.2315) Prec@1 87.500 (92.102) Prec@5 100.000 (99.774)
Epoch: [7][300/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.2672 (0.2306) Prec@1 92.188 (92.146) Prec@5 100.000 (99.777)
Epoch: [7][310/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3031 (0.2306) Prec@1 89.062 (92.147) Prec@5 100.000 (99.784)
Epoch: [7][320/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.3079 (0.2310) Prec@1 93.750 (92.139) Prec@5 100.000 (99.781)
Epoch: [7][330/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.2353 (0.2294) Prec@1 92.188 (92.197) Prec@5 100.000 (99.788)
Epoch: [7][340/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.1576 (0.2292) Prec@1 96.875 (92.220) Prec@5 100.000 (99.789)
Epoch: [7][350/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.2393 (0.2301) Prec@1 90.625 (92.196) Prec@5 98.438 (99.791)
Epoch: [7][360/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.3502 (0.2298) Prec@1 87.500 (92.209) Prec@5 100.000 (99.792)
Epoch: [7][370/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3978 (0.2304) Prec@1 89.062 (92.188) Prec@5 98.438 (99.789)
Epoch: [7][380/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1693 (0.2308) Prec@1 92.188 (92.171) Prec@5 100.000 (99.795)
Epoch: [7][390/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.1850 (0.2311) Prec@1 93.750 (92.148) Prec@5 100.000 (99.800)
Epoch: [7][400/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.3306 (0.2322) Prec@1 90.625 (92.125) Prec@5 100.000 (99.797)
Epoch: [7][410/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.3867 (0.2333) Prec@1 85.938 (92.066) Prec@5 100.000 (99.802)
Epoch: [7][420/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1500 (0.2340) Prec@1 96.875 (92.072) Prec@5 100.000 (99.807)
Epoch: [7][430/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.0851 (0.2332) Prec@1 93.750 (92.071) Prec@5 100.000 (99.808)
Epoch: [7][440/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.2540 (0.2332) Prec@1 92.188 (92.088) Prec@5 100.000 (99.809)
Epoch: [7][450/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.2793 (0.2332) Prec@1 92.188 (92.108) Prec@5 100.000 (99.799)
Epoch: [7][460/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.3158 (0.2337) Prec@1 87.500 (92.076) Prec@5 100.000 (99.803)
Epoch: [7][470/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.3144 (0.2341) Prec@1 89.062 (92.068) Prec@5 100.000 (99.808)
Epoch: [7][480/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.2266 (0.2344) Prec@1 93.750 (92.054) Prec@5 100.000 (99.812)
Epoch: [7][490/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.1307 (0.2342) Prec@1 95.312 (92.067) Prec@5 100.000 (99.809)
Epoch: [7][500/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2108 (0.2342) Prec@1 95.312 (92.066) Prec@5 100.000 (99.810)
Epoch: [7][510/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.2326 (0.2336) Prec@1 89.062 (92.071) Prec@5 100.000 (99.810)
Epoch: [7][520/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.2417 (0.2327) Prec@1 90.625 (92.083) Prec@5 100.000 (99.814)
Epoch: [7][530/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.2741 (0.2319) Prec@1 90.625 (92.126) Prec@5 100.000 (99.815)
Epoch: [7][540/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.1772 (0.2324) Prec@1 92.188 (92.127) Prec@5 100.000 (99.818)
Epoch: [7][550/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1407 (0.2328) Prec@1 95.312 (92.111) Prec@5 100.000 (99.821)
Epoch: [7][560/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.1765 (0.2338) Prec@1 92.188 (92.071) Prec@5 100.000 (99.822)
Epoch: [7][570/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.2272 (0.2342) Prec@1 90.625 (92.040) Prec@5 100.000 (99.819)
Epoch: [7][580/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.0765 (0.2341) Prec@1 100.000 (92.040) Prec@5 100.000 (99.820)
Epoch: [7][590/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.2575 (0.2340) Prec@1 90.625 (92.037) Prec@5 100.000 (99.818)
Epoch: [7][600/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.2593 (0.2341) Prec@1 90.625 (92.045) Prec@5 100.000 (99.818)
Epoch: [7][610/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.2247 (0.2347) Prec@1 87.500 (92.026) Prec@5 100.000 (99.818)
Epoch: [7][620/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.2091 (0.2352) Prec@1 96.875 (92.014) Prec@5 100.000 (99.816)
Epoch: [7][630/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.2607 (0.2359) Prec@1 89.062 (91.970) Prec@5 100.000 (99.814)
Epoch: [7][640/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.2424 (0.2368) Prec@1 90.625 (91.939) Prec@5 100.000 (99.810)
Epoch: [7][650/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.3937 (0.2380) Prec@1 87.500 (91.899) Prec@5 100.000 (99.810)
Epoch: [7][660/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1734 (0.2390) Prec@1 93.750 (91.864) Prec@5 98.438 (99.804)
Epoch: [7][670/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.2006 (0.2390) Prec@1 93.750 (91.859) Prec@5 100.000 (99.804)
Epoch: [7][680/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.3042 (0.2389) Prec@1 85.938 (91.866) Prec@5 100.000 (99.805)
Epoch: [7][690/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.3487 (0.2391) Prec@1 89.062 (91.864) Prec@5 100.000 (99.806)
Epoch: [7][700/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.1178 (0.2392) Prec@1 95.312 (91.875) Prec@5 100.000 (99.804)
Epoch: [7][710/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.0907 (0.2393) Prec@1 98.438 (91.878) Prec@5 100.000 (99.800)
Epoch: [7][720/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.4536 (0.2396) Prec@1 84.375 (91.856) Prec@5 100.000 (99.801)
Epoch: [7][730/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1161 (0.2399) Prec@1 96.875 (91.843) Prec@5 100.000 (99.803)
Epoch: [7][740/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.2507 (0.2398) Prec@1 92.188 (91.859) Prec@5 100.000 (99.802)
Epoch: [7][750/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.1728 (0.2394) Prec@1 90.625 (91.853) Prec@5 100.000 (99.802)
Epoch: [7][760/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.2825 (0.2394) Prec@1 89.062 (91.853) Prec@5 98.438 (99.803)
Epoch: [7][770/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.1507 (0.2396) Prec@1 93.750 (91.841) Prec@5 100.000 (99.805)
Epoch: [7][780/782] Time 0.188 (0.193) Data 0.000 (0.001) Loss 0.1840 (0.2396) Prec@1 93.750 (91.847) Prec@5 100.000 (99.804)
Test: [0/157] Time 0.080 (0.080) Loss 0.7604 (0.7604) Prec@1 71.875 (71.875) Prec@5 98.438 (98.438)
Test: [10/157] Time 0.029 (0.034) Loss 0.7455 (0.8937) Prec@1 78.125 (74.432) Prec@5 98.438 (98.011)
Test: [20/157] Time 0.029 (0.032) Loss 1.0532 (0.9221) Prec@1 70.312 (74.182) Prec@5 96.875 (97.545)
Test: [30/157] Time 0.029 (0.031) Loss 1.0041 (0.9508) Prec@1 68.750 (73.740) Prec@5 100.000 (97.732)
Test: [40/157] Time 0.029 (0.030) Loss 1.2217 (0.9734) Prec@1 65.625 (73.819) Prec@5 96.875 (97.599)
Test: [50/157] Time 0.029 (0.030) Loss 0.9818 (1.0037) Prec@1 71.875 (73.100) Prec@5 100.000 (97.518)
Test: [60/157] Time 0.030 (0.030) Loss 0.5336 (0.9994) Prec@1 81.250 (73.284) Prec@5 96.875 (97.439)
Test: [70/157] Time 0.029 (0.030) Loss 1.1207 (1.0081) Prec@1 67.188 (73.327) Prec@5 96.875 (97.359)
Test: [80/157] Time 0.029 (0.030) Loss 0.9531 (0.9874) Prec@1 78.125 (73.900) Prec@5 100.000 (97.473)
Test: [90/157] Time 0.029 (0.030) Loss 1.0541 (0.9735) Prec@1 62.500 (74.038) Prec@5 100.000 (97.527)
Test: [100/157] Time 0.030 (0.030) Loss 1.0397 (0.9738) Prec@1 70.312 (73.747) Prec@5 98.438 (97.664)
Test: [110/157] Time 0.029 (0.030) Loss 1.1517 (0.9736) Prec@1 68.750 (73.564) Prec@5 93.750 (97.649)
Test: [120/157] Time 0.030 (0.029) Loss 0.9537 (0.9709) Prec@1 70.312 (73.683) Prec@5 98.438 (97.611)
Test: [130/157] Time 0.029 (0.029) Loss 0.9001 (0.9645) Prec@1 71.875 (73.724) Prec@5 100.000 (97.674)
Test: [140/157] Time 0.029 (0.029) Loss 0.9025 (0.9670) Prec@1 75.000 (73.803) Prec@5 96.875 (97.695)
Test: [150/157] Time 0.028 (0.029) Loss 0.8366 (0.9684) Prec@1 73.438 (73.769) Prec@5 100.000 (97.692)
* Prec@1 73.740 Prec@5 97.690
Epoch: [8][0/782] Time 0.069 (0.069) Data 0.021 (0.021) Loss 0.0868 (0.0868) Prec@1 98.438 (98.438) Prec@5 100.000 (100.000)
Epoch: [8][10/782] Time 0.195 (0.182) Data 0.001 (0.002) Loss 0.1949 (0.1878) Prec@1 92.188 (94.176) Prec@5 100.000 (100.000)
Epoch: [8][20/782] Time 0.194 (0.188) Data 0.001 (0.001) Loss 0.1812 (0.1821) Prec@1 93.750 (94.048) Prec@5 100.000 (99.926)
Epoch: [8][30/782] Time 0.191 (0.189) Data 0.001 (0.001) Loss 0.1191 (0.1783) Prec@1 93.750 (93.901) Prec@5 100.000 (99.950)
Epoch: [8][40/782] Time 0.200 (0.191) Data 0.001 (0.001) Loss 0.1869 (0.1803) Prec@1 93.750 (93.712) Prec@5 100.000 (99.962)
Epoch: [8][50/782] Time 0.199 (0.191) Data 0.001 (0.001) Loss 0.1360 (0.1712) Prec@1 95.312 (94.026) Prec@5 100.000 (99.969)
Epoch: [8][60/782] Time 0.195 (0.192) Data 0.000 (0.001) Loss 0.1403 (0.1662) Prec@1 93.750 (94.160) Prec@5 100.000 (99.949)
Epoch: [8][70/782] Time 0.192 (0.192) Data 0.000 (0.001) Loss 0.2038 (0.1646) Prec@1 90.625 (94.168) Prec@5 100.000 (99.956)
Epoch: [8][80/782] Time 0.190 (0.192) Data 0.000 (0.001) Loss 0.1525 (0.1637) Prec@1 93.750 (94.155) Prec@5 100.000 (99.961)
Epoch: [8][90/782] Time 0.189 (0.192) Data 0.001 (0.001) Loss 0.1920 (0.1678) Prec@1 96.875 (94.111) Prec@5 100.000 (99.931)
Epoch: [8][100/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.1160 (0.1691) Prec@1 93.750 (94.013) Prec@5 100.000 (99.938)
Epoch: [8][110/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.1303 (0.1716) Prec@1 96.875 (93.933) Prec@5 100.000 (99.916)
Epoch: [8][120/782] Time 0.189 (0.192) Data 0.000 (0.001) Loss 0.2573 (0.1731) Prec@1 87.500 (93.840) Prec@5 98.438 (99.910)
Epoch: [8][130/782] Time 0.189 (0.192) Data 0.001 (0.001) Loss 0.0638 (0.1702) Prec@1 96.875 (93.929) Prec@5 100.000 (99.917)
Epoch: [8][140/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1071 (0.1701) Prec@1 95.312 (93.961) Prec@5 100.000 (99.922)
Epoch: [8][150/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1922 (0.1704) Prec@1 93.750 (94.009) Prec@5 100.000 (99.928)
Epoch: [8][160/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.2618 (0.1697) Prec@1 92.188 (94.051) Prec@5 100.000 (99.922)
Epoch: [8][170/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.1676 (0.1690) Prec@1 95.312 (94.088) Prec@5 100.000 (99.918)
Epoch: [8][180/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.0993 (0.1710) Prec@1 96.875 (94.044) Prec@5 100.000 (99.896)
Epoch: [8][190/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1960 (0.1722) Prec@1 93.750 (93.995) Prec@5 100.000 (99.902)
Epoch: [8][200/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2945 (0.1735) Prec@1 92.188 (93.968) Prec@5 100.000 (99.907)
Epoch: [8][210/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.2328 (0.1746) Prec@1 93.750 (93.928) Prec@5 100.000 (99.911)
Epoch: [8][220/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.1066 (0.1747) Prec@1 95.312 (93.948) Prec@5 100.000 (99.908)
Epoch: [8][230/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.0871 (0.1730) Prec@1 98.438 (94.000) Prec@5 100.000 (99.905)
Epoch: [8][240/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.3182 (0.1717) Prec@1 90.625 (94.074) Prec@5 100.000 (99.909)
Epoch: [8][250/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.1013 (0.1723) Prec@1 96.875 (94.030) Prec@5 100.000 (99.913)
Epoch: [8][260/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.0988 (0.1718) Prec@1 95.312 (94.031) Prec@5 100.000 (99.916)
Epoch: [8][270/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.1554 (0.1706) Prec@1 95.312 (94.061) Prec@5 100.000 (99.919)
Epoch: [8][280/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1587 (0.1705) Prec@1 95.312 (94.050) Prec@5 100.000 (99.911)
Epoch: [8][290/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.4405 (0.1723) Prec@1 85.938 (94.002) Prec@5 100.000 (99.909)
Epoch: [8][300/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.2073 (0.1730) Prec@1 95.312 (93.994) Prec@5 100.000 (99.907)
Epoch: [8][310/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2908 (0.1747) Prec@1 90.625 (93.941) Prec@5 98.438 (99.900)
Epoch: [8][320/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.1170 (0.1752) Prec@1 96.875 (93.930) Prec@5 100.000 (99.898)
Epoch: [8][330/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2130 (0.1762) Prec@1 92.188 (93.901) Prec@5 100.000 (99.901)
Epoch: [8][340/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1584 (0.1770) Prec@1 92.188 (93.874) Prec@5 100.000 (99.885)
Epoch: [8][350/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1794 (0.1767) Prec@1 93.750 (93.901) Prec@5 100.000 (99.875)
Epoch: [8][360/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.2881 (0.1767) Prec@1 85.938 (93.871) Prec@5 100.000 (99.874)
Epoch: [8][370/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.2361 (0.1757) Prec@1 89.062 (93.910) Prec@5 100.000 (99.878)
Epoch: [8][380/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.1236 (0.1765) Prec@1 96.875 (93.914) Prec@5 100.000 (99.877)
Epoch: [8][390/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.1678 (0.1769) Prec@1 95.312 (93.898) Prec@5 100.000 (99.876)
Epoch: [8][400/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1674 (0.1759) Prec@1 95.312 (93.949) Prec@5 100.000 (99.879)
Epoch: [8][410/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.0855 (0.1750) Prec@1 98.438 (93.982) Prec@5 100.000 (99.878)
Epoch: [8][420/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.1066 (0.1739) Prec@1 93.750 (94.021) Prec@5 100.000 (99.881)
Epoch: [8][430/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.1315 (0.1749) Prec@1 95.312 (93.978) Prec@5 100.000 (99.884)
Epoch: [8][440/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1514 (0.1768) Prec@1 93.750 (93.913) Prec@5 100.000 (99.880)
Epoch: [8][450/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.3310 (0.1773) Prec@1 89.062 (93.896) Prec@5 100.000 (99.879)
Epoch: [8][460/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.4019 (0.1786) Prec@1 87.500 (93.855) Prec@5 98.438 (99.875)
Epoch: [8][470/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.1368 (0.1788) Prec@1 92.188 (93.846) Prec@5 100.000 (99.874)
Epoch: [8][480/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.1955 (0.1791) Prec@1 93.750 (93.860) Prec@5 100.000 (99.877)
Epoch: [8][490/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.0489 (0.1791) Prec@1 98.438 (93.884) Prec@5 100.000 (99.876)
Epoch: [8][500/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.2764 (0.1799) Prec@1 90.625 (93.865) Prec@5 100.000 (99.878)
Epoch: [8][510/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1373 (0.1801) Prec@1 95.312 (93.875) Prec@5 100.000 (99.878)
Epoch: [8][520/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.2330 (0.1800) Prec@1 89.062 (93.864) Prec@5 100.000 (99.880)
Epoch: [8][530/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.0897 (0.1797) Prec@1 98.438 (93.859) Prec@5 100.000 (99.879)
Epoch: [8][540/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.2955 (0.1804) Prec@1 89.062 (93.828) Prec@5 100.000 (99.882)
Epoch: [8][550/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.1046 (0.1807) Prec@1 96.875 (93.807) Prec@5 100.000 (99.881)
Epoch: [8][560/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.1754 (0.1816) Prec@1 93.750 (93.778) Prec@5 100.000 (99.880)
Epoch: [8][570/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.1912 (0.1818) Prec@1 92.188 (93.764) Prec@5 98.438 (99.880)
Epoch: [8][580/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.2338 (0.1820) Prec@1 92.188 (93.772) Prec@5 100.000 (99.882)
Epoch: [8][590/782] Time 0.187 (0.193) Data 0.001 (0.001) Loss 0.1945 (0.1824) Prec@1 92.188 (93.747) Prec@5 100.000 (99.878)
Epoch: [8][600/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.1926 (0.1828) Prec@1 93.750 (93.729) Prec@5 100.000 (99.875)
Epoch: [8][610/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1100 (0.1831) Prec@1 98.438 (93.717) Prec@5 100.000 (99.877)
Epoch: [8][620/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.3150 (0.1834) Prec@1 90.625 (93.717) Prec@5 100.000 (99.879)
Epoch: [8][630/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.1696 (0.1834) Prec@1 95.312 (93.718) Prec@5 100.000 (99.876)
Epoch: [8][640/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1436 (0.1837) Prec@1 95.312 (93.711) Prec@5 100.000 (99.876)
Epoch: [8][650/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.1870 (0.1840) Prec@1 93.750 (93.683) Prec@5 100.000 (99.878)
Epoch: [8][660/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1906 (0.1845) Prec@1 93.750 (93.667) Prec@5 100.000 (99.877)
Epoch: [8][670/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.2222 (0.1848) Prec@1 92.188 (93.652) Prec@5 100.000 (99.879)
Epoch: [8][680/782] Time 0.186 (0.193) Data 0.001 (0.001) Loss 0.0983 (0.1844) Prec@1 96.875 (93.670) Prec@5 100.000 (99.878)
Epoch: [8][690/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.2335 (0.1844) Prec@1 89.062 (93.664) Prec@5 100.000 (99.878)
Epoch: [8][700/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.1760 (0.1848) Prec@1 93.750 (93.634) Prec@5 100.000 (99.880)
Epoch: [8][710/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.2431 (0.1861) Prec@1 93.750 (93.601) Prec@5 100.000 (99.875)
Epoch: [8][720/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.2096 (0.1868) Prec@1 90.625 (93.581) Prec@5 100.000 (99.874)
Epoch: [8][730/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.1953 (0.1866) Prec@1 93.750 (93.575) Prec@5 100.000 (99.876)
Epoch: [8][740/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.1038 (0.1865) Prec@1 96.875 (93.581) Prec@5 100.000 (99.876)
Epoch: [8][750/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1505 (0.1868) Prec@1 93.750 (93.573) Prec@5 100.000 (99.877)
Epoch: [8][760/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.1238 (0.1868) Prec@1 95.312 (93.575) Prec@5 100.000 (99.877)
Epoch: [8][770/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.3206 (0.1871) Prec@1 92.188 (93.566) Prec@5 100.000 (99.874)
Epoch: [8][780/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.0977 (0.1873) Prec@1 95.312 (93.568) Prec@5 100.000 (99.872)
Test: [0/157] Time 0.078 (0.078) Loss 1.6445 (1.6445) Prec@1 65.625 (65.625) Prec@5 96.875 (96.875)
Test: [10/157] Time 0.029 (0.033) Loss 1.2701 (1.3269) Prec@1 64.062 (67.330) Prec@5 96.875 (96.733)
Test: [20/157] Time 0.032 (0.031) Loss 1.4572 (1.3179) Prec@1 68.750 (68.527) Prec@5 96.875 (96.949)
Test: [30/157] Time 0.029 (0.031) Loss 1.3344 (1.2863) Prec@1 67.188 (69.859) Prec@5 98.438 (96.774)
Test: [40/157] Time 0.030 (0.030) Loss 0.9177 (1.2196) Prec@1 68.750 (70.846) Prec@5 100.000 (97.066)
Test: [50/157] Time 0.029 (0.030) Loss 0.5175 (1.2069) Prec@1 81.250 (70.588) Prec@5 100.000 (97.273)
Test: [60/157] Time 0.029 (0.030) Loss 1.2418 (1.1982) Prec@1 71.875 (71.055) Prec@5 95.312 (97.259)
Test: [70/157] Time 0.033 (0.030) Loss 1.4887 (1.2019) Prec@1 70.312 (70.995) Prec@5 93.750 (97.227)
Test: [80/157] Time 0.030 (0.030) Loss 0.9493 (1.1947) Prec@1 75.000 (71.142) Prec@5 100.000 (97.184)
Test: [90/157] Time 0.029 (0.030) Loss 0.7757 (1.1836) Prec@1 79.688 (71.326) Prec@5 93.750 (97.236)
Test: [100/157] Time 0.030 (0.030) Loss 1.0075 (1.1844) Prec@1 75.000 (71.225) Prec@5 96.875 (97.246)
Test: [110/157] Time 0.049 (0.030) Loss 1.2055 (1.1836) Prec@1 75.000 (71.213) Prec@5 95.312 (97.255)
Test: [120/157] Time 0.029 (0.030) Loss 0.9149 (1.1806) Prec@1 84.375 (71.384) Prec@5 95.312 (97.262)
Test: [130/157] Time 0.028 (0.030) Loss 1.1371 (1.1853) Prec@1 67.188 (71.302) Prec@5 100.000 (97.221)
Test: [140/157] Time 0.029 (0.030) Loss 1.2844 (1.1823) Prec@1 70.312 (71.332) Prec@5 95.312 (97.263)
Test: [150/157] Time 0.028 (0.030) Loss 1.1666 (1.1764) Prec@1 67.188 (71.296) Prec@5 92.188 (97.185)
* Prec@1 71.390 Prec@5 97.170
Epoch: [9][0/782] Time 0.076 (0.076) Data 0.024 (0.024) Loss 0.1548 (0.1548) Prec@1 93.750 (93.750) Prec@5 100.000 (100.000)
Epoch: [9][10/782] Time 0.191 (0.182) Data 0.000 (0.003) Loss 0.2405 (0.2055) Prec@1 89.062 (92.472) Prec@5 100.000 (99.858)
Epoch: [9][20/782] Time 0.197 (0.188) Data 0.000 (0.002) Loss 0.0708 (0.2154) Prec@1 98.438 (92.262) Prec@5 100.000 (99.926)
Epoch: [9][30/782] Time 0.193 (0.189) Data 0.001 (0.001) Loss 0.1704 (0.1997) Prec@1 93.750 (92.792) Prec@5 100.000 (99.950)
Epoch: [9][40/782] Time 0.190 (0.190) Data 0.000 (0.001) Loss 0.1613 (0.1859) Prec@1 96.875 (93.445) Prec@5 100.000 (99.962)
Epoch: [9][50/782] Time 0.192 (0.191) Data 0.001 (0.001) Loss 0.0424 (0.1725) Prec@1 100.000 (93.842) Prec@5 100.000 (99.969)
Epoch: [9][60/782] Time 0.195 (0.191) Data 0.001 (0.001) Loss 0.0860 (0.1631) Prec@1 96.875 (94.262) Prec@5 100.000 (99.974)
Epoch: [9][70/782] Time 0.187 (0.191) Data 0.001 (0.001) Loss 0.0526 (0.1546) Prec@1 96.875 (94.520) Prec@5 100.000 (99.978)
Epoch: [9][80/782] Time 0.198 (0.191) Data 0.001 (0.001) Loss 0.0700 (0.1487) Prec@1 95.312 (94.695) Prec@5 100.000 (99.981)
Epoch: [9][90/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.1730 (0.1443) Prec@1 96.875 (94.849) Prec@5 100.000 (99.983)
Epoch: [9][100/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.1877 (0.1421) Prec@1 95.312 (94.910) Prec@5 100.000 (99.985)
Epoch: [9][110/782] Time 0.191 (0.192) Data 0.001 (0.001) Loss 0.0737 (0.1408) Prec@1 98.438 (94.975) Prec@5 100.000 (99.986)
Epoch: [9][120/782] Time 0.200 (0.192) Data 0.001 (0.001) Loss 0.0497 (0.1413) Prec@1 100.000 (95.054) Prec@5 100.000 (99.987)
Epoch: [9][130/782] Time 0.190 (0.192) Data 0.001 (0.001) Loss 0.0769 (0.1386) Prec@1 96.875 (95.146) Prec@5 100.000 (99.988)
Epoch: [9][140/782] Time 0.195 (0.192) Data 0.001 (0.001) Loss 0.1178 (0.1365) Prec@1 95.312 (95.224) Prec@5 100.000 (99.989)
Epoch: [9][150/782] Time 0.199 (0.192) Data 0.000 (0.001) Loss 0.1348 (0.1379) Prec@1 95.312 (95.230) Prec@5 100.000 (99.990)
Epoch: [9][160/782] Time 0.194 (0.192) Data 0.001 (0.001) Loss 0.2050 (0.1385) Prec@1 93.750 (95.215) Prec@5 100.000 (99.990)
Epoch: [9][170/782] Time 0.195 (0.192) Data 0.001 (0.001) Loss 0.1678 (0.1363) Prec@1 96.875 (95.294) Prec@5 100.000 (99.991)
Epoch: [9][180/782] Time 0.190 (0.192) Data 0.000 (0.001) Loss 0.1842 (0.1347) Prec@1 90.625 (95.338) Prec@5 100.000 (99.991)
Epoch: [9][190/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1566 (0.1345) Prec@1 95.312 (95.353) Prec@5 100.000 (99.992)
Epoch: [9][200/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.0861 (0.1331) Prec@1 96.875 (95.421) Prec@5 100.000 (99.984)
Epoch: [9][210/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.1773 (0.1337) Prec@1 93.750 (95.372) Prec@5 100.000 (99.978)
Epoch: [9][220/782] Time 0.200 (0.193) Data 0.000 (0.001) Loss 0.1014 (0.1330) Prec@1 95.312 (95.376) Prec@5 100.000 (99.979)
Epoch: [9][230/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.1333 (0.1347) Prec@1 93.750 (95.312) Prec@5 100.000 (99.980)
Epoch: [9][240/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.3660 (0.1344) Prec@1 87.500 (95.358) Prec@5 100.000 (99.981)
Epoch: [9][250/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1187 (0.1343) Prec@1 98.438 (95.356) Prec@5 100.000 (99.975)
Epoch: [9][260/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.0940 (0.1338) Prec@1 95.312 (95.378) Prec@5 100.000 (99.976)
Epoch: [9][270/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1642 (0.1339) Prec@1 95.312 (95.399) Prec@5 100.000 (99.977)
Epoch: [9][280/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.0465 (0.1338) Prec@1 98.438 (95.368) Prec@5 100.000 (99.972)
Epoch: [9][290/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.0741 (0.1339) Prec@1 95.312 (95.345) Prec@5 100.000 (99.973)
Epoch: [9][300/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1655 (0.1343) Prec@1 93.750 (95.323) Prec@5 100.000 (99.974)
Epoch: [9][310/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.1501 (0.1348) Prec@1 93.750 (95.282) Prec@5 100.000 (99.975)
Epoch: [9][320/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 0.2727 (0.1377) Prec@1 95.312 (95.201) Prec@5 100.000 (99.971)
Epoch: [9][330/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.1820 (0.1398) Prec@1 92.188 (95.124) Prec@5 100.000 (99.967)
Epoch: [9][340/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.1158 (0.1404) Prec@1 96.875 (95.097) Prec@5 100.000 (99.959)
Epoch: [9][350/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.0288 (0.1407) Prec@1 100.000 (95.090) Prec@5 100.000 (99.955)
Epoch: [9][360/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.1538 (0.1408) Prec@1 93.750 (95.113) Prec@5 100.000 (99.957)
Epoch: [9][370/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.1041 (0.1409) Prec@1 96.875 (95.102) Prec@5 100.000 (99.958)
Epoch: [9][380/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.1381 (0.1410) Prec@1 95.312 (95.120) Prec@5 100.000 (99.959)
Epoch: [9][390/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.1316 (0.1411) Prec@1 96.875 (95.125) Prec@5 100.000 (99.960)
Epoch: [9][400/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1999 (0.1418) Prec@1 95.312 (95.094) Prec@5 100.000 (99.961)
Epoch: [9][410/782] Time 0.201 (0.193) Data 0.001 (0.001) Loss 0.1885 (0.1419) Prec@1 92.188 (95.115) Prec@5 100.000 (99.962)
Epoch: [9][420/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.2546 (0.1420) Prec@1 93.750 (95.097) Prec@5 100.000 (99.963)
Epoch: [9][430/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.1185 (0.1423) Prec@1 92.188 (95.080) Prec@5 100.000 (99.956)
Epoch: [9][440/782] Time 0.199 (0.193) Data 0.001 (0.001) Loss 0.2121 (0.1430) Prec@1 92.188 (95.043) Prec@5 100.000 (99.957)
Epoch: [9][450/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.2190 (0.1432) Prec@1 87.500 (95.025) Prec@5 100.000 (99.958)
Epoch: [9][460/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 0.1426 (0.1436) Prec@1 93.750 (95.007) Prec@5 100.000 (99.959)
Epoch: [9][470/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.2190 (0.1442) Prec@1 95.312 (94.997) Prec@5 100.000 (99.960)
Epoch: [9][480/782] Time 0.196 (0.193) Data 0.000 (0.001) Loss 0.1357 (0.1445) Prec@1 92.188 (94.965) Prec@5 100.000 (99.958)
Epoch: [9][490/782] Time 0.198 (0.193) Data 0.001 (0.001) Loss 0.1794 (0.1457) Prec@1 95.312 (94.927) Prec@5 100.000 (99.955)
Epoch: [9][500/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.1519 (0.1458) Prec@1 92.188 (94.923) Prec@5 100.000 (99.956)
Epoch: [9][510/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2214 (0.1464) Prec@1 93.750 (94.915) Prec@5 100.000 (99.951)
Epoch: [9][520/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.2130 (0.1465) Prec@1 93.750 (94.929) Prec@5 100.000 (99.949)
Epoch: [9][530/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.1256 (0.1461) Prec@1 93.750 (94.942) Prec@5 100.000 (99.950)
Epoch: [9][540/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.0831 (0.1450) Prec@1 98.438 (94.986) Prec@5 100.000 (99.951)
Epoch: [9][550/782] Time 0.195 (0.193) Data 0.000 (0.001) Loss 0.1039 (0.1442) Prec@1 96.875 (95.020) Prec@5 100.000 (99.952)
Epoch: [9][560/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.1016 (0.1440) Prec@1 95.312 (95.026) Prec@5 100.000 (99.953)
Epoch: [9][570/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.1031 (0.1433) Prec@1 98.438 (95.053) Prec@5 100.000 (99.953)
Epoch: [9][580/782] Time 0.199 (0.193) Data 0.000 (0.001) Loss 0.0883 (0.1432) Prec@1 96.875 (95.065) Prec@5 100.000 (99.954)
Epoch: [9][590/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.1425 (0.1430) Prec@1 90.625 (95.072) Prec@5 100.000 (99.955)
Epoch: [9][600/782] Time 0.189 (0.193) Data 0.000 (0.001) Loss 0.2625 (0.1441) Prec@1 92.188 (95.037) Prec@5 96.875 (99.951)
Epoch: [9][610/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1058 (0.1447) Prec@1 96.875 (95.018) Prec@5 100.000 (99.946)
Epoch: [9][620/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1860 (0.1458) Prec@1 92.188 (94.998) Prec@5 100.000 (99.942)
Epoch: [9][630/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.1307 (0.1466) Prec@1 95.312 (94.953) Prec@5 100.000 (99.941)
Epoch: [9][640/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1062 (0.1469) Prec@1 98.438 (94.949) Prec@5 100.000 (99.939)
Epoch: [9][650/782] Time 0.196 (0.193) Data 0.001 (0.001) Loss 0.0320 (0.1470) Prec@1 100.000 (94.943) Prec@5 100.000 (99.940)
Epoch: [9][660/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.0674 (0.1469) Prec@1 98.438 (94.939) Prec@5 100.000 (99.941)
Epoch: [9][670/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1065 (0.1465) Prec@1 96.875 (94.963) Prec@5 100.000 (99.942)
Epoch: [9][680/782] Time 0.198 (0.193) Data 0.000 (0.001) Loss 0.3775 (0.1463) Prec@1 89.062 (94.964) Prec@5 100.000 (99.943)
Epoch: [9][690/782] Time 0.188 (0.193) Data 0.001 (0.001) Loss 0.1264 (0.1467) Prec@1 95.312 (94.946) Prec@5 100.000 (99.941)
Epoch: [9][700/782] Time 0.194 (0.193) Data 0.000 (0.001) Loss 0.0990 (0.1467) Prec@1 95.312 (94.951) Prec@5 100.000 (99.942)
Epoch: [9][710/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1576 (0.1466) Prec@1 96.875 (94.950) Prec@5 100.000 (99.943)
Epoch: [9][720/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2190 (0.1466) Prec@1 93.750 (94.953) Prec@5 100.000 (99.941)
Epoch: [9][730/782] Time 0.195 (0.193) Data 0.001 (0.001) Loss 0.0685 (0.1466) Prec@1 98.438 (94.962) Prec@5 100.000 (99.942)
Epoch: [9][740/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.2165 (0.1469) Prec@1 90.625 (94.950) Prec@5 100.000 (99.943)
Epoch: [9][750/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1561 (0.1470) Prec@1 92.188 (94.942) Prec@5 100.000 (99.944)
Epoch: [9][760/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.0906 (0.1471) Prec@1 98.438 (94.947) Prec@5 100.000 (99.943)
Epoch: [9][770/782] Time 0.188 (0.193) Data 0.000 (0.001) Loss 0.1333 (0.1466) Prec@1 92.188 (94.952) Prec@5 100.000 (99.943)
Epoch: [9][780/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1288 (0.1472) Prec@1 93.750 (94.928) Prec@5 100.000 (99.942)
Test: [0/157] Time 0.079 (0.079) Loss 0.8467 (0.8467) Prec@1 75.000 (75.000) Prec@5 100.000 (100.000)
Test: [10/157] Time 0.028 (0.034) Loss 1.5739 (1.3170) Prec@1 65.625 (72.159) Prec@5 96.875 (96.733)
Test: [20/157] Time 0.030 (0.032) Loss 0.8777 (1.2151) Prec@1 84.375 (73.140) Prec@5 98.438 (97.247)
Test: [30/157] Time 0.028 (0.031) Loss 0.9524 (1.1885) Prec@1 82.812 (73.992) Prec@5 98.438 (97.379)
Test: [40/157] Time 0.029 (0.030) Loss 1.1423 (1.1713) Prec@1 70.312 (73.704) Prec@5 98.438 (97.447)
Test: [50/157] Time 0.029 (0.030) Loss 0.5957 (1.1588) Prec@1 82.812 (73.775) Prec@5 95.312 (97.488)
Test: [60/157] Time 0.029 (0.030) Loss 1.6032 (1.1603) Prec@1 64.062 (73.540) Prec@5 98.438 (97.515)
Test: [70/157] Time 0.028 (0.030) Loss 1.2537 (1.1589) Prec@1 71.875 (73.570) Prec@5 98.438 (97.491)
Test: [80/157] Time 0.029 (0.030) Loss 1.0313 (1.1318) Prec@1 75.000 (73.978) Prec@5 100.000 (97.473)
Test: [90/157] Time 0.029 (0.029) Loss 1.1741 (1.1305) Prec@1 68.750 (74.073) Prec@5 96.875 (97.476)
Test: [100/157] Time 0.030 (0.029) Loss 0.8651 (1.1326) Prec@1 79.688 (74.072) Prec@5 100.000 (97.416)
Test: [110/157] Time 0.029 (0.029) Loss 1.3536 (1.1354) Prec@1 70.312 (73.860) Prec@5 95.312 (97.494)
Test: [120/157] Time 0.029 (0.029) Loss 1.5613 (1.1524) Prec@1 65.625 (73.683) Prec@5 93.750 (97.456)
Test: [130/157] Time 0.029 (0.029) Loss 1.3943 (1.1496) Prec@1 67.188 (73.581) Prec@5 98.438 (97.483)
Test: [140/157] Time 0.029 (0.029) Loss 1.1475 (1.1507) Prec@1 71.875 (73.559) Prec@5 100.000 (97.518)
Test: [150/157] Time 0.029 (0.029) Loss 0.8245 (1.1562) Prec@1 73.438 (73.417) Prec@5 98.438 (97.506)
* Prec@1 73.460 Prec@5 97.490
Epoch: [10][0/782] Time 0.075 (0.075) Data 0.022 (0.022) Loss 0.0246 (0.0246) Prec@1 100.000 (100.000) Prec@5 100.000 (100.000)
Epoch: [10][10/782] Time 0.198 (0.183) Data 0.001 (0.003) Loss 0.2338 (0.1487) Prec@1 95.312 (95.881) Prec@5 100.000 (100.000)
Epoch: [10][20/782] Time 0.194 (0.188) Data 0.000 (0.002) Loss 0.4067 (0.1809) Prec@1 89.062 (93.973) Prec@5 100.000 (100.000)
Epoch: [10][30/782] Time 0.192 (0.190) Data 0.001 (0.001) Loss 0.1266 (0.1568) Prec@1 95.312 (94.758) Prec@5 100.000 (100.000)
Epoch: [10][40/782] Time 0.196 (0.191) Data 0.000 (0.001) Loss 0.1675 (0.1402) Prec@1 96.875 (95.312) Prec@5 100.000 (100.000)
Epoch: [10][50/782] Time 0.191 (0.191) Data 0.001 (0.001) Loss 0.1077 (0.1306) Prec@1 95.312 (95.588) Prec@5 100.000 (100.000)
Epoch: [10][60/782] Time 0.192 (0.191) Data 0.000 (0.001) Loss 0.0654 (0.1211) Prec@1 96.875 (95.927) Prec@5 100.000 (100.000)
Epoch: [10][70/782] Time 0.193 (0.192) Data 0.000 (0.001) Loss 0.1208 (0.1174) Prec@1 95.312 (96.017) Prec@5 100.000 (100.000)
Epoch: [10][80/782] Time 0.198 (0.192) Data 0.001 (0.001) Loss 0.1199 (0.1134) Prec@1 96.875 (96.200) Prec@5 100.000 (100.000)
Epoch: [10][90/782] Time 0.195 (0.192) Data 0.000 (0.001) Loss 0.1189 (0.1152) Prec@1 96.875 (96.240) Prec@5 100.000 (100.000)
Epoch: [10][100/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.0272 (0.1129) Prec@1 100.000 (96.349) Prec@5 100.000 (100.000)
Epoch: [10][110/782] Time 0.194 (0.192) Data 0.001 (0.001) Loss 0.0573 (0.1118) Prec@1 96.875 (96.312) Prec@5 100.000 (100.000)
Epoch: [10][120/782] Time 0.193 (0.192) Data 0.001 (0.001) Loss 0.1158 (0.1113) Prec@1 96.875 (96.358) Prec@5 100.000 (100.000)
Epoch: [10][130/782] Time 0.193 (0.193) Data 0.000 (0.001) Loss 0.1427 (0.1118) Prec@1 95.312 (96.326) Prec@5 100.000 (100.000)
Epoch: [10][140/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.0483 (0.1121) Prec@1 98.438 (96.343) Prec@5 100.000 (100.000)
Epoch: [10][150/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.0876 (0.1137) Prec@1 95.312 (96.254) Prec@5 100.000 (100.000)
Epoch: [10][160/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.1533 (0.1135) Prec@1 93.750 (96.234) Prec@5 100.000 (100.000)
Epoch: [10][170/782] Time 0.192 (0.193) Data 0.000 (0.001) Loss 0.0840 (0.1133) Prec@1 95.312 (96.235) Prec@5 100.000 (100.000)
Epoch: [10][180/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.0799 (0.1149) Prec@1 95.312 (96.184) Prec@5 100.000 (100.000)
Epoch: [10][190/782] Time 0.200 (0.193) Data 0.001 (0.001) Loss 0.3482 (0.1170) Prec@1 92.188 (96.171) Prec@5 100.000 (100.000)
Epoch: [10][200/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.0182 (0.1158) Prec@1 100.000 (96.168) Prec@5 100.000 (100.000)
Epoch: [10][210/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.2199 (0.1149) Prec@1 89.062 (96.149) Prec@5 100.000 (100.000)
Epoch: [10][220/782] Time 0.197 (0.193) Data 0.000 (0.001) Loss 0.1225 (0.1158) Prec@1 96.875 (96.090) Prec@5 100.000 (100.000)
Epoch: [10][230/782] Time 0.197 (0.193) Data 0.001 (0.001) Loss 0.1689 (0.1172) Prec@1 93.750 (96.029) Prec@5 100.000 (100.000)
Epoch: [10][240/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1476 (0.1171) Prec@1 90.625 (96.019) Prec@5 100.000 (100.000)
Epoch: [10][250/782] Time 0.193 (0.193) Data 0.001 (0.001) Loss 0.1939 (0.1178) Prec@1 95.312 (96.022) Prec@5 100.000 (100.000)
Epoch: [10][260/782] Time 0.190 (0.193) Data 0.001 (0.001) Loss 0.0343 (0.1167) Prec@1 100.000 (96.031) Prec@5 100.000 (100.000)
Epoch: [10][270/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.0966 (0.1157) Prec@1 96.875 (96.079) Prec@5 100.000 (100.000)
Epoch: [10][280/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1195 (0.1155) Prec@1 95.312 (96.091) Prec@5 100.000 (100.000)
Epoch: [10][290/782] Time 0.191 (0.193) Data 0.001 (0.001) Loss 0.1003 (0.1158) Prec@1 95.312 (96.053) Prec@5 100.000 (100.000)
Epoch: [10][300/782] Time 0.190 (0.193) Data 0.000 (0.001) Loss 0.0555 (0.1156) Prec@1 98.438 (96.055) Prec@5 100.000 (100.000)
Epoch: [10][310/782] Time 0.192 (0.193) Data 0.001 (0.001) Loss 0.1001 (0.1145) Prec@1 96.875 (96.086) Prec@5 100.000 (100.000)
Epoch: [10][320/782] Time 0.189 (0.193) Data 0.001 (0.001) Loss 0.0621 (0.1152) Prec@1 96.875 (96.077) Prec@5 100.000 (100.000)
Epoch: [10][330/782] Time 0.191 (0.193) Data 0.000 (0.001) Loss 0.0714 (0.1147) Prec@1 98.438 (96.091) Prec@5 100.000 (100.000)
Epoch: [10][340/782] Time 0.194 (0.193) Data 0.001 (0.001) Loss 0.2204 (0.1160) Prec@1 93.750 (96.046) Prec@5 100.000 (100.000)
''' | {
"repo_name": "Fuchai/Philosophy-Machine",
"path": "amne/amnesia.py",
"copies": "1",
"size": "133047",
"license": "apache-2.0",
"hash": 4445425205281548300,
"line_mean": 97.994047619,
"line_max": 133,
"alpha_frac": 0.6418934662,
"autogenerated": false,
"ratio": 1.9261516634334193,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.8024281612804929,
"avg_score": 0.008752703365697972,
"num_lines": 1344
} |
"""A model based controller framework."""
from __future__ import absolute_import
from __future__ import division
#from __future__ import google_type_annotations
from __future__ import print_function
import os
import inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(os.path.dirname(currentdir))
os.sys.path.insert(0, parentdir)
import numpy as np
import time
from typing import Any, Callable
class LocomotionController(object):
  """Generates the quadruped locomotion.

  The actual effect of this controller depends on the composition of each
  individual subcomponent.
  """
  def __init__(
      self,
      robot: Any,
      gait_generator,
      state_estimator,
      swing_leg_controller,
      stance_leg_controller,
      clock,
  ):
    """Initializes the class.

    Args:
      robot: A robot instance.
      gait_generator: Generates the leg swing/stance pattern.
      state_estimator: Estimates the state of the robot (e.g. center of mass
        position or velocity that may not be observable from sensors).
      swing_leg_controller: Generates motor actions for swing legs.
      stance_leg_controller: Generates motor actions for stance legs.
      clock: A real or fake clock source.
    """
    self._robot = robot
    self._clock = clock
    self._reset_time = self._clock()
    self._time_since_reset = 0
    self._gait_generator = gait_generator
    self._state_estimator = state_estimator
    self._swing_leg_controller = swing_leg_controller
    self._stance_leg_controller = stance_leg_controller
  @property
  def swing_leg_controller(self):
    return self._swing_leg_controller
  @property
  def stance_leg_controller(self):
    return self._stance_leg_controller
  @property
  def gait_generator(self):
    return self._gait_generator
  @property
  def state_estimator(self):
    return self._state_estimator
  def _subcomponents(self):
    """Ordered tuple of subcomponents following the reset/update protocol."""
    return (self._gait_generator, self._state_estimator,
            self._swing_leg_controller, self._stance_leg_controller)
  def reset(self):
    """Restarts the internal clock and resets every subcomponent."""
    self._reset_time = self._clock()
    self._time_since_reset = 0
    for component in self._subcomponents():
      component.reset(self._time_since_reset)
  def update(self):
    """Propagates the elapsed time since reset to every subcomponent."""
    self._time_since_reset = self._clock() - self._reset_time
    for component in self._subcomponents():
      component.update(self._time_since_reset)
  def get_action(self):
    """Returns the control ouputs (e.g. positions/torques) for all motors."""
    swing_commands = self._swing_leg_controller.get_action()
    stance_commands, qp_sol = self._stance_leg_controller.get_action()
    motor_commands = []
    # Each motor id must be covered by exactly one of the two controllers.
    for motor_id in range(self._robot.num_motors):
      if motor_id in swing_commands:
        motor_commands.extend(swing_commands[motor_id])
      else:
        assert motor_id in stance_commands
        motor_commands.extend(stance_commands[motor_id])
    return np.array(motor_commands, dtype=np.float32), dict(qp_sol=qp_sol)
| {
"repo_name": "google-research/motion_imitation",
"path": "mpc_controller/locomotion_controller.py",
"copies": "1",
"size": "3201",
"license": "apache-2.0",
"hash": 8780771907206457000,
"line_mean": 31.3333333333,
"line_max": 86,
"alpha_frac": 0.6957200875,
"autogenerated": false,
"ratio": 3.5885650224215246,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4784285109921525,
"avg_score": null,
"num_lines": null
} |
"""A model for one project."""
from google.appengine.ext import ndb
from ctc.models import user as user_model
# Project attributes that may be mass-assigned from a web request by
# Project.populate().  Bookkeeping fields (dates, owner) are excluded.
SETTABLE_FIELDS = [
    'name', 'overview', 'organization_name', 'organization_contact',
    'organization_mission', 'details', 'collaboration_link', 'code_link']
class Project(ndb.Model):
    """A model for one project.

    The fields named in SETTABLE_FIELDS can be filled in from a web
    request via populate(); created_date and updated_date are maintained
    automatically by the datastore, and owner_key records the owning User.
    """
    # TODO(samking): String and text properties means that they have to be
    # defined, but they can still be the empty string. We probably want to
    # require that there is actual text. We might want to use a pre-put-hook
    # for this.
    name = ndb.StringProperty(required=True)
    overview = ndb.TextProperty(required=True)
    # Details about the organization as a whole.
    organization_name = ndb.StringProperty(required=True)
    organization_contact = ndb.TextProperty(required=True)
    organization_mission = ndb.TextProperty(required=True)
    # Details about the specific project.
    details = ndb.TextProperty(required=True)
    collaboration_link = ndb.TextProperty(required=True)
    # Optional link to the project's source code (not required).
    code_link = ndb.TextProperty()
    # Bookkeeping.
    created_date = ndb.DateTimeProperty(required=True, auto_now_add=True)
    updated_date = ndb.DateTimeProperty(required=True, auto_now=True)
    owner_key = ndb.KeyProperty(required=True, kind=user_model.User)
    # TODO(samking): add these fields
    # tag_keys = ndb.KeyProperty(repeated=True, kind=tag.Tag)
    # is_completed = ndb.BooleanProperty(required=True, default=False)
    # page_views = ndb.IntegerProperty(required=True, default=0)
    def populate(self, request):
        """Populates the fields in a project from a web request.
        Args:
            request: A WebOb.Request with string values for each settable
                Project parameter.
        Returns:
            self for the sake of chaining.
        """
        # Only SETTABLE_FIELDS are copied; bookkeeping fields are untouched.
        for field in SETTABLE_FIELDS:
            setattr(self, field, request.get(field))
        return self
def get_by_owner(owner_key):
    """Returns a list of all projects owned by the provided user."""
    # Most recently updated projects come first.
    owned_projects = Project.query(Project.owner_key == owner_key).order(
        -Project.updated_date)
    return owned_projects.fetch()
| {
"repo_name": "samking/code-the-change-projects",
"path": "ctc/models/project.py",
"copies": "1",
"size": "2188",
"license": "apache-2.0",
"hash": -8373537596364097000,
"line_mean": 36.724137931,
"line_max": 77,
"alpha_frac": 0.6878427788,
"autogenerated": false,
"ratio": 3.90017825311943,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.508802103191943,
"avg_score": null,
"num_lines": null
} |
"""A model for one user."""
from google.appengine.ext import ndb
from google.appengine.api import users
class User(ndb.Model):
    """A model for one user.

    Entities are keyed by the App Engine user id (see
    get_current_user_key below).
    """
    created_date = ndb.DateTimeProperty(required=True, auto_now_add=True)
    email = ndb.StringProperty(required=True)
    # Optional profile fields; all default to empty and are filled in by
    # populate() from a web request.
    name = ndb.StringProperty(default="")
    secondary_contact = ndb.StringProperty(default="")
    biography = ndb.StringProperty(default="")
    website = ndb.StringProperty(default="")
    def populate(self, request):
        """Populates the fields in a user's profile from a web request.
        Args:
            request: A WebOb.Request with string values for each settable
                User parameter.
        Returns:
            self for the sake of chaining.
        """
        # email and created_date are deliberately not settable here.
        settable_fields = [
            'name', 'secondary_contact', 'biography', 'website']
        for field in settable_fields:
            setattr(self, field, request.get(field))
        return self
def get_current_user_key():
    """Gets the ndb.Key for the current user, creating it if necessary.
    Returns None if the user is not logged in.
    """
    appengine_user = users.get_current_user()
    if appengine_user is None:
        # Nobody is logged in.
        return None
    user_id = appengine_user.user_id()
    # NOTE: the lookup happens before the user_id check, mirroring the
    # original control flow (get_by_id is still called when user_id is falsy).
    stored_user = User.get_by_id(user_id)
    if not user_id:
        return None
    if not stored_user:
        # The user is logged in but isn't in the datastore yet; create them.
        stored_user = User(id=user_id, email=appengine_user.email())
        stored_user.put()
    return stored_user.key
| {
"repo_name": "samking/code-the-change-projects",
"path": "ctc/models/user.py",
"copies": "1",
"size": "1642",
"license": "apache-2.0",
"hash": 8719726212280259000,
"line_mean": 32.5102040816,
"line_max": 74,
"alpha_frac": 0.637637028,
"autogenerated": false,
"ratio": 3.937649880095923,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0007288629737609329,
"num_lines": 49
} |
"""A model for storing information about how specific images should
be cached on slaves. This helps with always having the correct image
ahead of time as well as garbage collecting unneeded images on slaves.
"""
from __future__ import absolute_import
from sqlalchemy import Column, DateTime, ForeignKey
from sqlalchemy.orm import relationship
from changes.config import db
from changes.db.types.guid import GUID
class CachedSnapshotImage(db.Model):
    """
    A cached snapshot is a snapshot image that is tracked by a caching/garbage
    collection system. Not all snapshots are necessarily cached and
    slaves should not expect to have any snapshots that are not
    marked as a cached snapshot (thus they will have to download them
    on potentially every build).
    Cached snapshots are also garbage collected. And because we use
    null expiration dates to indicate unexpiring snapshots, we cannot
    overload the use of null to mean to not cache it. In this sense
    this table is necessary instead of just adding a column to the
    snapshot table.
    """
    __tablename__ = 'cached_snapshot_image'
    # snapshot ids are unique so we might as well make it our primary key.
    #
    # This is NOT autogenerated and is REQUIRED for creation.
    id = Column(GUID, ForeignKey('snapshot_image.id'), nullable=False, primary_key=True)
    # A slave is expected to have anything whose expiration date is
    # either null (not-yet-set) or past the current time.
    #
    # It is also safe to garbage collect this table itself. That is,
    # expired rows can be deleted. However, it is rather small
    # compared to additional cruft we gather so it is not necessarily
    # worth doing so.
    expiration_date = Column(DateTime, nullable=True)
    snapshot_image = relationship('SnapshotImage', innerjoin=True)
    def __init__(self, id, **kwargs):
        """Creates a cached-snapshot row.
        Args:
            id: GUID of the snapshot_image row to track (required; this
                primary key is not autogenerated).
            **kwargs: remaining column values (e.g. expiration_date).
        """
        super(CachedSnapshotImage, self).__init__(id=id, **kwargs)
| {
"repo_name": "dropbox/changes",
"path": "changes/models/cached_snapshot_image.py",
"copies": "3",
"size": "1941",
"license": "apache-2.0",
"hash": 8131894671979353000,
"line_mean": 38.612244898,
"line_max": 88,
"alpha_frac": 0.7320968573,
"autogenerated": false,
"ratio": 4.441647597254004,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6673744454554004,
"avg_score": null,
"num_lines": null
} |
"""A model for the relationship between a user and a project."""
from google.appengine.ext import ndb
from ctc.models import user as user_model
class Collaborator(ndb.Model):
    """A model for relationship between a user and a project.

    A Collaborator entity is stored as a child of the project it refers
    to: the parent key identifies the project, user_key the user.
    """
    # The collaborating user; the project is given by the entity's parent key.
    user_key = ndb.KeyProperty(required=True, kind=user_model.User)
    created_date = ndb.DateTimeProperty(required=True, auto_now_add=True)
    def _pre_put_hook(self):
        """Raises an exception if a new collaborator does not have a parent.
        Raises:
            AssertionError: if the entity's key has no parent project.
        """
        # Use an explicit raise instead of a bare `assert` so the check is
        # not stripped when Python runs with the -O flag.  AssertionError is
        # kept as the exception type for backward compatibility.
        if not self.key.parent():
            raise AssertionError("No parent project for this collaborator.")
def get_collaborator(user_key, project_key):
    """Returns a collaboration if the user is collaborating on the project."""
    matches = Collaborator.query(ancestor=project_key).filter(
        Collaborator.user_key == user_key).fetch(limit=1)
    if not matches:
        return None
    return matches[0]
def get_projects(user_key):
    """Returns a list of all projects that the user is contributing to."""
    # Newest collaborations first; each parent key is the project entity.
    collaborations = Collaborator.query(
        Collaborator.user_key == user_key).order(
            -Collaborator.created_date).fetch()
    project_futures = [collab.key.parent().get_async()
                       for collab in collaborations]
    ndb.Future.wait_all(project_futures)
    return [future.get_result() for future in project_futures]
def get_collaborator_count(project_key):
    """Counts the number of collaborators for a given project."""
    return Collaborator.query(ancestor=project_key).count()
def get_collaborator_emails(project_key):
    """Returns the emails of all collaborating users."""
    # Oldest collaborations first; user records are fetched concurrently.
    ordered_query = Collaborator.query(ancestor=project_key).order(
        Collaborator.created_date)
    user_futures = [collab.user_key.get_async()
                    for collab in ordered_query.fetch()]
    ndb.Future.wait_all(user_futures)
    return [future.get_result().email for future in user_futures]
| {
"repo_name": "samking/code-the-change-projects",
"path": "ctc/models/collaborator.py",
"copies": "1",
"size": "1962",
"license": "apache-2.0",
"hash": 2753148261991698400,
"line_mean": 38.24,
"line_max": 79,
"alpha_frac": 0.7099898063,
"autogenerated": false,
"ratio": 3.6266173752310538,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48366071815310535,
"avg_score": null,
"num_lines": null
} |
"""A model of temperature diffusion over a rectangular plate."""
import numpy as np
import yaml
class Diffusion(object):
    """Model of temperature diffusion on a plate.

    Solves the 2D heat equation with an explicit finite-difference scheme
    on a regular grid.  Boundary cells are held at zero temperature.
    """
    def __init__(self, config_file=None):
        """Initialize the model.

        Args:
            config_file: optional path to a YAML file whose top-level
                keys (e.g. nx, ny, dx, dy, alpha) are set as attributes
                on the model.  When omitted, defaults are used.
        """
        if config_file is not None:
            with open(config_file, 'r') as fp:
                # safe_load instead of yaml.load: plain yaml.load can
                # construct arbitrary Python objects from untrusted input.
                # NOTE(review): this rejects python-specific YAML tags;
                # plain key/value config files are unaffected.
                parameters = yaml.safe_load(fp)
            for key, value in parameters.items():
                setattr(self, key, value)
        else:
            self.nx = 8
            self.ny = 6
            self.dx = 1.0
            self.dy = 1.0
            self.alpha = 0.9
        self.time = 0.0
        # Half the maximum stable explicit time step.
        self.dt = min(self.dx, self.dy) ** 2.0 / (4.0 * self.alpha)
        self.dt /= 2.0
        self.temperature = np.zeros((self.ny, self.nx))
        self.new_temperature = self.temperature.copy()
    def advance(self):
        """Advance the model by one time step."""
        self.solve()
        self.time += self.dt
    def solve(self):
        """Solve the diffusion equation for one explicit time step."""
        dx2, dy2 = self.dx**2, self.dy**2
        coef = self.alpha * self.dt / (2.0*(dx2 + dy2))
        T = self.temperature
        # Vectorized interior update: replaces the original O(nx*ny)
        # Python loops with one NumPy slice expression computing the
        # same per-element arithmetic.
        self.new_temperature[1:-1, 1:-1] = T[1:-1, 1:-1] + coef * (
            dx2*(T[1:-1, :-2] + T[1:-1, 2:]) +
            dy2*(T[:-2, 1:-1] + T[2:, 1:-1]) -
            2.0*(dx2 + dy2)*T[1:-1, 1:-1])
        # Fixed boundary condition: edges stay at zero temperature.
        self.new_temperature[(0, -1), :] = 0.0
        self.new_temperature[:, (0, -1)] = 0.0
        self.temperature[:] = self.new_temperature
| {
"repo_name": "csdms/bmi-live-2017",
"path": "bmi_live/diffusion.py",
"copies": "1",
"size": "1677",
"license": "mit",
"hash": 7805895429506945000,
"line_mean": 30.0555555556,
"line_max": 77,
"alpha_frac": 0.5122242099,
"autogenerated": false,
"ratio": 3.479253112033195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4491477321933195,
"avg_score": null,
"num_lines": null
} |
"""A model organizes the training of a neural network.
The general structure, and especial the fit method, are similar to the keras
Model class.
"""
#%%
import copy
import numpy as np
import time
import warnings
import natural_bm.backend as B
import natural_bm.callbacks as cbks
from natural_bm.utils import merge_OrderedDicts
from natural_bm.callbacks import CSVLogger
#%%
def check_batches(size, batch_size):
    """Warn when batch_size does not evenly divide size.

    Samples in the final partial batch are silently dropped by
    make_batches, so a warning is emitted if a remainder exists.
    """
    if size % batch_size:
        warnings.warn(
            'Batch size does not evenly divide into data. Remainders are ignored.')
#%%
def make_batches(size, batch_size, epoch=None):
    """Return a list of (start, end) index tuples covering full batches.

    Only complete batches are produced; any trailing remainder of the
    data is ignored.  On the first epoch (epoch is None or 0) a warning
    is emitted via check_batches when a remainder exists.
    """
    if epoch in [None, 0]:
        check_batches(size, batch_size)
    num_full = size // batch_size
    return [(start, min(size, start + batch_size))
            for start in range(0, num_full * batch_size, batch_size)]
#%%
class Model:
    """Class that handles the training of a neural network.

    Compiles backend (symbolic) train/validation/test functions over
    shared data arrays and runs a Keras-style fit loop with callbacks.
    """
    def __init__(self, nnet, optimizer, trainer):
        # nnet: network whose trainable_weights are optimized.
        # optimizer: produces gradient-based updates from the loss.
        # trainer: supplies the loss function and auxiliary updates.
        self.nnet = nnet
        self.optimizer = optimizer
        self.trainer = trainer
        self.inputs = B.placeholder(shape=(None, self.nnet.layer_size_list[0]), name='x')
        self.loss_fn = trainer.loss_fn()
        loss = self.loss_fn(self.inputs)
        # Add any per-part regularization losses to the training loss.
        for part in self.nnet.parts:
            for pl in part.losses:
                loss += pl
        self.loss = loss
        self.trainable_weights = self.nnet.trainable_weights
        self._updates = self.trainer.updates
    @property
    def _train_updates(self):
        # Trainer updates merged with the optimizer's gradient updates.
        training_updates = self.optimizer.get_updates(self.trainable_weights, self.loss)
        updates = merge_OrderedDicts(self._updates, training_updates)
        return updates
    def _make_function(self, index, data, updates, name):
        # Compiles a backend function that evaluates self.loss on
        # data[index], applying `updates` as a side effect.
        givens = {self.inputs: data[index]}
        fn = B.function([index],
                        self.loss,
                        updates=updates,
                        givens=givens,
                        name=name)
        return fn
    def _make_train_function(self):
        # Training applies gradient updates; requires train_index/train_data
        # to have been set (see fit / train_on_batch).
        self.train_function = self._make_function(self.train_index,
                                                  self.train_data,
                                                  self._train_updates,
                                                  'train_function')
    def _make_validation_function(self):
        # Validation applies only the trainer's updates, no gradients.
        self.validation_function = self._make_function(self.valid_index,
                                                       self.validation_data,
                                                       self._updates,
                                                       'valid_function')
    def _make_test_function(self):
        # Same as validation: evaluates the loss without gradient updates.
        self.test_function = self._make_function(self.test_index,
                                                 self.test_data,
                                                 self._updates,
                                                 'test_function')
    def _fit_loop(self,
                  f,
                  out_labels=None,
                  batch_size=100,
                  n_epoch=100,
                  callbacks=None,
                  val_f=None,
                  shuffle=True,
                  callback_metrics=None,
                  initial_epoch=0):
        """Abstract fit function for f.
        Assume that f returns a list, labeled by out_labels.
        # Arguments
            f: Backend function returning a list of tensors
            out_labels: list of strings, display names of
                the outputs of `f`
            batch_size: integer batch size
            n_epoch: number of times to iterate over the data
            callbacks: list of callbacks to be called during training
            val_f: Backend function to call for validation
            shuffle: whether to shuffle the data at the beginning of each epoch
            callback_metrics: list of strings, the display names of the metrics
                passed to the callbacks. They should be the
                concatenation of the list of display names of the outputs of
                `f` and the list of display names of the outputs of `f_val`.
            initial_epoch: epoch at which to start training
                (useful for resuming a previous training run)
        # Returns
            `History` object.
        """
        time_start = time.time()
        do_validation = False
        n_valid_sample = 0
        if val_f:
            do_validation = True
            n_valid_sample = B.eval(self.validation_data.shape[0])
        index_array = np.arange(self.n_train_sample, dtype='int32')
        self.history = cbks.History()
        # CSVLogger needs to be second to last callback
        # otherwise AIS results are not recorded
        callbacks = callbacks or []
        index_csv = None
        for i, cb in enumerate(callbacks):
            if isinstance(cb, CSVLogger):
                index_csv = i
        if index_csv is not None:
            cb_csv = callbacks.pop(index_csv)
            callbacks.append(cb_csv)
        callbacks = [cbks.BaseLogger()] + callbacks + [self.history]
        callbacks = cbks.CallbackList(callbacks)
        out_labels = out_labels or []
        callbacks.set_model(self)
        callbacks.set_params({
            'batch_size': batch_size,
            'n_epoch': n_epoch,
            'n_sample': self.n_train_sample,
            'do_validation': do_validation,
            'metrics': callback_metrics or [],
        })
        callbacks.on_train_begin()
        # Callbacks may set stop_training to True to end training early.
        self.stop_training = False
        for epoch in range(initial_epoch, n_epoch):
            callbacks.on_epoch_begin(epoch)
            if shuffle:
                np.random.shuffle(index_array)
            batches = make_batches(self.n_train_sample, batch_size, epoch)
            epoch_logs = {}
            for batch_index, (batch_start, batch_end) in enumerate(batches):
                batch_ids = index_array[batch_start:batch_end]
                batch_logs = {}
                batch_logs['batch'] = batch_index
                batch_logs['size'] = len(batch_ids)
                callbacks.on_batch_begin(batch_index, batch_logs)
                # actual training
                outs = f(batch_ids)
                if not isinstance(outs, list):
                    outs = [outs]
                for l, o in zip(out_labels, outs):
                    batch_logs[l] = o
                callbacks.on_batch_end(batch_index, batch_logs)
                if batch_index == len(batches) - 1:  # last batch
                    # validation
                    if do_validation:
                        val_outs = self._valid_loop(val_f, n_valid_sample,
                                                    batch_size=batch_size)
                        if not isinstance(val_outs, list):
                            val_outs = [val_outs]
                        # same labels assumed
                        for l, o in zip(out_labels, val_outs):
                            epoch_logs['val_' + l] = o
            callbacks.on_epoch_end(epoch, epoch_logs)
            if self.stop_training:
                break
        # Tracks the timing of everything except train_end
        # Skips train_end otherwise timing can't be included in summary callback
        fit_total_time = time.time() - time_start
        fit_callback_time = callbacks.cb_time
        self.history.fit_total_time = fit_total_time
        self.history.fit_callback_time = fit_callback_time
        self.history.fit_train_time = fit_total_time - fit_callback_time
        callbacks.on_train_end()
        return self.history
    def _valid_loop(self, f, n_sample, batch_size=100):
        """Abstract method to loop over some data in batches.
        # Arguments
            f: Backend function returning a list of tensors.
            n_sample: integer of number of samples in data.
            batch_size: integer batch size.
        # Returns
            Scalar loss (if the model has a single output and no metrics)
            or list of scalars (if the model has multiple outputs
            and/or metrics).
        """
        outs = []
        batches = make_batches(n_sample, batch_size)
        index_array = np.arange(n_sample, dtype='int32')
        for batch_index, (batch_start, batch_end) in enumerate(batches):
            batch_ids = index_array[batch_start:batch_end]
            batch_outs = f(batch_ids)
            if isinstance(batch_outs, list):
                # On the first batch, create one accumulator per output.
                if batch_index == 0:
                    for batch_out in enumerate(batch_outs):
                        outs.append(0.)
                # Accumulate the sample-weighted sum of each output.
                for i, batch_out in enumerate(batch_outs):
                    outs[i] += batch_out * len(batch_ids)
            else:
                if batch_index == 0:
                    outs.append(0.)
                outs[0] += batch_outs * len(batch_ids)
        # Convert weighted sums into per-sample averages.
        for i, out in enumerate(outs):
            outs[i] /= n_sample
        if len(outs) == 1:
            return outs[0]
        return outs
    def fit(self,
            x,
            batch_size=100,
            n_epoch=10,
            callbacks=None,
            validation_data=None,
            shuffle=True,
            initial_epoch=0):
        """Trains the model for a fixed number of epochs (iterations on a dataset).
        # Arguments
            x: Theano shared array of training data
            batch_size: integer. Number of samples per gradient update.
            n_epoch: integer, the number of times to iterate
                over the training data arrays.
            callbacks: list of callbacks to be called during training.
            validation_data: Theano shared array of data on which to evaluate
                the loss and any model metrics at the end of each epoch.
                The model will not be trained on this data.
            shuffle: boolean, whether to shuffle the training data
                before each epoch.
            initial_epoch: epoch at which to start training
                (useful for resuming a previous training run)
        # Returns
            A `History` instance. Its `history` attribute contains
            all information collected during training.
        """
        self.train_data = x
        self.n_train_sample = B.eval(x.shape[0])
        self.validation_data = validation_data
        # makes the generic indices to access data
        self.train_index = B.placeholder(shape=(batch_size,),
                                         dtype=B.intx(), name='train_index')
        # makes the training functions
        self._make_train_function()
        f = self.train_function
        # preps for validation
        out_labels = ['cost']
        if validation_data:
            self.valid_index = B.placeholder(shape=(batch_size,),
                                             dtype=B.intx(), name='valid_index')
            callback_metrics = copy.copy(out_labels) + ['val_' + n for n in out_labels]
            self._make_validation_function()
            val_f = self.validation_function
        else:
            callback_metrics = copy.copy(out_labels)
            val_f = None
        # delegate logic to _fit_loop
        return self._fit_loop(f, out_labels=out_labels,
                              batch_size=batch_size, n_epoch=n_epoch,
                              callbacks=callbacks,
                              val_f=val_f, shuffle=shuffle,
                              callback_metrics=callback_metrics,
                              initial_epoch=initial_epoch)
    def train_on_batch(self, x):
        """Runs a single gradient update on a single batch of data.
        # Arguments
            x: Numpy array of training data,
                or list of Numpy arrays if the model has multiple inputs.
                If all inputs in the model are named,
                you can also pass a dictionary
                mapping input names to Numpy arrays.
        # Returns
            Scalar training loss
            (if the model has a single output and no metrics)
            or list of scalars (if the model has multiple outputs
            and/or metrics).
        """
        # makes the generic indices to access data
        batch_size = B.eval(x.shape)[0]
        self.train_index = B.placeholder(shape=(batch_size,),
                                         dtype=B.intx(), name='train_index')
        self.train_data = x
        index = np.arange(batch_size)
        self._make_train_function()
        outputs = self.train_function(index)
        return outputs
    def predict_on_batch(self, x):
        """Evaluates the loss on a single batch of data without training.
        # Arguments
            x: Numpy array of training data,
                or list of Numpy arrays if the model has multiple inputs.
                If all inputs in the model are named,
                you can also pass a dictionary
                mapping input names to Numpy arrays.
        # Returns
            Scalar training loss
            (if the model has a single output and no metrics)
            or list of scalars (if the model has multiple outputs
            and/or metrics).
        """
        # makes the generic indices to access data
        batch_size = B.eval(x.shape)[0]
        self.test_index = B.placeholder(shape=(batch_size,),
                                        dtype=B.intx(), name='test_index')
        self.test_data = x
        index = np.arange(batch_size)
        self._make_test_function()
        outputs = self.test_function(index)
        return outputs
| {
"repo_name": "alexhunterlang/natural_bm",
"path": "natural_bm/models.py",
"copies": "1",
"size": "13928",
"license": "mit",
"hash": -5584103061338695000,
"line_mean": 37.1589041096,
"line_max": 89,
"alpha_frac": 0.5309448593,
"autogenerated": false,
"ratio": 4.569553805774278,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.005136868812839995,
"num_lines": 365
} |
"""A Model whose structure is defined by an ontology."""
from deepchem.models import TensorGraph
from deepchem.models.tensorgraph import layers
from deepchem.metrics import to_one_hot
from deepchem.utils import get_data_dir, download_url
import tensorflow as tf
import math
import os
class OntologyModel(TensorGraph):
  """Implements ontology based models.

  The model is based on Ma et al., "Using deep learning to model the hierarchical
  structure and function of a cell" (https://doi.org/10.1038/nmeth.4627). The
  model structure is defined by an ontology: a set of features grouped into
  categories, which in turn are arranged hierarchically to form a directed
  acyclic graph. An example is the Gene Ontology (GO) classifications which
  groups genes into a set of hierarchical categories based on their biological
  role. Using a known ontology to define the model structure has two benefits.
  First, incorporating prior knowledge can sometimes lead to much more accurate
  predictions for a fixed model size. Second, it makes the model's results
  much easier to interpret.

  To use this model, you must provide an ontology represented as a tree of
  OntologyNode objects. Each node corresponds to a category in the ontology.
  It defines the list of features (e.g. genes) that correspond to that category,
  as well as its child nodes (subcategories). In addition, every feature and
  every node has a unique string identifier that can be used to refer to it.

  As an alternative to building the ontology yourself, you can use the
  create_gene_ontology() function to build a representation of the GO hierarchy.
  It downloads a definition of the hierarchy from the GO website, parses it,
  builds OntologyNodes for all the categories, and returns a root node that you
  can pass to the OntologyModel constructor.

  An important feature of this model is that the outputs of its internal layers
  are meaningful. During training, it tries to make each category independently
  predict the labels. By default, predict() returns the predictions for the
  root node of the hierarchy. You can use the prediction_for_node field to get
  the output layer corresponding to a particular category:

  prediction = model.predict(dataset, outputs=model.prediction_for_node[node_id])
  """

  def __init__(self,
               n_tasks,
               feature_ids,
               root_node,
               mode="regression",
               n_classes=2,
               intermediate_loss_weight=0.3,
               weight_decay_penalty=0.0,
               **kwargs):
    """Create an OntologyModel.

    In addition to the following arguments, this class also accepts
    all the keyword arguments from TensorGraph.

    Parameters
    ----------
    n_tasks: int
      the number of tasks this model predicts
    feature_ids: list of str
      the unique identifiers for the features this model generates predictions
      based on. These strings must match the feature IDs in the OntologyNodes.
      The first element of this list must correspond to the first feature in the
      data, the second element to the second feature, etc.
    root_node: OntologyNode
      the root node of the ontology that defines this model
    mode: str
      the type of model to create, either "regression" or "classification"
    n_classes: int
      for classification models, the number of classes to predict. This is
      ignored for regression models.
    intermediate_loss_weight: float
      the weight to multiply the loss from intermediate (non-root) categories by
    weight_decay_penalty: float
      the magnitude of the weight decay penalty to use for normalization
    """
    super(OntologyModel, self).__init__(**kwargs)
    self.n_tasks = n_tasks
    self.feature_ids = feature_ids
    self.mode = mode
    self.n_classes = n_classes
    # Maps each feature id to its column index in the input matrix.
    self._feature_index = dict((f, i) for i, f in enumerate(feature_ids))
    # Input is transposed so that _build_layers can Gather per-feature rows.
    self._features = layers.Transpose(
        (1, 0), in_layers=layers.Feature(shape=(None, len(feature_ids))))
    # Filled in by _build_layers: node id -> hidden layer for that category.
    self.output_for_node = {}
    # node id -> prediction head, so callers can inspect any category.
    self.prediction_for_node = {}
    if mode not in ('regression', 'classification'):
      raise ValueError('Mode must be "regression" or "classification"')
    # Construct layers for all nodes.
    logits_for_node = {}
    self._build_layers(root_node)
    # Attach an independent prediction head to every node's hidden output.
    for id in self.output_for_node:
      if mode == 'regression':
        prediction = layers.Dense(
            in_layers=self.output_for_node[id], out_channels=n_tasks)
      else:
        logits = layers.Reshape(
            shape=(-1, n_tasks, n_classes),
            in_layers=layers.Dense(
                in_layers=self.output_for_node[id],
                out_channels=n_tasks * n_classes))
        prediction = layers.SoftMax(logits)
        logits_for_node[id] = logits
      self.prediction_for_node[id] = prediction
      self.add_output(self.prediction_for_node[id])
    # predict() reports the root node's head unless told otherwise.
    self.set_default_outputs([self.prediction_for_node[root_node.id]])
    # Create the loss function.  Every node's head contributes: the root with
    # weight 1.0, intermediate nodes scaled by intermediate_loss_weight.
    losses = []
    loss_weights = []
    weights = layers.Weights(shape=(None, n_tasks))
    if mode == 'regression':
      labels = layers.Label(shape=(None, n_tasks))
      for id in self.prediction_for_node:
        losses.append(
            layers.ReduceSum(
                layers.L2Loss([labels, self.prediction_for_node[id], weights])))
        loss_weights.append(1.0
                            if id == root_node.id else intermediate_loss_weight)
    else:
      labels = layers.Label(shape=(None, n_tasks, n_classes))
      for id in self.prediction_for_node:
        losses.append(
            layers.WeightedError([
                layers.SoftMaxCrossEntropy([labels, logits_for_node[id]]),
                weights
            ]))
        loss_weights.append(1.0
                            if id == root_node.id else intermediate_loss_weight)
    loss = layers.Add(in_layers=losses, weights=loss_weights)
    if weight_decay_penalty != 0.0:
      loss = layers.WeightDecay(weight_decay_penalty, 'l2', in_layers=loss)
    self.set_loss(loss)

  def _build_layers(self, node):
    """Recursively create the hidden layer for `node` and all its children.

    The layer's inputs are the node's own features (gathered from the input
    matrix) plus the outputs of all child nodes; results are memoized in
    self.output_for_node so shared children are built only once.
    """
    inputs = []
    # Create inputs for the features.
    if len(node.feature_ids) > 0:
      indices = []
      for f in node.feature_ids:
        if f in self._feature_index:
          indices.append([self._feature_index[f]])
        else:
          raise ValueError('Unknown feature "%s"' % f)
      inputs.append(
          layers.Transpose(
              (1, 0),
              in_layers=layers.Gather(
                  in_layers=self._features, indices=indices)))
    # Create inputs for the children.
    if len(node.children) > 0:
      for child in node.children:
        if child.id not in self.output_for_node:
          self._build_layers(child)
        inputs.append(self.output_for_node[child.id])
    # Concatenate all inputs together.
    if len(inputs) == 0:
      raise ValueError('OntologyNode must have at least one child or feature')
    if len(inputs) == 1:
      inputs = inputs[0]
    else:
      inputs = layers.Concat(inputs)
    # Create the output: a tanh dense layer followed by batch normalization.
    dense = layers.Dense(
        node.n_outputs, in_layers=inputs, activation_fn=tf.tanh)
    output = layers.BatchNorm(dense)
    self.output_for_node[node.id] = output

  def default_generator(self,
                        dataset,
                        epochs=1,
                        predict=False,
                        deterministic=True,
                        pad_batches=True):
    """Yield feed_dicts over `dataset` for training or prediction.

    For classification, integer labels are expanded to one-hot of shape
    (batch, n_tasks, n_classes). Labels and weights are only fed when not
    predicting.
    """
    for epoch in range(epochs):
      for (X_b, y_b, w_b, ids_b) in dataset.iterbatches(
          batch_size=self.batch_size,
          deterministic=deterministic,
          pad_batches=pad_batches):
        feed_dict = dict()
        if y_b is not None and not predict:
          if self.mode == 'regression':
            feed_dict[self.labels[0]] = y_b
          else:
            feed_dict[self.labels[0]] = to_one_hot(y_b.flatten(),
                                                   self.n_classes).reshape(
                                                       -1, self.n_tasks,
                                                       self.n_classes)
        if X_b is not None:
          feed_dict[self.features[0]] = X_b
        if w_b is not None and not predict:
          feed_dict[self.task_weights[0]] = w_b
        yield feed_dict

  def create_estimator_inputs(self, feature_columns, weight_column, features,
                              labels, mode):
    """Map TensorFlow estimator feature columns, weights, and labels onto this
    graph's input layers, returning a dict of layer -> tensor."""
    tensors = {}
    for layer, column in zip(self.features, feature_columns):
      tensors[layer] = tf.feature_column.input_layer(features, [column])
    if weight_column is not None:
      tensors[self.task_weights[0]] = tf.feature_column.input_layer(
          features, [weight_column])
    if labels is not None:
      if self.mode == 'regression':
        tensors[self.labels[0]] = tf.cast(labels, self.labels[0].dtype)
      else:
        # Classification labels arrive as integers; expand to one-hot.
        tensors[self.labels[0]] = tf.one_hot(
            tf.cast(labels, tf.int32), self.n_classes)
    return tensors
class OntologyNode(object):
  """An OntologyNode represents a category within an ontology."""

  def __init__(self,
               node_id=None,
               n_outputs=10,
               feature_ids=None,
               children=None,
               name=None):
    """Create a OntologyNode representing a category in an ontology.

    Parameters
    ----------
    node_id: str
      a unique identifier for this category. If this is omitted, the id is
      left as None.
    n_outputs: int
      the number of output values the corresponding layer of the OntologyModel
      should produce
    feature_ids: list of str
      the unique IDs of all features that belong to this category (not including
      ones that belong to child nodes)
    children: list of OntologyNode
      the list of nodes defining subcategories
    name: str
      a descriptive name for this category. If this is omitted, the name is set
      to the ID.
    """
    self.id = node_id
    self.n_outputs = n_outputs
    # Fresh lists avoid the mutable-default-argument trap.
    self.feature_ids = (feature_ids if feature_ids is not None else [])
    self.children = (children if children is not None else [])
    # Bug fix: the original fell back to the *builtin* function ``id`` when no
    # name was given, so the name became "<built-in function id>" rather than
    # the node's identifier.  Fall back to ``node_id`` instead.
    self.name = (name if name is not None else node_id)
def create_gene_ontology(feature_mapping,
                         outputs_per_feature=0.3,
                         min_outputs=20,
                         min_node_features=6,
                         omit_redundant_nodes=True,
                         ontology_file=None):
  """Create a tree of OntologyNodes describing the Gene Ontology classification.

  See http://geneontology.org/ for details about the Gene Ontology classification.

  Parameters
  ----------
  feature_mapping: dict
    defines the mapping of features to GO categories. Each key should be a
    feature ID. The corresponding value should be a list of strings, giving the
    unique identifiers of all GO categories that feature belongs to.
  outputs_per_feature: float
    the number of outputs for each node is set to this value times the total
    number of features the node contains (including all subnodes)
  min_outputs: int
    the minimum number of outputs for any node
  min_node_features: int
    the minimum number of features corresponding to a node (including all its
    subnodes). If a category has fewer features than this, no node is created
    for it. Instead, its features are added directly to its parent node.
  omit_redundant_nodes: bool
    if True, a node will be omitted if it has only one child node and does not
    directly correspond to any features
  ontology_file: str
    the path to a Gene Ontology OBO file defining the ontology. If this is
    omitted, the most recent version of the ontology is downloaded from the GO
    website.
  """
  # If necessary, download the file defining the ontology.
  if ontology_file is None:
    ontology_file = os.path.join(get_data_dir(), 'go-basic.obo')
    if not os.path.isfile(ontology_file):
      download_url('http://purl.obolibrary.org/obo/go/go-basic.obo')
  # Parse the ontology definition and create a list of terms.  Each term is a
  # dict with 'id', 'name', and a (possibly empty) 'parents' list.
  terms = []
  term = None
  with open(ontology_file) as input:
    for line in input:
      if line.startswith('[Term]'):
        # A new stanza starts: flush the previous term, if any.
        if term is not None:
          terms.append(term)
        term = {'parents': []}
      elif line.startswith('[Typedef]'):
        # Typedef stanzas are not categories; flush and ignore until the
        # next [Term].
        if term is not None:
          terms.append(term)
        term = None
      elif line.startswith('id:') and term is not None:
        term['id'] = line.split()[1]
      elif line.startswith('name:') and term is not None:
        term['name'] = line[5:].strip()
      elif line.startswith('is_a:') and term is not None:
        term['parents'].append(line.split()[1])
      elif line.startswith('is_obsolete:'):
        # Obsolete terms are discarded entirely.
        if line.split()[1] == 'true':
          term = None
  if term is not None:
    terms.append(term)
  # Create OntologyNode objects for all the terms.
  nodes = {}
  for term in terms:
    nodes[term['id']] = OntologyNode(term['id'], 0, name=term['name'])
  # Assign parent-child relationships between nodes, and identify root nodes.
  roots = []
  for term in terms:
    node = nodes[term['id']]
    for parent in term['parents']:
      nodes[parent].children.append(node)
    if len(term['parents']) == 0:
      roots.append(node)
  # Create a single root node that combines the three GO roots.
  root = OntologyNode('GO', 0, name='Gene Ontology Root Node', children=roots)
  # Assign features to nodes.
  for feature_id in feature_mapping:
    for node_id in feature_mapping[feature_id]:
      nodes[node_id].feature_ids.append(feature_id)
  # Count the number of features within each node. Eliminate nodes with too few
  # features and set the number of outputs for each one.

  def count_features(node):
    # self_features: features this node will own directly;
    # all_features: features anywhere in this node's subtree.
    self_features = set(node.feature_ids)
    all_features = set(node.feature_ids)
    # Iterate a copy because children may be removed during the loop.
    for i, child in enumerate(node.children[:]):
      child_features = count_features(child)
      all_features.update(child_features)
      if len(child_features) < min_node_features:
        # Child subtree is too small: absorb it into this node.
        node.children.remove(child)
        # NOTE(review): only the child's *direct* feature_ids are absorbed
        # here, not the whole subtree's child_features -- confirm intended.
        self_features.update(child.feature_ids)
    if omit_redundant_nodes and len(
        node.children) == 1 and len(self_features) == 0:
      # Node adds nothing over its single child: splice the child in.
      self_features = node.children[0].feature_ids
      node.children = node.children[0].children
    n_features = len(self_features)
    if n_features > len(node.feature_ids):
      node.feature_ids = list(self_features)
    node.n_outputs = max(min_outputs,
                         math.ceil(outputs_per_feature * n_features))
    return all_features

  count_features(root)
  return root
| {
"repo_name": "ktaneishi/deepchem",
"path": "deepchem/models/tensorgraph/models/ontology.py",
"copies": "1",
"size": "14653",
"license": "mit",
"hash": 2828340441275304000,
"line_mean": 37.6622691293,
"line_max": 82,
"alpha_frac": 0.6424622944,
"autogenerated": false,
"ratio": 4.024443834111508,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006945831352504958,
"num_lines": 379
} |
# A modification of rnn.py by Razvan Pascanu
import numpy as np
import theano
import theano.tensor as TT
from theano.compat.python2x import OrderedDict
# number of hidden units
n = 50
# number of input units
nin = 5
# number of output units
nout = 5
# input (where first dimension is time)
u = TT.matrix()
# target (where first dimension is time)
t = TT.matrix()
# initial hidden state of the RNN
h0 = TT.vector()
# learning rate
lr = TT.scalar()
# Weights are small uniform random values, cast to float32 so the model can
# run in single precision.
# recurrent weights as a shared variable
W = theano.shared(np.random.uniform(size=(n, n), low=-.01, high=.01).astype(np.float32))
# input to hidden layer weights
W_in = theano.shared(np.random.uniform(size=(nin, n), low=-.01, high=.01).astype(np.float32))
# hidden to output layer weights
W_out = theano.shared(np.random.uniform(size=(n, nout), low=-.01, high=.01).astype(np.float32))
# recurrent function (using tanh activation function) and linear output
# activation function
def step(u_t, h_tm1, W, W_in, W_out):
    """One recurrence step: tanh hidden-state update followed by a linear readout.

    u_t is the input at the current time step, h_tm1 the previous hidden state;
    returns the new hidden state and the output for this step.
    """
    pre_activation = TT.dot(u_t, W_in) + TT.dot(h_tm1, W)
    hidden = TT.tanh(pre_activation)
    readout = TT.dot(hidden, W_out)
    return hidden, readout
# the hidden state `h` for the entire sequence, and the output for the
# entire sequence `y` (first dimension is always time)
[h, y], _ = theano.scan(step,
                        sequences=u,
                        outputs_info=[h0, None],
                        non_sequences=[W, W_in, W_out])
# error between output and target (squared error summed over the sequence)
error = ((y - t) ** 2).sum()
# gradients on the weights using BPTT (backpropagation through time)
gW, gW_in, gW_out = TT.grad(error, [W, W_in, W_out])
# training function, that computes the error and updates the weights using
# SGD.
def RMSprop(cost, params, lr=0.001, rho=0.9, epsilon=1e-6):
    """RMSprop is a more intelligent update method. Written by @newmu TheanoTutorials

    Parameters
    ----------
    cost : Theano scalar expression to minimize.
    params : list of Theano shared variables to update.
    lr : float, learning rate.
    rho : float, decay rate for the running average of squared gradients.
    epsilon : float, numerical-stability constant added before the sqrt.

    Returns
    -------
    List of (shared_variable, update_expression) pairs suitable for the
    ``updates`` argument of ``theano.function``.
    """
    # Bug fix: the original referenced an undefined name ``T`` (T.grad, T.sqrt);
    # this module imports theano.tensor as ``TT``, so calling the function
    # raised NameError.
    grads = TT.grad(cost=cost, wrt=params)
    updates = []
    for p, g in zip(params, grads):
        # running average of the magnitude of the gradient
        acc = theano.shared(p.get_value() * 0.)  # accumulator
        acc_new = rho * acc + (1 - rho) * g ** 2
        # scale the gradient based on running average (it'll find it faster as it approaches the minimum)
        gradient_scaling = TT.sqrt(acc_new + epsilon)
        g = g / gradient_scaling
        updates.append((acc, acc_new))
        updates.append((p, p - lr * g))
    return updates
# Plain SGD updates for each weight matrix (RMSprop above is an alternative).
ud = OrderedDict()
ud[W] = W - lr * gW
ud[W_in] = W_in - lr * gW_in
ud[W_out] = W_out - lr * gW_out
# h0 should be np.zeros(size)
# lr should be .01 for now, although this could be different for different updates funcs like rmsprop adagrad
fn = theano.function([h0, u, t, lr],
                     error,
                     updates=ud)
# lets train / test stuff!
trX = np.linspace(-5, 5, 101)
trY = trX ** 2 + np.random.randn(*trX.shape) * 1.3 # noise for training
# NOTE(review): `plt` is never imported in this file, so the next two lines
# raise NameError at runtime -- matplotlib.pyplot must be imported as plt.
plt.plot(trY, 'r.')
plt.show()
teX = np.linspace(-7, 7, 101)
teY = teX ** 2 # no noise for testing
# Reshape 1-D series to (time, 1) columns for the scan-based RNN above.
tru = trX.reshape(-1, 1)
trt = trY.reshape(-1, 1)
teu = teX.reshape(-1, 1)
# NOTE(review): test targets are built from teX, not teY -- presumably a
# typo; verify before using `tet`.
tet = teX.reshape(-1, 1)
| {
"repo_name": "youralien/minet",
"path": "rnn.py",
"copies": "1",
"size": "2995",
"license": "bsd-3-clause",
"hash": -642208896489973400,
"line_mean": 30.8617021277,
"line_max": 109,
"alpha_frac": 0.6370617696,
"autogenerated": false,
"ratio": 2.974180734856008,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9046281050552187,
"avg_score": 0.012992290780764002,
"num_lines": 94
} |
# A modification version from chainercv repository.
# (See https://github.com/chainer/chainercv/blob/master/chainercv/evaluations/eval_detection_voc.py)
from __future__ import division
import os
from collections import defaultdict
import numpy as np
from maskrcnn_benchmark.structures.bounding_box import BoxList
from maskrcnn_benchmark.structures.boxlist_ops import boxlist_iou
def do_voc_evaluation(dataset, predictions, output_folder, logger):
    """Evaluate `predictions` against `dataset` with PASCAL VOC metrics.

    Resizes each prediction BoxList to its image's size, computes per-class AP
    and mAP, logs a summary, and optionally writes it to result.txt.
    """
    # TODO need to make the use_07_metric format available for the user to choose
    gt_boxlists, pred_boxlists = [], []
    for image_id, prediction in enumerate(predictions):
        info = dataset.get_img_info(image_id)
        if len(prediction) == 0:
            continue
        resized = prediction.resize((info["width"], info["height"]))
        pred_boxlists.append(resized)
        gt_boxlists.append(dataset.get_groundtruth(image_id))
    result = eval_detection_voc(
        pred_boxlists=pred_boxlists,
        gt_boxlists=gt_boxlists,
        iou_thresh=0.5,
        use_07_metric=True,
    )
    report_lines = ["mAP: {:.4f}\n".format(result["map"])]
    for class_id, ap in enumerate(result["ap"]):
        if class_id == 0:  # skip background
            continue
        report_lines.append("{:<16}: {:.4f}\n".format(
            dataset.map_class_id_to_class_name(class_id), ap))
    result_str = "".join(report_lines)
    logger.info(result_str)
    if output_folder:
        with open(os.path.join(output_folder, "result.txt"), "w") as fid:
            fid.write(result_str)
    return result
def eval_detection_voc(pred_boxlists, gt_boxlists, iou_thresh=0.5, use_07_metric=False):
    """Evaluate on voc dataset.

    Args:
        pred_boxlists(list[BoxList]): pred boxlist, has labels and scores fields.
        gt_boxlists(list[BoxList]): ground truth boxlist, has labels field.
        iou_thresh: iou thresh
        use_07_metric: boolean
    Returns:
        dict with per-class "ap" array and its nan-mean "map"
    """
    assert len(gt_boxlists) == len(
        pred_boxlists
    ), "Length of gt and pred lists need to be same."
    precisions, recalls = calc_detection_voc_prec_rec(
        pred_boxlists=pred_boxlists, gt_boxlists=gt_boxlists, iou_thresh=iou_thresh
    )
    per_class_ap = calc_detection_voc_ap(
        precisions, recalls, use_07_metric=use_07_metric)
    return {"ap": per_class_ap, "map": np.nanmean(per_class_ap)}
def calc_detection_voc_prec_rec(gt_boxlists, pred_boxlists, iou_thresh=0.5):
    """Calculate precision and recall based on evaluation code of PASCAL VOC.

    This function calculates precision and recall of
    predicted bounding boxes obtained from a dataset which has :math:`N`
    images.
    The code is based on the evaluation code used in PASCAL VOC Challenge.
    """
    # Per-class accumulators across all images:
    #   n_pos[l]  -- count of non-difficult ground-truth boxes of class l
    #   score[l]  -- confidence of every prediction of class l
    #   match[l]  -- 1 = true positive, 0 = false positive,
    #                -1 = matched a "difficult" box (ignored in prec/rec)
    n_pos = defaultdict(int)
    score = defaultdict(list)
    match = defaultdict(list)
    for gt_boxlist, pred_boxlist in zip(gt_boxlists, pred_boxlists):
        pred_bbox = pred_boxlist.bbox.numpy()
        pred_label = pred_boxlist.get_field("labels").numpy()
        pred_score = pred_boxlist.get_field("scores").numpy()
        gt_bbox = gt_boxlist.bbox.numpy()
        gt_label = gt_boxlist.get_field("labels").numpy()
        gt_difficult = gt_boxlist.get_field("difficult").numpy()
        # Consider every class present in either predictions or ground truth.
        for l in np.unique(np.concatenate((pred_label, gt_label)).astype(int)):
            pred_mask_l = pred_label == l
            pred_bbox_l = pred_bbox[pred_mask_l]
            pred_score_l = pred_score[pred_mask_l]
            # sort by score
            order = pred_score_l.argsort()[::-1]
            pred_bbox_l = pred_bbox_l[order]
            pred_score_l = pred_score_l[order]
            gt_mask_l = gt_label == l
            gt_bbox_l = gt_bbox[gt_mask_l]
            gt_difficult_l = gt_difficult[gt_mask_l]
            n_pos[l] += np.logical_not(gt_difficult_l).sum()
            score[l].extend(pred_score_l)
            if len(pred_bbox_l) == 0:
                continue
            if len(gt_bbox_l) == 0:
                # No ground truth of this class in the image: every
                # prediction is a false positive.
                match[l].extend((0,) * pred_bbox_l.shape[0])
                continue
            # VOC evaluation follows integer typed bounding boxes.
            pred_bbox_l = pred_bbox_l.copy()
            pred_bbox_l[:, 2:] += 1
            gt_bbox_l = gt_bbox_l.copy()
            gt_bbox_l[:, 2:] += 1
            iou = boxlist_iou(
                BoxList(pred_bbox_l, gt_boxlist.size),
                BoxList(gt_bbox_l, gt_boxlist.size),
            ).numpy()
            gt_index = iou.argmax(axis=1)
            # set -1 if there is no matching ground truth
            gt_index[iou.max(axis=1) < iou_thresh] = -1
            del iou
            # Greedy assignment in descending score order; each ground-truth
            # box may produce at most one true positive (selec tracks this).
            selec = np.zeros(gt_bbox_l.shape[0], dtype=bool)
            for gt_idx in gt_index:
                if gt_idx >= 0:
                    if gt_difficult_l[gt_idx]:
                        match[l].append(-1)
                    else:
                        if not selec[gt_idx]:
                            match[l].append(1)
                        else:
                            match[l].append(0)
                        selec[gt_idx] = True
                else:
                    match[l].append(0)
    # Class ids are assumed to be 0..max; classes never seen stay None.
    n_fg_class = max(n_pos.keys()) + 1
    prec = [None] * n_fg_class
    rec = [None] * n_fg_class
    for l in n_pos.keys():
        score_l = np.array(score[l])
        match_l = np.array(match[l], dtype=np.int8)
        # Re-sort globally by score so cumulative sums follow confidence.
        order = score_l.argsort()[::-1]
        match_l = match_l[order]
        tp = np.cumsum(match_l == 1)
        fp = np.cumsum(match_l == 0)
        # If an element of fp + tp is 0,
        # the corresponding element of prec[l] is nan.
        prec[l] = tp / (fp + tp)
        # If n_pos[l] is 0, rec[l] is None.
        if n_pos[l] > 0:
            rec[l] = tp / n_pos[l]
    return prec, rec
def calc_detection_voc_ap(prec, rec, use_07_metric=False):
    """Calculate average precisions based on evaluation code of PASCAL VOC.

    Args:
        prec (list of numpy.array): per-class precision arrays; a None entry
            yields numpy.nan for that class.
        rec (list of numpy.array): per-class recall arrays; a None entry
            yields numpy.nan for that class.
        use_07_metric (bool): if True, use the 11-point interpolation of the
            VOC 2007 challenge instead of the exact area under the PR curve.

    Returns:
        ~numpy.ndarray: average precision per class (nan where undefined).
    """
    ap = np.empty(len(prec))
    for class_idx, (p, r) in enumerate(zip(prec, rec)):
        if p is None or r is None:
            ap[class_idx] = np.nan
            continue
        if use_07_metric:
            # 11 point metric: average the best precision at recall
            # thresholds 0.0, 0.1, ..., 1.0.
            total = 0
            for threshold in np.arange(0.0, 1.1, 0.1):
                reachable = r >= threshold
                if np.sum(reachable) == 0:
                    best_prec = 0
                else:
                    best_prec = np.max(np.nan_to_num(p)[reachable])
                total += best_prec / 11
            ap[class_idx] = total
        else:
            # Exact area under the precision/recall curve.
            # Sentinels bracket the curve, then precision is made
            # monotonically non-increasing from right to left.
            mpre = np.concatenate(([0], np.nan_to_num(p), [0]))
            mrec = np.concatenate(([0], r, [1]))
            mpre = np.maximum.accumulate(mpre[::-1])[::-1]
            # Sum (delta recall) * precision at each recall change point.
            changed = np.where(mrec[1:] != mrec[:-1])[0]
            ap[class_idx] = np.sum(
                (mrec[changed + 1] - mrec[changed]) * mpre[changed + 1])
    return ap
| {
"repo_name": "mlperf/training_results_v0.6",
"path": "NVIDIA/benchmarks/maskrcnn/implementations/pytorch/maskrcnn_benchmark/data/datasets/evaluation/voc/voc_eval.py",
"copies": "4",
"size": "8153",
"license": "apache-2.0",
"hash": 286635414634895650,
"line_mean": 36.7453703704,
"line_max": 100,
"alpha_frac": 0.5605298663,
"autogenerated": false,
"ratio": 3.4488155668358713,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0005834438242972598,
"num_lines": 216
} |
"""A modified copy of ProxyTypes 0.9 (https://pypi.io/project/ProxyTypes/)."""
"""
========== NOTICE OF MODIFICATION ==========
This version HAS BEEN MODIFIED from the original 'proxies.py' file by Luke Deen
Taylor. The original file was published on July 20, 2006.
Modifications made on July 18, 2016:
- Rewriting for compliance with the PEP 8 style guide
- Supporting for Python 3
- Moving from the old format syntax (%) to the newer .format() syntax.
Modifications made on July 19, 2016:
- Removing CallbackProxy, LazyProxy, CallbackWrapper, and LazyWrapper
- Removing use of __slots__ because of conflicts
- Renaming this file from proxies.py to proxytypes.py
Overall, these modifications serve as a clean-up and removal of classes I don't
need, rather than a change to the functionality or structure of the code that
remains after my removals.
=========== ORIGINAL AUTHORSHIP AND LICENSING ==========
ProxyTypes was originally written by Phillip J. Eby, and is ZPL licensed.
The ZPL is as follows:
Zope Public License (ZPL) Version 2.0
-----------------------------------------------
This software is Copyright (c) Zope Corporation (tm) and
Contributors. All rights reserved.
This license has been certified as open source. It has also
been designated as GPL compatible by the Free Software
Foundation (FSF).
Redistribution and use in source and binary forms, with or
without modification, are permitted provided that the
following conditions are met:
1. Redistributions in source code must retain the above
copyright notice, this list of conditions, and the following
disclaimer.
2. Redistributions in binary form must reproduce the above
copyright notice, this list of conditions, and the following
disclaimer in the documentation and/or other materials
provided with the distribution.
3. The name Zope Corporation (tm) must not be used to
endorse or promote products derived from this software
without prior written permission from Zope Corporation.
4. The right to distribute this software or to use it for
any purpose does not give you the right to use Servicemarks
(sm) or Trademarks (tm) of Zope Corporation. Use of them is
covered in a separate agreement (see
http://www.zope.com/Marks).
5. If any files are modified, you must cause the modified
files to carry prominent notices stating that you changed
the files and the date of any change.
Disclaimer
THIS SOFTWARE IS PROVIDED BY ZOPE CORPORATION ``AS IS''
AND ANY EXPRESSED OR IMPLIED WARRANTIES, INCLUDING, BUT
NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY
AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED. IN
NO EVENT SHALL ZOPE CORPORATION OR ITS CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE
OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS
SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH
DAMAGE.
This software consists of contributions made by Zope
Corporation and many individuals on behalf of Zope
Corporation. Specific attributions are listed in the
accompanying credits file.
"""
class AbstractProxy(object):
    """Delegates all operations (except ``.__subject__``) to another object.

    Subclasses decide how ``__subject__`` is stored/computed; everything else
    -- attribute access, indexing, calling, operators -- is forwarded to it.
    Many of the operator dunders below are generated at class-creation time
    with ``exec`` on format-string templates.
    """

    # Delegate getting, setting, and deleting attributes
    def __getattribute__(self, attr, oga=object.__getattribute__):
        subject = oga(self, "__subject__")
        if attr == "__subject__":
            return subject
        return getattr(subject, attr)

    def __setattr__(self, attr, val, osa=object.__setattr__):
        if attr == "__subject__":
            osa(self, attr, val)
        else:
            setattr(self.__subject__, attr, val)

    def __delattr__(self, attr, oda=object.__delattr__):
        if attr == "__subject__":
            oda(self, attr)
        else:
            delattr(self.__subject__, attr)

    # Delegate the getting, setting, and deleting of items with []
    def __getitem__(self, arg):
        return self.__subject__[arg]

    def __setitem__(self, arg, val):
        self.__subject__[arg] = val

    def __delitem__(self, arg):
        del self.__subject__[arg]

    # Delegate the getting, setting, and deleting of slices with []
    # NOTE(review): __getslice__/__setslice__/__delslice__ are Python 2 only;
    # Python 3 routes slicing through __getitem__ etc., so these are inert there.
    def __getslice__(self, i, j):
        return self.__subject__[i:j]

    def __setslice__(self, i, j, val):
        self.__subject__[i:j] = val

    def __delslice__(self, i, j):
        del self.__subject__[i:j]

    # Delegate calling
    def __call__(self, *args, **kwargs):
        return self.__subject__(*args, **kwargs)

    # Delegate true/false testing
    # NOTE(review): Python 3 uses __bool__, not __nonzero__ -- on Python 3 this
    # method is never consulted by truth testing.
    def __nonzero__(self):
        return bool(self.__subject__)

    # Delegate the 'in' operator
    def __contains__(self, ob):
        return ob in self.__subject__

    # Delegate magic methods with no arguments.  Each exec defines e.g.
    # ``def __repr__(self): return repr(self.__subject__)``.
    # NOTE(review): ``long`` does not exist on Python 3, and oct()/hex() no
    # longer dispatch to __oct__/__hex__ -- calling those would fail there.
    for name in ("repr", "str", "hash", "len", "abs", "complex", "int", "long",
                 "float", "iter", "oct", "hex"):
        exec(("def __{}__(self):"
              " return {}(self.__subject__)").format(name, name))
    # Two-argument builtins (Python 2 era: cmp/coerce are gone in Python 3).
    for name in "cmp", "coerce", "divmod":
        exec(("def __{}__(self, ob):"
              " return {}(self.__subject__, ob)").format(name, name))
    # Delegate comparison operators
    for name, operator in [
        ("lt", "<"), ("gt", ">"), ("le", "<="), ("ge", ">="),
        ("eq", "=="), ("ne", "!=")
    ]:
        exec(("def __{}__(self, ob):"
              " return self.__subject__ {} ob").format(name, operator))
    # Delegate unary operators
    for name, op in [("neg", "-"), ("pos", "+"), ("invert", "~")]:
        exec(("def __{}__(self):"
              " return {} self.__subject__").format(name, op))
    # Delegate arithmetic, bitwise, and shift operators: for each op this
    # defines the normal (__add__), reflected (__radd__), and in-place
    # (__iadd__) variants.
    for name, op in [
        ("or", "|"), ("and", "&"), ("xor", "^"),  # Bitwise operators
        ("lshift", "<<"), ("rshift", ">>"),  # Shift operators
        ("add", "+"), ("sub", "-"), ("mul", "*"), ("div", "/"),  # Arithmetic
        ("mod", "%"), ("truediv", "/"), ("floordiv", "//")  # Weird arithmetic
    ]:
        exec("\n".join([
            "def __{0}__(self, ob):",
            "    return self.__subject__ {1} ob",
            "def __r{0}__(self, ob):",
            "    return ob {1} self.__subject__",
            "def __i{0}__(self, ob):",
            "    self.__subject__ {1}= ob",
            "    return self"
        ]).format(name, op))
    del name, op

    # Oddball signatures
    def __rdivmod__(self, ob):
        return divmod(ob, self.__subject__)

    def __pow__(self, *args):
        # pow() takes an optional third (modulo) argument.
        return pow(self.__subject__, *args)

    def __ipow__(self, ob):
        self.__subject__ **= ob
        return self

    def __rpow__(self, ob):
        return pow(ob, self.__subject__)
class ObjectProxy(AbstractProxy):
    """Proxy whose target is a concrete object fixed at construction time."""

    def __init__(self, subject):
        # Store the target on the instance.  AbstractProxy.__setattr__ treats
        # "__subject__" specially and routes it to object.__setattr__, so this
        # is exactly equivalent to ``self.__subject__ = subject``.
        object.__setattr__(self, "__subject__", subject)
class AbstractWrapper(AbstractProxy):
    """Mixin to allow extra behaviors and attributes on proxy instance.

    Unlike AbstractProxy, non-dunder attributes defined on the wrapper class
    itself are served locally; only dunder lookups and unknown attributes fall
    through to the wrapped ``__subject__``.
    """

    def __getattribute__(self, attr, oga=object.__getattribute__):
        # Dunder attributes always come from the subject (except the
        # "__subject__" slot itself); everything else is looked up locally
        # first, falling back to __getattr__ below on AttributeError.
        if attr.startswith("__"):
            subject = oga(self, "__subject__")
            if attr == "__subject__":
                return subject
            return getattr(subject, attr)
        return oga(self, attr)

    def __getattr__(self, attr, oga=object.__getattribute__):
        # Only reached when the attribute is missing locally: delegate.
        return getattr(oga(self, "__subject__"), attr)

    def __setattr__(self, attr, val, osa=object.__setattr__):
        # Set locally when it's the subject slot or a non-dunder attribute
        # declared on the wrapper class; otherwise write through.
        if (
            attr == "__subject__" or
            hasattr(type(self), attr) and not
            attr.startswith("__")
        ):
            osa(self, attr, val)
        else:
            setattr(self.__subject__, attr, val)

    def __delattr__(self, attr, oda=object.__delattr__):
        # Mirror of __setattr__: delete locally or delegate.
        if (
            attr == "__subject__" or
            hasattr(type(self), attr) and not attr.startswith("__")
        ):
            oda(self, attr)
        else:
            delattr(self.__subject__, attr)
class ObjectWrapper(ObjectProxy, AbstractWrapper):
    """An ObjectProxy that also permits extra attributes and behavior of its
    own, courtesy of the AbstractWrapper mixin."""
| {
"repo_name": "controversial/ui2",
"path": "ui2/subclassing/proxytypes.py",
"copies": "1",
"size": "8322",
"license": "mit",
"hash": -4635998452837173000,
"line_mean": 31.6352941176,
"line_max": 79,
"alpha_frac": 0.6017784186,
"autogenerated": false,
"ratio": 4.051606621226874,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5153385039826874,
"avg_score": null,
"num_lines": null
} |
"""A modified image folder class
We modify the official PyTorch image folder (https://github.com/pytorch/vision/blob/master/torchvision/datasets/folder.py)
so that this class can load images from both current directory and its subdirectories.
"""
import torch.utils.data as data
from PIL import Image
import os
# File suffixes recognized as images (lower- and upper-case variants).
IMG_EXTENSIONS = [
    '.jpg', '.JPG', '.jpeg', '.JPEG',
    '.png', '.PNG', '.ppm', '.PPM', '.bmp', '.BMP',
    '.tif', '.TIF', '.tiff', '.TIFF',
]


def is_image_file(filename):
    """Return True if *filename* ends with a known image extension."""
    # str.endswith accepts a tuple of suffixes, so no explicit any() loop.
    return filename.endswith(tuple(IMG_EXTENSIONS))
def make_dataset(dir, max_dataset_size=float("inf")):
    """Collect image file paths under *dir*, including all subdirectories.

    Returns the paths sorted lexicographically, truncated to at most
    *max_dataset_size* entries.
    """
    assert os.path.isdir(dir), '%s is not a valid directory' % dir
    found = [
        os.path.join(root, fname)
        for root, _, fnames in sorted(os.walk(dir))
        for fname in fnames
        if is_image_file(fname)
    ]
    found.sort()
    return found[:min(max_dataset_size, len(found))]
def default_loader(path):
    """Open the image at *path* with PIL and force a 3-channel RGB image."""
    image = Image.open(path)
    return image.convert('RGB')
class ImageFolder(data.Dataset):
    """Dataset over every image found under a root directory (recursively)."""

    def __init__(self, root, transform=None, return_paths=False,
                 loader=default_loader):
        found = make_dataset(root)
        if not found:
            raise RuntimeError("Found 0 images in: " + root + "\n"
                               "Supported image extensions are: " +
                               ",".join(IMG_EXTENSIONS))
        self.root = root
        self.imgs = found
        self.transform = transform
        self.return_paths = return_paths
        self.loader = loader

    def __getitem__(self, index):
        """Load image *index*, apply the transform, optionally return its path."""
        path = self.imgs[index]
        sample = self.loader(path)
        if self.transform is not None:
            sample = self.transform(sample)
        return (sample, path) if self.return_paths else sample

    def __len__(self):
        """Number of images discovered at construction time."""
        return len(self.imgs)
| {
"repo_name": "google/retiming",
"path": "third_party/data/image_folder.py",
"copies": "1",
"size": "1913",
"license": "apache-2.0",
"hash": 316054085994140740,
"line_mean": 27.9848484848,
"line_max": 122,
"alpha_frac": 0.5969681129,
"autogenerated": false,
"ratio": 3.7145631067961165,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.48115312196961163,
"avg_score": null,
"num_lines": null
} |
# A modified main pdb debugger loop (see pdb.py in the Python library!)
from pdb import *
import sys,os,traceback
def main():
    # (Python 2 code.)  Run the script named on the command line under a
    # customized Pdb: linefeed prompt, and no restart loop on exit.
    mainpyfile = sys.argv[1] # Get script filename
    if not os.path.exists(mainpyfile):
        print 'Error:', mainpyfile, 'does not exist'
        sys.exit(1)
    del sys.argv[0] # Hide "pdb.py" from argument list
    # Replace pdb's dir with script's dir in front of module search path.
    sys.path[0] = os.path.dirname(mainpyfile)
    pdb = Pdb()
    # 1st customization: prompt w/ a line feed!
    pdb.prompt = '(PDB)\n'
    # 2nd customization: not an infinite loop!
    try:
        pdb._runscript(mainpyfile)
        if pdb._user_requested_quit:
            return
        print "The program finished and will not be restarted"
    except SystemExit:
        # In most cases SystemExit does not warrant a post-mortem session.
        print "The program exited via sys.exit(). Exit status: ",
        print sys.exc_info()[1]
    except:
        traceback.print_exc()
        print "Uncaught exception. Entering post mortem debugging"
        # Walk to the innermost frame of the traceback before starting
        # the post-mortem session there.
        t = sys.exc_info()[2]
        while t.tb_next is not None:
            t = t.tb_next
        pdb.interaction(t.tb_frame,t)
# When invoked as main program, invoke the debugger on a script
if __name__=='__main__':
    main()
    # under Windows, we need to run Python w/ the -i flag; this ensures that we die!
    sys.exit(0)
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "Lua/SciTE/scite-debug/xpdb.py",
"copies": "1",
"size": "1418",
"license": "mit",
"hash": 3361615781902957600,
"line_mean": 32.7619047619,
"line_max": 81,
"alpha_frac": 0.6269393512,
"autogenerated": false,
"ratio": 3.6927083333333335,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4819647684533333,
"avg_score": null,
"num_lines": null
} |
# A modified version of https://github.com/petermuehlbacher/diffusion-maps-algorithm/blob/master/diffusion%20maps.py
# A python implementation of the diffusion maps algorithm introduced by [Lafon](https://sites.google.com/site/stefansresearchpapers/home/dissertation.pdf).
import numpy as np
from numpy import linalg as LA
from PIL import Image
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from matplotlib.offsetbox import AnnotationBbox, OffsetImage
import os, math
newDim = 64
def normalize(arr):
arr=arr.astype('float32')
if arr.max() > 1.0:
arr/=255.0
return arr
def weightedAverage(pixel):
    """Return the greyscale intensity of an (R, G, B) ``pixel`` using the
    standard luma weights 0.299/0.587/0.114."""
    weights = (0.299, 0.587, 0.114)
    return sum(w * channel for w, channel in zip(weights, pixel))
def getImgData(path, preview=True):
    """Load every image in *path* as a normalized greyscale vector.

    Each file is resized to newDim x newDim, converted to greyscale via
    weightedAverage, scaled into [0, 1] by normalize, and flattened.

    Args:
        path: directory containing the image files (trailing separator
            expected, since file names are appended directly).
        preview: when True, display each greyscale image with matplotlib.

    Returns:
        List of 1-D numpy arrays, one per image file.
    """
    greyscale_images = []
    for entry in os.listdir(path):
        picture = Image.open(path + entry)
        picture = picture.resize((newDim, newDim))
        pixels = np.asarray(picture)
        # per-pixel luminance-weighted greyscale conversion
        grey = np.zeros((pixels.shape[0], pixels.shape[1]))
        for row in range(len(pixels)):
            for col in range(len(pixels[row])):
                grey[row][col] = weightedAverage(pixels[row][col])
        greyscale_images.append(normalize(grey))
    data = [image.flatten() for image in greyscale_images]
    if preview:
        for image in greyscale_images:
            plt.imshow(image, cmap=cm.Greys_r)
            plt.show()
    return data
def diffusionMapping(data, k, t, **kwargs):
    """Embed ``data`` with the diffusion-maps algorithm (Lafon).

    Args:
        data: iterable of sample vectors (anything ``np.array`` accepts).
        k: symmetric kernel function ``k(x, y) -> float``.
        t: diffusion time (power applied to the eigenvalues).

    Keyword Args:
        dim: explicit number of embedding dimensions.
        delta: accuracy threshold used to pick the dimension automatically
            (smallest ``i`` such that ``eigval[i]**t <= delta * eigval[1]**t``).

    Returns:
        Tuple ``(Psi, dataList)`` where ``Psi[x]`` is the list of embedding
        coordinates of sample ``x`` and ``dataList`` lists the samples in
        the order their indices refer to.

    Raises:
        KeyError: if neither ``dim`` nor ``delta`` is supplied.
        ValueError: if both are supplied but falsy.
    """
    # Bug fix: the original evaluated kwargs['dim'] unconditionally, so
    # calling with only delta=... raised KeyError.  Test key presence instead.
    if 'dim' not in kwargs and 'delta' not in kwargs:
        raise KeyError('specify either dim or delta as keyword argument!')
    dataList = []  # indices into this list serve as sample ids from now on
    for x in data:
        dataList.append(x)
    X = range(len(dataList))
    # Convert every sample to an array once, instead of re-converting inside
    # the O(n^2) kernel loops below.
    vectors = [np.array(sample) for sample in dataList]
    # construct Markov matrix: v[x] = sqrt(sum_y k(x, y)) is the kernel
    # normalization factor for sample x
    v = []
    for x in X:
        vx = 0
        for y in X:
            vx += k(vectors[x], vectors[y])
        v.append(math.sqrt(vx))
    a = []
    for x in X:
        a.append([])
        for y in X:
            a[x].append(k(vectors[x], vectors[y]) / (v[x] * v[y]))
    # compute eigenvectors of the symmetric matrix (a_ij)
    phi = []
    eigval, eigvec = LA.eigh(np.array(a))
    for i in range(len(eigvec)):
        phi.append(eigvec[:, i])
    # eigh returns ascending eigenvalues; reverse so index 0 is the largest
    eigval = eigval[::-1]
    phi.reverse()
    # determine the embedding dimension
    # (for better performance you may want to combine this with an iterative
    # way of computing eigenvalues/vectors)
    dim = kwargs.get('dim')
    if dim:
        embeddim = dim
    elif kwargs.get('delta'):
        i = 1
        while eigval[i] ** t > kwargs['delta'] * eigval[1] ** t:
            i += 1
        embeddim = i
    else:
        # both keywords present but falsy -- fail loudly instead of the
        # NameError the original produced
        raise ValueError('dim/delta must be positive')
    # compute embedding coordinates; the first eigenpair is constant and
    # therefore skipped
    Psi = []
    for x in X:
        Psi.append([])
        for j in range(embeddim):
            i = j + 1
            Psi[x].append((eigval[i] ** t) * phi[i][x] / v[x])
    return (Psi, dataList)
def plotDiffusionMap(data, showPlot=False):
    """Compute a 2-D diffusion-map embedding of *data* and optionally plot it.

    Args:
        data: iterable of sample vectors (e.g. flattened images).
        showPlot: when True, display an annotated matplotlib scatter plot
            of the embedding.

    Returns:
        Tuple ``(x, y)`` of numpy arrays holding the two embedding
        coordinates of every sample.
    """
    # removed the unused local `showImages` that the original declared
    # kernel scale 1024 is kept as the original's hard-coded constant
    coordinates, dataList = diffusionMapping(
        data, lambda x, y: math.exp(-LA.norm(x - y) / 1024), 1, dim=2)
    a = np.asarray(coordinates)
    x = a[:, 0]
    y = a[:, 1]
    if showPlot:
        fig, ax = plt.subplots()
        labels = ['image {0}'.format(i + 1) for i in range(len(x))]
        # annotate every point with its 1-based image index
        for label, xpt, ypt in zip(labels, x, y):
            plt.annotate(
                label,
                xy=(xpt, ypt), xytext=(-20, 20),
                textcoords='offset points', ha='right', va='bottom',
                bbox=dict(boxstyle='round,pad=0.5', fc='white', alpha=0.5),
                arrowprops=dict(arrowstyle='->', connectionstyle='arc3,rad=0'))
        ax.plot(x, y, 'ro')
        plt.show()
    return x, y
| {
"repo_name": "govinda-kamath/clustering_on_transcript_compatibility_counts",
"path": "Trapnell_pipeline/diffusion_maps.py",
"copies": "1",
"size": "3935",
"license": "mit",
"hash": 272965916727974600,
"line_mean": 29.503875969,
"line_max": 155,
"alpha_frac": 0.5730622618,
"autogenerated": false,
"ratio": 3.4517543859649122,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9381062612758547,
"avg_score": 0.02875080700127292,
"num_lines": 129
} |
# A modified version of the implementation from the following paper:
# TENER: Adapting Transformer Encoder for Named Entity Recognition
# Hang Yan, Bocao Deng, Xiaonan Li, Xipeng Qiu
import math
import torch
import torch.nn.functional as F
from torch import Tensor, nn
from hanlp.common.structure import ConfigTracker
class RelativeSinusoidalPositionalEmbedding(nn.Module):
    """This module produces sinusoidal positional embeddings of any length.
    Padding symbols are ignored.

    The table covers *relative* offsets (rows run from roughly
    -num_embeddings/2 to num_embeddings/2); ``origin_shift`` holds the row
    index corresponding to offset 0.

    Args:
        embedding_dim: embedding size of each position
        padding_idx: row index zeroed out as padding
        init_size: number of relative positions precomputed up front
    """
    def __init__(self, embedding_dim, padding_idx, init_size=1024):
        super().__init__()
        self.embedding_dim = embedding_dim
        self.padding_idx = padding_idx
        assert init_size % 2 == 0
        weights = self.get_embedding(
            init_size + 1,
            embedding_dim,
            padding_idx,
        )
        # registered as a buffer: moves with .to()/.cuda() but is not trained
        self.register_buffer('weights', weights)
    def get_embedding(self, num_embeddings, embedding_dim, padding_idx=None):
        """Build sinusoidal embeddings.
        This matches the implementation in tensor2tensor, but differs slightly
        from the description in Section 3.5 of "Attention Is All You Need".

        Also sets ``self.origin_shift`` (row index of offset 0) as a side
        effect.

        Args:
            num_embeddings: number of relative positions to cover
            embedding_dim: size of each embedding vector
            padding_idx: row to zero out, if given (Default value = None)

        Returns:
            Tensor of shape (num_embeddings, embedding_dim)
        """
        half_dim = embedding_dim // 2
        emb = math.log(10000) / (half_dim - 1)
        emb = torch.exp(torch.arange(half_dim, dtype=torch.float) * -emb)
        # positions run from -num_embeddings/2 up to num_embeddings/2 - 1
        emb = torch.arange(-num_embeddings // 2, num_embeddings // 2, dtype=torch.float).unsqueeze(1) * emb.unsqueeze(0)
        emb = torch.cat([torch.sin(emb), torch.cos(emb)], dim=1).view(num_embeddings, -1)
        if embedding_dim % 2 == 1:
            # zero pad the odd dimension
            emb = torch.cat([emb, torch.zeros(num_embeddings, 1)], dim=1)
        if padding_idx is not None:
            emb[padding_idx, :] = 0
        self.origin_shift = num_embeddings // 2 + 1
        return emb
    def forward(self, inputs: Tensor):
        """Input is expected to be of size [bsz x seqlen].

        Args:
            inputs: Tensor: [bsz x seqlen], used only for its size and device

        Returns:
            Tensor of shape (2 * seq_len, embedding_dim) holding embeddings
            for relative offsets -seq_len .. seq_len - 1
        """
        bsz, seq_len = inputs.size()
        max_pos = self.padding_idx + seq_len
        if max_pos > self.origin_shift:
            # recompute/expand embeddings if the sequence outgrew the table
            weights = self.get_embedding(
                max_pos * 2,
                self.embedding_dim,
                self.padding_idx,
            )
            weights = weights.to(self.weights.device)
            del self.weights
            # NOTE(review): get_embedding just set origin_shift to
            # num_embeddings // 2 + 1, but this overwrites it with
            # size // 2 (no +1) -- confirm the off-by-one is intentional
            self.origin_shift = weights.size(0) // 2
            self.register_buffer('weights', weights)
        positions = torch.arange(-seq_len, seq_len).to(inputs.device).long() + self.origin_shift  # 2*seq_len
        embed = self.weights.index_select(0, positions.long()).detach()
        return embed
class RelativeMultiHeadAttn(nn.Module):
    """Multi-head self-attention with Transformer-XL style relative-position
    biases (attention scores decomposed into content and position terms)."""
    def __init__(self, in_features, num_heads, dropout, r_w_bias=None, r_r_bias=None, init_seq_length=1024,
                 k_as_x=True):
        """
        Args:
            in_features: model dimension (must be divisible by num_heads)
            num_heads: number of attention heads
            dropout: dropout probability on the attention weights
            r_w_bias: n_head x head_dim or None
            r_r_bias: n_head x head_dim or None
            init_seq_length: initial size of the relative-position table
            k_as_x: reuse the raw input as keys (no key projection)
        """
        super().__init__()
        self.k_as_x = k_as_x
        if k_as_x:
            # keys come straight from x, so only queries and values are projected
            self.qv_linear = nn.Linear(in_features, in_features * 2, bias=False)
        else:
            self.qkv_linear = nn.Linear(in_features, in_features * 3, bias=False)
        self.n_head = num_heads
        self.head_dim = in_features // num_heads
        self.dropout_layer = nn.Dropout(dropout)
        self.pos_embed = RelativeSinusoidalPositionalEmbedding(self.head_dim, 0, init_seq_length)
        if r_r_bias is None or r_w_bias is None:  # Biases are not shared
            self.r_r_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, in_features // num_heads)))
            self.r_w_bias = nn.Parameter(nn.init.xavier_normal_(torch.zeros(num_heads, in_features // num_heads)))
        else:
            self.r_r_bias = r_r_bias  # r_r_bias is the "v" bias (Transformer-XL naming)
            self.r_w_bias = r_w_bias  # r_w_bias is the "u" bias (Transformer-XL naming)
    def forward(self, x, mask):
        """
        Args:
            x: batch_size x max_len x d_model
            mask: batch_size x max_len (0 marks padding positions)

        Returns:
            batch_size x max_len x d_model attention output
        """
        batch_size, max_len, d_model = x.size()
        pos_embed = self.pos_embed(mask)  # l x head_dim
        if self.k_as_x:
            qv = self.qv_linear(x)  # batch_size x max_len x d_model2
            q, v = torch.chunk(qv, chunks=2, dim=-1)
            k = x.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
        else:
            qkv = self.qkv_linear(x)  # batch_size x max_len x d_model3
            q, k, v = torch.chunk(qkv, chunks=3, dim=-1)
            k = k.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
        q = q.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)
        v = v.view(batch_size, max_len, self.n_head, -1).transpose(1, 2)  # b x n x l x d
        # NOTE(review): the content term adds r_r_bias ("v") while the
        # position term D_ uses r_w_bias ("u") -- opposite of Transformer-XL's
        # naming; appears deliberate in this variant, confirm upstream
        rw_head_q = q + self.r_r_bias[:, None]
        AC = torch.einsum('bnqd,bnkd->bnqk', [rw_head_q, k])  # b x n x l x d, n is the head axis
        D_ = torch.einsum('nd,ld->nl', self.r_w_bias, pos_embed)[None, :, None]  # head x 2max_len, per-head bias for each relative position
        B_ = torch.einsum('bnqd,ld->bnql', q, pos_embed)  # bsz x head x max_len x 2max_len, each query's score against every shift
        E_ = torch.einsum('bnqd,ld->bnql', k, pos_embed)  # bsz x head x max_len x 2max_len, each key's bias for the relative positions
        BD = B_ + D_  # bsz x head x max_len x 2max_len, still shift-indexed; must become bsz x head x max_len x max_len
        if self.k_as_x:
            BD = self._shift(BD)
            attn = AC + BD
        else:
            BDE = self._shift(BD) + self._transpose_shift(E_)
            attn = AC + BDE
        # mask out padding keys before the softmax
        attn = attn.masked_fill(mask[:, None, None, :].eq(0), float('-inf'))
        attn = F.softmax(attn, dim=-1)
        attn = self.dropout_layer(attn)
        v = torch.matmul(attn, v).transpose(1, 2).reshape(batch_size, max_len, d_model)  # b x n x l x d
        return v
    def _shift(self, BD):
        """Re-index shift-indexed scores by key position, e.g. convert

            -3 -2 -1 0 1 2
            -3 -2 -1 0 1 2
            -3 -2 -1 0 1 2

        into

            0   1  2
            -1  0  1
            -2 -1  0

        Args:
            BD: batch_size x n_head x max_len x 2max_len

        Returns:
            batch_size x n_head x max_len x max_len
        """
        bsz, n_head, max_len, _ = BD.size()
        zero_pad = BD.new_zeros(bsz, n_head, max_len, 1)
        BD = torch.cat([BD, zero_pad], dim=-1).view(bsz, n_head, -1, max_len)  # bsz x n_head x (2max_len+1) x max_len
        BD = BD.narrow(dim=2, start=0, length=2 * max_len) \
            .view(bsz, n_head, max_len, -1)  # bsz x n_head x 2max_len x max_len
        BD = BD.narrow(dim=-1, start=max_len, length=max_len)
        return BD
    def _transpose_shift(self, E):
        """Re-index and transpose shift-indexed scores, e.g. convert

            -3   -2   -1   0   1   2
            -30  -20  -10  00  10  20
            -300 -200 -100 000 100 200

        into

            0 -10 -200
            1  00 -100
            2  10  000

        Args:
            E: batch_size x n_head x max_len x 2max_len

        Returns:
            batch_size x n_head x max_len x max_len
        """
        bsz, n_head, max_len, _ = E.size()
        zero_pad = E.new_zeros(bsz, n_head, max_len, 1)
        # bsz x n_head x -1 x (max_len+1)
        E = torch.cat([E, zero_pad], dim=-1).view(bsz, n_head, -1, max_len)
        indice = (torch.arange(max_len) * 2 + 1).to(E.device)
        E = E.index_select(index=indice, dim=-2).transpose(-1, -2)  # bsz x n_head x max_len x max_len
        return E
class RelativeTransformerLayer(nn.Module):
    """One transformer encoder layer: relative-position self-attention plus a
    position-wise feed-forward network, each wrapped in a residual connection
    with configurable pre-/post-layer-norm."""
    def __init__(self,
                 in_features,
                 num_heads=4,
                 feedforward_dim=256,
                 dropout=0.2,
                 dropout_attn=None,
                 after_norm=True,
                 k_as_x=True,
                 init_seq_length=1024):
        """
        Args:
            in_features: model dimension
            num_heads: number of attention heads
            feedforward_dim: hidden size of the position-wise FFN
            dropout: dropout inside the FFN (and for attention by default)
            dropout_attn: attention dropout; falls back to ``dropout`` if None
            after_norm: True = post-norm (norm after the residual add),
                False = pre-norm (norm before the sub-layer)
            k_as_x: forwarded to RelativeMultiHeadAttn
            init_seq_length: initial relative-position table size
        """
        super().__init__()
        if dropout_attn is None:
            dropout_attn = dropout
        self.after_norm = after_norm
        self.norm1 = nn.LayerNorm(in_features)
        self.norm2 = nn.LayerNorm(in_features)
        self.self_attn = RelativeMultiHeadAttn(in_features,
                                               num_heads,
                                               dropout=dropout_attn,
                                               init_seq_length=init_seq_length,
                                               k_as_x=k_as_x)
        self.ffn = nn.Sequential(nn.Linear(in_features, feedforward_dim),
                                 nn.LeakyReLU(),
                                 nn.Dropout(dropout, inplace=True),
                                 nn.Linear(feedforward_dim, in_features),
                                 nn.Dropout(dropout, inplace=True))
    def forward(self, x, mask):
        """
        Args:
            x: batch_size x max_len x hidden_size
            mask: batch_size x max_len; positions equal to 0 are padding

        Returns:
            batch_size x max_len x hidden_size
        """
        # attention sub-layer with residual connection
        residual = x
        if not self.after_norm:
            x = self.norm1(x)
        x = self.self_attn(x, mask)
        x = x + residual
        if self.after_norm:
            x = self.norm1(x)
        # feed-forward sub-layer with residual connection
        residual = x
        if not self.after_norm:
            x = self.norm2(x)
        x = self.ffn(x)
        x = residual + x
        if self.after_norm:
            x = self.norm2(x)
        return x
class RelativeTransformer(nn.Module):
    """Stack of ``num_layers`` RelativeTransformerLayer blocks.

    NOTE(review): the layers are constructed with positional arguments in the
    order (in_features, feedforward_dim, num_heads, ...), while
    RelativeTransformerLayer declares (in_features, num_heads,
    feedforward_dim, ...) -- so the two values arrive swapped when this class
    is called directly.  RelativeTransformerEncoder below passes its own
    arguments swapped the same way, which cancels the mix-up; confirm before
    changing either call site.
    """
    def __init__(self,
                 in_features,
                 num_layers,
                 feedforward_dim,
                 num_heads,
                 dropout,
                 dropout_attn=None,
                 after_norm=True,
                 init_seq_length=1024,
                 k_as_x=True):
        super().__init__()
        self.layers = nn.ModuleList([
            RelativeTransformerLayer(in_features, feedforward_dim, num_heads, dropout, dropout_attn, after_norm,
                                     init_seq_length=init_seq_length, k_as_x=k_as_x)
            for _ in range(num_layers)
        ])
    def forward(self, x: Tensor, mask: Tensor):
        """
        Args:
            x: batch_size x max_len x hidden_size input
            mask: batch_size x max_len; positions holding a value are 1,
                padding positions are 0
            x: Tensor:
            mask: Tensor:

        Returns:
            batch_size x max_len x hidden_size output of the final layer
        """
        for layer in self.layers:
            x = layer(x, mask)
        return x
class RelativeTransformerEncoder(RelativeTransformer, ConfigTracker):
    """Config-tracking wrapper around RelativeTransformer.

    NOTE(review): super().__init__ is called with (num_heads,
    feedforward_dim) in the opposite order of RelativeTransformer's
    (feedforward_dim, num_heads) parameters.  This mirrors -- and thereby
    cancels -- the swapped positional call inside RelativeTransformer
    itself, so the values end up in the right slots end-to-end; confirm
    before "fixing" either call site in isolation.
    """
    def __init__(self,
                 in_features,
                 num_layers=2,
                 num_heads=4,
                 feedforward_dim=256,
                 dropout=0.1,
                 dropout_attn=0.1,
                 after_norm=True,
                 k_as_x=True,
                 ):
        super().__init__(in_features, num_layers, num_heads, feedforward_dim, dropout, dropout_attn, after_norm)
        # ConfigTracker records the constructor arguments (for serialization)
        ConfigTracker.__init__(self, locals())
    def get_output_dim(self):
        """Return the size of the last dimension of forward()'s output."""
        return self.config['in_features']
| {
"repo_name": "hankcs/HanLP",
"path": "hanlp/layers/transformers/relative_transformer.py",
"copies": "1",
"size": "11300",
"license": "apache-2.0",
"hash": 8012631892058092000,
"line_mean": 33.1707317073,
"line_max": 120,
"alpha_frac": 0.5210563883,
"autogenerated": false,
"ratio": 3.352677236015555,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4373733624315555,
"avg_score": null,
"num_lines": null
} |
"""A module containg the JavaLikeScanner class."""
class JavaLikeScanner(object):
    """
    A class which allows a given string to be scanned through and broken up into various tokens.

    Tokens are maximal runs of characters separated by spaces, tabs and
    newlines, mirroring java.util.Scanner's default delimiters.
    """
    def __init__(self, contents):
        """
        Create the scanner and initialize its contents.

        :param str contents: The contents of the scanner.
        """
        self.contents = contents
    def __get_token(self):
        """
        Find and return the next token and its pre-delimiters if it has any. If there is no next token,
        then return None.  The scanner contents are left untouched (peek only).

        :returns: The next token and its pre-delimiters as a dictionary.
        """
        token_info = {'token': "", 'pre-delimiter': ""}
        # If the scanner has contents, then look for the next token
        if len(self.contents) > 0:
            # Check over each character in the scanner until a token is found, or the end of the scanner is
            # reached
            for character in self.contents:
                if character != " " and character != "\n" and character != "\t":
                    # If the character is not a delimiter, then add it to the token
                    token_info['token'] = token_info['token'] + character
                else:
                    if len(token_info['token']) == 0:
                        # If a token character hasn't been found yet, then the delimiter must be a pre-delimiter
                        token_info['pre-delimiter'] = token_info['pre-delimiter'] + character
                    else:
                        # Since the next delimiter has been reached after the token, then break to return the token
                        break
        # If a token was found, then return the token and pre-delimiters
        if token_info['token'] != "":
            return token_info
        # Since no token was found, return None
        return None
    def __consume(self, token_info):
        """
        Remove a previously peeked token and its pre-delimiters from the scanner.

        :param dict token_info: a dictionary as returned by __get_token
        """
        size = len(token_info['pre-delimiter']) + len(token_info['token'])
        self.contents = self.contents[size:]
    def __peek_int(self):
        """
        Peek the next token and attempt to parse it as an integer.

        :returns: A (token_info, value) pair, or None if there is no next
                  token or the next token is not an integer.
        """
        token_info = self.__get_token()
        if token_info is None:
            return None
        try:
            return (token_info, int(token_info['token']))
        except ValueError:
            return None
    def has_next(self):
        """
        Return whether or not there is a valid next token in the scanner or not.

        :returns: Whether or not there is a next token in the scanner as a boolean.
        """
        return self.__get_token() is not None
    def next(self):
        """
        Return the next token in the scanner and remove that token from the scanner.
        Returns None if there is no next token in the scanner.

        :returns: The next token in the scanner as a string.
        """
        # peek once and consume, instead of scanning the contents twice as
        # the original has_next()/__get_token() pair did
        token_info = self.__get_token()
        if token_info is None:
            return None
        self.__consume(token_info)
        return token_info['token']
    def has_next_line(self):
        """
        Return whether or not there is a next line in the scanner.

        :returns: Whether or not there is a next line in the scanner as a boolean.
        """
        return self.contents != ""
    def next_line(self):
        """
        Return the next line in the scanner and remove that line from the scanner.
        Returns None if there is not a next line in the scanner.

        :returns: The next line in the scanner as a string.
        """
        if not self.has_next_line():
            return None
        # partition() splits on the first newline; the delimiter is excluded
        # from the returned line but removed from the contents as well
        line, delimiter, _ = self.contents.partition("\n")
        self.contents = self.contents[len(line) + len(delimiter):]
        return line
    def has_next_int(self):
        """
        Return whether the next token in the scanner is an integer or not.

        :returns: Whether or not the next token in the scanner is an integer as a boolean.
        """
        return self.__peek_int() is not None
    def next_int(self):
        """
        Return the next integer in the scanner and remove that integer from the scanner.
        Returns None if there is not a next token in the scanner, or if the next token in the scanner is
        not an integer.

        :returns: The next integer in the scanner as an integer.
        """
        # parse once instead of the original's triple scan
        # (has_next_int + __get_token + int)
        peeked = self.__peek_int()
        if peeked is None:
            return None
        token_info, value = peeked
        self.__consume(token_info)
        return value
| {
"repo_name": "ExcaliburZero/javalikescanner",
"path": "javalikescanner/javalikescanner.py",
"copies": "1",
"size": "4414",
"license": "mit",
"hash": 8343071674538951000,
"line_mean": 28.0394736842,
"line_max": 98,
"alpha_frac": 0.6746714998,
"autogenerated": false,
"ratio": 3.5482315112540195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47229030110540193,
"avg_score": null,
"num_lines": null
} |
"""A module containing a canonicalized game"""
import functools
import numpy as np
from gameanalysis import gamereader
from gameanalysis import rsgame
from gameanalysis import utils
# TODO There's an issue here where incomplete payoffs for single strategy roles
# contribute to incomplete profiles. There's not an obvious way to remedy this
# with the current api in a way that works well.
class _CanonGame(rsgame._RsGame): # pylint: disable=protected-access
    """A game canonicalized to remove single strategy roles

    Wraps another game, hiding every role that has only one strategy.
    Profiles and mixtures are translated back and forth by inserting the
    removed roles' fixed player counts / pure strategies via np.insert.
    """
    def __init__(self, game):
        # roles keeping more than one strategy survive canonicalization
        role_mask = game.num_role_strats > 1
        super().__init__(
            tuple(r for r, m in zip(game.role_names, role_mask) if m),
            tuple(s for s, m in zip(game.strat_names, role_mask) if m),
            game.num_role_players[role_mask])
        self._game = game
        # player counts of the removed (single-strategy) roles
        self._players = game.num_role_players[~role_mask]
        # insertion indices used to splice removed-role data back into
        # full-game profiles/mixtures (see the np.insert calls below)
        self._inds = np.cumsum(role_mask * game.num_role_strats)[~role_mask]
        # per-strategy boolean mask selecting the surviving columns
        self._mask = role_mask.repeat(game.num_role_strats)
    @property
    def num_complete_profiles(self):
        """Get the number of profiles with full data"""
        return self._game.num_complete_profiles
    @property
    def num_profiles(self):
        """Get the number of profiles"""
        return self._game.num_profiles
    # NOTE(review): lru_cache on instance methods keys on self and keeps the
    # instance alive for the cache's lifetime; kept as-is to preserve the
    # existing caching semantics
    @functools.lru_cache(maxsize=1)
    def profiles(self):
        """Get all profiles with any payoff data"""
        return self._game.profiles()[:, self._mask]
    @functools.lru_cache(maxsize=1)
    def payoffs(self):
        """Get all payoffs parallel with profiles()"""
        return self._game.payoffs()[:, self._mask]
    def deviation_payoffs(self, mixture, *, jacobian=False, **kw):
        """Get the deviation payoffs for a mixture"""
        # removed single-strategy roles always play their lone strategy (1.0)
        unmix = np.insert(mixture, self._inds, 1.0)
        if not jacobian:
            return self._game.deviation_payoffs(unmix, **kw)[self._mask]
        dev, jac = self._game.deviation_payoffs(unmix, jacobian=True, **kw)
        return dev[self._mask], jac[self._mask][:, self._mask]
    def get_payoffs(self, profiles):
        """Get the payoffs for a profile or profiles"""
        # splice the fixed player counts of the hidden roles back in
        unprofs = np.insert(profiles, self._inds, self._players, -1)
        return self._game.get_payoffs(unprofs)[..., self._mask]
    @utils.memoize
    def max_strat_payoffs(self):
        """Get the maximum strategy payoffs"""
        return self._game.max_strat_payoffs()[self._mask]
    @utils.memoize
    def min_strat_payoffs(self):
        """Get the minimum strategy payoffs"""
        return self._game.min_strat_payoffs()[self._mask]
    def restrict(self, restriction):
        """Restrict viable strategies for a canon game"""
        # hidden roles keep their single strategy, hence True
        unrest = np.insert(restriction, self._inds, True)
        return _CanonGame(self._game.restrict(unrest))
    def _add_constant(self, constant):
        """Add a constant to a canon game"""
        return _CanonGame(self._game + constant)
    def _multiply_constant(self, constant):
        """Multiply canon game payoffs by a constant"""
        return _CanonGame(self._game * constant)
    def _add_game(self, _):
        """Add another game to canon game (unsupported)"""
        return NotImplemented
    def to_json(self):
        """Convert canon game to json object"""
        base = super().to_json()
        base['game'] = self._game.to_json()
        base['type'] = 'canon.1'
        return base
    def __contains__(self, profile):
        # membership is delegated after restoring the hidden roles
        unprof = np.insert(profile, self._inds, self._players, -1)
        return unprof in self._game
    def __eq__(self, othr):
        # pylint: disable-msg=protected-access
        return super().__eq__(othr) and self._game == othr._game
    def __hash__(self):
        return hash((super().__hash__(), self._game))
    def __repr__(self):
        return '{}, {:d} / {:d})'.format(
            super().__repr__()[:-1], self.num_profiles, self.num_all_profiles)
def canon(game):
    """Canonicalize a game by removing single strategy roles

    Parameters
    ----------
    game : RsGame
        The game to canonicalize.

    Returns
    -------
    _CanonGame
        A view of ``game`` without its single-strategy roles.
    """
    return _CanonGame(game)
def canon_json(jgame):
    """Read a canonicalized game from json

    Expects the format produced by ``_CanonGame.to_json``; the nested
    ``'game'`` entry is parsed with ``gamereader.loadj``.
    """
    return canon(gamereader.loadj(jgame['game']))
| {
"repo_name": "egtaonline/GameAnalysis",
"path": "gameanalysis/canongame.py",
"copies": "1",
"size": "4228",
"license": "apache-2.0",
"hash": -5540206288041642000,
"line_mean": 32.824,
"line_max": 79,
"alpha_frac": 0.6199148534,
"autogenerated": false,
"ratio": 3.657439446366782,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9776774589621855,
"avg_score": 0.00011594202898550725,
"num_lines": 125
} |
'''A module containing a class for representing and manipulating
creature information from the Pathfinder RPG'''
import re
import string
__all__ = ['Creature']
# The six standard Pathfinder ability scores, in conventional stat-block order.
ABILITIES = ['Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha']
# Stat-block section headers and field labels found in creature entries.
# NOTE(review): presumably consumed by parsing code elsewhere in this
# package -- confirm against the callers.
ATTRIBUTES = [
    'DEFENSE', 'hp', 'AC', 'touch', 'flat-footed',
    'Fort', 'Ref', 'Will', 'Defensive', 'DR', 'Resist', 'Immune',
    'STATISTICS', 'Base'
]
class Creature(object):
    '''Class representing a Creature from the Pathfinder RPG'''
    def __init__(self):
        '''Initialize every stat to its '0' (or empty) placeholder value.'''
        # identification
        self.name = ''
        self.cr = '0'
        self.mr = '0'
        # defenses
        self.hp = '0'
        self.hd = '0'
        self.ac = dict.fromkeys(('AC', 'touch', 'flat-footed'), '0')
        self.saves = dict.fromkeys(('Fort', 'Ref', 'Will'), '0')
        # statistics
        self.ability_scores = dict.fromkeys(
            ('Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha'), '0')
        self.bab = '0'
        self.cmb = '0'
        self.cmd = '0'
    def __repr__(self):
        '''Return a compact debug representation of this creature.'''
        fields = (
            self.cr, self.name, '\n',
            self.hp, self.hd, str(self.ac), str(self.saves), '\n',
            str(self.ability_scores), self.bab, self.cmb, self.cmd,
        )
        return ' '.join(fields)
    def __str__(self):
        '''Return a human-readable stat block for this creature.'''
        sections = [
            [self.cr, self.name],
            ['hp', self.hp, 'HD', self.hd],
            ['AC', self.ac['AC'],
             'touch', self.ac['touch'],
             'flat-footed', self.ac['flat-footed']],
            ['Fort', self.saves['Fort'],
             'Ref', self.saves['Ref'],
             'Will', self.saves['Will']],
            ['Str', self.ability_scores['Str'],
             'Dex', self.ability_scores['Dex'],
             'Con', self.ability_scores['Con'],
             'Int', self.ability_scores['Int'],
             'Wis', self.ability_scores['Wis'],
             'Cha', self.ability_scores['Cha']],
            ['BAB', self.bab, 'CMB', self.cmb, 'CMD', self.cmd],
        ]
        pieces = []
        for section in sections:
            pieces.extend(section)
            pieces.append('\n')
        # the stat block terminates with a double newline
        pieces[-1] = '\n\n'
        return ' '.join(pieces)
| {
"repo_name": "lot9s/pathfinder-rpg-utils",
"path": "data-mining/bestiary/core/creature.py",
"copies": "1",
"size": "2074",
"license": "mit",
"hash": -5442362062639989000,
"line_mean": 28.6285714286,
"line_max": 66,
"alpha_frac": 0.4527483124,
"autogenerated": false,
"ratio": 3.1956856702619416,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4148433982661942,
"avg_score": null,
"num_lines": null
} |
'''A module containing a class for representing a Pathfinder RPG character.'''
import json
from pf_class import PFClass
from pf_class_instance import PFClassInstance
__all__ = ['PFCharacter']
# Directory holding one JSON definition file per character class.
CLASS_DIR = "res/json/class/"
# Extension appended to a class name to form its JSON file path.
JSON_EXTENSION = ".json"
class PFCharacter(object):
    '''Class representing a character from the Pathfinder RPG'''
    def __init__(self, file_name):
        '''Constructs PFCharacter objects

        :param file_name: path to the JSON file for this character
        '''
        # a context manager guarantees the handle is closed; the original
        # leaked the file object returned by open()
        with open(file_name, 'r') as json_file:
            json_dict = json.load(json_file)
        # -populate class members-
        self.name = json_dict['name']
        # This dictionary represents a character's investment in each of its
        # classes. Each entry is a PFClassInstance object keyed by class name
        self.classes = {}
        # add all classes associated with this character
        for class_name in json_dict['classes'].keys():
            class_file = CLASS_DIR + class_name + JSON_EXTENSION
            class_level = json_dict['classes'][class_name]
            self.__add_class(class_file, class_level)
    def __repr__(self):
        values = [self.name, '\n']
        return ' '.join(values)
    def __str__(self):
        values = [self.name, '\n']
        # add class information to string representation
        # bug fix: self.classes holds PFClassInstance objects, not tuples,
        # so use attribute access (the original indexed item[0]/item[1],
        # which raised TypeError; get_template_values below shows the
        # attribute-based access is the correct one)
        for key in self.classes.keys():
            instance = self.classes[key]
            values.extend([instance.name, str(instance.level)])
        return ' '.join(values)
    def __add_class(self, file_name, level=1):
        '''Associates this character with a new class

        This method will do nothing if the class associated with the provided
        file name has already been added to this character.

        :param file_name: path to the json file for the desired class
        :param level: number of levels of this class to associate with character
        '''
        new_class = PFClass(file_name)
        # update the self.classes dictionary if character does not have levels
        # in this class; .values() (instead of the Python-2-only
        # .itervalues()) behaves the same on Python 2 and also works on 3
        if not any(x.name == new_class.name for x in self.classes.values()):
            new_entry = PFClassInstance(new_class, level)
            self.classes[new_class.name] = new_entry
    def get_template_values(self):
        '''Retrieves a dictionary of values for use with string.Template

        :returns: dictionary of values for use with string.Template
        '''
        template_vals = {'name': self.name, 'NAME': self.name.upper()}
        # iterate over classes dictionary to populate template_vals dictionary
        display_classes = []
        for i, key in enumerate(self.classes.keys()):
            # store some values for convenience
            idx = str(i + 1)
            prefix = 'class' + idx
            instance = self.classes[key]
            # -add class-related values-
            template_vals[prefix] = instance.name
            template_vals[prefix + '_level'] = instance.level
            # add defense-related values
            template_vals[prefix + '_hit_die'] = instance.hit_die
            template_vals[prefix + '_hit_points'] = instance.hit_points
            template_vals[prefix + '_fort'] = instance.saves['Fort']
            template_vals[prefix + '_ref'] = instance.saves['Ref']
            template_vals[prefix + '_will'] = instance.saves['Will']
            # add offense-related values
            template_vals[prefix + '_bab'] = instance.bab
            # collect LaTeX display-related values
            display_classes.append(instance.name + ' ' + str(instance.level))
        # add LaTeX display-related values to template_vals dictionary
        # NOTE(review): a character with no classes raises IndexError here --
        # original behavior, kept as-is
        if len(self.classes.keys()) > 1:
            template_vals['display_classes'] = ", ".join(display_classes)
        else:
            template_vals['display_classes'] = display_classes[0]
        return template_vals
| {
"repo_name": "lot9s/pathfinder-rpg-utils",
"path": "character-sheets/core/pf_character.py",
"copies": "1",
"size": "3958",
"license": "mit",
"hash": 5267108699212380000,
"line_mean": 36.3396226415,
"line_max": 80,
"alpha_frac": 0.5851440121,
"autogenerated": false,
"ratio": 4.392896781354051,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5478040793454051,
"avg_score": null,
"num_lines": null
} |
'''A module containing a class for representing a Pathfinder RPG
character\'s investment in a particular character class'''
from pf_class import PFClass
__all__ = ['PFClassInstance']
class PFClassInstance(object):
    '''Class representing a Pathfinder RPG character\'s investment in a
    particular character class'''
    def __init__(self, _class, _level=1):
        '''Constructs PFClassInstance objects

        :param _class: a PFClass object (must expose name, hit_die,
                       base_attack and saves attributes)
        :param _level: number of levels invested in class, defaults to 1
        '''
        self.__class = _class
        self.name = _class.name
        self.level = _level
        # calculate hit points
        self.hit_points = 0
        self.hit_die = _class.hit_die
        self.__calculate_hit_points()
        # calculate base attack bonus
        self.bab = _class.base_attack * self.level
        # calculate saving throws
        self.saves = {}
        self.__calculate_saving_throws()
    def __calculate_bad_saving_throw(self):
        '''Return the save bonus of a poor progression: level / 3.'''
        # explicit floor division: identical to / on ints under Python 2,
        # and keeps the bonus an int under Python 3 (plain / would yield a
        # float there)
        return self.level // 3
    def __calculate_good_saving_throw(self):
        '''Return the save bonus of a good progression: 2 + level / 2.'''
        return 2 + (self.level // 2)
    def __calculate_hit_points(self):
        '''Compute hit points: full hit die at level 1, average + 1 per
        additional level.'''
        if self.level == 1:
            self.hit_points = self.hit_die
        else:
            hp_per_level = (self.hit_die // 2) + 1
            self.hit_points = self.hit_die + (hp_per_level * (self.level - 1))
    def __calculate_saving_throws(self):
        '''Fill self.saves from the class's save flags (1 marks a good
        progression, anything else a poor one).'''
        for key in self.__class.saves.keys():
            if self.__class.saves[key] == 1:
                self.saves[key] = self.__calculate_good_saving_throw()
            else:
                self.saves[key] = self.__calculate_bad_saving_throw()
| {
"repo_name": "lot9s/pathfinder-rpg-utils",
"path": "character-sheets/core/pf_class_instance.py",
"copies": "1",
"size": "1723",
"license": "mit",
"hash": -2548139484598539300,
"line_mean": 29.2280701754,
"line_max": 78,
"alpha_frac": 0.5705165409,
"autogenerated": false,
"ratio": 3.9700460829493087,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5040562623849308,
"avg_score": null,
"num_lines": null
} |
'''A module containing a class for storing Creature objects in a
SQLite database.'''
import csv
import sqlite3
__all__ = ['CreatureDB']
class CreatureDB(object):
'''Class for storing Creature objects in a SQLite database.'''
def __init__(self, name='creature.db', use_nominal_cr=False):
self.min_cr = 0.0
self.max_cr = float('inf')
# set flags
self.using_nominal_cr = use_nominal_cr
# initialize database
self.connection = sqlite3.connect(name)
self.connection.text_factory = str
self._create_table()
def _construct_table_columns(self):
'''Constructs a tuple that defines the columns in
the "creatures" table
:returns tuple that defines the columns in "creatures" table
'''
columns = ('id integer primary key autoincrement',
'name varchar(45)')
# set type of CR column depending on flag
if self.using_nominal_cr:
columns = columns + ('CR varchar(10)',)
else:
columns = columns + ('CR real',)
# add the remaining database fields to column tuple
main_entry_columns = (
'hp integer', 'HD integer',
'ac integer', 'touch_ac integer', 'flatfooted_ac integer',
'Fort integer', 'Ref integer', 'Will integer',
'Str integer', 'Dex integer', 'Con integer',
'Int integer', 'Wis integer', 'Cha integer',
'BAB integer', 'CMB integer', 'CMD integer'
)
columns = columns + main_entry_columns
return columns
def _construct_tuple_insert_values(self, creature):
'''Constructs a tuple of Creature values for insertion into
the "creatures" table
:returns tuple of values for insertion into "creatures" table
'''
values = (creature.name,)
# set value of CR column depending on flag
if self.using_nominal_cr:
values = values + ('CR ' + creature.cr,)
else:
values = values + (creature.cr,)
# add the remaining database fields to values tuple
main_entry_values = (
creature.hp,
creature.hd,
creature.ac['AC'],
creature.ac['touch'],
creature.ac['flat-footed'],
creature.saves['Fort'],
creature.saves['Ref'],
creature.saves['Will'],
creature.ability_scores['Str'],
creature.ability_scores['Dex'],
creature.ability_scores['Con'],
creature.ability_scores['Int'],
creature.ability_scores['Wis'],
creature.ability_scores['Cha'],
creature.bab,
creature.cmb,
creature.cmd
)
values = values + main_entry_values
return values
def _create_table(self):
'''Creates a SQLite table with the given name for storing
Creature objects if it does not already exist
:param name: a string value for the name of the table
'''
# create table
columns = self._construct_table_columns()
query = '''create table if not exists creatures
(
%s,%s,
%s,%s,
%s,%s,%s,
%s,%s,%s,
%s,%s,%s,%s,%s,%s,%s,
%s, %s, %s
)''' % columns
self.connection.execute(query)
def add_creature(self, creature):
'''Adds a Creature object as a row in the appropriate table
of the SQLite database
:param creature: a Creature object to be added to the database
'''
# check that creature CR is within desired range
creature_cr = float(creature.cr)
if creature_cr < self.min_cr or creature_cr > self.max_cr:
return
# ignore duplicate creatures
if self.is_creature_in_db(creature):
return
# insert creature into database
values = self._construct_tuple_insert_values(creature)
query = '''insert into creatures
(
name,CR,
hp,HD,
ac,touch_ac,flatfooted_ac,
Fort, Ref, Will,
Str,Dex,Con,Int,Wis,Cha,
BAB,CMB,CMD
)
values
(
?,?,
?,?,
?,?,?,
?,?,?,
?,?,?,?,?,?,
?,?,?
)'''
self.connection.execute(query, values)
def commit_and_close(self):
'''Commits any uncommitted changes to the SQLite database and
closes the connection
'''
self.connection.commit()
self.connection.close()
def export_as_csv(self, file_name='creature.csv'):
    '''Exports the data in this object as a .csv file.

    :param file_name: the name of the output csv file
    '''
    cursor = self.connection.cursor()
    data = cursor.execute('select * from creatures')
    # write data to output file
    # newline='' lets the csv module control line endings itself (avoids
    # blank rows on Windows); the context manager guarantees the file is
    # closed even if writing fails part-way through
    with open(file_name, 'w', newline='') as csv_file:
        writer = csv.writer(csv_file)
        # header row mirrors the column order used when creating the table
        writer.writerow([
            'id',
            'name', 'CR',
            'hp', 'HD',
            'ac', 'touch_ac', 'flatfooted_ac',
            'Fort', 'Ref', 'Will',
            'Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha',
            'BAB', 'CMB', 'CMD'
        ])
        writer.writerows(data)
def is_creature_in_db(self, creature):
    '''Report whether a database entry already exists for the given
    creature.

    A creature is identified by the combination of its name and its CR
    value (which may be stored in nominal 'CR x' form).

    :returns True if a matching entry exists, False otherwise
    '''
    # the stored CR is prefixed with 'CR ' when nominal CR values are in use
    if self.using_nominal_cr:
        stored_cr = 'CR ' + creature.cr
    else:
        stored_cr = creature.cr
    # query database for creature
    cursor = self.connection.cursor()
    cursor.execute(
        '''select * from creatures where name=? and cr=?''',
        (creature.name, stored_cr))
    return cursor.fetchone() is not None
| {
"repo_name": "lot9s/pathfinder-rpg-utils",
"path": "data-mining/bestiary/db/creatureDB.py",
"copies": "1",
"size": "6403",
"license": "mit",
"hash": -6970374430452077000,
"line_mean": 33.7989130435,
"line_max": 70,
"alpha_frac": 0.5092925191,
"autogenerated": false,
"ratio": 4.352821210061183,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.015532780125879992,
"num_lines": 184
} |
"""A module containing a core representation of an IATI Dataset."""
from lxml import etree
import iati.exceptions
import iati.utilities
import iati.validator
class Dataset:
    """Representation of an IATI XML file that may be validated against a Schema.

    Attributes:
        xml_str (str): An XML string representation of the Dataset.
        xml_tree (ElementTree): A tree representation of the Dataset.

    Note:
        Should it be modified after initialisation, the current content of the Dataset is deemed to be that which was last assigned to either `self.xml_str` or `self.xml_tree`.

    Warning:
        The behaviour of simultaneous assignment to both `self.xml_str` and `self.xml_tree` is undefined.
        Does not fully hide the lxml internal workings.

    Todo:
        `xml_str` and `xml_tree` are not great names. They are also too tied together. It should be determined whether this close relationship is really desired.
        Implement a number of helper functions for common operations.
        Implement getters and setters for attributes.
        Implement an addition override to allow for combination of Datasets.
    """
    def __init__(self, xml):
        """Initialise a Dataset.

        Args:
            xml (str or ElementTree): A representation of the XML to encapsulate.
                May be either a string or a lxml ElementTree.

        Raises:
            TypeError: If an attempt to pass something that is not a string or ElementTree is made.
            ValueError: If a provided XML string is not valid XML.

        Warning:
            The required parameters to create a Dataset may change. See the TODO.

        Todo:
            It should be possible to create a Dataset from a file. In this situation, having `xml` as a required parameter does not seem sensible. Need to better consider this situation.
            Add a way to determine whether a Dataset fully conforms to the IATI Standard and / or modify the Dataset so that it does.
            Need a way to avoid encoding issues preventing valid IATI datasets being instantiated as pyIATI Datasets.
        """
        # both caches are kept in sync by the property setters below
        self._xml_str = None
        self._xml_tree = None
        # lxml's Element classes are private, hence the pylint disable
        if isinstance(xml, (etree._Element, etree._ElementTree)):  # pylint: disable=W0212
            self.xml_tree = xml
        else:
            self.xml_str = xml
    @property
    def xml_str(self):
        """str: An XML string representation of the Dataset.

        Raises:
            ValueError: If a value that is being assigned is not a valid XML string.
            TypeError: If a value that is being assigned is not a string.

        Todo:
            Clarify error messages, for example when a mismatched encoding is used.
            Perhaps pass on the original lxml error message instead of trying to intrepret what might have gone wrong when running `etree.fromstring()`.
        """
        return self._xml_str
    @xml_str.setter
    def xml_str(self, value):
        # reject trees explicitly so the error message can point at the right property
        if isinstance(value, (etree._Element, etree._ElementTree)):  # pylint: disable=W0212
            msg = "If setting a Dataset with an ElementTree, use the xml_tree property, not the xml_str property."
            iati.utilities.log_error(msg)
            raise TypeError(msg)
        else:
            try:
                value_stripped = value.strip()
                validation_error_log = iati.validator.validate_is_xml(value_stripped)
                # Convert the input to bytes, as etree.fromstring works most consistently with bytes objects, especially if an XML encoding declaration has been used.
                if isinstance(value_stripped, str):
                    value_stripped_bytes = value_stripped.encode()
                elif isinstance(value_stripped, bytes):
                    value_stripped_bytes = value_stripped
                if not validation_error_log.contains_errors():
                    # assigning to xml_tree also refreshes _xml_str via its setter;
                    # the original stripped input is then restored as the string form
                    self.xml_tree = etree.fromstring(value_stripped_bytes)
                    self._xml_str = value_stripped
                else:
                    if validation_error_log.contains_error_of_type(TypeError):
                        raise TypeError
                    else:
                        # ValidationError is not caught below, so it propagates to the caller
                        raise iati.exceptions.ValidationError(validation_error_log)
            except (AttributeError, TypeError):
                # AttributeError covers non-string inputs lacking .strip()
                msg = "Datasets can only be ElementTrees or strings containing valid XML, using the xml_tree and xml_str attributes respectively. Actual type: {0}".format(type(value))
                iati.utilities.log_error(msg)
                raise TypeError(msg)
    @property
    def xml_tree(self):
        """ElementTree: A tree representation of the Dataset.

        Raises:
            TypeError: If a value that is being assigned is not an ElementTree.

        Warning:
            Does not fully hide the lxml internal workings.

        Todo:
            Check use of ElementTree in setter.
        """
        # the root Element is stored internally; expose it as an ElementTree
        return self._xml_tree.getroottree()
    @xml_tree.setter
    def xml_tree(self, value):
        # accept either a bare Element or a full ElementTree; store the root Element
        if isinstance(value, etree._Element):  # pylint: disable=W0212
            self._xml_tree = value
            self._xml_str = etree.tostring(value, pretty_print=True)
        elif isinstance(value, etree._ElementTree):  # pylint: disable=W0212
            root = value.getroot()
            self._xml_tree = root
            self._xml_str = etree.tostring(root, pretty_print=True)
        else:
            msg = "If setting a Dataset with the xml_property, an ElementTree should be provided, not a {0}.".format(type(value))
            iati.utilities.log_error(msg)
            raise TypeError(msg)
    def _raw_source_at_line(self, line_number):
        """Return the raw value of the XML source at the specified line.

        Args:
            line_number (int): A zero-indexed line number.

        Returns:
            str: The source of the XML at the specified line.

        Raises:
            TypeError: When `line_number` is not an integer.
            ValueError: When `line_number` is negative or more than the number of lines in the file.
        """
        # bool is a subclass of int, so it must be excluded explicitly
        if not isinstance(line_number, int) or isinstance(line_number, bool):
            raise TypeError
        if line_number < 0:
            raise ValueError
        try:
            # this is led with an empty string since the `sourceline` attribute is 1-indexed.
            split_lines = [''] + self.xml_str.split('\n')
            return split_lines[line_number]
        except IndexError:
            raise ValueError
    @property
    def version(self):
        """Return the version of the Standard that this Dataset is specified against.

        Returns:
            iati.Version / None: The version of the Standard that this Dataset is specified against. None if the version cannot be detected.

        Todo:
            Consider if this should raise an error if the Dataset is specified at a version of the Standard that does not exist.
        """
        root_tree = self.xml_tree.getroot()
        # files without an explicit @version attribute are treated as 1.01
        default_version = '1.01'
        version_iati_root = root_tree.get('version', default_version).strip()
        if version_iati_root.startswith('1'):
            # Version 1 data, so need to check that all child `iati-activity` or `iati-organisation` elements are at the same version
            versions_in_children = list()
            for child_tree in root_tree.getchildren():  # This is expected to return a list of `iati-activity` or `iati-organisation` elements.
                activity_version = child_tree.get('version', default_version).strip()
                versions_in_children.append(activity_version)
            # mixed child versions, or children disagreeing with the root, are undetectable
            if len(set(versions_in_children)) == 1 and versions_in_children[0] == version_iati_root:
                version = version_iati_root
            else:
                version = None
        else:
            # Not version 1 data, so can return the version specified in `iati-activities/@version`
            version = version_iati_root
        if version is None:
            return version
        return iati.Version(version)
    def source_at_line(self, line_number):
        """Return the value of the XML source at the specified line.

        Args:
            line_number (int): A zero-indexed line number.

        Returns:
            str: The source of the XML at the specified line. Leading and trailing whitespace is trimmed.

        Raises:
            TypeError: When `line_number` is not an integer.
            ValueError: When `line_number` is negative or more than the number of lines in the file.

        Todo:
            Test with minified XML.
        """
        return self._raw_source_at_line(line_number).strip()
    def source_around_line(self, line_number, surrounding_lines=1):
        """Return the value of the XML source at the specified line, plus the specified amount of surrounding context.

        Args:
            line_number (int): A zero-indexed line number.
            surrounding_lines (int): The number of lines of context to provide either side of the specified line number. Default 1.

        Returns:
            str: The source of the XML at the specified line, plus the specified number of lines of surrounding context.
            Should there be fewer lines of XML than are asked for, the entire Dataset will be returned.

        Raises:
            TypeError: When `line_number` is not an integer.
            TypeError: When `surrounding_lines` is not an integer.
            ValueError: When `line_number` is negative or more than the number of lines in the file.
            ValueError: When `surrounding_lines` is negative.

        Todo:
            Test with minified XML.
        """
        if not isinstance(surrounding_lines, int) or isinstance(surrounding_lines, bool):
            raise TypeError
        if surrounding_lines < 0:
            raise ValueError
        lines_arr = []
        # clamp the window to the bounds of the document (lines are 1-indexed)
        lower_line_number = max(line_number - surrounding_lines, 1)
        upper_line_number = min(line_number + surrounding_lines + 1, len(self.xml_str.split('\n')) + 1)
        for line_num in range(lower_line_number, upper_line_number):
            lines_arr.append(self._raw_source_at_line(line_num))
        return '\n'.join(lines_arr)
| {
"repo_name": "IATI/iati.core",
"path": "iati/data.py",
"copies": "1",
"size": "10271",
"license": "mit",
"hash": -513188287255252540,
"line_mean": 39.12109375,
"line_max": 186,
"alpha_frac": 0.6252555739,
"autogenerated": false,
"ratio": 4.522677234698371,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.564793280859837,
"avg_score": null,
"num_lines": null
} |
"""A module containing a core representation of IATI Codelists."""
import collections
from lxml import etree
import iati.resources
import iati.utilities
class Codelist:
    """Representation of a Codelist as defined within the IATI SSOT.

    Attributes:
        complete (bool): Whether the Codelist is complete or not. If complete, attributes making use of this Codelist must only contain values present on the Codelist. If not complete, this is merely strongly advised.
        codes (:obj:`set` of :obj:`iati.Code`): The codes demonstrating the range of values that the Codelist may represent.
        name (str): The name of the Codelist.

    Warning:
        There are currently a large number of attributes that have been taken straight from the XML without being implemented in code. Some of these may change during implementation.
        The `codes` attribute is currently a set. While functionally correct, it may be slightly confusing because the class is a CodeLIST.

    Todo:
        Create a custom class inheriting from set that only allows Codes to be added.
        Implement and document attributes that are not yet implemented and documented.
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, name, xml=None):
        """Initialise a Codelist.

        Any Codes contained within the specified XML are added.

        Args:
            name (str): The name of the codelist being initialised.
            xml (str): An XML representation of a codelist.

        Note:
            Instances of a Codelist should remain independent of a particular version of the IATI Standard. Versioning should be handled elsewhere.

        Warning:
            The format of the constructor is likely to change. It needs to be less reliant on the name acting as a UID, and allow for other attributes to be defined.

        Todo:
            Raise warnings or errors if the Codelist is unable to initialise correctly.
        """
        def parse_from_xml(xml):
            """Parse a Codelist from the XML that defines it.

            Mutates the enclosing instance via closure: sets `self.name`,
            populates `self.codes` and may set `self.complete`.

            Warning:
                In modifying the parameters required for creating an instance of the class, this is likely to move in some manner.

            Todo:
                Define relevant tests and error handling.
                Handle Codelists without description or name elements.
                Better document side-effects.
            """
            tree = iati.utilities.convert_xml_to_tree(xml)
            # the @name attribute in the XML overrides the `name` argument
            self.name = tree.attrib['name']
            for code_el in tree.findall('codelist-items/codelist-item'):
                value = code_el.findtext('code')
                # narrative child takes precedence over a bare name element
                name = code_el.findtext('name/narrative') or code_el.findtext('name')
                if (value is None) and (name is None):
                    msg = "The provided Codelist ({0}) has a Code that does not contain a name or value.".format(self.name)
                    iati.utilities.log_warning(msg)
                # missing values are normalised to empty strings rather than None
                if value is None:
                    value = ''
                if name is None:
                    name = ''
                self.codes.add(iati.Code(value, name))
            try:
                self.complete = True if tree.attrib['complete'] == '1' else False
            except KeyError:
                # no @complete attribute: leave the default (None) in place
                pass
        # defaults are assigned first; parse_from_xml (called last) may override them
        self.complete = None
        self.codes = set()
        self.name = name
        # a number of placeholder attributes that Codelists have, though are not yet implemented
        self._name_prose = None
        self._description = None
        self._language = None
        self._url = None
        self._ref = None
        self._category_codelist = None
        if xml:
            parse_from_xml(xml)
    def __eq__(self, other):
        """Check Codelist equality.

        This allows uniqueness to be correctly defined upon insertion into a set.

        Todo:
            Utilise all attributes as part of the equality process.
        """
        return (self.name == other.name) and (self.complete == other.complete) and (collections.Counter(self.codes) == collections.Counter(other.codes))
    def __ne__(self, other):
        """Check Codelist inequality."""
        return not self == other
    def __hash__(self):
        """Hash the Codelist.

        This allows uniqueness to be correctly defined upon insertion into a set.

        Todo:
            Utilise all attributes as part of the equality process.
        """
        # codes are sorted by value so the hash is independent of set iteration order
        sorted_codes = sorted(self.codes, key=lambda x: x.value)
        return hash((self.name, self.complete, tuple(sorted_codes)))
    @property
    def xsd_restriction(self):
        """Output the Codelist as an XSD simpleType restriction.

        This tree may be used to specify the type of given elements, allowing insertion and validation within a Schema.

        Returns:
            etree.Element: An XSD simpleType representing this Codelist.

        Warning:
            It is planned to change from Schema-based to Data-based Codelist validation. As such, this property may be removed.
            The name attribute of the generated type is not good and needs changing.
            Does not fully hide the lxml internal workings.

        Todo:
            See whether there are only Codelists of a type other than string.
            Improve naming of the type to reduce potential of clashes.
        """
        type_base_el = etree.Element(
            iati.constants.NAMESPACE + 'simpleType',
            name='{0}-type'.format(self.name),
            nsmap=iati.constants.NSMAP
        )
        restriction_base_el = etree.Element(
            iati.constants.NAMESPACE + 'restriction',
            base='xsd:string',
            nsmap=iati.constants.NSMAP
        )
        # each Code contributes one xsd:enumeration to the restriction
        for code in self.codes:
            restriction_base_el.append(code.xsd_enumeration)
        type_base_el.append(restriction_base_el)
        return type_base_el
class Code:
    """A single Code belonging to a Codelist.

    Attributes:
        name (str): The human-readable name of the code.
        value (str): The machine-readable value of the code.

    Todo:
        Implement and document attributes that are not yet implemented and documented.
    """
    # pylint: disable=too-many-instance-attributes
    def __init__(self, value, name=''):
        """Initialise a Code.

        Args:
            value (str): The value of the code being initialised.
            name (str): The name of the code being initialised.

        Note:
            Instances of a Code should remain independent of a particular version of the IATI Standard. Versioning should be handled elsewhere.

        Warning:
            The format of the constructor is likely to change. It should include mandatory parameters, and allow for other attributes to be defined.
        """
        self.name = name
        self.value = value
        # placeholder attributes defined by the Codelist format, not yet implemented
        self._description = None
        self._category = None
        self._url = None
        self._public_database = False
        self._status = None
        self._activation_date = None
        self._withdrawal_date = None
    def __eq__(self, other):
        """Check Code equality.

        Compares both name and value against another Code-like object;
        falls back to comparing the raw value when `other` is not Code-like
        (for example, a plain string).

        Todo:
            Utilise all attributes as part of the equality process.
            Test comparison with strings.
        """
        try:
            return (self.name == other.name) and (self.value == other.value)
        except AttributeError:
            # `other` has no name/value attributes, so compare the bare value
            return self.value == other
    def __ne__(self, other):
        """Check Code inequality."""
        return not self.__eq__(other)
    def __hash__(self):
        """Hash the Code based solely on its value.

        This allows uniqueness to be correctly defined upon insertion into a set.

        Todo:
            Utilise all attributes as part of the hashing process.
            Be able to deal with checks against both Codes and strings.
        """
        return hash(self.value)
    @property
    def xsd_enumeration(self):
        """Output the Code as an etree enumeration element.

        Returns:
            etree.Element: An XSD enumeration representing this Codelist.

        Warning:
            It is planned to change from Schema-based to Data-based Codelist validation. As such, this property may be removed.
            Does not fully hide the lxml internal workings.
        """
        return etree.Element(
            iati.constants.NAMESPACE + 'enumeration',
            value=self.value,
            nsmap=iati.constants.NSMAP
        )
| {
"repo_name": "IATI/iati.core",
"path": "iati/codelists.py",
"copies": "1",
"size": "8670",
"license": "mit",
"hash": -4449260308753380400,
"line_mean": 32.7354085603,
"line_max": 217,
"alpha_frac": 0.6176470588,
"autogenerated": false,
"ratio": 4.668820678513732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5786467737313732,
"avg_score": null,
"num_lines": null
} |
"""A module containing a core representation of IATI Rulesets.
Todo:
Consider how we should handle lxml errors.
Remove references to `case`.
"""
# no-member errors are due to using `setattr()` # pylint: disable=no-member
import collections
import decimal
import json
import re
import sre_constants
from datetime import datetime
import jsonschema
import iati.default
import iati.utilities
# Names of every Rule type that may appear in a Ruleset.
_VALID_RULE_TYPES = ["atleast_one", "dependent", "sum", "date_order", "no_more_than_one", "regex_matches", "regex_no_matches", "startswith", "unique"]
def constructor_for_rule_type(rule_type):
    """Locate the constructor for specific Rule types.

    Args:
        rule_type (str): The name of the type of Rule to identify the class for.

    Returns:
        type: A constructor for a class that inherits from Rule.

    Raises:
        KeyError: When a non-permitted `rule_type` is provided.
    """
    # look the name up directly in a mapping of permitted types to classes;
    # an unknown name falls straight through as a KeyError
    return {
        'atleast_one': RuleAtLeastOne,
        'date_order': RuleDateOrder,
        'dependent': RuleDependent,
        'no_more_than_one': RuleNoMoreThanOne,
        'regex_matches': RuleRegexMatches,
        'regex_no_matches': RuleRegexNoMatches,
        'startswith': RuleStartsWith,
        'sum': RuleSum,
        'unique': RuleUnique
    }[rule_type]
class Ruleset:
    """Representation of a Ruleset as defined within the IATI SSOT.

    Attributes:
        rules (set): The Rules contained within this Ruleset.
    """
    def __init__(self, ruleset_str=None):
        """Initialise a Ruleset.

        Args:
            ruleset_str (str): A string that represents a Ruleset.

        Raises:
            TypeError: When `ruleset_str` is not a string.
            ValueError: When `ruleset_str` does not validate against the Ruleset Schema or cannot be correctly decoded.
        """
        self.rules = set()
        if ruleset_str is None:
            ruleset_str = ''
        try:
            # the object_pairs_hook raises on duplicate keys, which plain
            # json.loads would silently merge
            ruleset_dict = json.loads(ruleset_str, object_pairs_hook=iati.utilities.dict_raise_on_duplicates)
        except TypeError:
            raise ValueError('Provided Ruleset string is not a string.')
        except json.decoder.JSONDecodeError:
            # an empty (or whitespace-only) string is treated as an empty Ruleset
            if ruleset_str.strip() == '':
                ruleset_dict = {}
            else:
                raise ValueError('Provided Ruleset string is not valid JSON.')
        self._validate_ruleset(ruleset_dict)
        try:
            self._set_rules(ruleset_dict)
        except AttributeError:
            raise ValueError('Provided Ruleset validates against the Ruleset Schema, but should not. See: https://github.com/IATI/IATI-Rulesets/issues/49')
    def __eq__(self, other):
        """Check Ruleset equality.

        This allows uniqueness to be correctly defined upon insertion into a set.
        """
        return collections.Counter(self.rules) == collections.Counter(other.rules)
    def __ne__(self, other):
        """Check Ruleset inequality."""
        return not self == other
    def __hash__(self):
        """Hash the Ruleset.

        This allows uniqueness to be correctly defined upon insertion into a set.
        """
        # identity-based hash: two equal Rulesets may hash differently
        return hash(id(self))
    def is_valid_for(self, dataset):
        """Validate a Dataset against the Ruleset.

        Args:
            Dataset (iati.Dataset): A Dataset to be checked for validity against the Ruleset.

        Returns:
            bool:
                `True` when the Dataset is valid against the Ruleset.
                `False` when part or all of the Dataset is not valid against the Ruleset.

        Todo:
            Better design how Skips and ValueErrors are treated. The current True/False/Skip/Error thing is a bit clunky.
        """
        for rule in self.rules:
            try:
                # a `None` result is a skip and does not fail the Ruleset
                if rule.is_valid_for(dataset) is False:
                    return False
            except ValueError:
                # a Rule that cannot evaluate its data counts as a failure
                return False
        return True
    def _validate_ruleset(self, ruleset_dict):
        """Validate a Ruleset against the Ruleset Schema.

        Args:
            ruleset_dict (dict): A JSON-format Ruleset parsed into a dictionary.

        Raises:
            ValueError: When `ruleset_dict` does not validate against the Ruleset Schema.
        """
        try:
            jsonschema.validate(ruleset_dict, iati.default.ruleset_schema())
        except jsonschema.ValidationError:
            raise ValueError('Provided Ruleset does not validate against the Ruleset Schema')
    def _set_rules(self, ruleset_dict):
        """Set the Rules of the Ruleset.

        Extract each case of each Rule from the Ruleset and add to initialised `rules` set.

        Args:
            ruleset_dict (dict): A JSON-format Ruleset parsed into a dictionary.
        """
        # a Ruleset dict is shaped {context: {rule_type: {'cases': [case, ...]}}};
        # each individual case becomes its own Rule instance
        for context, rule in ruleset_dict.items():
            for rule_type, cases in rule.items():
                for case in cases['cases']:
                    constructor = constructor_for_rule_type(rule_type)
                    new_rule = constructor(context, case)
                    self.rules.add(new_rule)
class Rule:
    """Representation of a Rule contained within a Ruleset.

    Acts as a base class for specific types of Rule that actually check the content of the data.

    Attributes:
        context (str): An XPath expression to locate the elements that the Rule is to be checked against.
        case (dict): Specific configuration for this instance of the Rule.

    Todo:
        Determine whether this should be an Abstract Base Class.
    """
    def __init__(self, context, case):
        """Initialise a Rule.

        Raises:
            TypeError: When a parameter is of an incorrect type.
            ValueError: When a rule_type is not one of the permitted Rule types.
        """
        # order matters: the case must be validated against the schema before
        # its values are copied onto the instance and the xpaths normalized
        self._case = case
        self._context = self._validated_context(context)
        self._valid_rule_configuration(case)
        self._set_case_attributes(case)
        self._normalize_xpaths()
    def __str__(self):
        """Return string to state what the Rule is checking."""
        return 'This is a Rule.'
    def __eq__(self, other):
        """Check Rule equality.

        This allows uniqueness to be correctly defined upon insertion into a set.
        """
        # the string form encodes the Rule's configuration, so name + str
        # together identify a Rule
        return (self.name == other.name) and (str(self) == str(other))
    def __ne__(self, other):
        """Check Rule inequality."""
        return not self == other
    def __hash__(self):
        """Hash the Rule.

        This allows uniqueness to be correctly defined upon insertion into a set.
        """
        return hash((self.name, str(self)))
    @property
    def context(self):
        """str: An XPath expression to locate the elements that the Rule is to be checked against."""
        return self._context
    @property
    def name(self):
        """str: The type of Rule, as specified in a JSON Ruleset."""
        return self._name
    def _validated_context(self, context):
        """Check that a valid `context` is given for a Rule.

        Args:
            context (str): The XPath expression that selects XML elements that the Rule acts against.

        Returns:
            str: A valid XPath.

        Raises:
            TypeError: When an argument is given that is not a string.
            ValueError: When `context` is an empty string.
        """
        if isinstance(context, str):
            if context != '':
                return context
            raise ValueError
        raise TypeError
    def _normalize_xpath(self, path):
        """Normalize a single XPath by combining it with `context`.

        Args:
            path (str): An XPath.

        Raises:
            AttributeError: When the `context` isn't set.
            ValueError: When `path` is an empty string.

        Todo:
            Add some logging.
            Re-evaluate this.
        """
        if path == '':
            raise ValueError
        return '/'.join([self.context, path])
    def _normalize_condition(self):
        """Normalize `condition` xpaths."""
        try:
            self.normalized_paths.append(self._normalize_xpath(self.condition))
        except AttributeError:
            # `condition` is optional; absence is not an error
            pass
    def _normalize_xpaths(self):
        """Normalize xpaths by combining them with `context`.

        Note:
            May be overridden in child class that does not use `paths`.
        """
        self.normalized_paths = [self._normalize_xpath(path) for path in self.paths]
        self._normalize_condition()
    def _valid_rule_configuration(self, case):
        """Check that a configuration being passed into a Rule is valid for the given type of Rule.

        Args:
            case (dict): A dictionary of values, generally parsed as a case from a Ruleset.

        Raises:
            AttributeError: When the Rule name is unset or does not have the required attributes.
            ValueError: When the case is not valid for the type of Rule.

        Note:
            The `name` attribute on the class must be set to a valid rule_type before this function is called.
        """
        try:
            jsonschema.validate(case, self._ruleset_schema_section())
        except jsonschema.ValidationError:
            raise ValueError
    def _set_case_attributes(self, case):
        """Make the required attributes within a case their own attributes in the class.

        Args:
            case (dict): The case to take values from.

        Todo:
            Set non-required properties such as a `condition`.
        """
        required_attributes = self._case_attributes(self._ruleset_schema_section())
        for attrib in required_attributes:
            setattr(self, attrib, case[attrib])
        optional_attributes = self._case_attributes(self._ruleset_schema_section(), False)
        for attrib in optional_attributes:
            try:
                setattr(self, attrib, case[attrib])
            except KeyError:
                # optional attribute absent from this case: leave it unset
                pass
    def _case_attributes(self, partial_schema, required=True):
        """Determine the attributes that must be present given the Schema for the Rule type.

        Args:
            partial_schema (dict): The partial JSONSchema to extract attribute names from.
            required (bool): Specifies whether the attributes to be returned should be required or optional according to the Ruleset specification.

        Returns:
            list of str: The names of required or optional attributes.
        """
        # `condition` is the only optional attribute; everything else is required
        if required:
            return [key for key in partial_schema['properties'].keys() if key != 'condition']
        return [key for key in partial_schema['properties'].keys() if key == 'condition']
    def _ruleset_schema_section(self):
        """Locate the section of the Ruleset Schema relevant for the Rule.

        In doing so, makes required properties required.

        Returns:
            dict: A dictionary of the relevant part of the Ruleset Schema, based on the Rule's name.

        Raises:
            AttributeError: When the Rule name is unset or does not have the required attributes.
        """
        ruleset_schema = iati.default.ruleset_schema()
        partial_schema = ruleset_schema['patternProperties']['.+']['properties'][self.name]['properties']['cases']['items']  # pylint: disable=E1101
        # make all attributes other than 'condition' in the partial schema required
        partial_schema['required'] = self._case_attributes(partial_schema)
        # ensure that the 'paths' array is not empty
        if 'paths' in partial_schema['properties'].keys():
            partial_schema['properties']['paths']['minItems'] = 1
        return partial_schema
    def _find_context_elements(self, dataset):
        """Find the specific elements in context for the Rule.

        Args:
            dataset (iati.Dataset): The Dataset to be chacked for validity against the Rule.

        Returns:
            list of elements: Results of XPath query.

        Raises:
            AttributeError: When an argument is given that does not have the required attributes.
        """
        return dataset.xml_tree.xpath(self.context)
    def _extract_text_from_element_or_attribute(self, context, path):
        """Return a list of strings regardless of whether XPath result is an attribute or an element.

        Args:
            context (etree._Element): An xml Element.
            path (str): An XPath query string.

        Returns:
            list of str: Text values from XPath query results.

        Note:
            `Element.text` will return `None` if it contains no text. This is bad. As such, this is converted to an empty string to prevent TypeErrors.
            `path` should be validated outside of this function to avoid unexpected errors.
        """
        # attribute matches come back as str; element matches expose `.text`
        xpath_results = context.xpath(path)
        results = [result if isinstance(result, str) else result.text for result in xpath_results]
        return ['' if result is None else result for result in results]
    def _condition_met_for(self, context_element):
        """Check for condtions of a given case.

        Args:
            dataset (iati.Dataset): The Dataset to be checked for validity against a Rule.

        Returns:
            bool: Returns `False` when condition not met.
                  Returns `True` when condition is met.

        Warning:
            Current implementation may be vulnerable to XPath injection vulnerabilities.

        Todo:
            Need to assess the possibility of risk and potential counter-measures/avoidance strategies if needed.
            Need to decide whether the implementation of this in Rules should `return None` or `continue`.
            Rename function to sound more truthy.
        """
        try:
            if context_element.xpath(self.condition):
                return True
        except AttributeError:
            # no `condition` attribute was set for this Rule
            return False
        return False
    def is_valid_for(self, dataset):
        """Check whether a Dataset is valid against the Rule.

        Args:
            dataset (iati.Dataset): The Dataset to be checked for validity against the Rule.

        Returns:
            bool or None:
                `True` when the Dataset is valid against the Rule.
                `False` when the Dataset is not valid against the Rule.
                `None` when a condition is met to skip validation.

        Raises:
            TypeError: When a Dataset is not given as an argument.
            ValueError: When a check encounters a completely incorrect value that it is unable to recover from within the definition of the Rule.

        Note:
            May be overridden in child class that does not have the same return structure for boolean results.

        Todo:
            Better design how Skips and ValueErrors are treated. The current True/False/Skip/Error thing is a bit clunky.
        """
        try:
            context_elements = self._find_context_elements(dataset)
        except AttributeError:
            raise TypeError
        # no elements in context means there is nothing to check: skip
        if context_elements == list():
            return None
        for context_element in context_elements:
            if self._condition_met_for(context_element):
                return None
            # the subclass-specific check; defined only on subclasses of Rule
            rule_check_result = self._check_against_Rule(context_element)
            if rule_check_result is False:
                return False
            elif rule_check_result is None:
                return None
        return True
class RuleAtLeastOne(Rule):
    """Representation of a Rule that checks that there is at least one Element matching a given XPath.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.
    """
    def __init__(self, context, case):
        """Initialise an `atleast_one` rule."""
        self._name = 'atleast_one'
        super(RuleAtLeastOne, self).__init__(context, case)
    def __str__(self):
        """Return string stating what RuleAtLeastOne is checking."""
        if len(self.paths) == 1:
            return '`{self.paths[0]}` must be present within each `{self.context}`.'.format(self=self)
        return 'At least one of `{0}` must be present within each `{self.context}`.'.format('` or `'.join(self.paths), self=self)
    def _check_against_Rule(self, context_element):
        """Check `context_element` has at least one specified Element or Attribute.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: `False` when any of the configured paths matches within the
            element (the parent class treats the result inverted, see
            `is_valid_for`); `True` when no path matches.
        """
        return not any(context_element.xpath(path) for path in self.paths)
    def is_valid_for(self, dataset):
        """Check whether a Dataset is valid against the Rule.

        Args:
            dataset (iati.Dataset): The Dataset to be checked for validity against the Rule.

        Returns:
            bool or None:
                `True` when the Dataset is valid against the Rule.
                `False` when the Dataset is not valid against the Rule.
                `None` when a condition is met to skip validation.

        Raises:
            TypeError: When a Dataset is not given as an argument.
        """
        parent_result = super(RuleAtLeastOne, self).is_valid_for(dataset)
        if parent_result is None:
            return None
        # the parent reports True when no path matched, so the outcome is inverted
        return not parent_result
class RuleDateOrder(Rule):
    """Representation of a Rule that checks that the date value of `more` is the most recent value in comparison to the date value of `less`.

    Attributes:
        less (str): An XPath expression to locate the element containing the date that should be in the past.
        more (str): An XPath expression to locate the element containing the date that should be in the future.
        special_case (str): A value that will be treated as the present when provided as the `less` or `more` value.

    """

    def __init__(self, context, case):
        """Initialise a `date_order` rule."""
        self._name = 'date_order'
        self.special_case = 'NOW'  # Was a constant sort of
        super(RuleDateOrder, self).__init__(context, case)

    def __str__(self):
        """Return string stating what RuleDateOrder is checking."""
        if self.less == self.special_case and self.more == self.special_case:
            unformatted_str = '`{self.less}` must be chronologically before `{self.more}`. Try working that one out.'
        elif self.less == self.special_case:
            unformatted_str = '`{self.more}` must be in the future within each `{self.context}`.'
        elif self.more == self.special_case:
            unformatted_str = '`{self.less}` must be in the past within each `{self.context}`.'
        else:
            unformatted_str = '`{self.less}` must be chronologically before `{self.more}` within each `{self.context}`.'
        return unformatted_str.format(**locals())

    def _normalize_xpaths(self):
        """Normalize xpaths by combining them with `context`.

        Note:
            The special-case test uses `!=` (value equality) rather than `is not`
            (identity): `less` and `more` arrive from parsed Ruleset configuration,
            so relying on string interning for an identity match is fragile. This
            also matches the `==` comparisons used in `__str__` and `_get_date`.

        """
        self.normalized_paths = list()
        if self.less != self.special_case:
            self.normalized_paths.append(self._normalize_xpath(self.less))
        if self.more != self.special_case:
            self.normalized_paths.append(self._normalize_xpath(self.more))
        self._normalize_condition()

    def _get_date(self, context_element, path):
        """Retrieve datetime object from an XPath string.

        Args:
            context_element (etree._Element): An XML Element.
            path: (an XPath): The ultimate XPath query to find the desired elements.

        Returns:
            datetime.datetime: A datetime object.

        Raises:
            ValueError:
                When a non-permitted number of unique dates are given for a `less` or `more` value.
                When datetime cannot convert a string of non-permitted characters.
                When non-permitted trailing characters are found after the core date string characters.

        Note:
            Though technically permitted, any dates with a leading '-' character are almost certainly incorrect and are therefore treated as data errors.

        Todo:
            Consider breaking this function down further.

        """
        if path == self.special_case:
            # The special case stands for 'the present moment'.
            return datetime.today()
        dates = self._extract_text_from_element_or_attribute(context_element, path)
        if dates == list() or not dates[0]:
            return None
        # Checks that anything after the YYYY-MM-DD string is a permitted timezone character
        pattern = re.compile(r'^([+-]([01][0-9]|2[0-3]):([0-5][0-9])|Z)?$')
        if (len(set(dates)) == 1) and pattern.match(dates[0][10:]):
            if len(dates[0]) < 10:
                # '%d' and '%m' are documented as requiring zero-padded dates.as input. This is actually for output. As such, a separate length check is required to ensure zero-padded values.
                raise ValueError
            return datetime.strptime(dates[0][:10], '%Y-%m-%d')
        raise ValueError

    def _check_against_Rule(self, context_element):
        """Assert that the date value of `less` is chronologically before the date value of `more`.

        Args:
            context_element (etree._Element): An XML Element.

        Return:
            bool: Return `True` when `less` is chronologically before `more`.
                Return `False` when `less` is not chronologically before `more`.
            None: When a condition is met to skip validation.

        Raises:
            ValueError: When a date is given that is not in the correct xsd:date format.

        Note:
            `date` restricted to 10 characters in order to exclude possible timezone values.

        """
        early_date = self._get_date(context_element, self.less)
        later_date = self._get_date(context_element, self.more)
        try:
            # A TypeError occurs when either date is None (i.e. it could not
            # be located), which is the signal to skip validation.
            if early_date > later_date:
                return False
        except TypeError:
            return None
        return True
class RuleDependent(Rule):
    """A Rule asserting that if any Element or Attribute located by `paths` exists, then all of them must exist.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.

    """

    def __init__(self, context, case):
        """Initialise a `dependent` rule."""
        self._name = 'dependent'
        super(RuleDependent, self).__init__(context, case)

    def __str__(self):
        """Describe the check that this RuleDependent performs."""
        if len(self.paths) == 1:
            template = 'Within each `{self.context}`, either `{self.paths[0]}` exists or it does not. As such, this Rule is always True.'
        else:
            template = 'Within each `{self.context}`, either none of `{0}` must exist, or they must all exist.'
        return template.format('` or `'.join(self.paths), **locals())

    def _check_against_Rule(self, context_element):
        """Assert that the given `paths` either all exist or all do not exist for `context_element`.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: `True` when all dependent `paths` are found in the Dataset, if any exist.
                `False` when only some of the dependent `paths` are found in the Dataset.

        """
        distinct_paths = set(self.paths)
        matches = sum(1 for path in distinct_paths if context_element.xpath(path) != list())
        # Valid states: nothing matched, or everything matched.
        return matches in (0, len(distinct_paths))
class RuleNoMoreThanOne(Rule):
    """A Rule asserting that no more than one Element or Attribute matches the given XPaths.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.

    """

    def __init__(self, context, case):
        """Initialise a `no_more_than_one` rule."""
        self._name = 'no_more_than_one'
        super(RuleNoMoreThanOne, self).__init__(context, case)

    def __str__(self):
        """Describe the check that this RuleNoMoreThanOne performs."""
        if len(self.paths) == 1:
            template = '`{self.paths[0]}` must occur zero or one times within each `{self.context}`.'
        else:
            template = 'There must be no more than one element or attribute matched at `{0}` within each `{self.context}`.'
        return template.format('` or `'.join(self.paths), **locals())

    def _check_against_Rule(self, context_element):
        """Check `context_element` has no more than one result for the specified Elements or Attributes.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: `True` when zero or one results are found in the Dataset;
                `False` when more than one result is found.

        """
        match_total = sum(len(context_element.xpath(path)) for path in set(self.paths))
        return match_total <= 1
class RuleRegexMatches(Rule):
    """Representation of a Rule that checks that the text of the given paths must match the regex value.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.
        regex (str): A Perl-style regular expression.

    """

    def __init__(self, context, case):
        """Initialise a `regex_matches` Rule.

        Raises:
            ValueError: When the case does not contain valid regex.

        """
        self._name = 'regex_matches'
        super(RuleRegexMatches, self).__init__(context, case)
        if self.regex == '':
            raise ValueError
        try:
            re.compile(self.regex)
        # `re.error` is the public name for the exception previously exposed as
        # `sre_constants.error` (the same class; `sre_constants` is a private,
        # deprecated module).
        except re.error:
            raise ValueError

    def __str__(self):
        """Return string stating what RuleRegexMatches is checking."""
        if len(self.paths) == 1:
            return 'Each `{self.paths[0]}` within each `{self.context}` must match the regular expression `{self.regex}`.'.format(**locals())
        return 'Each instance of `{0}` within each `{self.context}` must match the regular expression `{self.regex}`.'.format('` and `'.join(self.paths), **locals())

    def _check_against_Rule(self, context_element):
        """Assert that the text of the given `paths` matches the regex value.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: Return `True` when the given `path` text matches the given regex.
                Return `False` when the given `path` text does not match the given regex.

        """
        pattern = re.compile(self.regex)
        for path in self.paths:
            strings_to_check = self._extract_text_from_element_or_attribute(context_element, path)
            for string_to_check in strings_to_check:
                if not pattern.search(string_to_check):
                    return False
        return True
class RuleRegexNoMatches(Rule):
    """Representation of a Rule that checks that the text of the given `paths` must not match the regex value.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.
        regex (str): A Perl-style regular expression.

    """

    def __init__(self, context, case):
        """Initialise a `regex_no_matches` Rule.

        Raises:
            ValueError: When the case does not contain valid regex.

        """
        self._name = 'regex_no_matches'
        super(RuleRegexNoMatches, self).__init__(context, case)
        if self.regex == '':
            raise ValueError
        try:
            re.compile(self.regex)
        # `re.error` is the public name for the exception previously exposed as
        # `sre_constants.error` (the same class; `sre_constants` is a private,
        # deprecated module).
        except re.error:
            raise ValueError

    def __str__(self):
        """Return string stating what RuleRegexNoMatches is checking."""
        if len(self.paths) == 1:
            return 'Each `{self.paths[0]}` within each `{self.context}` must not match the regular expression `{self.regex}`.'.format(**locals())
        return 'Each instance of `{0}` within each `{self.context}` must not match the regular expression `{self.regex}`.'.format('` and `'.join(self.paths), **locals())

    def _check_against_Rule(self, context_element):
        """Assert that no text of the given `paths` matches the regex value.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: Return `True` when the given `path` text does not match the given regex.
                Return `False` when the given `path` text matches the given regex.

        """
        pattern = re.compile(self.regex)
        for path in self.paths:
            strings_to_check = self._extract_text_from_element_or_attribute(context_element, path)
            for string_to_check in strings_to_check:
                if pattern.search(string_to_check):
                    return False
        return True
class RuleStartsWith(Rule):
    """A Rule asserting that every text value located by `paths` begins with the text value located by `start`.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.
        start (str): An XPath expression to locate a single element. The text of this element is used as the prefix value for the Rule.

    """

    def __init__(self, context, case):
        """Initialise a `startswith` Rule."""
        self._name = 'startswith'
        super(RuleStartsWith, self).__init__(context, case)

    def __str__(self):
        """Describe the check that this RuleStartsWith performs."""
        if len(self.paths) == 1:
            template = 'Each `{self.paths[0]}` within each `{self.context}` must start with the value present at `{self.start}`.'
        else:
            template = 'Each instance of `{0}` within each `{self.context}` must start with the value present at `{self.start}`.'
        return template.format('` and `'.join(self.paths), **locals())

    def _normalize_xpaths(self):
        """Normalize xpaths by combining them with `context`."""
        super(RuleStartsWith, self)._normalize_xpaths()
        self.normalized_paths.append(self._normalize_xpath(self.start))

    def _check_against_Rule(self, context_element):
        """Assert that the text of all given `paths` starts with the text of `start`.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: `True` when every located value starts with the text value of `start`;
                `False` when any located value does not.

        Raises:
            ValueError: When anything other than exactly one element or attribute is
                returned for the prefix value.

        """
        prefix_candidates = self._extract_text_from_element_or_attribute(context_element, self.start)
        # The prefix must be located exactly once - zero or multiple results are errors.
        if len(prefix_candidates) != 1:
            raise ValueError
        prefix = prefix_candidates[0]
        for path in self.paths:
            for candidate in self._extract_text_from_element_or_attribute(context_element, path):
                if not candidate.startswith(prefix):
                    return False
        return True
class RuleSum(Rule):
    """A Rule asserting that the values located by `paths` total exactly the configured `sum`.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.
        sum (float): The value that the contents of the located elements and attributes must sum to.

    """

    def __init__(self, context, case):
        """Initialise a `sum` rule."""
        self._name = 'sum'
        super(RuleSum, self).__init__(context, case)

    def __str__(self):
        """Describe the check that this RuleSum performs."""
        return 'Within each `{self.context}`, the sum of values matched at `{0}` must be `{self.sum}`.'.format('` and `'.join(self.paths), **locals())

    def _check_against_Rule(self, context_element):
        """Assert that the total of the values located by `paths` matches the given `sum` value.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: `True` when the located values total to the `sum` value;
                `False` when they do not.
            None: When no elements are found for the specified `paths`.

        Raises:
            ValueError: When a located value is not numeric.

        """
        located_values = []
        for path in set(self.paths):
            for raw_value in self._extract_text_from_element_or_attribute(context_element, path):
                try:
                    located_values.append(decimal.Decimal(raw_value))
                except decimal.InvalidOperation:
                    raise ValueError
        if not located_values:
            return None
        # Decimal arithmetic avoids float rounding surprises in the comparison.
        return sum(located_values) == decimal.Decimal(str(self.sum))
class RuleUnique(Rule):
    """A Rule asserting that the text of each Element or Attribute located by `paths` is unique.

    Attributes:
        paths (list of str): A list of XPath expressions. These are evaluated to locate the elements that the Rule is to operate on.

    """

    def __init__(self, context, case):
        """Initialise a `unique` rule."""
        self._name = 'unique'
        super(RuleUnique, self).__init__(context, case)

    def __str__(self):
        """Describe the check that this RuleUnique performs."""
        return 'Within each `{self.context}`, the text contained within each of the elements and attributes matched by `{0}` must be unique.'.format('` and `'.join(self.paths), **locals())

    def _check_against_Rule(self, context_element):
        """Assert that no text value located by the given `paths` occurs more than once.

        Args:
            context_element (etree._Element): An XML Element.

        Returns:
            bool: `True` when no repeated text is found in the Dataset;
                `False` when repeated text is found.

        Todo:
            Consider better methods for specifying which elements in the tree contain non-permitted duplication, such as bucket sort.

        """
        located_text = []
        for path in set(self.paths):
            located_text.extend(self._extract_text_from_element_or_attribute(context_element, path))
        # Duplication exists exactly when collapsing to a set loses elements.
        return len(located_text) == len(set(located_text))
| {
"repo_name": "IATI/iati.core",
"path": "iati/rulesets.py",
"copies": "1",
"size": "35984",
"license": "mit",
"hash": -1227688374581741000,
"line_mean": 35.3474747475,
"line_max": 191,
"alpha_frac": 0.613355936,
"autogenerated": false,
"ratio": 4.424443624738719,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5537799560738719,
"avg_score": null,
"num_lines": null
} |
"""A module containing a core representation of IATI Schemas."""
import collections
from lxml import etree
import iati.codelists
import iati.constants
import iati.exceptions
import iati.resources
import iati.utilities
class Schema:
    """Representation of a Schema as defined within the IATI SSOT. This is used as a base class for ActivitySchema and OrganisationSchema and should not be instantiated directly.

    Attributes:
        codelists (set): The Codelists associated with this Schema.
        rulesets (set): The Rulesets associated with this Schema.
        ROOT_ELEMENT_NAME (str): The name of the root element within the XML Schema that the class represents.

    Warning:
        The private attribute allowing access to the base Schema Tree is likely to change in determining a good way of accessing the contained schema content.

    Todo:
        Determine a good API for accessing the XMLSchema that the iati.Schema represents.

    """

    # Subclasses override this with the name of their schema's root element.
    ROOT_ELEMENT_NAME = ''

    def __init__(self, path):
        """Initialise a Schema.

        Args:
            path (str): The path to the Schema that is being initialised.

        Raises:
            iati.exceptions.SchemaError: An error occurred during the creation of the Schema.

        Warning:
            The format of the constructor is likely to change. It needs to be less reliant on the name acting as a UID, and allow for other attributes to be provided at this point.

            The raised exceptions are likely to change upon review of IATI-specific exceptions.

            Need to define a good API for accessing public and private attributes. Requiring something along the lines of `schema.schema` is likely not ideal. An improved understanding of use cases will be required for this.

        Todo:
            Better use the try-except pattern.

            Allow the base schema to be modified after initialisation.

            Create test instance where the SchemaError is raised.

        """
        self._schema_base_tree = None
        self._source_path = path
        self.codelists = set()
        self.rulesets = set()
        try:
            loaded_tree = iati.utilities.load_as_tree(path)
        except OSError:
            msg = "Failed to load tree at '{0}' when creating Schema.".format(path)
            iati.utilities.log_error(msg)
            raise iati.exceptions.SchemaError
        else:
            # Only assign the tree once loading is known to have succeeded.
            self._schema_base_tree = loaded_tree

    def __eq__(self, other):
        """Check Schema equality.

        This allows uniqueness to be correctly defined.

        Todo:
            Utilise all attributes as part of the equality process.

            Determine a better method of checking whether the contained Rulesets are equal.

        """
        # perform cheap checks first
        if (len(self.codelists) != len(other.codelists)) or (len(self.rulesets) != len(other.rulesets)):
            return False
        # turn the tree into something that can be easily compared
        self_tree_str = etree.tostring(self.flatten_includes(self._schema_base_tree), pretty_print=True)
        other_tree_str = etree.tostring(other.flatten_includes(other._schema_base_tree), pretty_print=True)  # pylint: disable=protected-access
        # compare Rulesets - cannot use `collections.Counter` since it works on hash values, which differ between equal Rulesets
        self_rulesets = list(self.rulesets)
        other_rulesets = list(other.rulesets)
        for self_rs in self_rulesets:
            # NOTE(review): this filters out *every* Ruleset equal to `self_rs`, not one
            # match per iteration - confirm this multiset behaviour is intended.
            other_rulesets = [other_rs for other_rs in other_rulesets if other_rs != self_rs]
        return (self_tree_str == other_tree_str) and (collections.Counter(self.codelists) == collections.Counter(other.codelists)) and (len(other_rulesets) == 0)

    def _change_include_to_xinclude(self, tree):
        """Change the method in which common elements are included.

        lxml does not contain functionality to access elements within imports defined along the lines of: `<xsd:include schemaLocation="NAME.xsd" />`
        It does, however, contains functionality to access elements within imports defined along the lines of: `<xi:include href="NAME.xsd" parse="xml" />`
        when there is a namespace defined against the root schema element as `xmlns:xi="http://www.w3.org/2001/XInclude"`

        This changes instances of the former to the latter.

        Args:
            tree (etree._ElementTree): The tree within which xsd:include is to be changed to xi:include.

        Returns:
            etree._ElementTree: The modified tree.

        Todo:
            Add more robust tests for schemas at different versions.

            Check whether this is safe in the general case, so allowing it to be performed in __init__().

            Make resource locations more able to handle the general case.

            Consider moving this out of Schema().

            Tidy this up.

            Consider using XSLT.

        """
        # identify the old info
        include_xpath = (iati.constants.NAMESPACE + 'include')
        include_el = tree.getroot().find(include_xpath)
        if include_el is None:
            # No xsd:include present, so there is nothing to convert.
            return tree
        include_location = include_el.attrib['schemaLocation']
        # add namespace for XInclude
        xi_name = 'xi'
        xi_uri = 'http://www.w3.org/2001/XInclude'
        iati.utilities.add_namespace(tree, xi_name, xi_uri)
        new_nsmap = {}
        for key, value in iati.constants.NSMAP.items():
            new_nsmap[key] = value
        new_nsmap[xi_name] = xi_uri
        # create a new element
        xinclude_el = etree.Element(
            '{' + xi_uri + '}include',
            # `[:-4]` strips the trailing '.xsd' from the schemaLocation value.
            href=iati.resources.create_schema_path(include_location[:-4], self._get_version()),
            parse='xml',
            nsmap=new_nsmap
        )
        # make the path to `xml.xsd` reference the correct file
        import_xpath = (iati.constants.NAMESPACE + 'import')
        import_el = tree.getroot().find(import_xpath)
        import_el.attrib['schemaLocation'] = iati.resources.create_schema_path('xml', self._get_version())
        # insert the new element
        tree.getroot().insert(import_el.getparent().index(import_el) + 1, xinclude_el)
        # remove the old element
        etree.strip_elements(tree.getroot(), include_xpath)
        return tree

    def _get_version(self):
        """Return the version that this schema is defined as.

        Returns:
            iati.Version or None: The version stated for the schema, according to the value defined in the 'version' attribute at root of the XSD schema. Returns None if there is no 'version' attribute.

        """
        version = self._schema_base_tree.getroot().get('version')
        if version is None:
            return version
        return iati.Version(version)

    def flatten_includes(self, tree):
        """Flatten includes so that all nodes are accessible through lxml.

        Identify the contents of files defined as `<xsd:include schemaLocation="NAME.xsd" />` and bring in the contents.

        Args:
            tree (etree._ElementTree): The tree to flatten.

        Returns:
            etree._ElementTree: The flattened tree.

        Todo:
            Add more robust tests for schemas at different versions.

            Consider moving this out of Schema().

            Tidy this up.

        """
        # change the include to a format that lxml can read
        tree = self._change_include_to_xinclude(tree)
        # adopt the included elements
        tree.xinclude()
        # remove nested schema elements
        schema_xpath = (iati.constants.NAMESPACE + 'schema')
        for nested_schema_el in tree.getroot().findall(schema_xpath):
            if isinstance(nested_schema_el, etree._Element):  # pylint: disable=protected-access
                # move contents of nested schema elements up a level
                for elem in nested_schema_el[:]:
                    # do not duplicate an import statement
                    if 'schemaLocation' in elem.attrib:
                        continue
                    tree.getroot().insert(nested_schema_el.getparent().index(nested_schema_el) + 1, elem)
        # remove the nested schema elements
        etree.strip_elements(tree.getroot(), schema_xpath)
        return tree

    def validator(self):
        """Return a schema that can be used for validation.

        Takes the base schema and converts it into an object that lxml can deal with.

        Returns:
            etree.XMLSchema: A schema that can be used for validation.

        Raises:
            iati.exceptions.SchemaError: An error occurred in the creation of the validator.

        """
        try:
            return iati.utilities.convert_tree_to_schema(self._schema_base_tree)
        except etree.XMLSchemaParseError as err:
            iati.utilities.log_error(err)
            raise iati.exceptions.SchemaError('Problem parsing Schema')
class ActivitySchema(Schema):
    """An IATI Activity Schema, as defined within the IATI SSOT."""

    ROOT_ELEMENT_NAME = 'iati-activities'
class OrganisationSchema(Schema):
    """An IATI Organisation Schema, as defined within the IATI SSOT."""

    ROOT_ELEMENT_NAME = 'iati-organisations'
| {
"repo_name": "IATI/iati.core",
"path": "iati/schemas.py",
"copies": "1",
"size": "9284",
"license": "mit",
"hash": 2082357394841270000,
"line_mean": 37.3636363636,
"line_max": 224,
"alpha_frac": 0.6460577337,
"autogenerated": false,
"ratio": 4.406264831514001,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012334654310821,
"num_lines": 242
} |
"""A module containing a core representation of IATI Version Numbers, plus how they are handled and compared.
Todo:
Check whether there is any other version-related functionality to bring into this module.
Ensure that everything in this module should be here.
"""
from decimal import Decimal
import re
import semantic_version
import iati.utilities
class Version(semantic_version.Version):
    """Representation of an IATI Standard Version Number."""

    def __init__(self, version):
        """Initialise a Version Number.

        Args:
            version (str / Decimal): A representation of an IATI version number.

        Raises:
            TypeError: If an attempt to pass something that is not a string or Decimal is made.
            ValueError: If a provided value is not a permitted version number.

        """
        if not isinstance(version, str) and not isinstance(version, Decimal):
            raise TypeError('A Version object must be created from a string or Decimal, not a {0}'.format(type(version)))
        # check to see if IATIver
        try:
            if self._is_iatidecimal(version):
                # e.g. Decimal('1.05') -> major '1', minor str(105 - 101) = '4'
                integer = str(int(version))
                decimal = str(int(version * 100) - 101)
                # NOTE(review): the second positional argument to
                # semantic_version.Version is presumably its `partial` flag -
                # confirm against the installed semantic_version API.
                super(Version, self).__init__('.'.join([integer, decimal, '0']), True)
            elif self._is_iativer(version):
                # e.g. '1.05' -> major '1', minor str(5 - 1) = '4'
                integer = version.split('.')[0]
                decimal = str(int(version.split('.')[1]) - 1)
                super(Version, self).__init__('.'.join([integer, decimal, '0']), True)
            elif self._is_semver(version):
                super(Version, self).__init__(version, True)
            else:
                raise ValueError
        except (TypeError, ValueError):
            raise ValueError('A valid version number must be specified.')

    @property
    def integer(self):
        """int: The IATIver Integer Component of the Version."""
        return self.major

    @integer.setter
    def integer(self, value):
        self.major = value

    @property
    def decimal(self):
        """int: The IATIver Decimal Component of the Version.

        This differs from the minor component since it starts at .01 (1) rather than .0 (0).

        """
        return self.minor + 1

    @decimal.setter
    def decimal(self, value):
        self.minor = value

    @property
    def iativer_str(self):
        """string: An IATIver-format string representation of the Version Number.

        Note:
            The name of this property may change.

        """
        # IATIver decimals are zero-padded to two digits, hence the literal '.0'.
        return str(self.integer) + '.0' + str(self.decimal)

    @property
    def semver_str(self):
        """string: A SemVer-format string representation of the Version Number.

        Note:
            The name of this property may change.

        """
        return '.'.join([str(self.major), str(self.minor), str(self.patch)])

    def __repr__(self):
        """str: A representation of the Version Number that will allow a copy of this object to be instantiated."""
        return "iati.Version('" + self.semver_str + "')"

    def __str__(self):
        """str: A representation of the Version Number as would exist on the Version Codelist.

        Warning:
            At present this always results in an IATIver string. This may change should SemVer be adopted.

            The helper methods must be used if a specific format is required.

        """
        return self.iativer_str

    def _is_iatidecimal(self, version):
        """Determine whether a version string is a Decimal and is a permitted value.

        Args:
            version (string or Decimal): The string for which to check conformance.

        Returns:
            bool: True if the provided string is a permitted IATIver-format version number. False if not.

        """
        if not isinstance(version, Decimal):
            return False
        # Only Decimal('1.01') through Decimal('1.09') are permitted.
        valid_values = [Decimal('1.0' + str(val)) for val in range(1, 10)]
        return version in valid_values

    def _is_iativer(self, version_string):
        """Determine whether a version string is in an IATIver format and is a permitted value.

        Args:
            version_string (string): The string for which to check conformance.

        Returns:
            bool: True if the provided string is a permitted IATIver-format version number. False if not.

        """
        # a regex for what makes a valid IATIver Version Number format string
        iativer_re = re.compile(r'^((1\.0[1-9])|(((1\d+)|([2-9](\d+)?))\.0[1-9](\d+)?))$')
        return iativer_re.match(version_string)

    def _is_semver(self, version_string):
        """Determine whether a version string is in a SemVer format and is a permitted value.

        Args:
            version_string (string): The string for which to check conformance.

        Returns:
            bool: True if the provided string is a permitted in SemVer-format version number. False if not.

        """
        is_semver_format = semantic_version.validate(version_string)
        try:
            # Major version 0 is not a permitted IATI version.
            is_permitted_value = semantic_version.Version(version_string).major != 0
        except ValueError:
            return False
        return is_semver_format and is_permitted_value

    def next_major(self):
        """Obtain a Version object that represents the next version after a Major Upgrade.

        Returns:
            iati.Version: A Version object that represents the next version after a Major Upgrade.

        """
        next_major = super(Version, self).next_major()
        return Version(str(next_major))

    def next_minor(self):
        """Obtain a Version object that represents the next version after a Minor Upgrade.

        Returns:
            iati.Version: A Version object that represents the next version after a Minor Upgrade.

        """
        next_minor = super(Version, self).next_minor()
        return Version(str(next_minor))

    def next_integer(self):
        """Obtain a Version object that represents the next version after an Integer Upgrade.

        Returns:
            iati.Version: A Version object that represents the next version after an Integer Upgrade.

        """
        # IATI 'Integer' upgrades correspond to SemVer major upgrades.
        return self.next_major()

    def next_decimal(self):
        """Obtain a Version object that represents the next version after a Decimal Upgrade.

        Returns:
            iati.Version: A Version object that represents the next version after a Decimal Upgrade.

        """
        # IATI 'Decimal' upgrades correspond to SemVer minor upgrades.
        return self.next_minor()

    # Replacing the inherited method with a bare property makes attribute
    # access raise, effectively removing `next_patch` from the public API.
    next_patch = property()
    """Override the parent class's function to provide the next Patch Version.

    Implementation based on https://stackoverflow.com/a/235657

    Note:
        The Error that is raised has a slightly different message than if the attribute had never existed.

    Raises:
        AttributeError: An error that indicates that this attribute does not exist.

    """
STANDARD_VERSIONS_SUPPORTED = [Version(version_iativer) for version_iativer in ['1.04', '1.05', '2.01', '2.02', '2.03']]
"""Define all versions of the Standard fully supported by pyIATI."""
STANDARD_VERSIONS = [Version(version_iativer) for version_iativer in ['1.01', '1.02', '1.03']] + STANDARD_VERSIONS_SUPPORTED
"""Define all versions of the Standard.
Todo:
This constant to be populated by the values in the Version codelist, rather than hard-coded.
Consider if functionality should extend to working with development versions of the Standard (e.g. during an upgrade process).
"""
STANDARD_VERSION_LATEST = max(STANDARD_VERSIONS)
"""The latest version of the IATI Standard."""
STANDARD_VERSIONS_MAJOR = list(set([
minor_version.major for minor_version in STANDARD_VERSIONS
]))
"""The major versions of the IATI Standard.
Todo:
Change from being ints to being Version()s.
"""
STANDARD_VERSIONS_MINOR = STANDARD_VERSIONS
"""The minor versions of the IATI Standard."""
STANDARD_VERSION_ANY = '*'
"""A value to represent that something is applicable to all versions of the IATI Standard - it is version independent.
Warning:
Assumptions should not be made as to the value of this constant other than it: `is not None`
"""
def allow_fully_supported_version(input_func):
    """Decorate function by ensuring versions are fully supported by pyIATI.

    In terms of value:

    * Valid Decimal Versions will remain unchanged.
    * Invalid Decimal Versions will cause an error to be raised.
    * Other values will cause an error to be raised.

    Args:
        input_func (function): The function to decorate. Takes the `version` argument as its first argument.

    Returns:
        function: The input function, wrapped such that it is called with a fully supported iati.Version representing a Decimal Version.

    """
    import functools  # local import keeps this fix self-contained within the block

    # `functools.wraps` preserves the decorated function's `__name__`,
    # `__doc__` and other metadata, which would otherwise be replaced by
    # the wrapper's own.
    @functools.wraps(input_func)
    def wrap_allow_fully_supported_version(*args, **kwargs):
        """Act as a wrapper to ensure a version number is a Decimal that is fully supported by pyIATI.

        Raises:
            ValueError: If the input version is not a Decimal iati.Version that pyIATI fully supports.

        """
        version = _extract_version_arg(args)

        if not _is_fully_supported(version):
            raise ValueError('{0} is not a fully supported version of the IATI Standard in a normalised representation.'.format(repr(version)))

        return input_func(*args, **kwargs)

    return wrap_allow_fully_supported_version
def allow_known_version(input_func):
    """Decorate function by ensuring versions are Decimal Versions of IATI that pyIATI knows exists.

    In terms of value:

    * Valid Decimal Versions will remain unchanged.
    * Invalid Decimal Versions will cause an error to be raised.
    * Other values will cause an error to be raised.

    Args:
        input_func (function): The function to decorate. Takes the `version` argument as its first argument.

    Returns:
        function: The input function, wrapped such that it is called with an iati.Version representing a real Decimal Version.

    """
    import functools  # local import keeps this fix self-contained within the block

    # `functools.wraps` preserves the decorated function's `__name__`,
    # `__doc__` and other metadata, which would otherwise be replaced by
    # the wrapper's own.
    @functools.wraps(input_func)
    def wrap_allow_known_version(*args, **kwargs):
        """Act as a wrapper to ensure a version number is a Decimal that exists.

        Raises:
            ValueError: If the input version is not a known Decimal iati.Version.

        """
        version = _extract_version_arg(args)

        if not _is_known(version):
            raise ValueError('{0} is not a known version of the IATI Standard in a normalised representation.'.format(repr(version)))

        return input_func(*args, **kwargs)

    return wrap_allow_known_version
def allow_possible_version(input_func):
    """Decorate a function so it only accepts values that may represent a Version.

    Permitted Integer/Decimal Version representations and STANDARD_VERSION_ANY
    pass through unchanged; other values raise before `input_func` runs.

    Args:
        input_func (function): The function to decorate. Takes the `version`
            argument as its first argument.

    Returns:
        function: The input function, wrapped so its first argument is known
            to represent some IATI Version Number.
    """
    def wrap_allow_possible_version(*args, **kwargs):
        """Reject values that cannot represent an IATI Version Number.

        Raises:
            TypeError: If the input version is not an iati.Version, string,
                Decimal or integer.
            ValueError: If the input version is a string, Decimal or integer
                whose value cannot represent a Version Number.
        """
        _prevent_non_version_representations(_extract_version_arg(args))
        return input_func(*args, **kwargs)
    return wrap_allow_possible_version
def decimalise_integer(input_func):
    """Decorate a function by normalising its version argument to a Decimal Version.

    Decimal Versions remain unchanged; Integer Versions become the latest
    Decimal Version within that Integer. Strings and Decimals are converted
    to iati.Versions; existing iati.Versions pass through untouched.

    Args:
        input_func (function): The function to decorate. Takes the `version`
            argument as its first argument.

    Returns:
        function: The input function, wrapped so it is called with an
            iati.Version representing a Decimal Version.
    """
    def wrap_decimalise_integer(*args, **kwargs):
        """Convert an Integer Version argument into a normalised Decimal Version."""
        decimal_version = _decimalise_integer(_extract_version_arg(args))
        return input_func(decimal_version, *args[1:], **kwargs)
    return wrap_decimalise_integer
def normalise_decimals(input_func):
    """Decorate a function by converting Decimal Version representations to iati.Versions.

    Args:
        input_func (function): The function to decorate. Takes the `version`
            argument as its first argument.

    Returns:
        function: The input function, wrapped so it receives an iati.Version
            whenever a permitted Decimal Version representation is given.
    """
    def wrap_normalise_decimals(*args, **kwargs):
        """Normalise a Decimal Version representation into an iati.Version."""
        normalised_version = _normalise_decimal_version(_extract_version_arg(args))
        return input_func(normalised_version, *args[1:], **kwargs)
    return wrap_normalise_decimals
def versions_for_integer(integer):
    """Return the known Decimal Versions within the given Integer Version.

    Args:
        integer (int): The integer version to find the supported versions for.

    Returns:
        list of iati.Version: The supported versions for the input integer.
    """
    target_major = int(integer)
    return [known for known in iati.version.STANDARD_VERSIONS
            if known.major == target_major]
def _decimalise_integer(version):
    """Convert a version number into the most appropriate Decimal Version.

    * Integer Versions will return the latest Decimal Version within the Integer. If the Integer is invalid, returns the first Decimal that would exist in the Integer.
    * All other inputs will remain unchanged.

    Args:
        version (Any): The value to convert to a Decimal Version if it represents an Integer Version.

    Returns:
        Any: The Decimal Version of the Standard that the input version relates to, or the input unchanged.
    """
    # handle major versions
    try:
        # bools are a subclass of int in Python, but are never versions
        if not isinstance(version, (int, str)) or isinstance(version, bool):
            raise TypeError
        elif isinstance(version, str) and str(int(version)) != version:  # detect strings containing numbers and whitespace
            raise ValueError
        major_version = int(version)
        if major_version in iati.version.STANDARD_VERSIONS_MAJOR:
            # known Integer: return its latest known Decimal Version
            version = max(versions_for_integer(major_version))
        elif str(major_version) == str(version):  # specifying only a major component
            # unknown Integer: fabricate the first Decimal it would contain
            version = Version(str(major_version) + '.0.0')
    except (ValueError, TypeError, OverflowError):
        # not an Integer Version representation - leave input unchanged
        pass
    return version
def _extract_version_arg(arg_list):
"""Extract a version argument from an args list, raising an error if something is wrong.
Args:
arg_list (list): The input args to extract a version argument from. The `version` argument is expected to be the first argument.
Returns:
Any: The value in the specified argument index.
Raises:
TypeError: If the argument list is not long enough to access the specified index (since the function the argument list was taken from does not permit the required number of attributes).
"""
try:
version = arg_list[0]
except IndexError:
raise TypeError('The first argument of this function must be a specified version.')
return version
def _is_fully_supported(version):
    """Detect whether a Version is fully supported by pyIATI.

    Args:
        version (Any): The Version to check support of.

    Returns:
        bool: True if version is a fully supported iati.Version. False in all other cases.
    """
    supported_versions = iati.version.STANDARD_VERSIONS_SUPPORTED
    return any(version == candidate for candidate in supported_versions)
def _is_known(version):
    """Detect whether a Version is a released version that pyIATI knows to exist.

    Args:
        version (iati.Version): The Version to check support of.

    Returns:
        bool: True if version is an iati.Version known by pyIATI to be a released version. False in all other cases.
    """
    known_versions = iati.version.STANDARD_VERSIONS
    return any(version == candidate for candidate in known_versions)
def _normalise_decimal_version(version):
    """Normalise the format of Decimal Versions.

    If the specified value can act as a Decimal Version of the IATI Standard,
    it is converted to an iati.Version; any other value is returned as-is.

    Args:
        version (Any): A value that may be a known method to represent a Decimal Version of the IATI Standard.

    Returns:
        Any: An iati.Version if the input value represents a Decimal Version of the IATI Standard. The input version in all other cases.
    """
    try:
        return Version(version)
    except (TypeError, ValueError):
        return version
def _prevent_non_version_representations(version):
    """Detect whether a value specified to be a Version could possibly represent a Version.

    In terms of value:
    * Permitted values representing an Integer or Decimal Version in a known format will remain unchanged.
    * STANDARD_VERSION_ANY will remain unchanged, as a way of representing all versions.
    * strings, integers and Decimals with values that cannot represent a Version will cause a ValueError.
    * Values of types other than string, Decimal, integer and iati.Version will cause a TypeError.

    Args:
        version (Any): The value to check to see whether it may represent a Version in a known manner.

    Raises:
        TypeError: If anything other than an iati.Version, string, Decimal or integer is provided.
        ValueError: If a string, Decimal or integer has a value that is not in a format that is known to represent an IATI Version Number.
    """
    # bool is a subclass of int, so must be excluded explicitly
    if not isinstance(version, (str, Decimal, int, Version)) or isinstance(version, bool):
        raise TypeError('IATI Version Numbers may only be represented as a string, Decimal, int or iati.Version. A {0} was provided.'.format(type(version)))
    # attempt full parsing first; fall back to looser representations below
    try:
        Version(version)
    except ValueError:
        # Version() rejected the value; strings may still be Integer
        # Versions (digit-only) or the wildcard STANDARD_VERSION_ANY
        try:
            if version == '0' or (not version.isdigit() and version != STANDARD_VERSION_ANY):  # accept string representations of positive numbers
                raise ValueError('{0} is not a known representation of a potential IATI Version Number'.format(version))
        except AttributeError:  # invalid decimal
            # no .isdigit() attribute, so this was a Decimal that Version() rejected
            raise ValueError('Only permitted versions at major version 1 may be represented using `decimal.Decimals` - {0} is not a permitted v1.0x version.'.format(version))
    except TypeError:
        # will be an int or None or iati.Version if reaching this point
        if not isinstance(version, Version) and version < 1:
            raise ValueError('IATI Integer Versions are all positive. {0} is a non-positive number.'.format(version))
    return version
| {
"repo_name": "IATI/iati.core",
"path": "iati/version.py",
"copies": "1",
"size": "19616",
"license": "mit",
"hash": 6932783670576749000,
"line_mean": 35.6654205607,
"line_max": 193,
"alpha_frac": 0.6740925775,
"autogenerated": false,
"ratio": 4.428990742831339,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0017766043713025336,
"num_lines": 535
} |
'''A module containing a function for building Creature objects from
root HtmlElement objects related to d20pfsrd.com Bestiary pages'''
import re
import string
from core.creature import Creature
__all__ = ['build']
# Ability score labels as they appear in a stat block's STATISTICS section.
ABILITIES = ['Str', 'Dex', 'Con', 'Int', 'Wis', 'Cha']
# Keywords delimiting the parsable fields of a d20pfsrd stat block; used to
# guarantee surrounding whitespace before the text is split into words.
ATTRIBUTES = [
    'DEFENSE', 'hp', 'AC', 'touch', 'flat-footed',
    'Fort', 'Ref', 'Will', 'Defensive', 'DR', 'Resist', 'Immune',
    'Vulnerabilities', 'Weakness',
    'STATISTICS', 'Base', 'Atk', 'CMB', 'CMD', 'Feats', 'Skills'
]
def _check_text_for_spaces(text, keywords, start=0):
'''Checks text for spaces before and after certain keywords. If a
space is not present, it gets inserted into the text in the
appropriate place.
:param text: the text to be checked
:param keywords: list of words required to have spaces follow them
:param start: starting index in text to begin checking at
:returns version of 'text' with spaces where they should be
'''
_text = text
for word in keywords:
indx = _text.find(word, start)
# check for space after keyword
if _text[indx + len(word)] != ' ':
_text = _insert_text_into_text(_text, indx + len(word), ' ')
indx = _text.find(word, start)
# check for space before keyword
if _text[indx-1] != ' ':
_text = _insert_text_into_text(_text, indx, ' ')
return _text
def _format_creature_entry(entry):
    '''Returns copy of provided Creature entry formatted such that
    it is easily parsable

    :param entry: Creature entry scraped from d20pfsrd Bestiary page
    :returns: a formatted copy of the Creature entry
    '''
    # handle unicode characters
    # NOTE(review): this appears to target Python 2 -- under Python 3,
    # encode() returns bytes and the str operations below would fail;
    # confirm the supported interpreter before porting.
    _entry = entry.replace(u'\xe2', u'-')
    _entry = _entry.encode('ascii', 'ignore')
    # massage text in some necessary ways
    _entry = _entry.replace('*', '')
    _entry = _entry.replace('flatfooted', 'flat-footed')
    _entry = _entry.replace('Reflex', 'Ref')
    # add spaces where needed
    _entry = _entry.replace(',', ', ')
    _entry = _entry.replace('(', ' (')
    # pad the section/attribute keywords, then the ability labels (the
    # latter only from the STATISTICS section onwards)
    _entry = _check_text_for_spaces(_entry, ATTRIBUTES)
    _entry = _check_text_for_spaces(_entry, ABILITIES,
                                    _entry.find('STATISTICS'))
    # replace all occurrences of white space with a single ' '
    _entry = re.sub(r'\s+', ' ', _entry)
    return _entry
def _format_creature_name(name):
    '''Returns copy of name argument formatted appropriately

    :param name: a string containing an unformatted Creature name
    :returns: a formatted Creature name
    '''
    # NOTE(review): Python 2 style -- encode() yields bytes on Python 3.
    new_name = name.encode('ascii', 'ignore')  # remove unicode chars
    new_name = new_name.lower()
    # capitalize space-separated words
    new_name = string.capwords(new_name, ' ')
    # capitalize words following a hyphen
    # (if no hyphen exists, find() returns -1 so indx is 0 and the first
    # character is re-capitalized -- harmless for non-empty names)
    indx = new_name.find('-') + 1
    new_name = new_name[:indx] + new_name[indx].upper() + new_name[indx+1:]
    # capitalize words following a left parenthesis
    indx = new_name.find('(') + 1
    new_name = new_name[:indx] + new_name[indx].upper() + new_name[indx+1:]
    return new_name
def _insert_text_into_text(orig_text, index, insert_text):
'''Creates a new string by inserting one string into another at
some specified index
:param orig_text: the original string
:param index: index of original string to insert text into
:param insert_text: string that will be inserted into the original
:returns the new string after the insertion
'''
return "%s%s%s" % (orig_text[:index], insert_text, orig_text[index:])
def _populate_ability_scores(words, creature):
'''Populates a Creature object's ability score values using
the Creature's entry on d20pfsrd.com split into individual
words
:param words: text of d20pfsrd Bestiary page as list of words
:param creature: Creature object to be populated
'''
for key in creature.ability_scores.keys():
index = words.index(key, words.index('STATISTICS'))
parsed_ability = words[index+1]
parsed_ability = parsed_ability.replace(',', '')
parsed_ability = parsed_ability.replace(';', '')
if parsed_ability == '' or '-' in parsed_ability:
creature.ability_scores[key] = '-1'
else:
creature.ability_scores[key] = parsed_ability
def _populate_ac(words, creature):
'''Populates a Creature object's armor class values using the
Creature's entry on d20pfsrd.com split into individual words
:param words: text of d20pfsrd Bestiary page as list of words
:param creature: Creature object to be populated
'''
for key in creature.ac.keys():
index = words.index(key, words.index('DEFENSE'))
parsed_ac = words[index+1]
parsed_ac = parsed_ac.replace(',', '')
parsed_ac = parsed_ac.replace(';', '')
creature.ac[key] = parsed_ac
def _populate_bab(words, creature):
'''Populates a Creature object's base attack bonus value using the
Creature's entry on d20pfsrd.com split into individual words
:param words: text of d20pfsrd Bestiary page as list of words
:param creature: Creature object to be populated
'''
index = words.index('Atk', words.index('STATISTICS'))
parsed_bab = words[index+1]
parsed_bab = parsed_bab.replace(',', '')
parsed_bab = parsed_bab.replace(';', '')
parsed_bab = parsed_bab.replace('+', '')
creature.bab = parsed_bab
def _populate_cmb(words, creature):
'''Populates a Creature object's Combat Maneuver Bonus (CMB) value
using the Creature's entry on d20pfsrd.com split into individual
words
:param words: text of d20pfsrd Bestiary page as list of words
:param creature: Creature object to be populated
'''
index = words.index('CMB', words.index('STATISTICS'))
parsed_cmb = words[index+1]
parsed_cmb = parsed_cmb.replace(',', '')
parsed_cmb = parsed_cmb.replace(';', '')
parsed_cmb = parsed_cmb.replace('+', '')
if parsed_cmb == '-' or parsed_cmb == '--':
creature.cmb = '-1'
else:
creature.cmb = parsed_cmb
def _populate_cmd(words, creature):
'''Populates a Creature object's Combat Maneuver Defense (CMD)
value using the Creature's entry on d20pfsrd.com split into individual
words
:param words: text of d20pfsrd Bestiary page as list of words
:param creature: Creature object to be populated
'''
index = words.index('CMD', words.index('STATISTICS'))
parsed_cmd = words[index+1]
parsed_cmd = parsed_cmd.replace(',', '')
parsed_cmd = parsed_cmd.replace(';', '')
creature.cmd = parsed_cmd
if parsed_cmd == '-' or parsed_cmd == '--':
creature.cmd = '-1'
else:
creature.cmd = parsed_cmd
def _populate_cr_and_mr(text, creature):
'''Populate's a Creature object's Challenge Rating (CR) and
Mythic Rank (MR) values using text taken from the header of
a Creature's entry on d20pfsrd.com
It is expected that the given text will be of the form 'CR X/MR Y'
:param text: a string containing an unformatted Creature CR
:param creature: Creature object to be populated
'''
cr_text = text
creature_cr = '0'
creature_mr = '0'
# if not present, insert spaces where needed
if not cr_text[:3] == 'CR ':
cr_text = _insert_text_into_text(cr_text, 2, ' ')
# replace any occurrence of * with ''
cr_text = cr_text.replace('*', '')
# case 1: text contains mythic rank
if 'MR' in cr_text:
ranks = cr_text.split('/M')
# get challenge rating
cr_words = ranks[0].split(' ')
creature_cr = cr_words[1]
# get mythic rank
mr_words = ranks[1].split(' ')
creature_mr = mr_words[1]
# case 2: text does not contain mythic rank
else:
cr_words = cr_text.split(' ')
cr_text = cr_words[1]
# handle Challenge Ratings with fractional values
if '/' in cr_text:
cr_text = str(float(cr_text[0]) / float(cr_text[2]))
# truncate strings with long floating point values
if len(cr_text) > 4:
cr_text = cr_text[:4]
creature_cr = cr_text
creature.cr = creature_cr
creature.mr = creature_mr
def _populate_from_header_values(root, creature):
    '''Populates a Creature object with values that are normally
    found in the header section of a d20pfsrd.com Bestiary
    entry: name, CR, MR

    :param root: root element of an HtmlElement tree
    :param creature: Creature object to be populated
    '''
    # the first table row of the layout tile holds the name and CR text
    header_rows = root.cssselect('td.sites-layout-tile tr')
    header_text = header_rows[0].text_content().strip()
    # collapse all runs of whitespace to single spaces
    header_text = re.sub(r'\s+', ' ', header_text)
    # everything before 'CR' is the name; 'CR' onwards is the rating text
    cr_start = header_text.index('CR')
    creature.name = _format_creature_name(header_text[:cr_start - 1])
    _populate_cr_and_mr(header_text[cr_start:], creature)
def _populate_from_entry_values(root, creature):
    '''Populates a Creature object with values that are normally
    found in the main section of a d20pfsrd.com Bestiary entry

    :param root: root element of an HtmlElement tree
    :param creature: Creature object to be populated
    '''
    # get the page's Creature text and normalise it for word-level parsing
    entry_text = root.cssselect('.sites-canvas-main')[0].text_content()
    entry_words = _format_creature_entry(entry_text).split(' ')
    # update all Creature values, in the same order as the stat block
    for populate in (_populate_hp_and_hd, _populate_ac, _populate_saves,
                     _populate_ability_scores, _populate_bab,
                     _populate_cmb, _populate_cmd):
        populate(entry_words, creature)
def _populate_hp_and_hd(words, creature):
'''Populates a Creature object's hit point and Hit Dice (HD)
values using the Creature's entry on d20pfsrd.com split into
individual words
:param words: text of d20pfsrd Bestiary page as list of words
:param creature: Creature object to be populated
'''
# get the Creature's hp value
index = words.index('hp', words.index('DEFENSE'))
index = index + 1 # want word after 'hp' in entry
parsed_hp = words[index]
parsed_hp = parsed_hp.strip()
creature.hp = parsed_hp
# get the Creature's Hit Dice (HD) value
index = index + 1 # want expression after hp value
parsed_hd = words[index]
# handle case where 'each' is after hp value
if 'each' in parsed_hd:
index = index + 1
parsed_hd = words[index]
parsed_hd = parsed_hd.replace(',', '')
parsed_hd = parsed_hd.replace(';', '')
# case 1: hit dice listed in form NdM
if 'd' in parsed_hd:
parsed_hd = parsed_hd[1 : parsed_hd.index('d')]
# case 2: hit diced listed in form N HD
else:
parsed_hd = parsed_hd[1:]
creature.hd = parsed_hd
def _populate_saves(words, creature):
'''Populates a Creature object's saving throw values using the
Creature's entry on d20pfsrd.coms split into individual
words
:param words: text of d20pfsrd Bestiary page as list of words
:param creature: Creature object to be populated
'''
for key in creature.saves.keys():
index = words.index(key, words.index('DEFENSE'))
parsed_save = words[index+1]
parsed_save = parsed_save.replace(',', '')
parsed_save = parsed_save.replace(';', '')
parsed_save = parsed_save.replace('+', '')
creature.saves[key] = parsed_save
def build(root):
    '''Creates a Creature object using data in root HtmlElement
    of a Bestiary page from d20pfsrd.com

    :param root: root HtmlElement of d20pfsrd.com Bestiary page
    :returns: a Creature object
    '''
    monster = Creature()
    # fill in header-derived values (name, CR, MR), then the stat block
    _populate_from_header_values(root, monster)
    _populate_from_entry_values(root, monster)
    return monster
| {
"repo_name": "lot9s/pathfinder-rpg-utils",
"path": "data-mining/bestiary/core/builders/creature/d20pfsrd.py",
"copies": "1",
"size": "12506",
"license": "mit",
"hash": -249179587914230460,
"line_mean": 35.7823529412,
"line_max": 76,
"alpha_frac": 0.6418519111,
"autogenerated": false,
"ratio": 3.5690639269406392,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4710915838040639,
"avg_score": null,
"num_lines": null
} |
"""A module containing analytical objects specific to a particular experiment.
"""
import os
import numpy as np
import pandas as pd
from whaler.analysis import Analysis
class Reactions():
"""
"""
    def __init__(self):
        # Shared analysis helper used for energies, bond lengths and output.
        self.A = Analysis()
        # Analysis output filenames.
        self.crude_N2_out = "crudeN2_Es.csv"
        self.N2_act_out = "N2_act_Es.csv"
        self.N2_bond_out = "N2_act_bonds.csv"
        # Physical constants.
        self.kB = 3.1668114/1000000  # Boltzmann constant in Hartree/K
        self.temp = 298.15  # temperature in K
        self.kcal_eH = 627.509  # Hartree -> kcal/mol conversion factor
    def write_crude_N2(self):
        """Write the crude N2 activation energies (kcal/mol) to CSV."""
        self.A.write_data(
            "cruderxn", self.crude_N2_out, self.crude_N2_act(), format='%.1f')
    def write_N2_act(self):
        """Write the thermodynamically-corrected N2 activation energies to CSV."""
        self.A.write_data(
            "N2act", self.N2_act_out, self.therm_N2_act(), format='%.1f')
    def write_N2_bonds(self):
        """Write the M-M/M-N/N-N bond-length table to CSV."""
        self.A.write_data(
            "bonds", self.N2_bond_out, self.MMN2_bonds(), format='%.3f')
    def MMN2_bonds(self):
        """Tabulates the M-M, M-N, and N-N bond lengths in M2(L)4, M2(L)4N, and
        M2(L)4N2 structures.

        Returns:
            pandas.DataFrame: one row per base M2(L)4 structure, columns as
            listed in `headers` below.
        """
        # Generate structure sets, keyed by structure name with the ground
        # state taken from the analysis table. Suffix conventions:
        # '...4' = bare M2(L)4, '...4N' = nitride, '...4N2' = N2 adduct.
        short_gEs = self.A.gEs.dropna(axis=0, how='all')
        base_structs = {
            struct : short_gEs.loc[struct, 'Ground State']
            for struct in short_gEs.index if struct[-1] == '4'
        }
        N_structs = {
            struct : short_gEs.loc[struct, 'Ground State']
            for struct in short_gEs.index if struct[-2:] == '4N'
        }
        N2_structs = {
            struct : short_gEs.loc[struct, 'Ground State']
            for struct in short_gEs.index if struct[-3:] == '4N2'
        }
        # Acquire bond lengths along the z axis via the analysis helper.
        # gs = ground state; es = spin-flipped (excited) state.
        gs_M_M = {
            struct : self.A.bondlength(struct, state, 'M', 'M', 'z')
            for struct,state in base_structs.items()
        }
        es_M_M = {
            struct : self.A.bondlength(struct,
                self.A.spinflip[state], 'M', 'M', 'z')
            for struct,state in base_structs.items()
        }
        # keys are truncated ('[:-1]' / '[:-2]') so all dicts share the
        # base-structure name and align into one DataFrame row
        gs_M_MN = {
            struct[:-1] : self.A.bondlength(struct, state, 'M', 'M', 'z')
            for struct,state in N_structs.items()
        }
        gs_M_MN2 = {
            struct[:-2] : self.A.bondlength(struct, state, 'M', 'M', 'z')
            for struct,state in N2_structs.items()
        }
        gs_M2_N = {
            struct[:-1] : self.A.bondlength(struct, state, 'M', 'N', 'z')
            for struct,state in N_structs.items()
        }
        gs_M2_N2 = {
            struct[:-2] : self.A.bondlength(struct, state, 'M', 'N', 'z', 1)
            for struct,state in N2_structs.items()
        }
        gs_M2N_N = {
            struct[:-2] : self.A.bondlength(struct, state, 'N', 'N', 'z')
            for struct,state in N2_structs.items()
        }
        # Construct the data table (column order fixed by `headers`).
        headers = [
            'M-M gs', 'M-M es', 'M-MN2', 'M2-N2', 'M2N-N', 'M-MN', 'M2-N']
        results = [
            gs_M_M, es_M_M, gs_M_MN2, gs_M2_N2, gs_M2N_N, gs_M_MN, gs_M2_N]
        resultsdict = {k:v for k,v in zip(headers, results)}
        lengths = pd.DataFrame.from_dict(data=resultsdict, orient='columns')
        lengths = lengths[headers]
        print(lengths)
        return lengths
def crude_N2_act(self):
"""Subtracts the crude (geo) energy of each M2(L)4 structure and N2 from
the corresponding M2(L)4N and M2(L)4N2 structures, tabulating the
results in kcal/mol.
"""
# Make a dictionary of all structures with ground state energies.
short_gEs = self.A.gEs.dropna(axis=0, how='all')
struct_Es = {
struct : short_gEs.loc[struct][:-1].min()
for struct in short_gEs.index}
# Calculate the energy differences.
structs = []
nitride = []
nitrogen = []
N2_E = self.A.finalE("N2_4Sgeo.log", os.path.join(self.A.loc, "N2"))
for k,v in struct_Es.items():
structs.append(k)
try:
nitride.append(struct_Es[k + 'N']*2 - v*2 - N2_E)
except:
nitride.append(np.nan)
try:
nitrogen.append(struct_Es[k + 'N2'] - v - N2_E)
except:
nitrogen.append(np.nan)
# Tabulate the data.
headers = ['Add N2', 'Add N']
results = np.array([nitrogen, nitride]).T
rxn_Es = pd.DataFrame(data=results, index=structs, columns=headers)
rxn_Es = rxn_Es.dropna(axis=0, how='all')
print(rxn_Es.sort_values('Add N')*self.kcal_eH)
return rxn_Es*self.kcal_eH
    def therm_N2_act(self):
        """Subtracts the thermodynamically-corrected energy of each M2(L)4
        structure and N2 from the corresponding M2(L)4N and M2(L)4N2 structures, tabulating the results in kcal/mol.

        Returns:
            pandas.DataFrame: free energies of reaction in kcal/mol,
            columns 'Add N2' and 'Add N', indexed by base structure.
        """
        # Calculate G for all of the structures.
        therm = self.A.therm_Es.dropna(axis=0, how='all')
        therm['Symm #'] = [self.symm(struct) for struct in therm.index]
        # S (rot) = kB*T(ln(qrot/sn)+N), N = 1, 1.5
        therm['S*T (rot)'] = (
            self.kB * self.temp *
            (np.log(therm['qrot']/therm['Symm #']) + therm['rot #'])
        )
        therm['S*T (tot)'] = (
            therm['S*T (el)'] + therm['S*T (vib)'] + therm['S*T (trans)']
            + therm['S*T (rot)']
        )
        # G = H - T*S
        therm['G'] = therm['H'] - therm['S*T (tot)']
        # Calculate the energy differences.
        structs = []
        nitride = []
        nitrogen = []
        N2_G = therm.loc['N2','G']
        for base in therm.index:
            structs.append(base)
            base_G = therm.loc[base, 'G']
            try:
                # 2 M2(L)4 + N2 -> 2 M2(L)4N
                nitride.append(therm.loc[base + 'N', 'G']*2 - base_G*2 - N2_G)
            except KeyError:
                # no corresponding nitride structure
                nitride.append(np.nan)
            try:
                # M2(L)4 + N2 -> M2(L)4N2
                nitrogen.append(therm.loc[base + 'N2', 'G'] - base_G - N2_G)
            except KeyError:
                nitrogen.append(np.nan)
        # Tabulate the data, dropping rows with no computable reaction.
        headers = ['Add N2', 'Add N']
        results = np.array([nitrogen, nitride]).T
        rxn_Es = pd.DataFrame(data=results, index=structs, columns=headers)
        rxn_Es = rxn_Es.dropna(axis=0, how='all')
        print(rxn_Es.sort_values('Add N')*self.kcal_eH)
        return rxn_Es*self.kcal_eH
    def symm(self, structure):
        '''Gives the symmetry numbers for N2, M2(L)4, M2(L)4N, and M2(L)4N2.
        '''
        # rotational symmetry number, built up multiplicatively per motif
        sn = 1
        if 'N2' in structure:
            # presumably the 2-fold axis of the N2 unit -- TODO confirm
            sn = sn*2
        if 'OO4' in structure:
            sn = sn*4*2*3*3*3*3
        if '2N' in structure:
            sn = sn*4*3*3*3*3
return sn | {
"repo_name": "tristanbrown/whaler",
"path": "whaler/custom.py",
"copies": "1",
"size": "7129",
"license": "mit",
"hash": -395039050241628600,
"line_mean": 32.3177570093,
"line_max": 116,
"alpha_frac": 0.4821153037,
"autogenerated": false,
"ratio": 3.2419281491587086,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4224043452858709,
"avg_score": null,
"num_lines": null
} |
'''a module containing analytic tools necessary for understanding the
behavior of learning automata.'''
# Written by Steven Porretta.
class Tsetlin(object):
    '''Analysis tools for the Tsetlin automaton.

    Fixes relative to the original:
    * the methods take no instance state, so they are marked @staticmethod
      (previously they lacked both `self` and the decorator, which breaks
      instance-bound calls)
    * number_of_states_estimate no longer raises ZeroDivisionError when the
      binary search probes zero states
    '''

    @staticmethod
    def stationary_probability_analytic(c, N):
        '''Compute the exact stationary probability p1 at infinity for a
        2-action automaton. p2 is discarded since only p1 is required.

        :param c: penalty probabilities [c1, c2] for the two actions
        :param N: state count, assumed to have been doubled to 2N
        :returns: the asymptotic probability of selecting action 1
        '''
        N = N / 2  # Assume that N has been doubled to 2N.
        d1 = 1 - c[0]
        d2 = 1 - c[1]
        term1 = pow(c[0] / c[1], N) * ((c[0] - d1) / (c[1] - d2))
        term2 = (pow(c[1], N) - pow(d2, N)) / (pow(c[0], N) - pow(d1, N))
        return 1 / (1 + term1 * term2)

    @staticmethod
    def number_of_states_estimate(c, desired_accuracy=0.95):
        '''Binary-search the minimum number of states with which a 2-action
        automaton selects the minimum-penalty action at the desired
        accuracy (95% by default).

        :param c: penalty probabilities [c1, c2] for the two actions
        :param desired_accuracy: target probability of the best action
        :returns: the smallest state count meeting the target (0-100 range)
        '''
        low = 0
        high = 100
        mini = 0
        while low <= high:
            mid = (low + high) // 2
            if mid == 0:
                # The analytic formula degenerates to 0/0 at zero states
                # (previously a ZeroDivisionError); zero states can never
                # meet the target, so continue searching upward.
                low = 1
                continue
            computed_accuracy = Tsetlin.stationary_probability_analytic(c, mid)
            if computed_accuracy >= desired_accuracy:
                high = mid - 1
                mini = mid
            else:
                low = mid + 1
        return mini
| {
"repo_name": "0xSteve/learning_automata_simulator",
"path": "analyse_LA.py",
"copies": "1",
"size": "1615",
"license": "mit",
"hash": -7076460461305947000,
"line_mean": 34.8888888889,
"line_max": 74,
"alpha_frac": 0.5523219814,
"autogenerated": false,
"ratio": 3.5809312638580932,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4633253245258093,
"avg_score": null,
"num_lines": null
} |
"""A module containing classes for move refactoring
`create_move()` is a factory for creating move refactoring objects
based on inputs.
"""
from rope.base import (pyobjects, codeanalyze, exceptions, pynames,
taskhandle, evaluate, worder, libutils)
from rope.base.change import ChangeSet, ChangeContents, MoveResource
from rope.refactor import importutils, rename, occurrences, sourceutils, \
functionutils
def create_move(project, resource, offset=None):
    """A factory for creating Move objects

    Based on `resource` and `offset`, return one of `MoveModule`,
    `MoveGlobal` or `MoveMethod` for performing move refactoring.
    """
    # no offset means the whole module is being moved
    if offset is None:
        return MoveModule(project, resource)
    pymodule = project.get_pymodule(resource)
    pyname = evaluate.eval_location(pymodule, offset)
    if pyname is not None:
        pyobject = pyname.get_object()
        if isinstance(pyobject, (pyobjects.PyModule, pyobjects.PyPackage)):
            return MoveModule(project, pyobject.get_resource())
        if (isinstance(pyobject, pyobjects.PyFunction)
                and isinstance(pyobject.parent, pyobjects.PyClass)):
            return MoveMethod(project, resource, offset)
        if ((isinstance(pyobject, pyobjects.PyDefinedObject)
                and isinstance(pyobject.parent, pyobjects.PyModule))
                or isinstance(pyname, pynames.AssignedName)):
            return MoveGlobal(project, resource, offset)
    raise exceptions.RefactoringError(
        'Move only works on global classes/functions/variables, modules and '
        'methods.')
class MoveMethod(object):
"""For moving methods
It makes a new method in the destination class and changes
the body of the old method to call the new method. You can
inline the old method to change all of its occurrences.
"""
    def __init__(self, project, resource, offset):
        """Locate the method at `offset` in `resource` and validate it.

        Raises:
            RefactoringError: if the name at `offset` is not a normal method.
        """
        self.project = project
        this_pymodule = self.project.get_pymodule(resource)
        pyname = evaluate.eval_location(this_pymodule, offset)
        self.method_name = worder.get_name_at(resource, offset)
        self.pyfunction = pyname.get_object()
        # only plain instance methods are supported (not staticmethods etc.)
        if self.pyfunction.get_kind() != 'method':
            raise exceptions.RefactoringError('Only normal methods'
                                              ' can be moved.')
    def get_changes(self, dest_attr, new_name=None, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes needed for this refactoring

        Parameters:

        - `dest_attr`: the name of the destination attribute
        - `new_name`: the name of the new method; if `None` uses
          the old name
        - `resources` can be a list of `rope.base.resources.File` to
          apply this refactoring on.  If `None`, the restructuring
          will be applied to all python files.

        """
        changes = ChangeSet('Moving method <%s>' % self.method_name)
        if resources is None:
            resources = self.project.get_python_files()
        if new_name is None:
            new_name = self.get_method_name()
        # replace the old method's body with a delegation call
        resource1, start1, end1, new_content1 = \
            self._get_changes_made_by_old_class(dest_attr, new_name)
        collector1 = codeanalyze.ChangeCollector(resource1.read())
        collector1.add_change(start1, end1, new_content1)
        # insert the moved method into the destination class
        resource2, start2, end2, new_content2 = \
            self._get_changes_made_by_new_class(dest_attr, new_name)
        if resource1 == resource2:
            # both classes live in the same module: one collector suffices
            collector1.add_change(start2, end2, new_content2)
        else:
            collector2 = codeanalyze.ChangeCollector(resource2.read())
            collector2.add_change(start2, end2, new_content2)
            result = collector2.get_changed()
            # the moved method may need imports in its new module
            import_tools = importutils.ImportTools(self.project)
            new_imports = self._get_used_imports(import_tools)
            if new_imports:
                goal_pymodule = libutils.get_string_module(
                    self.project, result, resource2)
                result = _add_imports_to_module(
                    import_tools, goal_pymodule, new_imports)
            if resource2 in resources:
                changes.add_change(ChangeContents(resource2, result))
        if resource1 in resources:
            changes.add_change(ChangeContents(resource1,
                                              collector1.get_changed()))
        return changes
    def get_method_name(self):
        """Return the name of the method being moved."""
        return self.method_name
    def _get_used_imports(self, import_tools):
        """Return the imports used by the method being moved.

        `import_tools` is accepted but not used by this implementation.
        """
        return importutils.get_imports(self.project, self.pyfunction)
    def _get_changes_made_by_old_class(self, dest_attr, new_name):
        """Compute the edit replacing the old method body with a delegation.

        Returns:
            tuple: (resource, start, end, new_content) describing the text
            replacement in the source class's module.
        """
        pymodule = self.pyfunction.get_module()
        indents = self._get_scope_indents(self.pyfunction)
        # the old method simply forwards to `self.<dest_attr>.<new_name>`
        body = 'return self.%s.%s(%s)\n' % (
            dest_attr, new_name, self._get_passed_arguments_string())
        region = sourceutils.get_body_region(self.pyfunction)
        return (pymodule.get_resource(), region[0], region[1],
                sourceutils.fix_indentation(body, indents))
def _get_scope_indents(self, pyobject):
pymodule = pyobject.get_module()
return sourceutils.get_indents(
pymodule.lines, pyobject.get_scope().get_start()) + \
sourceutils.get_indent(self.project)
    def _get_changes_made_by_new_class(self, dest_attr, new_name):
        """Compute the edit that inserts the moved method into the class
        behind ``dest_attr``.

        Returns ``(resource, start, end, new_text)``.  Raises
        `RefactoringError` when ``dest_attr`` does not exist on the old
        class or is not an instance of a known class.
        """
        old_pyclass = self.pyfunction.parent
        if dest_attr not in old_pyclass:
            raise exceptions.RefactoringError(
                'Destination attribute <%s> not found' % dest_attr)
        pyclass = old_pyclass[dest_attr].get_object().get_type()
        if not isinstance(pyclass, pyobjects.PyClass):
            raise exceptions.RefactoringError(
                'Unknown class type for attribute <%s>' % dest_attr)
        pymodule = pyclass.get_module()
        resource = pyclass.get_module().get_resource()
        start, end = sourceutils.get_body_region(pyclass)
        # A body that is just `pass` gets replaced outright; otherwise the
        # new method is appended after the existing body.
        pre_blanks = '\n'
        if pymodule.source_code[start:end].strip() != 'pass':
            pre_blanks = '\n\n'
            start = end
        indents = self._get_scope_indents(pyclass)
        body = pre_blanks + sourceutils.fix_indentation(
            self.get_new_method(new_name), indents)
        return resource, start, end, body
def get_new_method(self, name):
return '%s\n%s' % (
self._get_new_header(name),
sourceutils.fix_indentation(self._get_body(),
sourceutils.get_indent(self.project)))
    def _get_unchanged_body(self):
        # The moving method's body exactly as written in the old class.
        return sourceutils.get_body(self.pyfunction)
    def _get_body(self, host='host'):
        """Return the new method's body with the old ``self`` renamed.

        Occurrences of the old self parameter are renamed to *host* so
        the moved method can still reach the original object.
        """
        self_name = self._get_self_name()
        # Prepend a dummy assignment so the name resolves in the throwaway
        # module and can be found by the occurrence finder.
        body = self_name + ' = None\n' + self._get_unchanged_body()
        pymodule = libutils.get_string_module(self.project, body)
        finder = occurrences.create_finder(
            self.project, self_name, pymodule[self_name])
        result = rename.rename_in_module(finder, host, pymodule=pymodule)
        if result is None:
            # Nothing referenced the old self; keep the body unchanged.
            result = body
        # Drop the dummy first line again.
        return result[result.index('\n') + 1:]
def _get_self_name(self):
return self.pyfunction.get_param_names()[0]
    def _get_new_header(self, name):
        """Build the ``def`` line for the moved method."""
        header = 'def %s(self' % name
        # Pass the old object along as `host` when the body still uses it.
        if self._is_host_used():
            header += ', host'
        definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
        # Keep every parameter of the original method except its `self`.
        others = definition_info.arguments_to_string(1)
        if others:
            header += ', ' + others
        return header + '):'
    def _get_passed_arguments_string(self):
        """Argument list used when the old method delegates to the new one."""
        result = ''
        # The old `self` is forwarded as the new method's `host` argument.
        if self._is_host_used():
            result = 'self'
        definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
        others = definition_info.arguments_to_string(1)
        if others:
            if result:
                result += ', '
            result += others
        return result
    def _is_host_used(self):
        # If renaming `self` changes the body at all, the moved method
        # still refers to the old object and needs a `host` parameter.
        return self._get_body('__old_self') != self._get_unchanged_body()
class MoveGlobal(object):
    """For moving global functions, classes and variables to another module."""
    def __init__(self, project, resource, offset):
        self.project = project
        this_pymodule = self.project.get_pymodule(resource)
        # Resolve the name at `offset`; `None` means the offset is not on
        # a movable name at all.
        self.old_pyname = evaluate.eval_location(this_pymodule, offset)
        if self.old_pyname is None:
            raise exceptions.RefactoringError(
                'Move refactoring should be performed on a '
                'class/function/variable.')
        if self._is_variable(self.old_pyname):
            # For a module-level variable the name comes from the source
            # text and the defining module is the one we started in.
            self.old_name = worder.get_name_at(resource, offset)
            pymodule = this_pymodule
        else:
            self.old_name = self.old_pyname.get_object().get_name()
            pymodule = self.old_pyname.get_object().get_module()
        self._check_exceptional_conditions()
        self.source = pymodule.get_resource()
        self.tools = _MoveTools(self.project, self.source,
                                self.old_pyname, self.old_name)
        self.import_tools = self.tools.import_tools
    def _import_filter(self, stmt):
        # Predicate for `organize_imports`: restrict changes to import
        # statements that refer to the source module.
        module_name = libutils.modname(self.source)
        if isinstance(stmt.import_info, importutils.NormalImport):
            # Affect any statement that imports the source module
            return any(module_name == name
                       for name, alias in stmt.import_info.names_and_aliases)
        elif isinstance(stmt.import_info, importutils.FromImport):
            # Affect statements importing from the source package
            if '.' in module_name:
                package_name, basename = module_name.rsplit('.', 1)
                if (stmt.import_info.module_name == package_name and
                        any(basename == name
                            for name, alias in stmt.import_info.names_and_aliases)):
                    return True
            return stmt.import_info.module_name == module_name
        return False
    def _check_exceptional_conditions(self):
        """Reject anything that is not a module-level variable or a global
        class/function definition."""
        if self._is_variable(self.old_pyname):
            pymodule = self.old_pyname.get_definition_location()[0]
            try:
                pymodule.get_scope().get_name(self.old_name)
            except exceptions.NameNotFoundError:
                self._raise_refactoring_error()
        elif not (isinstance(self.old_pyname.get_object(),
                             pyobjects.PyDefinedObject) and
                  self._is_global(self.old_pyname.get_object())):
            self._raise_refactoring_error()
    def _raise_refactoring_error(self):
        raise exceptions.RefactoringError(
            'Move refactoring should be performed on a global class, function '
            'or variable.')
    def _is_global(self, pyobject):
        # A global definition lives directly in the module scope.
        return pyobject.get_scope().parent == pyobject.get_module().get_scope()
    def _is_variable(self, pyname):
        return isinstance(pyname, pynames.AssignedName)
    def get_changes(self, dest, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes needed to move the global to `dest`.

        `dest` is a module file; a package is accepted and resolved to
        its `__init__.py`.  `resources` restricts which files are updated.
        """
        if resources is None:
            resources = self.project.get_python_files()
        if dest is None or not dest.exists():
            raise exceptions.RefactoringError(
                'Move destination does not exist.')
        if dest.is_folder() and dest.has_child('__init__.py'):
            dest = dest.get_child('__init__.py')
        if dest.is_folder():
            raise exceptions.RefactoringError(
                'Move destination for non-modules should not be folders.')
        if self.source == dest:
            raise exceptions.RefactoringError(
                'Moving global elements to the same module.')
        return self._calculate_changes(dest, resources, task_handle)
    def _calculate_changes(self, dest, resources, task_handle):
        """Walk all files and collect the edits for the move."""
        changes = ChangeSet('Moving global <%s>' % self.old_name)
        job_set = task_handle.create_jobset('Collecting Changes',
                                            len(resources))
        for file_ in resources:
            job_set.started_job(file_.path)
            if file_ == self.source:
                changes.add_change(self._source_module_changes(dest))
            elif file_ == dest:
                changes.add_change(self._dest_module_changes(dest))
            elif self.tools.occurs_in_module(resource=file_):
                pymodule = self.project.get_pymodule(file_)
                # Changing occurrences
                placeholder = '__rope_renaming_%s_' % self.old_name
                source = self.tools.rename_in_module(placeholder,
                                                     resource=file_)
                should_import = source is not None
                # Removing out of date imports
                pymodule = self.tools.new_pymodule(pymodule, source)
                source = self.import_tools.organize_imports(
                    pymodule, sort=False, import_filter=self._import_filter)
                # Adding new import
                if should_import:
                    pymodule = self.tools.new_pymodule(pymodule, source)
                    source, imported = importutils.add_import(
                        self.project, pymodule, self._new_modname(dest),
                        self.old_name)
                    # Swap the temporary placeholder for the imported name.
                    source = source.replace(placeholder, imported)
                source = self.tools.new_source(pymodule, source)
                if source != file_.read():
                    changes.add_change(ChangeContents(file_, source))
            job_set.finished_job()
        return changes
    def _source_module_changes(self, dest):
        """Remove the moving region from the source module, rewriting any
        remaining references so they go through an import of `dest`."""
        placeholder = '__rope_moving_%s_' % self.old_name
        handle = _ChangeMoveOccurrencesHandle(placeholder)
        occurrence_finder = occurrences.create_finder(
            self.project, self.old_name, self.old_pyname)
        start, end = self._get_moving_region()
        renamer = ModuleSkipRenamer(occurrence_finder, self.source,
                                    handle, start, end)
        source = renamer.get_changed_module()
        pymodule = libutils.get_string_module(self.project, source, self.source)
        source = self.import_tools.organize_imports(pymodule, sort=False)
        if handle.occurred:
            pymodule = libutils.get_string_module(
                self.project, source, self.source)
            # Adding new import
            source, imported = importutils.add_import(
                self.project, pymodule, self._new_modname(dest), self.old_name)
            source = source.replace(placeholder, imported)
        return ChangeContents(self.source, source)
    def _new_modname(self, dest):
        return libutils.modname(dest)
    def _dest_module_changes(self, dest):
        """Insert the moving code into `dest` right after its imports and
        fix up the destination's imports and occurrences."""
        # Changing occurrences
        pymodule = self.project.get_pymodule(dest)
        source = self.tools.rename_in_module(self.old_name, pymodule)
        pymodule = self.tools.new_pymodule(pymodule, source)
        moving, imports = self._get_moving_element_with_imports()
        pymodule, has_changed = self._add_imports2(pymodule, imports)
        module_with_imports = self.import_tools.module_imports(pymodule)
        source = pymodule.source_code
        lineno = 0
        if module_with_imports.imports:
            lineno = module_with_imports.imports[-1].end_line - 1
        else:
            # No imports: skip past any leading comment lines instead.
            while lineno < pymodule.lines.length() and \
                    pymodule.lines.get_line(lineno + 1).\
                    lstrip().startswith('#'):
                lineno += 1
        if lineno > 0:
            cut = pymodule.lines.get_line_end(lineno) + 1
            result = source[:cut] + '\n\n' + moving + source[cut:]
        else:
            result = moving + source
        # Organizing imports
        source = result
        pymodule = libutils.get_string_module(self.project, source, dest)
        source = self.import_tools.organize_imports(pymodule, sort=False,
                                                    unused=False)
        # Remove unused imports of the old module
        pymodule = libutils.get_string_module(self.project, source, dest)
        source = self.import_tools.organize_imports(
            pymodule, sort=False, selfs=False, unused=True,
            import_filter=self._import_filter)
        return ChangeContents(dest, source)
    def _get_moving_element_with_imports(self):
        return moving_code_with_imports(
            self.project, self.source, self._get_moving_element())
    def _get_module_with_imports(self, source_code, resource):
        pymodule = libutils.get_string_module(
            self.project, source_code, resource)
        return self.import_tools.module_imports(pymodule)
    def _get_moving_element(self):
        # The exact source text being moved, normalized to one trailing
        # newline.
        start, end = self._get_moving_region()
        moving = self.source.read()[start:end]
        return moving.rstrip() + '\n'
    def _get_moving_region(self):
        """Return (start, end) byte offsets of the code being moved."""
        pymodule = self.project.get_pymodule(self.source)
        lines = pymodule.lines
        if self._is_variable(self.old_pyname):
            # A variable moves as its whole logical (possibly wrapped) line.
            logical_lines = pymodule.logical_lines
            lineno = logical_lines.logical_line_in(
                self.old_pyname.get_definition_location()[1])[0]
            start = lines.get_line_start(lineno)
            end_line = logical_lines.logical_line_in(lineno)[1]
        else:
            scope = self.old_pyname.get_object().get_scope()
            start = lines.get_line_start(scope.get_start())
            end_line = scope.get_end()
        # Include comment lines before the definition
        start_line = lines.get_line_number(start)
        while start_line > 1 and lines.get_line(start_line - 1).startswith('#'):
            start_line -= 1
        start = lines.get_line_start(start_line)
        # Also take trailing blank lines after the definition.
        while end_line < lines.length() and \
                lines.get_line(end_line + 1).strip() == '':
            end_line += 1
        end = min(lines.get_line_end(end_line) + 1, len(pymodule.source_code))
        return start, end
    def _add_imports2(self, pymodule, new_imports):
        # Like `_add_imports_to_module` but reports whether anything
        # changed and returns a freshly parsed module.
        source = self.tools.add_imports(pymodule, new_imports)
        if source is None:
            return pymodule, False
        else:
            resource = pymodule.get_resource()
            pymodule = libutils.get_string_module(
                self.project, source, resource)
            return pymodule, True
class MoveModule(object):
    """For moving modules and packages into another package."""
    def __init__(self, project, resource):
        self.project = project
        # Normalize an `__init__.py` resource to its package folder.
        if not resource.is_folder() and resource.name == '__init__.py':
            resource = resource.parent
        if resource.is_folder() and not resource.has_child('__init__.py'):
            raise exceptions.RefactoringError(
                'Cannot move non-package folder.')
        dummy_pymodule = libutils.get_string_module(self.project, '')
        self.old_pyname = pynames.ImportedModule(dummy_pymodule,
                                                 resource=resource)
        self.source = self.old_pyname.get_object().get_resource()
        if self.source.is_folder():
            self.old_name = self.source.name
        else:
            # Strip the '.py' extension for plain modules.
            self.old_name = self.source.name[:-3]
        self.tools = _MoveTools(self.project, self.source,
                                self.old_pyname, self.old_name)
        self.import_tools = self.tools.import_tools
    def get_changes(self, dest, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes that move this module into package `dest`."""
        if resources is None:
            resources = self.project.get_python_files()
        if dest is None or not dest.is_folder():
            raise exceptions.RefactoringError(
                'Move destination for modules should be packages.')
        return self._calculate_changes(dest, resources, task_handle)
    def _calculate_changes(self, dest, resources, task_handle):
        """Collect per-file edits plus the final `MoveResource` change."""
        changes = ChangeSet('Moving module <%s>' % self.old_name)
        job_set = task_handle.create_jobset('Collecting changes',
                                            len(resources))
        for module in resources:
            job_set.started_job(module.path)
            if module == self.source:
                self._change_moving_module(changes, dest)
            else:
                source = self._change_occurrences_in_module(dest,
                                                            resource=module)
                if source is not None:
                    changes.add_change(ChangeContents(module, source))
            job_set.finished_job()
        if self.project == self.source.project:
            changes.add_change(MoveResource(self.source, dest.path))
        return changes
    def _new_modname(self, dest):
        # Dotted name of the module once it lives under `dest`.
        destname = libutils.modname(dest)
        if destname:
            return destname + '.' + self.old_name
        return self.old_name
    def _new_import(self, dest):
        return importutils.NormalImport([(self._new_modname(dest), None)])
    def _change_moving_module(self, changes, dest):
        """Update the moving module itself before the file is relocated:
        relative imports become absolute, self references are updated."""
        if not self.source.is_folder():
            pymodule = self.project.get_pymodule(self.source)
            source = self.import_tools.relatives_to_absolutes(pymodule)
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self._change_occurrences_in_module(dest, pymodule)
            source = self.tools.new_source(pymodule, source)
            if source != self.source.read():
                changes.add_change(ChangeContents(self.source, source))
    def _change_occurrences_in_module(self, dest, pymodule=None,
                                      resource=None):
        """Rewrite references to the moving module in one file.

        Returns the changed source, or `None` when nothing changed.
        """
        if not self.tools.occurs_in_module(pymodule=pymodule,
                                           resource=resource):
            return
        if pymodule is None:
            pymodule = self.project.get_pymodule(resource)
        new_name = self._new_modname(dest)
        module_imports = importutils.get_module_imports(self.project, pymodule)
        changed = False
        source = None
        if libutils.modname(dest):
            changed = self._change_import_statements(dest, new_name,
                                                     module_imports)
            if changed:
                source = module_imports.get_changed_source()
                source = self.tools.new_source(pymodule, source)
                pymodule = self.tools.new_pymodule(pymodule, source)
        new_import = self._new_import(dest)
        source = self.tools.rename_in_module(
            new_name, imports=True, pymodule=pymodule,
            resource=resource if not changed else None)
        # Only add an import when the name occurs outside import statements.
        should_import = self.tools.occurs_in_module(
            pymodule=pymodule, resource=resource, imports=False)
        pymodule = self.tools.new_pymodule(pymodule, source)
        source = self.tools.remove_old_imports(pymodule)
        if should_import:
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self.tools.add_imports(pymodule, [new_import])
        source = self.tools.new_source(pymodule, source)
        if source is not None and source != pymodule.resource.read():
            return source
        return None
    def _change_import_statements(self, dest, new_name, module_imports):
        """Rewrite import statements that mention the moving module."""
        moving_module = self.source
        parent_module = moving_module.parent
        changed = False
        for import_stmt in module_imports.imports:
            # Skip statements that mention neither the short nor the
            # fully-qualified name of the moving module.
            if not any(name_and_alias[0] == self.old_name
                       for name_and_alias in
                       import_stmt.import_info.names_and_aliases) and \
               not any(name_and_alias[0] == libutils.modname(self.source)
                       for name_and_alias in
                       import_stmt.import_info.names_and_aliases):
                continue
            # Case 1: Look for normal imports of the moving module.
            if isinstance(import_stmt.import_info, importutils.NormalImport):
                continue
            # Case 2: The moving module is from-imported.
            changed = self._handle_moving_in_from_import_stmt(
                dest, import_stmt, module_imports, parent_module) or changed
            # Case 3: Names are imported from the moving module.
            context = importutils.importinfo.ImportContext(self.project, None)
            if not import_stmt.import_info.is_empty() and \
                    import_stmt.import_info.get_imported_resource(context) == \
                    moving_module:
                import_stmt.import_info = importutils.FromImport(
                    new_name, import_stmt.import_info.level,
                    import_stmt.import_info.names_and_aliases)
                changed = True
        return changed
    def _handle_moving_in_from_import_stmt(self, dest, import_stmt,
                                           module_imports, parent_module):
        """Split ``from parent import moving, other`` so the moving name
        is imported from its new package instead."""
        changed = False
        context = importutils.importinfo.ImportContext(self.project, None)
        if import_stmt.import_info.get_imported_resource(context) == \
                parent_module:
            imports = import_stmt.import_info.names_and_aliases
            new_imports = []
            for name, alias in imports:
                # The moving module was imported.
                if name == self.old_name:
                    changed = True
                    new_import = importutils.FromImport(
                        libutils.modname(dest), 0,
                        [(self.old_name, alias)])
                    module_imports.add_import(new_import)
                else:
                    new_imports.append((name, alias))
            # Update the imports if the imported names were changed.
            if new_imports != imports:
                changed = True
                if new_imports:
                    import_stmt.import_info = importutils.FromImport(
                        import_stmt.import_info.module_name,
                        import_stmt.import_info.level,
                        new_imports)
                else:
                    # Nothing left to import from the parent module.
                    import_stmt.empty_import()
        return changed
class _ChangeMoveOccurrencesHandle(object):
def __init__(self, new_name):
self.new_name = new_name
self.occurred = False
def occurred_inside_skip(self, change_collector, occurrence):
pass
def occurred_outside_skip(self, change_collector, occurrence):
start, end = occurrence.get_primary_range()
change_collector.add_change(start, end, self.new_name)
self.occurred = True
class _MoveTools(object):
    """Shared helpers for the move refactorings: renaming occurrences,
    adding/removing imports and re-parsing modules."""
    def __init__(self, project, source, pyname, old_name):
        self.project = project
        self.source = source
        self.old_pyname = pyname
        self.old_name = old_name
        self.import_tools = importutils.ImportTools(self.project)
    def remove_old_imports(self, pymodule):
        """Drop imports of the moved name; return new source or `None`
        when nothing changed."""
        old_source = pymodule.source_code
        module_with_imports = self.import_tools.module_imports(pymodule)
        class CanSelect(object):
            # Name filter: keep every imported name except the one being
            # moved.  The class-body `self` below is the enclosing method's
            # `self`, captured at class-creation time.
            changed = False
            old_name = self.old_name
            old_pyname = self.old_pyname
            def __call__(self, name):
                try:
                    if name == self.old_name and \
                            pymodule[name].get_object() == \
                            self.old_pyname.get_object():
                        self.changed = True
                        return False
                except exceptions.AttributeNotFoundError:
                    pass
                return True
        can_select = CanSelect()
        module_with_imports.filter_names(can_select)
        new_source = module_with_imports.get_changed_source()
        if old_source != new_source:
            return new_source
    def rename_in_module(self, new_name, pymodule=None,
                         imports=False, resource=None):
        """Rename occurrences of the old name; `None` if nothing changed."""
        occurrence_finder = self._create_finder(imports)
        source = rename.rename_in_module(
            occurrence_finder, new_name, replace_primary=True,
            pymodule=pymodule, resource=resource)
        return source
    def occurs_in_module(self, pymodule=None, resource=None, imports=True):
        # True as soon as a single occurrence is found.
        finder = self._create_finder(imports)
        for occurrence in finder.find_occurrences(pymodule=pymodule,
                                                  resource=resource):
            return True
        return False
    def _create_finder(self, imports):
        return occurrences.create_finder(self.project, self.old_name,
                                         self.old_pyname, imports=imports,
                                         keywords=False)
    def new_pymodule(self, pymodule, source):
        # Re-parse only when `source` actually changed.
        if source is not None:
            return libutils.get_string_module(
                self.project, source, pymodule.get_resource())
        return pymodule
    def new_source(self, pymodule, source):
        # `None` source means "unchanged": fall back to the module's code.
        if source is None:
            return pymodule.source_code
        return source
    def add_imports(self, pymodule, new_imports):
        return _add_imports_to_module(self.import_tools, pymodule, new_imports)
def _add_imports_to_module(import_tools, pymodule, new_imports):
module_with_imports = import_tools.module_imports(pymodule)
for new_import in new_imports:
module_with_imports.add_import(new_import)
return module_with_imports.get_changed_source()
def moving_code_with_imports(project, resource, source):
    """Prepare `source` (extracted from `resource`) for insertion elsewhere.

    Returns ``(code, imports)`` where `code` is the moving code with its
    leading comment block preserved and `imports` are the import infos
    the code will need at its destination.
    """
    import_tools = importutils.ImportTools(project)
    pymodule = libutils.get_string_module(project, source, resource)
    # Strip comment prefix, if any. These need to stay before the moving
    # section, but imports would be added between them.
    lines = codeanalyze.SourceLinesAdapter(source)
    start = 1
    while start < lines.length() and lines.get_line(start).startswith('#'):
        start += 1
    moving_prefix = source[:lines.get_line_start(start)]
    pymodule = libutils.get_string_module(
        project, source[lines.get_line_start(start):], resource)
    origin = project.get_pymodule(resource)
    # Copy the origin module's imports, plus a from-import for any names
    # the moving code still needs from the origin module.
    imports = []
    for stmt in import_tools.module_imports(origin).imports:
        imports.append(stmt.import_info)
    back_names = []
    for name in origin:
        if name not in pymodule:
            back_names.append(name)
    imports.append(import_tools.get_from_import(resource, back_names))
    source = _add_imports_to_module(import_tools, pymodule, imports)
    pymodule = libutils.get_string_module(project, source, resource)
    source = import_tools.relatives_to_absolutes(pymodule)
    pymodule = libutils.get_string_module(project, source, resource)
    # Let organize_imports drop whatever the moving code does not use.
    source = import_tools.organize_imports(pymodule, selfs=False)
    pymodule = libutils.get_string_module(project, source, resource)
    # extracting imports after changes
    module_imports = import_tools.module_imports(pymodule)
    imports = [import_stmt.import_info
               for import_stmt in module_imports.imports]
    start = 1
    if module_imports.imports:
        # Skip past the import block and any blank lines after it.
        start = module_imports.imports[-1].end_line
        lines = codeanalyze.SourceLinesAdapter(source)
        while start < lines.length() and not lines.get_line(start).strip():
            start += 1
    # Reinsert the prefix which was removed at the beginning
    moving = moving_prefix + source[lines.get_line_start(start):]
    return moving, imports
class ModuleSkipRenamerHandle(object):
    """Default no-op handle for `ModuleSkipRenamer`.

    Subclasses override these callbacks to react to occurrences found
    outside or inside the skipped region.
    """

    def occurred_outside_skip(self, change_collector, occurrence):
        """Called for each occurrence outside the skipped region."""
        pass

    def occurred_inside_skip(self, change_collector, occurrence):
        """Called for each occurrence inside the skipped region."""
        pass
class ModuleSkipRenamer(object):
    """Rename occurrences in a module

    This class can be used when you want to treat a region in a file
    separately from other parts when renaming.
    """
    def __init__(self, occurrence_finder, resource, handle=None,
                 skip_start=0, skip_end=0, replacement=''):
        """Constructor

        If `replacement` is `None` the skipped region is not changed.
        Otherwise it is replaced with `replacement`.  `handle` gets a
        callback for every occurrence, inside or outside the region.
        """
        self.occurrence_finder = occurrence_finder
        self.resource = resource
        self.skip_start = skip_start
        self.skip_end = skip_end
        self.replacement = replacement
        self.handle = handle
        if self.handle is None:
            # Default handle ignores all occurrences.
            self.handle = ModuleSkipRenamerHandle()
    def get_changed_module(self):
        """Return the rewritten module source, or `None` if unchanged."""
        source = self.resource.read()
        change_collector = codeanalyze.ChangeCollector(source)
        if self.replacement is not None:
            change_collector.add_change(self.skip_start, self.skip_end,
                                        self.replacement)
        for occurrence in self.occurrence_finder.find_occurrences(
                self.resource):
            start, end = occurrence.get_primary_range()
            # Dispatch on whether the occurrence falls in the skip region.
            if self.skip_start <= start < self.skip_end:
                self.handle.occurred_inside_skip(change_collector, occurrence)
            else:
                self.handle.occurred_outside_skip(change_collector, occurrence)
        result = change_collector.get_changed()
        if result is not None and result != source:
            return result
| {
"repo_name": "ruchee/vimrc",
"path": "vimfiles/bundle/vim-python/submodules/rope/rope/refactor/move.py",
"copies": "3",
"size": "33302",
"license": "mit",
"hash": 446212810677330100,
"line_mean": 41.4770408163,
"line_max": 80,
"alpha_frac": 0.5986427242,
"autogenerated": false,
"ratio": 4.14152468598433,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00044747342240936805,
"num_lines": 784
} |
"""A module containing classes for move refactoring
`create_move()` is a factory for creating move refactoring objects
based on inputs.
"""
from rope.base import pyobjects, codeanalyze, exceptions, pynames, taskhandle, evaluate, worder
from rope.base.change import ChangeSet, ChangeContents, MoveResource
from rope.refactor import importutils, rename, occurrences, sourceutils, functionutils
def create_move(project, resource, offset=None):
    """A factory for creating Move objects

    Based on `resource` and `offset`, return one of `MoveModule`,
    `MoveGlobal` or `MoveMethod` for performing move refactoring.
    """
    if offset is None:
        return MoveModule(project, resource)
    pymodule = project.pycore.resource_to_pyobject(resource)
    pyname = evaluate.eval_location(pymodule, offset)
    if pyname is None:
        raise exceptions.RefactoringError(
            'Move only works on classes, functions, modules and methods.')
    pyobject = pyname.get_object()
    # Modules and packages move as whole resources.
    if isinstance(pyobject, (pyobjects.PyModule, pyobjects.PyPackage)):
        return MoveModule(project, pyobject.get_resource())
    parent = pyobject.parent
    # A function defined directly inside a class is a method.
    if isinstance(pyobject, pyobjects.PyFunction) and \
       isinstance(parent, pyobjects.PyClass):
        return MoveMethod(project, resource, offset)
    # Anything defined at module level is a movable global.
    if isinstance(pyobject, pyobjects.PyDefinedObject) and \
       isinstance(parent, pyobjects.PyModule):
        return MoveGlobal(project, resource, offset)
    raise exceptions.RefactoringError(
        'Move only works on global classes/functions, modules and methods.')
class MoveMethod(object):
    """For moving methods

    It makes a new method in the destination class and changes
    the body of the old method to call the new method.  You can
    inline the old method to change all of its occurrences.
    """
    def __init__(self, project, resource, offset):
        self.project = project
        self.pycore = project.pycore
        this_pymodule = self.pycore.resource_to_pyobject(resource)
        pyname = evaluate.eval_location(this_pymodule, offset)
        self.method_name = worder.get_name_at(resource, offset)
        self.pyfunction = pyname.get_object()
        # Only plain instance methods can be moved (no static/class methods).
        if self.pyfunction.get_kind() != 'method':
            raise exceptions.RefactoringError('Only normal methods'
                                              ' can be moved.')
    def get_changes(self, dest_attr, new_name=None, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Return the changes needed for this refactoring

        Parameters:

        - `dest_attr`: the name of the destination attribute
        - `new_name`: the name of the new method; if `None` uses
          the old name
        - `resources` can be a list of `rope.base.resources.File`\\s to
          apply this refactoring on.  If `None`, the restructuring
          will be applied to all python files.
        """
        changes = ChangeSet('Moving method <%s>' % self.method_name)
        if resources is None:
            resources = self.pycore.get_python_files()
        if new_name is None:
            new_name = self.get_method_name()
        # Edit 1: the old class gets a delegating stub.
        resource1, start1, end1, new_content1 = \
            self._get_changes_made_by_old_class(dest_attr, new_name)
        collector1 = codeanalyze.ChangeCollector(resource1.read())
        collector1.add_change(start1, end1, new_content1)
        # Edit 2: the destination class receives the moved method.
        resource2, start2, end2, new_content2 = \
            self._get_changes_made_by_new_class(dest_attr, new_name)
        if resource1 == resource2:
            # Both classes live in the same file: one combined change.
            collector1.add_change(start2, end2, new_content2)
        else:
            collector2 = codeanalyze.ChangeCollector(resource2.read())
            collector2.add_change(start2, end2, new_content2)
            result = collector2.get_changed()
            import_tools = importutils.ImportTools(self.pycore)
            new_imports = self._get_used_imports(import_tools)
            if new_imports:
                goal_pymodule = self.pycore.get_string_module(result,
                                                              resource2)
                result = _add_imports_to_module(
                    import_tools, goal_pymodule, new_imports)
            if resource2 in resources:
                changes.add_change(ChangeContents(resource2, result))
        if resource1 in resources:
            changes.add_change(ChangeContents(resource1,
                                              collector1.get_changed()))
        return changes
    def get_method_name(self):
        return self.method_name
    def _get_used_imports(self, import_tools):
        # Imports the moving method needs in the destination module.
        return importutils.get_imports(self.pycore, self.pyfunction)
    def _get_changes_made_by_old_class(self, dest_attr, new_name):
        """Return ``(resource, start, end, new_text)`` that rewrites the
        old method into a call delegating through `dest_attr`."""
        pymodule = self.pyfunction.get_module()
        indents = self._get_scope_indents(self.pyfunction)
        body = 'return self.%s.%s(%s)\n' % (dest_attr, new_name,
                                            self._get_passed_arguments_string())
        region = sourceutils.get_body_region(self.pyfunction)
        return (pymodule.get_resource(), region[0], region[1],
                sourceutils.fix_indentation(body, indents))
    def _get_scope_indents(self, pyobject):
        # Indentation level for code placed inside `pyobject`'s scope.
        pymodule = pyobject.get_module()
        return sourceutils.get_indents(
            pymodule.lines, pyobject.get_scope().get_start()) + \
            sourceutils.get_indent(self.pycore)
    def _get_changes_made_by_new_class(self, dest_attr, new_name):
        """Return ``(resource, start, end, new_text)`` that inserts the
        moved method into the destination class."""
        old_pyclass = self.pyfunction.parent
        if dest_attr not in old_pyclass:
            raise exceptions.RefactoringError(
                'Destination attribute <%s> not found' % dest_attr)
        pyclass = old_pyclass[dest_attr].get_object().get_type()
        if not isinstance(pyclass, pyobjects.PyClass):
            raise exceptions.RefactoringError(
                'Unknown class type for attribute <%s>' % dest_attr)
        pymodule = pyclass.get_module()
        resource = pyclass.get_module().get_resource()
        start, end = sourceutils.get_body_region(pyclass)
        # A body that is just `pass` is replaced; otherwise append after it.
        pre_blanks = '\n'
        if pymodule.source_code[start:end].strip() != 'pass':
            pre_blanks = '\n\n'
            start = end
        indents = self._get_scope_indents(pyclass)
        body = pre_blanks + sourceutils.fix_indentation(
            self.get_new_method(new_name), indents)
        return resource, start, end, body
    def get_new_method(self, name):
        """Return the full source of the moved method named `name`."""
        return '%s\n%s' % (
            self._get_new_header(name),
            sourceutils.fix_indentation(self._get_body(),
                                        sourceutils.get_indent(self.pycore)))
    def _get_unchanged_body(self):
        # The method body exactly as written in the source class.
        return sourceutils.get_body(self.pyfunction)
    def _get_body(self, host='host'):
        """Return the new body with the old ``self`` renamed to `host`."""
        self_name = self._get_self_name()
        # Prepend a dummy binding so the name resolves and can be renamed.
        body = self_name + ' = None\n' + self._get_unchanged_body()
        pymodule = self.pycore.get_string_module(body)
        finder = occurrences.create_finder(
            self.pycore, self_name, pymodule[self_name])
        result = rename.rename_in_module(finder, host, pymodule=pymodule)
        if result is None:
            result = body
        # Drop the dummy first line again.
        return result[result.index('\n') + 1:]
    def _get_self_name(self):
        # Name of the method's first parameter (conventionally `self`).
        return self.pyfunction.get_param_names()[0]
    def _get_new_header(self, name):
        """Build the ``def`` line for the moved method."""
        header = 'def %s(self' % name
        # Pass the old object along as `host` when the body still uses it.
        if self._is_host_used():
            header += ', host'
        definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
        others = definition_info.arguments_to_string(1)
        if others:
            header += ', ' + others
        return header + '):'
    def _get_passed_arguments_string(self):
        """Argument list the old delegating stub passes to the new method."""
        result = ''
        # The old `self` becomes the `host` argument of the new method.
        if self._is_host_used():
            result = 'self'
        definition_info = functionutils.DefinitionInfo.read(self.pyfunction)
        others = definition_info.arguments_to_string(1)
        if others:
            if result:
                result += ', '
            result += others
        return result
    def _is_host_used(self):
        # If renaming `self` changes the body, the old object is still used.
        return self._get_body('__old_self') != self._get_unchanged_body()
class MoveGlobal(object):
"""For moving global function and classes"""
def __init__(self, project, resource, offset):
self.pycore = project.pycore
this_pymodule = self.pycore.resource_to_pyobject(resource)
self.old_pyname = evaluate.eval_location(this_pymodule, offset)
self.old_name = self.old_pyname.get_object().get_name()
pymodule = self.old_pyname.get_object().get_module()
self.source = pymodule.get_resource()
self.tools = _MoveTools(self.pycore, self.source,
self.old_pyname, self.old_name)
self.import_tools = self.tools.import_tools
self._check_exceptional_conditions()
def _check_exceptional_conditions(self):
if self.old_pyname is None or \
not isinstance(self.old_pyname.get_object(), pyobjects.PyDefinedObject):
raise exceptions.RefactoringError(
'Move refactoring should be performed on a class/function.')
moving_pyobject = self.old_pyname.get_object()
if not self._is_global(moving_pyobject):
raise exceptions.RefactoringError(
'Move refactoring should be performed on a global class/function.')
def _is_global(self, pyobject):
return pyobject.get_scope().parent == pyobject.get_module().get_scope()
def get_changes(self, dest, resources=None,
task_handle=taskhandle.NullTaskHandle()):
if resources is None:
resources = self.pycore.get_python_files()
if dest is None or not dest.exists():
raise exceptions.RefactoringError(
'Move destination does not exist.')
if dest.is_folder() and dest.has_child('__init__.py'):
dest = dest.get_child('__init__.py')
if dest.is_folder():
raise exceptions.RefactoringError(
'Move destination for non-modules should not be folders.')
if self.source == dest:
raise exceptions.RefactoringError(
'Moving global elements to the same module.')
return self._calculate_changes(dest, resources, task_handle)
def _calculate_changes(self, dest, resources, task_handle):
changes = ChangeSet('Moving global <%s>' % self.old_name)
job_set = task_handle.create_jobset('Collecting Changes',
len(resources))
for file_ in resources:
job_set.started_job(file_.path)
if file_ == self.source:
changes.add_change(self._source_module_changes(dest))
elif file_ == dest:
changes.add_change(self._dest_module_changes(dest))
elif self.tools.occurs_in_module(resource=file_):
pymodule = self.pycore.resource_to_pyobject(file_)
# Changing occurrences
placeholder = '__rope_renaming_%s_' % self.old_name
source = self.tools.rename_in_module(placeholder,
resource=file_)
should_import = source is not None
# Removing out of date imports
pymodule = self.tools.new_pymodule(pymodule, source)
source = self.tools.remove_old_imports(pymodule)
# Adding new import
if should_import:
pymodule = self.tools.new_pymodule(pymodule, source)
source, imported = importutils.add_import(
self.pycore, pymodule, self._new_modname(dest), self.old_name)
source = source.replace(placeholder, imported)
source = self.tools.new_source(pymodule, source)
if source != file_.read():
changes.add_change(ChangeContents(file_, source))
job_set.finished_job()
return changes
def _source_module_changes(self, dest):
    """Changes for the module the element moves out of: remove the
    definition and, if it is still referenced there, import it back
    from its new location."""
    placeholder = '__rope_moving_%s_' % self.old_name
    handle = _ChangeMoveOccurrencesHandle(placeholder)
    occurrence_finder = occurrences.create_finder(
        self.pycore, self.old_name, self.old_pyname)
    start, end = self._get_moving_region()
    # The renamer deletes [start, end) (the moved definition) while
    # renaming occurrences outside that region to the placeholder.
    renamer = ModuleSkipRenamer(occurrence_finder, self.source,
                                handle, start, end)
    source = renamer.get_changed_module()
    if handle.occurred:
        pymodule = self.pycore.get_string_module(source, self.source)
        # Adding new import
        source, imported = importutils.add_import(
            self.pycore, pymodule, self._new_modname(dest), self.old_name)
        source = source.replace(placeholder, imported)
    return ChangeContents(self.source, source)
def _new_modname(self, dest):
    """Dotted module name of the destination resource."""
    modname = self.pycore.modname(dest)
    return modname
def _dest_module_changes(self, dest):
    """Changes for the destination module: paste the moved element right
    after the imports and fix references to it."""
    # Changing occurrences
    pymodule = self.pycore.resource_to_pyobject(dest)
    source = self.tools.rename_in_module(self.old_name, pymodule)
    pymodule = self.tools.new_pymodule(pymodule, source)

    moving, imports = self._get_moving_element_with_imports()
    source = self.tools.remove_old_imports(pymodule)
    pymodule = self.tools.new_pymodule(pymodule, source)
    pymodule, has_changed = self._add_imports2(pymodule, imports)

    module_with_imports = self.import_tools.module_imports(pymodule)
    source = pymodule.source_code
    lineno = 0
    if module_with_imports.imports:
        # Insert right after the last import statement.
        lineno = module_with_imports.imports[-1].end_line - 1
    else:
        # No imports: skip over any leading comment lines.
        while lineno < pymodule.lines.length() and \
              pymodule.lines.get_line(lineno + 1).lstrip().startswith('#'):
            lineno += 1
    if lineno > 0:
        cut = pymodule.lines.get_line_end(lineno) + 1
        result = source[:cut] + '\n\n' + moving + source[cut:]
    else:
        result = moving + source

    # Organizing imports
    source = result
    pymodule = self.pycore.get_string_module(source, dest)
    source = self.import_tools.organize_imports(pymodule, sort=False,
                                                unused=False)
    return ChangeContents(dest, source)
def _get_moving_element_with_imports(self):
    """Return (code, imports) for the element being moved."""
    moving_source = self._get_moving_element()
    return moving_code_with_imports(
        self.pycore, self.source, moving_source)
def _get_module_with_imports(self, source_code, resource):
    """Parse `source_code` and return its module-imports wrapper."""
    pymodule = self.pycore.get_string_module(source_code, resource)
    module_imports = self.import_tools.module_imports(pymodule)
    return module_imports
def _get_moving_element(self):
    """Source text of the moved definition, ending with a single newline."""
    start, end = self._get_moving_region()
    moving_text = self.source.read()[start:end]
    return moving_text.rstrip() + '\n'
def _get_moving_region(self):
    """Return (start, end) character offsets of the moved definition in
    the source module, including trailing blank lines."""
    pymodule = self.pycore.resource_to_pyobject(self.source)
    lines = pymodule.lines
    scope = self.old_pyname.get_object().get_scope()
    start = lines.get_line_start(scope.get_start())
    end_line = scope.get_end()
    # Swallow blank lines immediately following the definition.
    while end_line < lines.length() and \
          lines.get_line(end_line + 1).strip() == '':
        end_line += 1
    # +1 to include the trailing newline; clamp to the file length.
    end = min(lines.get_line_end(end_line) + 1, len(pymodule.source_code))
    return start, end
def _add_imports2(self, pymodule, new_imports):
    """Add `new_imports` to `pymodule`.

    Return a (pymodule, changed) pair; `changed` tells whether any
    import was actually added.
    """
    source = self.tools.add_imports(pymodule, new_imports)
    if source is None:
        return pymodule, False
    resource = pymodule.get_resource()
    updated = self.pycore.get_string_module(source, resource)
    return updated, True
class MoveModule(object):
    """For moving modules and packages"""

    def __init__(self, project, resource):
        self.project = project
        self.pycore = project.pycore
        # Moving a package's __init__.py means moving the whole package.
        if not resource.is_folder() and resource.name == '__init__.py':
            resource = resource.parent
        if resource.is_folder() and not resource.has_child('__init__.py'):
            raise exceptions.RefactoringError(
                'Cannot move non-package folder.')
        dummy_pymodule = self.pycore.get_string_module('')
        self.old_pyname = pynames.ImportedModule(dummy_pymodule,
                                                 resource=resource)
        self.source = self.old_pyname.get_object().get_resource()
        if self.source.is_folder():
            self.old_name = self.source.name
        else:
            # Strip the trailing '.py' to get the module name.
            self.old_name = self.source.name[:-3]
        self.tools = _MoveTools(self.pycore, self.source,
                                self.old_pyname, self.old_name)
        self.import_tools = self.tools.import_tools

    def get_changes(self, dest, resources=None,
                    task_handle=taskhandle.NullTaskHandle()):
        """Compute changes for moving this module/package into `dest`.

        :raises exceptions.RefactoringError: if `dest` is not a folder.
        """
        moving_pyobject = self.old_pyname.get_object()
        if resources is None:
            resources = self.pycore.get_python_files()
        if dest is None or not dest.is_folder():
            raise exceptions.RefactoringError(
                'Move destination for modules should be packages.')
        return self._calculate_changes(dest, resources, task_handle)

    def _calculate_changes(self, dest, resources, task_handle):
        # Update occurrences in every module, then move the file itself.
        changes = ChangeSet('Moving module <%s>' % self.old_name)
        job_set = task_handle.create_jobset('Collecting changes',
                                            len(resources))
        for module in resources:
            job_set.started_job(module.path)
            if module == self.source:
                self._change_moving_module(changes, dest)
            else:
                source = self._change_occurrences_in_module(dest,
                                                            resource=module)
                if source is not None:
                    changes.add_change(ChangeContents(module, source))
            job_set.finished_job()
        if self.project == self.source.project:
            # Only move the resource when it belongs to this project.
            changes.add_change(MoveResource(self.source, dest.path))
        return changes

    def _new_modname(self, dest):
        # Dotted name of the module after the move.
        destname = self.pycore.modname(dest)
        if destname:
            return destname + '.' + self.old_name
        return self.old_name

    def _new_import(self, dest):
        # A plain `import new.dotted.name` for the moved module.
        return importutils.NormalImport([(self._new_modname(dest), None)])

    def _change_moving_module(self, changes, dest):
        """Rewrite the moved module itself (folders need no rewriting)."""
        if not self.source.is_folder():
            pymodule = self.pycore.resource_to_pyobject(self.source)
            # Relative imports would break after the move; make them absolute.
            source = self.import_tools.relatives_to_absolutes(pymodule)
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self._change_occurrences_in_module(dest, pymodule)
            source = self.tools.new_source(pymodule, source)
            if source != self.source.read():
                changes.add_change(ChangeContents(self.source, source))

    def _change_occurrences_in_module(self, dest, pymodule=None,
                                      resource=None):
        """Return updated source for one module, or `None` if unchanged."""
        if not self.tools.occurs_in_module(pymodule=pymodule,
                                           resource=resource):
            return
        if pymodule is None:
            pymodule = self.pycore.resource_to_pyobject(resource)
        new_name = self._new_modname(dest)
        new_import = self._new_import(dest)
        source = self.tools.rename_in_module(
            new_name, imports=True, pymodule=pymodule, resource=resource)
        # A new import is needed only when non-import occurrences exist.
        should_import = self.tools.occurs_in_module(
            pymodule=pymodule, resource=resource, imports=False)
        pymodule = self.tools.new_pymodule(pymodule, source)
        source = self.tools.remove_old_imports(pymodule)
        if should_import:
            pymodule = self.tools.new_pymodule(pymodule, source)
            source = self.tools.add_imports(pymodule, [new_import])
        source = self.tools.new_source(pymodule, source)
        if source != pymodule.resource.read():
            return source
class _ChangeMoveOccurrencesHandle(object):
def __init__(self, new_name):
self.new_name = new_name
self.occurred = False
def occurred_inside_skip(self, change_collector, occurrence):
pass
def occurred_outside_skip(self, change_collector, occurrence):
start, end = occurrence.get_primary_range()
change_collector.add_change(start, end, self.new_name)
self.occurred = True
class _MoveTools(object):
    """Shared helpers used by the move refactorings."""

    def __init__(self, pycore, source, pyname, old_name):
        self.pycore = pycore
        self.source = source
        self.old_pyname = pyname
        self.old_name = old_name
        self.import_tools = importutils.ImportTools(self.pycore)

    def remove_old_imports(self, pymodule):
        """Drop imports of the moved element; return new source or `None`
        when nothing changed."""
        old_source = pymodule.source_code
        module_with_imports = self.import_tools.module_imports(pymodule)

        class CanSelect(object):
            # Filter predicate: keep every imported name except the one
            # that resolves to the moved element.
            changed = False
            old_name = self.old_name
            old_pyname = self.old_pyname

            def __call__(self, name):
                try:
                    if name == self.old_name and \
                       pymodule[name].get_object() == \
                       self.old_pyname.get_object():
                        self.changed = True
                        return False
                except exceptions.AttributeNotFoundError:
                    pass
                return True
        can_select = CanSelect()
        module_with_imports.filter_names(can_select)
        new_source = module_with_imports.get_changed_source()
        if old_source != new_source:
            return new_source

    def rename_in_module(self, new_name, pymodule=None,
                         imports=False, resource=None):
        """Rename occurrences of the old name to `new_name`.

        Return the changed source, or `None` when nothing changed.
        """
        occurrence_finder = self._create_finder(imports)
        source = rename.rename_in_module(
            occurrence_finder, new_name, replace_primary=True,
            pymodule=pymodule, resource=resource)
        return source

    def occurs_in_module(self, pymodule=None, resource=None, imports=True):
        """True iff the moved element occurs in the given module."""
        finder = self._create_finder(imports)
        for occurrence in finder.find_occurrences(pymodule=pymodule,
                                                  resource=resource):
            return True
        return False

    def _create_finder(self, imports):
        # `imports` controls whether occurrences in import statements count.
        return occurrences.create_finder(self.pycore, self.old_name,
                                         self.old_pyname, imports=imports)

    def new_pymodule(self, pymodule, source):
        """Re-parse `source` when it is not `None`; else keep `pymodule`."""
        if source is not None:
            return self.pycore.get_string_module(
                source, pymodule.get_resource())
        return pymodule

    def new_source(self, pymodule, source):
        """Return `source`, falling back to the module's current code."""
        if source is None:
            return pymodule.source_code
        return source

    def add_imports(self, pymodule, new_imports):
        return _add_imports_to_module(self.import_tools, pymodule, new_imports)
def _add_imports_to_module(import_tools, pymodule, new_imports):
module_with_imports = import_tools.module_imports(pymodule)
for new_import in new_imports:
module_with_imports.add_import(new_import)
return module_with_imports.get_changed_source()
def moving_code_with_imports(pycore, resource, source):
    """Prepare `source` (cut out of `resource`) for pasting elsewhere.

    Return (moving, imports): the code with its import block stripped
    off, and the list of import infos the destination will need --
    including a from-import of names the code still uses from its
    origin module.
    """
    import_tools = importutils.ImportTools(pycore)
    pymodule = pycore.get_string_module(source, resource)
    origin = pycore.resource_to_pyobject(resource)

    # Start with all imports of the origin module.
    imports = []
    for stmt in import_tools.module_imports(origin).imports:
        imports.append(stmt.import_info)

    # Names defined in the origin module but not in the moved code may
    # still be referenced; import them back from the origin.
    back_names = []
    for name in origin:
        if name not in pymodule:
            back_names.append(name)
    imports.append(import_tools.get_from_import(resource, back_names))

    source = _add_imports_to_module(import_tools, pymodule, imports)
    pymodule = pycore.get_string_module(source, resource)
    source = import_tools.relatives_to_absolutes(pymodule)
    pymodule = pycore.get_string_module(source, resource)
    # organize_imports drops the unused ones added above.
    source = import_tools.organize_imports(pymodule, selfs=False)
    pymodule = pycore.get_string_module(source, resource)

    # extracting imports after changes
    module_imports = import_tools.module_imports(pymodule)
    imports = [import_stmt.import_info
               for import_stmt in module_imports.imports]
    start = 1
    if module_imports.imports:
        start = module_imports.imports[-1].end_line
    lines = codeanalyze.SourceLinesAdapter(source)
    while start < lines.length() and not lines.get_line(start).strip():
        start += 1
    # Everything after the import block is the code to move.
    moving = source[lines.get_line_start(start):]
    return moving, imports
class ModuleSkipRenamerHandle(object):
    """Default no-op handle for `ModuleSkipRenamer` occurrence events."""

    def occurred_outside_skip(self, change_collector, occurrence):
        pass

    def occurred_inside_skip(self, change_collector, occurrence):
        pass
class ModuleSkipRenamer(object):
    """Rename occurrences in a module

    This class can be used when you want to treat a region in a file
    separately from other parts when renaming.
    """

    def __init__(self, occurrence_finder, resource, handle=None,
                 skip_start=0, skip_end=0, replacement=''):
        """Constructor

        if replacement is `None` the region is not changed.  Otherwise
        it is replaced with `replacement`.
        """
        self.occurrence_finder = occurrence_finder
        self.resource = resource
        self.skip_start = skip_start
        self.skip_end = skip_end
        self.replacement = replacement
        self.handle = handle
        if self.handle is None:
            # Bug fix: the default handle class is ModuleSkipRenamerHandle;
            # the previous `ModuleSkipHandle` name does not exist and
            # raised NameError whenever no handle was supplied.
            self.handle = ModuleSkipRenamerHandle()

    def get_changed_module(self):
        """Return the changed module source, or `None` if nothing changed."""
        source = self.resource.read()
        change_collector = codeanalyze.ChangeCollector(source)
        if self.replacement is not None:
            change_collector.add_change(self.skip_start, self.skip_end,
                                        self.replacement)
        for occurrence in self.occurrence_finder.find_occurrences(self.resource):
            start, end = occurrence.get_primary_range()
            if self.skip_start <= start < self.skip_end:
                # Occurrence lies inside the skipped region.
                self.handle.occurred_inside_skip(change_collector, occurrence)
            else:
                self.handle.occurred_outside_skip(change_collector, occurrence)
        result = change_collector.get_changed()
        if result is not None and result != source:
            return result
| {
"repo_name": "seawaywen/vim-config",
"path": "bundle/python-mode/pymode/libs3/rope/refactor/move.py",
"copies": "32",
"size": "26600",
"license": "apache-2.0",
"hash": 6179609204233381000,
"line_mean": 41.3566878981,
"line_max": 95,
"alpha_frac": 0.6083458647,
"autogenerated": false,
"ratio": 4.057352043929225,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": null,
"num_lines": null
} |
"""A module containing convenient methods for general machine learning"""
from __future__ import print_function
from __future__ import division
from __future__ import unicode_literals
from __future__ import absolute_import
from builtins import zip
from builtins import int
from builtins import range
from future import standard_library
standard_library.install_aliases()
from past.utils import old_div
from builtins import object
__author__ = 'wittawat'
import autograd.numpy as np
import time
class ContextTimer(object):
    """
    Context manager that measures the wall-clock time of its body.

    Example:

        with ContextTimer() as t:
            # do something
        time_spent = t.secs

    From https://www.huyng.com/posts/python-performance-analysis
    """

    def __init__(self, verbose=False):
        # When verbose, the elapsed time is printed on exit.
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc_info):
        stop = time.time()
        self.end = stop
        self.secs = stop - self.start
        if self.verbose:
            print('elapsed time: %f ms' % (self.secs*1000))
# end class ContextTimer
class NumpySeedContext(object):
    """
    Context manager that seeds numpy's global RNG on entry and restores
    the previous RNG state on exit.
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # Save the global RNG state so it can be restored later.
        self.cur_state = np.random.get_state()
        np.random.seed(self.seed)
        return self

    def __exit__(self, *exc_info):
        np.random.set_state(self.cur_state)
# end NumpySeedContext
class ChunkIterable(object):
    """
    Iterable of (f, t) index pairs covering [start, end): consecutive
    pairs are `chunk_size` apart, except the last pair which always
    extends exactly to `end`.
    """

    def __init__(self, start, end, chunk_size):
        self.start = start
        self.end = end
        self.chunk_size = chunk_size

    def __iter__(self):
        # Materializes the boundary list; fine for moderate ranges.
        bounds = list(range(self.start, self.end, self.chunk_size))
        bounds.append(self.end)
        return zip(bounds, bounds[1:])
# end ChunkIterable
def constrain(val, min_val, max_val):
    """Clamp `val` to the interval [min_val, max_val]."""
    at_least_min = max(min_val, val)
    return min(max_val, at_least_min)
def dist_matrix(X, Y):
    """
    Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
    """
    row_norms_x = np.sum(X**2, 1)
    row_norms_y = np.sum(Y**2, 1)
    # ||x - y||^2 = ||x||^2 - 2 x.y + ||y||^2, broadcast over all pairs.
    D2 = row_norms_x[:, np.newaxis] - 2.0*X.dot(Y.T) + row_norms_y[np.newaxis, :]
    # Guard against tiny negative values from floating-point error
    # before taking the square root.
    D2[D2 < 0] = 0
    return np.sqrt(D2)
def dist2_matrix(X, Y):
    """
    Construct a pairwise Euclidean distance **squared** matrix of size
    X.shape[0] x Y.shape[0]
    """
    sq_norms_x = np.sum(X**2, 1)
    sq_norms_y = np.sum(Y**2, 1)
    # Expand ||x - y||^2 and broadcast over all (row of X, row of Y) pairs.
    return sq_norms_x[:, np.newaxis] - 2.0*np.dot(X, Y.T) + sq_norms_y[np.newaxis, :]
def meddistance(X, subsample=None, mean_on_fail=True):
    """
    Compute the median of pairwise distances (not distance squared) of points
    in the matrix.  Useful as a heuristic for setting Gaussian kernel's width.

    Parameters
    ----------
    X : n x d numpy array
    subsample : int or None
        If given, compute the median on at most this many randomly chosen
        rows (fixed internal seed) instead of all of X.
    mean_on_fail: True/False. If True, use the mean when the median distance is 0.
        This can happen especially, when the data are discrete e.g., 0/1, and
        there are slightly more 0 than 1, making the median zero.

    Return
    ------
    median distance
    """
    if subsample is None:
        D = dist_matrix(X, X)
        # Lower-triangular entries only: each pair once, no self-distances.
        Itri = np.tril_indices(D.shape[0], -1)
        Tri = D[Itri]
        med = np.median(Tri)
        if med <= 0:
            # use the mean
            # NOTE(review): `mean_on_fail` is not consulted here -- the mean
            # is used whenever the median is <= 0, regardless of the flag.
            return np.mean(Tri)
        return med
    else:
        assert subsample > 0
        # Subsample with a fixed seed, then restore the global RNG state.
        rand_state = np.random.get_state()
        np.random.seed(9827)
        n = X.shape[0]
        ind = np.random.choice(n, min(subsample, n), replace=False)
        np.random.set_state(rand_state)
        # recursion just one
        return meddistance(X[ind, :], None, mean_on_fail)
def is_real_num(X):
    """Return True where x is a finite real number.

    Work for a numpy array as well. Return an array of the same dimension.
    """
    def each_elem_true(x):
        try:
            float(x)
            # np.isnan/np.isinf raise TypeError for non-numeric inputs
            # (e.g. numeric strings); those count as "not a real number".
            return not (np.isnan(x) or np.isinf(x))
        except (ValueError, TypeError):
            # Bug fix: the bare `except:` also swallowed SystemExit /
            # KeyboardInterrupt; only conversion failures should map to False.
            return False
    f = np.vectorize(each_elem_true)
    return f(X)
def tr_te_indices(n, tr_proportion, seed=9282):
    """Get two logical vectors for indexing train/test points.

    Return (tr_ind, te_ind)
    """
    saved_state = np.random.get_state()
    np.random.seed(seed)
    train_mask = np.zeros(n, dtype=bool)
    chosen = np.random.choice(n, int(tr_proportion*n), replace=False)
    train_mask[chosen] = True
    test_mask = np.logical_not(train_mask)
    # Restore the global RNG state so callers are unaffected.
    np.random.set_state(saved_state)
    return (train_mask, test_mask)
def subsample_ind(n, k, seed=32):
    """
    Return a list of indices to choose k out of n without replacement
    """
    # Draw under a temporary seed so the global RNG state is untouched.
    with NumpySeedContext(seed=seed):
        chosen = np.random.choice(n, k, replace=False)
    return chosen
def subsample_rows(X, k, seed=29):
    """
    Subsample k rows from the matrix X.
    """
    n_rows = X.shape[0]
    if k > n_rows:
        raise ValueError('k exceeds the number of rows.')
    keep = subsample_ind(n_rows, k, seed=seed)
    return X[keep, :]
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points
    from the fit.
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct
    a new covariance matrix before drawing samples. Useful to shrink the spread
    of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            # np.cov returns a scalar for 1-d data; make it a 1x1 matrix.
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        # Clip tiny negative eigenvalues caused by numerical error.
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        # Reconstruct the covariance with powered eigenvalues + ridge term.
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V
def bound_by_data(Z, Data):
    """
    Determine lower and upper bound for each dimension from the Data, and project
    Z so that all points in Z live in the bounds.

    Z: m x d
    Data: n x d

    Return a projected Z of size m x d.
    """
    lower = np.min(Data, 0)
    upper = np.max(Data, 0)
    # clip == minimum(maximum(Z, lower), upper), broadcast per dimension.
    return np.clip(Z, lower, upper)
def one_of_K_code(arr):
    """
    Make a one-of-K coding out of the numpy array.
    For example, if arr = ([0, 1, 0, 2]), then return a 2d array of the form
    [[1, 0, 0],
    [0, 1, 0],
    [1, 0, 0],
    [0, 0, 1]]
    """
    levels = np.unique(arr)
    code = np.zeros((len(arr), len(levels)))
    for col, level in enumerate(levels):
        # Tolerance-based match so float levels compare robustly.
        rows = np.where(np.abs(arr - level) < 1e-8)[0]
        code[rows, col] = 1
    return code
def fullprint(*args, **kwargs):
    """Pretty-print numpy arrays without truncation.

    Based on https://gist.github.com/ZGainsforth/3a306084013633c52881
    """
    from pprint import pprint
    import sys
    import numpy
    opt = numpy.get_printoptions()
    # Bug fix: threshold='nan' is rejected (TypeError) by modern numpy;
    # a huge numeric threshold achieves the same "print everything".
    numpy.set_printoptions(threshold=sys.maxsize)
    try:
        pprint(*args, **kwargs)
    finally:
        # Always restore the caller's print options, even if pprint raises.
        numpy.set_printoptions(**opt)
def standardize(X):
    """Z-score each column of X: subtract the column mean, divide by the
    column standard deviation.  Assumes no column has zero std."""
    col_means = np.mean(X, 0)
    col_stds = np.std(X, axis=0)
    # X - col_means is always float, so true division matches old_div here.
    Zx = (X - col_means) / col_stds
    assert np.all(np.isfinite(Zx))
    return Zx
def outer_rows(X, Y):
    """
    Compute the outer product of each row in X, and Y.

    X: n x dx numpy array
    Y: n x dy numpy array

    Return an n x dx x dy numpy array.
    """
    # einsum forms all per-row outer products in one vectorized call.
    return np.einsum('ij,ik->ijk', X, Y)
def randn(m, n, seed=3):
    """Draw an m x n matrix of standard normal variates under a fixed seed."""
    with NumpySeedContext(seed=seed):
        sample = np.random.randn(m, n)
    return sample
def matrix_inner_prod(A, B):
    """
    Compute the matrix (Frobenius) inner product <A, B> = trace(A^T * B).
    """
    assert A.shape[0] == B.shape[0]
    assert A.shape[1] == B.shape[1]
    flat_a = A.reshape(-1)
    flat_b = B.reshape(-1)
    return flat_a.dot(flat_b)
def get_classpath(obj):
    """
    Return the full module and class path of the obj. For instance,
    kgof.density.IsotropicNormal

    Return a string.
    """
    klass = obj.__class__
    return '.'.join([klass.__module__, klass.__name__])
def merge_dicts(*dict_args):
    """
    Given any number of dicts, shallow copy and merge into a new dict,
    precedence goes to key value pairs in latter dicts.

    http://stackoverflow.com/questions/38987/how-to-merge-two-python-dictionaries-in-a-single-expression
    """
    merged = {}
    for mapping in dict_args:
        merged.update(mapping)
    return merged
| {
"repo_name": "wittawatj/kernel-gof",
"path": "kgof/util.py",
"copies": "1",
"size": "9350",
"license": "mit",
"hash": -1391773571418708000,
"line_mean": 27.1626506024,
"line_max": 104,
"alpha_frac": 0.5960427807,
"autogenerated": false,
"ratio": 3.2476554359152483,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9291147714444942,
"avg_score": 0.010510100434061309,
"num_lines": 332
} |
"""A module containing convenient methods for general machine learning"""
from __future__ import print_function
from builtins import object
__author__ = 'wittawat'
import autograd.numpy as np
import time
class ContextTimer(object):
    """
    Context manager timing the execution of its body.

    Usage:

        with ContextTimer() as t:
            # do something
        time_spent = t.secs

    From https://www.huyng.com/posts/python-performance-analysis
    """

    def __init__(self, verbose=False):
        # If True, report the elapsed time on exit.
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *exc_info):
        finish = time.time()
        self.end = finish
        self.secs = finish - self.start
        if self.verbose:
            print('elapsed time: %f ms' % (self.secs*1000))
# end class ContextTimer
class NumpySeedContext(object):
    """
    Context manager that reseeds numpy's global RNG for the duration of
    the block and restores the prior state afterwards.
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # Capture the RNG state before reseeding.
        self.cur_state = np.random.get_state()
        np.random.seed(self.seed)
        return self

    def __exit__(self, *exc_info):
        np.random.set_state(self.cur_state)
# end class NumpySeedContext
class ChunkIterable(object):
    """
    Iterable yielding (f, t) index pairs that partition [start, end):
    chunks are `chunk_size` long except the final one, which always
    runs to `end`.
    """

    def __init__(self, start, end, chunk_size):
        self.start = start
        self.end = end
        self.chunk_size = chunk_size

    def __iter__(self):
        # Boundary list is materialized; acceptable for moderate ranges.
        edges = list(range(self.start, self.end, self.chunk_size))
        edges.append(self.end)
        return zip(edges, edges[1:])
# end ChunkIterable
def constrain(val, min_val, max_val):
    """Clamp `val` into [min_val, max_val]."""
    bounded_below = max(min_val, val)
    return min(max_val, bounded_below)
def dist_matrix(X, Y):
    """
    Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
    """
    norms_x = np.sum(X**2, 1)
    norms_y = np.sum(Y**2, 1)
    # ||x - y||^2 expanded and broadcast over all pairs of rows.
    D2 = norms_x[:, np.newaxis] - 2.0*np.dot(X, Y.T) + norms_y[np.newaxis, :]
    # Clamp small negatives from round-off before the square root.
    D2[D2 < 0] = 0
    return np.sqrt(D2)
def meddistance(X, subsample=None, mean_on_fail=True):
    """
    Compute the median of pairwise distances (not distance squared) of points
    in the matrix.  Useful as a heuristic for setting Gaussian kernel's width.

    Parameters
    ----------
    X : n x d numpy array
    subsample : int or None
        If given, compute the median on at most this many randomly chosen
        rows (fixed internal seed) instead of all of X.
    mean_on_fail: True/False. If True, use the mean when the median distance is 0.
        This can happen especially, when the data are discrete e.g., 0/1, and
        there are slightly more 0 than 1, making the median zero.

    Return
    ------
    median distance
    """
    if subsample is None:
        D = dist_matrix(X, X)
        # Lower-triangular entries only: each pair once, no self-distances.
        Itri = np.tril_indices(D.shape[0], -1)
        Tri = D[Itri]
        med = np.median(Tri)
        if med <= 0:
            # use the mean
            # NOTE(review): `mean_on_fail` is not consulted here -- the mean
            # is used whenever the median is <= 0, regardless of the flag.
            return np.mean(Tri)
        return med
    else:
        assert subsample > 0
        # Subsample with a fixed seed, then restore the global RNG state.
        rand_state = np.random.get_state()
        np.random.seed(9827)
        n = X.shape[0]
        ind = np.random.choice(n, min(subsample, n), replace=False)
        np.random.set_state(rand_state)
        # recursion just one
        return meddistance(X[ind, :], None, mean_on_fail)
def is_real_num(x):
    """Return True if x is a finite real number (not NaN or +/-inf)."""
    try:
        val = float(x)
    except (ValueError, TypeError):
        # Bug fix: the original only caught ValueError, so e.g.
        # is_real_num(None) raised TypeError instead of returning False;
        # it also called np.isnan on the *unconverted* input, which
        # crashed on numeric strings like '3'.
        return False
    return not (np.isnan(val) or np.isinf(val))
def tr_te_indices(n, tr_proportion, seed=9282):
    """Get two logical vectors for indexing train/test points.

    Return (tr_ind, te_ind)
    """
    saved_state = np.random.get_state()
    np.random.seed(seed)
    train_mask = np.zeros(n, dtype=bool)
    picked = np.random.choice(n, int(tr_proportion*n), replace=False)
    train_mask[picked] = True
    test_mask = np.logical_not(train_mask)
    # Leave the global RNG state as we found it.
    np.random.set_state(saved_state)
    return (train_mask, test_mask)
def subsample_ind(n, k, seed=28):
    """
    Return a list of indices to choose k out of n without replacement
    """
    saved_state = np.random.get_state()
    np.random.seed(seed)
    chosen = np.random.choice(n, k, replace=False)
    # Restore the RNG state so callers are unaffected by the fixed seed.
    np.random.set_state(saved_state)
    return chosen
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points
    from the fit.
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct
    a new covariance matrix before drawing samples. Useful to shrink the spread
    of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            # np.cov returns a scalar for 1-d data; make it a 1x1 matrix.
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        # Clip tiny negative eigenvalues caused by numerical error.
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        # Reconstruct the covariance with powered eigenvalues + ridge term.
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V
| {
"repo_name": "wittawatj/interpretable-test",
"path": "freqopttest/util.py",
"copies": "1",
"size": "5510",
"license": "mit",
"hash": -2589667794061698000,
"line_mean": 27.8481675393,
"line_max": 85,
"alpha_frac": 0.598185118,
"autogenerated": false,
"ratio": 3.390769230769231,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.44889543487692307,
"avg_score": null,
"num_lines": null
} |
"""A module containing convenient methods for general machine learning"""
__author__ = 'wittawat'
import numpy as np
import time
class ContextTimer(object):
    """
    A class used to time an execution of a code snippet.
    Use it with with .... as ...
    For example,

        with ContextTimer() as t:
            # do something
        time_spent = t.secs

    From https://www.huyng.com/posts/python-performance-analysis
    """

    def __init__(self, verbose=False):
        # If True, print the elapsed time when the block exits.
        self.verbose = verbose

    def __enter__(self):
        self.start = time.time()
        return self

    def __exit__(self, *args):
        self.end = time.time()
        self.secs = self.end - self.start
        if self.verbose:
            # Bug fix: the original used the Python-2-only `print '...'`
            # statement; the parenthesized single-argument form prints the
            # same text under both Python 2 and Python 3.
            print('elapsed time: %f ms' % (self.secs*1000))
# end class ContextTimer
class NumpySeedContext(object):
    """
    Context manager that seeds numpy's global RNG on entry and puts the
    previous RNG state back on exit.
    """

    def __init__(self, seed):
        self.seed = seed

    def __enter__(self):
        # Remember the global RNG state before reseeding.
        self.cur_state = np.random.get_state()
        np.random.seed(self.seed)
        return self

    def __exit__(self, *exc_info):
        np.random.set_state(self.cur_state)
def dist_matrix(X, Y):
    """
    Construct a pairwise Euclidean distance matrix of size X.shape[0] x Y.shape[0]
    """
    sq_x = np.sum(X**2, 1)
    sq_y = np.sum(Y**2, 1)
    # Expansion of ||x - y||^2, broadcast across all row pairs.
    D2 = sq_x[:, np.newaxis] - 2.0*X.dot(Y.T) + sq_y[np.newaxis, :]
    # Round-off can produce tiny negatives; clamp before sqrt.
    D2[D2 < 0] = 0
    return np.sqrt(D2)
def meddistance(X, subsample=None, mean_on_fail=True):
    """
    Compute the median of pairwise distances (not distance squared) of points
    in the matrix.  Useful as a heuristic for setting Gaussian kernel's width.

    Parameters
    ----------
    X : n x d numpy array
    subsample : int or None
        If given, compute the median on at most this many randomly chosen
        rows (fixed internal seed) instead of all of X.
    mean_on_fail: True/False. If True, use the mean when the median distance is 0.
        This can happen especially, when the data are discrete e.g., 0/1, and
        there are slightly more 0 than 1, making the median zero.

    Return
    ------
    median distance
    """
    if subsample is None:
        D = dist_matrix(X, X)
        # Lower-triangular entries only: each pair once, no self-distances.
        Itri = np.tril_indices(D.shape[0], -1)
        Tri = D[Itri]
        med = np.median(Tri)
        if med <= 0:
            # use the mean
            # NOTE(review): `mean_on_fail` is not consulted here -- the mean
            # is used whenever the median is <= 0, regardless of the flag.
            return np.mean(Tri)
        return med
    else:
        assert subsample > 0
        # Subsample with a fixed seed, then restore the global RNG state.
        rand_state = np.random.get_state()
        np.random.seed(9827)
        n = X.shape[0]
        ind = np.random.choice(n, min(subsample, n), replace=False)
        np.random.set_state(rand_state)
        # recursion just one
        return meddistance(X[ind, :], None, mean_on_fail)
def is_real_num(x):
    """Return True if x is a finite real number (not NaN or +/-inf)."""
    try:
        val = float(x)
    except (ValueError, TypeError):
        # Bug fix: only ValueError was caught, so is_real_num(None) raised
        # TypeError; np.isnan was also applied to the unconverted input,
        # which crashed on numeric strings such as '3'.
        return False
    return not (np.isnan(val) or np.isinf(val))
def tr_te_indices(n, tr_proportion, seed=9282):
    """Get two logical vectors for indexing train/test points.

    Return (tr_ind, te_ind)
    """
    saved_state = np.random.get_state()
    np.random.seed(seed)
    is_train = np.zeros(n, dtype=bool)
    selected = np.random.choice(n, int(tr_proportion*n), replace=False)
    is_train[selected] = True
    is_test = np.logical_not(is_train)
    # Restore the global RNG state for the caller.
    np.random.set_state(saved_state)
    return (is_train, is_test)
def subsample_ind(n, k, seed=32):
    """
    Return a list of indices to choose k out of n without replacement
    """
    saved_state = np.random.get_state()
    np.random.seed(seed)
    picked = np.random.choice(n, k, replace=False)
    # Put the global RNG state back so callers are unaffected.
    np.random.set_state(saved_state)
    return picked
def subsample_rows(X, k, seed=29):
    """
    Subsample k rows from the matrix X.
    """
    total_rows = X.shape[0]
    if k > total_rows:
        raise ValueError('k exceeds the number of rows.')
    keep = subsample_ind(total_rows, k, seed=seed)
    return X[keep, :]
def cca(X, Y, reg=1e-5):
    """
    - X: n x dx data matrix
    - Y: n x dy data matrix

    Return (vals, Vx, Vy) where vals is a numpy array of decreasing eigenvalues,
    Vx is a square matrix whose columns are eigenvectors for X corresponding to vals.
    Vy is a square matrix whose columns are eigenvectors for Y corresponding to vals.
    """
    # The one-eigenproblem formulation is kept for reference but unused.
    #return _cca_one_eig(X, Y, reg)
    return _cca_two_eig(X, Y, reg)
def _cca_two_eig(X, Y, reg=1e-5):
    """
    CCA formulation solving two eigenvalue problems.

    Returns (vals, aV, bV): canonical correlations sorted descendingly
    and the corresponding projection directions for X and for Y.
    """
    dx = X.shape[1]
    dy = Y.shape[1]
    assert X.shape[0] == Y.shape[0]
    n = X.shape[0]
    mx = np.mean(X, 0)
    my = np.mean(Y, 0)
    # Cross-covariance, dx x dy
    Cxy = X.T.dot(Y)/n - np.outer(mx, my)
    Cxx = np.cov(X.T)
    #print Cxx
    Cyy = np.cov(Y.T)
    # Cxx, Cyy have to be invertible; a ridge `reg` is added for stability.
    if dx == 1:
        CxxICxy = Cxy/Cxx
    else:
        CxxICxy = np.linalg.solve(Cxx + reg*np.eye(dx), Cxy)
    if dy==1:
        CyyICyx = Cxy.T/Cyy
    else:
        CyyICyx = np.linalg.solve(Cyy + reg*np.eye(dy), Cxy.T)

    # problem for a: eigenproblem of Cxx^-1 Cxy Cyy^-1 Cyx
    avals, aV = np.linalg.eig(CxxICxy.dot(CyyICyx))
    #print avals
    #print 'aV'
    #print aV

    # problem for b: the mirror-image eigenproblem for the Y directions
    bvals, bV = np.linalg.eig(CyyICyx.dot(CxxICxy))
    #print bvals
    #print 'bV'
    #print bV

    #from IPython.core.debugger import Tracer
    #Tracer()()
    dim = min(dx, dy)
    # sort descendingly
    Ia = np.argsort(-avals)
    avals = avals[Ia[:dim]]
    aV = aV[:, Ia[:dim]]
    Ib = np.argsort(-bvals)
    bvals = bvals[Ib[:dim]]
    bV = bV[:, Ib[:dim]]
    # Both problems must agree on the eigenvalues (sanity check).
    np.testing.assert_array_almost_equal(avals, bvals)
    return np.real(avals), np.real(aV), np.real(bV)
def _cca_one_eig(X, Y, reg=1e-5):
    """
    CCA formulation with one big block diagonal eigenvalue problem.
    """
    #raise RuntimeError('There is a bug in this one. Eigenvalues can be outside [-1, 1]. See _cca_one_eig() instead')
    # NOTE(review): the commented-out warning above indicates this
    # formulation is known to be unreliable; `cca()` calls `_cca_two_eig`.
    dx = X.shape[1]
    dy = Y.shape[1]
    assert X.shape[0] == Y.shape[0]
    n = X.shape[0]
    mx = np.mean(X, 0)
    my = np.mean(Y, 0)
    # Cross-covariance, dx x dy
    Cxy = X.T.dot(Y)/n - np.outer(mx, my)
    Cxx = np.cov(X.T)
    #print Cxx
    Cyy = np.cov(Y.T)
    # Cxx, Cyy have to be invertible; `reg` is the stabilizing ridge.
    if dx == 1:
        CxxICxy = Cxy/Cxx
    else:
        CxxICxy = np.linalg.solve(Cxx+reg*np.eye(dx), Cxy)
    if dy==1:
        CyyICyx = Cxy.T/Cyy
    else:
        CyyICyx = np.linalg.solve(Cyy+reg*np.eye(dy), Cxy.T)

    # CCA block matrix: [[0, Cxx^-1 Cxy], [Cyy^-1 Cyx, 0]]
    #print CyyICyx
    R1 = np.hstack((np.zeros((dx, dx)), CxxICxy ))
    R2 = np.hstack((CyyICyx, np.zeros((dy, dy))) )
    B = np.vstack((R1, R2))
    assert B.shape[0] == B.shape[1]
    # eigen problem
    vals, V = np.linalg.eig(B)
    dim = min(dx, dy)
    # sort descendingly
    I = np.argsort(-vals)
    vals = vals[I[:dim]]
    V = V[:, I]
    # Split eigenvectors: first dx rows belong to X, the rest to Y.
    Vx = V[:dx, :dim]
    Vy = V[dx:, :dim]
    return np.real(vals), np.real(Vx), np.real(Vy)
def fit_gaussian_draw(X, J, seed=28, reg=1e-7, eig_pow=1.0):
    """
    Fit a multivariate normal to the data X (n x d) and draw J points
    from the fit.
    - reg: regularizer to use with the covariance matrix
    - eig_pow: raise eigenvalues of the covariance matrix to this power to construct
    a new covariance matrix before drawing samples. Useful to shrink the spread
    of the variance.
    """
    with NumpySeedContext(seed=seed):
        d = X.shape[1]
        mean_x = np.mean(X, 0)
        cov_x = np.cov(X.T)
        if d==1:
            # np.cov returns a scalar for 1-d data; make it a 1x1 matrix.
            cov_x = np.array([[cov_x]])
        [evals, evecs] = np.linalg.eig(cov_x)
        # Clip tiny negative eigenvalues caused by numerical error.
        evals = np.maximum(0, np.real(evals))
        assert np.all(np.isfinite(evals))
        evecs = np.real(evecs)
        # Reconstruct the covariance with powered eigenvalues + ridge term.
        shrunk_cov = evecs.dot(np.diag(evals**eig_pow)).dot(evecs.T) + reg*np.eye(d)
        V = np.random.multivariate_normal(mean_x, shrunk_cov, J)
    return V
def bound_by_data(Z, Data):
    """
    Determine lower and upper bound for each dimension from the Data, and project
    Z so that all points in Z live in the bounds.

    Z: m x d
    Data: n x d

    Return a projected Z of size m x d.
    """
    Low = np.min(Data, 0)
    Up = np.max(Data, 0)
    # np.clip broadcasts the (d,) bounds across all rows of Z, replacing the
    # original explicit np.repeat + np.maximum/np.minimum construction.
    return np.clip(Z, Low, Up)
def one_of_K_code(arr):
    """
    Make a one-of-K coding out of the numpy array.
    For example, if arr = ([0, 1, 0, 2]), then return a 2d array of the form
     [[1, 0, 0],
      [0, 1, 0],
      [1, 0, 0],
      [0, 0, 1]]

    (The original docstring example wrongly showed a final row of [0, 0, 2];
    the output is a binary indicator matrix with one column per unique value.)
    """
    U = np.unique(arr)
    n = len(arr)
    nu = len(U)
    X = np.zeros((n, nu))
    for i, u in enumerate(U):
        # tolerance-based match so float arrays with tiny noise still code correctly
        Ii = np.where(np.abs(arr - u) < 1e-8)
        X[Ii[0], i] = 1
    return X
def fullprint(*args, **kwargs):
    """Pretty-print numpy arrays without truncation.

    Temporarily disables numpy's print summarization, pretty-prints the
    arguments, then restores the previous print options.

    Based on: https://gist.github.com/ZGainsforth/3a306084013633c52881
    """
    from pprint import pprint
    import sys
    import numpy
    opt = numpy.get_printoptions()
    try:
        # The original passed threshold='nan', which modern numpy rejects
        # with a TypeError; sys.maxsize effectively disables summarization.
        numpy.set_printoptions(threshold=sys.maxsize)
        pprint(*args, **kwargs)
    finally:
        # Restore the caller's print options even if pprint raises.
        numpy.set_printoptions(**opt)
def standardize(X):
    """Z-score each column of X: subtract the column mean, divide by the column std.

    Assumes no column has zero standard deviation.
    """
    col_means = np.mean(X, 0)
    col_stds = np.std(X, axis=0)
    standardized = (X - col_means) / col_stds
    # A zero std or non-numeric input would produce inf/nan; fail loudly.
    assert np.all(np.isfinite(standardized))
    return standardized
| {
"repo_name": "Diviyan-Kalainathan/causal-humans",
"path": "Cause-effect/lib/fsic/util.py",
"copies": "3",
"size": "9209",
"license": "mit",
"hash": 4800485878089776000,
"line_mean": 25.9269005848,
"line_max": 117,
"alpha_frac": 0.5748724074,
"autogenerated": false,
"ratio": 3.003587736464449,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011728157330157389,
"num_lines": 342
} |
""" A module containing example functions for reading and plotting user output
It is important to make sure the functions work as the error messages can be
varied and messy to handle. The software raises ImportError if there is
something wrong with importing or running the code.
"""
import matplotlib.pyplot as plt
def line_reader(line, names, sep=', ', *args):
    """ Maps a line of output to names given as string

    Parameters:
        line (str): a line of the output file
        names (str): whitespace-separated names for the fields in the line
        sep (str): separator for the data in the line

    Returns:
        dict: field name -> float value parsed from the line
    """
    # Whitespace-separated names become the dictionary keys.
    keys = names.split()
    # Each separated field of the line is parsed as a float.
    parsed_values = map(float, line.strip().split(sep))
    return dict(zip(keys, parsed_values))
def file_reader(output_file, names, sep=', ', *args):
    """ Reads a whole output file and maps its columns to names

    Parameters:
        output_file (file object): output file file object
        names (str): whitespace-separated names for the columns
        sep (str): separator for the data in each line

    Returns:
        dict: column name -> list of float values, one per line of the file
    """
    keys = names.split()
    # Start every column with an empty list so all keys are always present.
    columns = {key: [] for key in keys}
    for raw_line in output_file.read().strip().split('\n'):
        parsed = (float(field) for field in raw_line.strip().split(sep))
        for key, value in zip(keys, parsed):
            columns[key].append(value)
    return columns
def plot(filename, feedback, save_image, y, x=None, *args):
    """ Plots the given values, optionally saving the figure to file

    Parameters:
        filename (str): name of the file where the plot is saved
        feedback (axes object or None): axes returned by a previous call to
            draw onto; when falsy, a new figure and axes are created
        save_image (bool): whether to save the figure to `filename`
        y (tuple): (label, values) pair for the y component
        x (tuple): (label, values) pair for the x component; defaults to
            0..len(y values)-1 with label 'x'

    Returns:
        axes object: an axes object that can be used to combine the plots
    """
    if not x:
        # Default x axis: one index per y value.
        x = ('x', [i for i in range(len(y[1]))])
    if feedback:
        #Adds the plot to the combined axes
        feedback.plot(x[1],y[1])
        if save_image:
            fig = feedback.get_figure()
            fig.savefig(filename)
        return feedback
    else:
        #Creates and plots the current figure
        fig = plt.figure()
        ax = fig.add_subplot(111)
        ax.plot(x[1],y[1])
        # NOTE(review): axis labels are only set when a fresh figure is
        # created, never on the reused `feedback` axes -- confirm intended.
        ax.set_xlabel(x[0])
        ax.set_ylabel(y[0])
        if save_image:
            fig.savefig(filename)
        return ax
| {
"repo_name": "smarisa/sdpt11",
"path": "neronet/scripts/example.py",
"copies": "2",
"size": "2671",
"license": "mit",
"hash": 8054393256256765000,
"line_mean": 32.8101265823,
"line_max": 78,
"alpha_frac": 0.6166229876,
"autogenerated": false,
"ratio": 4.016541353383459,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5633164340983459,
"avg_score": null,
"num_lines": null
} |
'''A module containing functions that allow one to scrape data from
the Bestiary pages of d20pfsrd.com and place it in a database'''
import argparse
import traceback
from lxml.html import parse
from core.builders.creature.d20pfsrd import build as d20_build
from core.builders.creature.dict import build as dict_build
from db.creatureDB import CreatureDB
__all__ = []
# --- Constants ---
# The maximum number of retries allowed when attempting to download
# a web page
MAX_ATTEMPTS = 3
# TODO: Content Collection Modes
MODE_3PP = 1 # collect 3rd party content only
MODE_ALL = 2 # collect all content
MODE_STANDARD = 0 # collect non-3rd party content only
# Each of these lists is used to filter content scraped from the
# Bestiary pages of d20pfsrd.com depending on the Content Collection
# Mode.
PROBLEM_LINKS = []
PROBLEM_SUFFIXES = []
THIRD_PARTY_PUBLISHERS = []
THIRD_PARTY_SUFFIXES = []
# --- Functions ---
def create_db_entries_from_csv(db_conn, file_name='CREATURES_SPECIAL.csv'):
    '''Creates a row in a CreatureDB object for each creature in a .csv file
    containing creature attributes as described in the documentation
    for this project

    :param db_conn: an open Connection object to a CreatureDB
    :param file_name: name of .csv file containing creature data
    '''
    creature_keys = []
    # 'with' guarantees the file is closed even if building or inserting a
    # creature raises (the original left the handle open on exception)
    with open(file_name, 'r') as creature_file:
        for next_line in creature_file:
            creature_features = next_line.strip().split(',')
            # the header line ('CR,...') provides the attribute names used
            # as keys for all subsequent rows
            if next_line.startswith('CR,'):
                creature_keys = creature_features
                continue
            # create Creature object
            creature_dict = dict(zip(creature_keys, creature_features))
            creature = dict_build(creature_dict)
            # add Creature object to database
            db_conn.add_creature(creature)
def create_db_entry_from_link(db_conn, link, mode=MODE_STANDARD):
    '''Attempts to create a row in a CreatureDB object using a link to a
    Creature page on d20pfsrd.com

    Downloads the page, and if it passes the content-mode filter, builds a
    Creature from it and inserts it. Retries the download up to MAX_ATTEMPTS
    times on I/O failure; other exceptions propagate immediately.

    :param db_conn: an open Connection object to a CreatureDB
    :param link: link to non-3rd party creature on d20pfsrd
    :param mode: the content collection mode set by the user
    :raises Exception: if every download attempt raised IOError
    '''
    for i in range(MAX_ATTEMPTS):
        try:
            html_tree = parse(link)
            root = html_tree.getroot()
            # if link is acceptable, create Creature entry in db
            if not is_problem_page(root, mode):
                creature = d20_build(root)
                db_conn.add_creature(creature)
        # if I/O exception raised, try again
        except IOError:
            continue
        # if successful, break out of loop
        else:
            break
    # for/else: runs only when all MAX_ATTEMPTS attempts failed with IOError
    else:
        raise Exception('ERROR: failed to download', link)
def get_creature_links(page, mode=MODE_STANDARD):
    '''Gets the list of links to all desired content on the given page

    :param page: link to Bestiary page on d20pfsrd
    :param mode: the content collection mode set by the user
    :returns: list of links to all desired content on page
    '''
    root = parse(page).getroot()
    creature_links = []
    # Examine every anchor nested inside a div on the page.
    for anchor in root.cssselect('div a'):
        href = anchor.get('href')
        if href is None:
            continue
        # Keep only monster-listing links that pass the mode filter.
        if 'monster-listings/' in href and not is_problem_link(href, mode):
            creature_links.append(href)
    return creature_links
def get_html_indeces():
    '''Gets the list of links to pages of creatures clustered by
    Challenge Rating (CR)

    :returns: list of index links read from INDEX.txt, one per line with
        trailing whitespace removed
    '''
    # 'with' guarantees the handle is closed (the original leaked it), and
    # the comprehension replaces the in-place enumerate/rstrip loop.
    with open('INDEX.txt', 'r') as index_file:
        return [line.rstrip() for line in index_file]
def is_3pp_link(link):
    '''Determines whether or not the provided link leads to 3rd party
    content

    :param link: string containing link to Bestiary page on d20pfsrd
    :returns: True if link leads to 3rd party content, False otherwise
    '''
    # A known 3rd-party suffix is enough to flag the link without a download.
    if link.endswith(tuple(THIRD_PARTY_SUFFIXES)):
        return True
    # Otherwise download the page and inspect its content.
    page_root = parse(link).getroot()
    return is_3pp_page(page_root)
def is_3pp_page(root):
    '''Determines whether or not the given HtmlElement node contains
    3rd party content

    :param root: root HtmlElement of a Bestiary page from d20pfsrd.com
    :returns: True if page contains 3rd party content, False otherwise
    '''
    # check if publisher is a 3rd-party publisher
    footers = root.cssselect('.sites-tile-name-footer')
    if footers:
        for footer in footers:
            footer_text = footer.text_content()
            # NOTE(review): u'\xc2' presumably matches a mis-encoded
            # copyright symbol in the scraped markup -- confirm against
            # the actual page encoding.
            if (u'\xc2' in footer_text or
                    '(c)' in footer_text or 'Copyright' in footer_text):
                for publisher in THIRD_PARTY_PUBLISHERS:
                    if publisher in footer_text:
                        return True
    # check if title indicates that creature has 3rd-party affiliation
    title_element = root.cssselect('title')
    title = title_element[0].text
    if title and '3pp' in title:
        return True
    return False
def is_problem_link(link, mode=MODE_STANDARD):
    '''Determines whether or not the provided link is a "problem"
    link

    In this context, a "problem" link is defined as one that
    leads to undesirable content.

    :param link: string containing link to Bestiary page on d20pfsrd
    :param mode: the content collection mode set by the user
    :returns: True if the link is a "problem" link, False otherwise
    '''
    # Links on the known-bad list, or with a known-bad suffix, are always
    # rejected regardless of mode.
    if any(bad_fragment in link for bad_fragment in PROBLEM_LINKS):
        return True
    if link.endswith(tuple(PROBLEM_SUFFIXES)):
        return True
    # Reject links whose 3rd-party status conflicts with the collection mode.
    third_party = is_3pp_link(link)
    if mode == MODE_STANDARD:
        return third_party
    if mode == MODE_3PP:
        return not third_party
    return False
def is_problem_page(root, mode=MODE_STANDARD):
    '''Determines whether or not the content in the provided HtmlElemnt
    node is desired

    :param root: root HtmlElement of a Bestiary page from d20pfsrd.com
    :param mode: the content collection mode set by the user
    :returns: True if content on page is not desired, False otherwise
    '''
    # Standard mode rejects 3rd-party pages; 3pp mode rejects everything else.
    if mode == MODE_STANDARD:
        return is_3pp_page(root)
    if mode == MODE_3PP:
        return not is_3pp_page(root)
    # Any other mode (e.g. MODE_ALL) accepts every page.
    return False
def load_list(file_name):
    '''Gets list of newline-separated strings from file

    :param file_name: file containing list of strings
    :returns: list of strings; a trailing newline in the file yields a final
        empty string, matching read().split('\\n') behaviour
    '''
    # 'with' guarantees the handle is closed even if read() raises
    # (the original closed it only on the success path)
    with open(file_name, 'r') as file_:
        return file_.read().split('\n')
# --- Script ---
# By default, if this module is executed as a script, it will try to
# build a database of non-3rd party Pathfinder creatures by scraping
# creature data from d20pfsrd.com
#
# The resulting database will be exported in both .db (SQLite 3) and
# .csv formats.
if __name__ == '__main__':
    # Load the filter lists used by is_problem_link()/is_3pp_page().
    THIRD_PARTY_PUBLISHERS = load_list('3PP.txt')
    THIRD_PARTY_SUFFIXES = load_list('LINKS_3PP_SUFFIXES.txt')
    PROBLEM_LINKS = load_list('LINKS_PROBLEM.txt')
    PROBLEM_SUFFIXES = load_list('LINKS_PROBLEM_SUFFIXES.txt')
    # default settings
    db_name = 'creature.db'
    cr_range = [0.0, float('inf')]
    cr_flag = False
    content_mode = MODE_STANDARD
    # create parser for command line arguments
    parser = argparse.ArgumentParser(description='Builds a creature database')
    # -argument- challenge rating storage mode
    parser.add_argument('-C', action='store_true',
                        help='store CR values as strings, not integers')
    # -argument- range of accepted challenge rating values
    parser.add_argument('--cr-range',
                        nargs=2, metavar=('MIN', 'MAX'), type=float,
                        help='sets valid range of CR values')
    # -argument- content collection mode
    # NOTE: the position of each choice matches the MODE_* constant values
    # (standard=0, 3pp=1, all=2), which the index() call below relies on.
    content_mode_choices = ['standard', '3pp', 'all']
    parser.add_argument('--content',
                        nargs=1, choices=content_mode_choices,
                        help='sets type of creatures in db')
    # parse command line arguments
    args = vars(parser.parse_args())
    # handle command line arguments
    for key in args:
        if key == 'C':
            cr_flag = args['C']
        if key == 'cr_range' and args['cr_range']:
            cr_range = args['cr_range']
        if key == 'content' and args['content']:
            content_mode = content_mode_choices.index(args['content'][0])
    # create sqlite3 database
    db_connection = CreatureDB(db_name, cr_flag)
    db_connection.min_cr = cr_range[0]
    db_connection.max_cr = cr_range[1]
    # add entries to creature db via links to pages on d20pfsrd.com
    try:
        # create creature db entry for each reachable link
        indeces = get_html_indeces()
        for index in indeces:
            links = get_creature_links(index, content_mode)
            # iterate over each link of the current index
            for creature_link in links:
                create_db_entry_from_link(db_connection, creature_link,
                                          content_mode)
        # create creature db entry for each link in special index
        # NOTE(review): this handle is never closed; a 'with' block would fix it
        special_index_file = open('INDEX_SPECIAL.txt', 'r')
        for line in special_index_file:
            create_db_entry_from_link(db_connection, line.strip(), content_mode)
    # scraping is best-effort: log the failure and fall through to the
    # .csv-based entries and the final export/commit below
    except Exception as e:
        traceback.print_exc()
    # add entries to creature database via .csv file
    if not content_mode == MODE_3PP:
        create_db_entries_from_csv(db_connection, 'CREATURES_SPECIAL.csv')
    if not content_mode == MODE_STANDARD:
        create_db_entries_from_csv(db_connection, '3PP_CREATURES_SPECIAL.csv')
    # clean up
    db_connection.export_as_csv()
    db_connection.commit_and_close()
| {
"repo_name": "lot9s/pathfinder-rpg-utils",
"path": "data-mining/bestiary/crawler.py",
"copies": "1",
"size": "10350",
"license": "mit",
"hash": 6754591112642021000,
"line_mean": 34.2040816327,
"line_max": 80,
"alpha_frac": 0.6418357488,
"autogenerated": false,
"ratio": 3.7870472008781557,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9863547174743704,
"avg_score": 0.013067154986890407,
"num_lines": 294
} |
"""A module containing `numpy`-specific plugins for mypy."""
from __future__ import annotations
import typing as t
import numpy as np
try:
import mypy.types
from mypy.types import Type
from mypy.plugin import Plugin, AnalyzeTypeContext
from mypy.nodes import MypyFile, ImportFrom, Statement
from mypy.build import PRI_MED
_HookFunc = t.Callable[[AnalyzeTypeContext], Type]
MYPY_EX: t.Optional[ModuleNotFoundError] = None
except ModuleNotFoundError as ex:
MYPY_EX = ex
__all__: t.List[str] = []
def _get_precision_dict() -> t.Dict[str, str]:
names = [
("_NBitByte", np.byte),
("_NBitShort", np.short),
("_NBitIntC", np.intc),
("_NBitIntP", np.intp),
("_NBitInt", np.int_),
("_NBitLongLong", np.longlong),
("_NBitHalf", np.half),
("_NBitSingle", np.single),
("_NBitDouble", np.double),
("_NBitLongDouble", np.longdouble),
]
ret = {}
for name, typ in names:
n: int = 8 * typ().dtype.itemsize
ret[f'numpy.typing._nbit.{name}'] = f"numpy._{n}Bit"
return ret
def _get_extended_precision_list() -> t.List[str]:
extended_types = [np.ulonglong, np.longlong, np.longdouble, np.clongdouble]
extended_names = {
"uint128",
"uint256",
"int128",
"int256",
"float80",
"float96",
"float128",
"float256",
"complex160",
"complex192",
"complex256",
"complex512",
}
return [i.__name__ for i in extended_types if i.__name__ in extended_names]
#: A dictionary mapping type-aliases in `numpy.typing._nbit` to
#: concrete `numpy.typing.NBitBase` subclasses.
#: Computed once at import time; the values are platform-specific.
_PRECISION_DICT: t.Final = _get_precision_dict()

#: A list with the names of all extended precision `np.number` subclasses.
#: Also computed once at import time for the current platform.
_EXTENDED_PRECISION_LIST: t.Final = _get_extended_precision_list()
def _hook(ctx: AnalyzeTypeContext) -> Type:
    """Replace a type-alias with a concrete ``NBitBase`` subclass.

    Invoked by mypy for fullnames registered in ``_PRECISION_DICT``; the
    alias (e.g. ``_NBitIntP``) is mapped to the platform-specific
    ``numpy._<n>Bit`` name and resolved via the type-analysis API.
    """
    typ, _, api = ctx
    # Only the final dotted component identifies the alias.
    name = typ.name.split(".")[-1]
    name_new = _PRECISION_DICT[f"numpy.typing._nbit.{name}"]
    return api.named_type(name_new)
if t.TYPE_CHECKING or MYPY_EX is None:
    # mypy imported successfully (or we are only type checking):
    # define the real plugin implementation.

    def _index(iterable: t.Iterable[Statement], id: str) -> int:
        """Identify the first ``ImportFrom`` instance the specified `id`."""
        for i, value in enumerate(iterable):
            if getattr(value, "id", None) == id:
                return i
        else:
            # for/else: the loop completed without returning, so no
            # statement with the requested id was found.
            raise ValueError("Failed to identify a `ImportFrom` instance "
                             f"with the following id: {id!r}")

    class _NumpyPlugin(Plugin):
        """A plugin for assigning platform-specific `numpy.number` precisions."""

        def get_type_analyze_hook(self, fullname: str) -> t.Optional[_HookFunc]:
            """Set the precision of platform-specific `numpy.number` subclasses.

            For example: `numpy.int_`, `numpy.longlong` and `numpy.longdouble`.
            """
            if fullname in _PRECISION_DICT:
                return _hook
            return None

        def get_additional_deps(self, file: MypyFile) -> t.List[t.Tuple[int, str, int]]:
            """Import platform-specific extended-precision `numpy.number` subclasses.

            For example: `numpy.float96`, `numpy.float128` and `numpy.complex256`.
            """
            ret = [(PRI_MED, file.fullname, -1)]
            if file.fullname == "numpy":
                # Import ONLY the extended precision types available to the
                # platform in question
                imports = ImportFrom(
                    "numpy.typing._extended_precision", 0,
                    names=[(v, v) for v in _EXTENDED_PRECISION_LIST],
                )
                imports.is_top_level = True

                # Replace the much broader extended-precision import
                # (defined in `numpy/__init__.pyi`) with a more specific one
                for lst in [file.defs, file.imports]:  # type: t.List[Statement]
                    i = _index(lst, "numpy.typing._extended_precision")
                    lst[i] = imports
            return ret

    def plugin(version: str) -> t.Type[_NumpyPlugin]:
        """An entry-point for mypy."""
        return _NumpyPlugin

else:
    def plugin(version: str) -> t.Type[_NumpyPlugin]:
        """An entry-point for mypy.

        mypy could not be imported, so re-raise the original import error
        when the plugin is loaded. The `_NumpyPlugin` annotation is safe
        here because annotations are lazy (`from __future__ import annotations`).
        """
        raise MYPY_EX
| {
"repo_name": "anntzer/numpy",
"path": "numpy/typing/mypy_plugin.py",
"copies": "6",
"size": "4408",
"license": "bsd-3-clause",
"hash": -3188405779034545000,
"line_mean": 32.6488549618,
"line_max": 88,
"alpha_frac": 0.5798548094,
"autogenerated": false,
"ratio": 3.704201680672269,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0006423222426890446,
"num_lines": 131
} |
"""A module containing tests for the library representation of Codelists."""
import copy
import pytest
from lxml import etree
import iati.codelists
class TestCodelistsNonClass:
    """Tests for codelist functionality that lives outside any class.

    Note:
        Functionality regarding mapping files used to be tested here; it was removed.
    """
class TestCodelists:
    """A container for tests relating to Codelists."""

    @pytest.fixture
    def name_to_set(self):
        """Set a name to give Codelists.

        Returns:
            str: Something that can be provided as a name to Codelists.
        """
        return "test Codelist name"

    def test_codelist_default_attributes(self):
        """Check a Codelist's default attributes are correct."""
        with pytest.raises(TypeError) as excinfo:
            iati.Codelist()  # pylint: disable=E1120

        # Two accepted message formats -- presumably covering different
        # Python versions' TypeError wording; confirm which are still needed.
        assert ('__init__() missing 1 required positional argument' in str(excinfo.value)) or ('__init__() takes at least 2 arguments' in str(excinfo.value))

    def test_codelist_name_instance(self, name_to_set):
        """Check a Codelist's attributes are correct when defined with only a name."""
        codelist = iati.Codelist(name_to_set)

        assert set() == codelist.codes
        assert codelist.name == name_to_set

    def test_codelist_add_code(self, name_to_set):
        """Check a Code can be added to a Codelist."""
        codelist = iati.Codelist(name_to_set)

        codelist.codes.add(iati.Code(''))

        num_codes = len(codelist.codes)

        assert num_codes == 1

    @pytest.mark.xfail
    def test_codelist_add_code_decline_non_code(self, name_to_set):
        """Check something that is not a Code cannot be added to a Codelist."""
        codelist = iati.Codelist(name_to_set)

        not_a_code = True
        codelist.codes.add(not_a_code)

        num_codes = len(codelist.codes)

        assert num_codes == 0

    def test_codelist_define_from_xml(self, name_to_set):
        """Check that a Codelist can be generated from an XML codelist definition."""
        path = iati.resources.create_codelist_path('BudgetType', '2.02')
        xml_str = iati.utilities.load_as_string(path)
        codelist = iati.Codelist(name_to_set, xml=xml_str)

        code_names = ['Original', 'Revised']
        code_values = ['1', '2']

        # The name parsed from the XML takes precedence over the name
        # supplied to the constructor.
        assert codelist.name == 'BudgetType'
        assert len(codelist.codes) == 2
        for code in codelist.codes:
            assert code.name in code_names
            assert code.value in code_values

    @pytest.mark.fixed_to_202
    def test_codelist_complete(self):
        """Check that a complete Codelist can be generated from an XML codelist definition."""
        codelist_name = 'BudgetType'
        path = iati.resources.create_codelist_path(codelist_name, '2.02')
        xml_str = iati.utilities.load_as_string(path)
        codelist = iati.Codelist(codelist_name, xml=xml_str)

        assert codelist.complete is True

    @pytest.mark.fixed_to_202
    def test_codelist_incomplete(self):
        """Check that an incomplete Codelist can be generated from an XML codelist definition."""
        codelist_name = 'Country'
        path = iati.resources.create_codelist_path(codelist_name, '2.02')
        xml_str = iati.utilities.load_as_string(path)
        codelist = iati.Codelist(codelist_name, xml=xml_str)

        assert codelist.complete is False

    def test_codelist_type_xsd(self, name_to_set):
        """Check that a Codelist can turn itself into a type to use for validation."""
        code_value_to_set = "test Code value"
        codelist = iati.Codelist(name_to_set)
        code = iati.Code(code_value_to_set)
        codelist.codes.add(code)

        type_tree = codelist.xsd_restriction

        # Expected structure: simpleType > restriction > enumeration
        assert isinstance(type_tree, etree._Element)  # pylint: disable=protected-access
        assert type_tree.tag == iati.constants.NAMESPACE + 'simpleType'
        assert type_tree.attrib['name'] == name_to_set + '-type'
        assert type_tree.nsmap == iati.constants.NSMAP
        assert len(type_tree) == 1

        assert type_tree[0].tag == iati.constants.NAMESPACE + 'restriction'
        assert type_tree[0].nsmap == iati.constants.NSMAP
        assert len(type_tree[0]) == 1

        assert type_tree[0][0].tag == iati.constants.NAMESPACE + 'enumeration'
        assert type_tree[0][0].attrib['value'] == code_value_to_set
        assert type_tree[0][0].nsmap == iati.constants.NSMAP
class TestCodes:
    """A container for tests relating to Codes."""

    def test_code_no_attributes(self):
        """Check a Code cannot be instantiated with no arguments."""
        with pytest.raises(TypeError):
            _ = iati.Code()  # pylint: disable=no-value-for-parameter

    def test_code_value_instance(self):
        """Check a Code's attributes are correct when being defined with only a value."""
        value_to_set = "test Code value"
        code = iati.Code(value_to_set)

        # A Code defined without a name defaults to an empty-string name.
        assert code.name == ''
        assert code.value == value_to_set

    def test_code_value_and_name_instance(self):
        """Check a Code's attributes are correct when being defined with a value and name."""
        value_to_set = "test Code value"
        name_to_set = "test Code name"
        code = iati.Code(value_to_set, name_to_set)

        assert code.name == name_to_set
        assert code.value == value_to_set

    def test_code_enumeration_element(self):
        """Check that a Code correctly outputs an enumeration element.

        Todo:
            Test enumerating a Code with no value.
        """
        value_to_set = "test Code value"
        code = iati.Code(value_to_set)
        enum_el = code.xsd_enumeration

        # The enumeration element carries the Code's value and the IATI
        # XML namespace mapping.
        assert isinstance(enum_el, etree._Element)  # pylint: disable=protected-access
        assert enum_el.tag == iati.constants.NAMESPACE + 'enumeration'
        assert enum_el.attrib['value'] == value_to_set
        assert enum_el.nsmap == iati.constants.NSMAP
class TestCodelistEquality:
    """A container for tests relating to Codelist equality - both direct and via hashing.

    The `cmp_func_*` fixtures are presumably provided by a shared conftest;
    each compares two objects for value equality and/or hash equality.
    """

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_same_object_equal(self, codelist, cmp_func_equal_val_and_hash):
        """Check that a Codelist is deemed to be equal with itself."""
        assert cmp_func_equal_val_and_hash(codelist, codelist)

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_same_diff_object_equal(self, codelist, cmp_func_equal_val_and_hash):
        """Check that two instances of the same Codelist are deemed to be equal."""
        codelist_copy = copy.deepcopy(codelist)

        assert cmp_func_equal_val_and_hash(codelist, codelist_copy)

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_diff_name_not_equal(self, codelist, cmp_func_different_val_and_hash):
        """Check that two different Codelists are not deemed to be equal.

        The two Codelists have different names, but are otherwise identical.
        """
        codelist_copy = copy.deepcopy(codelist)
        codelist_copy.name = codelist.name + 'with a difference'

        assert cmp_func_different_val_and_hash(codelist, codelist_copy)

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_diff_completeness_not_equal(self, codelist, cmp_func_different_val_and_hash):
        """Check that two different Codelists are not deemed to be equal.

        The two Codelists have different completeness, but are otherwise identical.
        """
        codelist_copy = copy.deepcopy(codelist)
        codelist_copy.complete = not codelist.complete

        assert cmp_func_different_val_and_hash(codelist, codelist_copy)

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_diff_num_codes_not_equal(self, codelist, cmp_func_different_val_and_hash):
        """Check that two different Codelists are not deemed to be equal.

        One Codelist contains a Code that the other does not, but they are otherwise identical.
        """
        codelist_copy = copy.deepcopy(codelist)
        codelist_copy.codes.add(iati.Code(''))

        assert cmp_func_different_val_and_hash(codelist, codelist_copy)

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_diff_code_name_not_equal(self, codelist, cmp_func_different_val):
        """Check that two different Codelists are not deemed to be equal.

        One contained Code has a different name, but the Codelists are otherwise identical.
        """
        codelist_copy = copy.deepcopy(codelist)
        # pop-modify-add: mutate one Code within the copied Codelist
        code = codelist_copy.codes.pop()
        code.name = code.name + 'with a difference'
        codelist_copy.codes.add(code)

        assert cmp_func_different_val(codelist, codelist_copy)

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_diff_code_name_same_hash(self, codelist, cmp_func_equal_hash):
        """Check that two not-equal Codelists are deemed to have the same hash.

        One contained Code has a different name, but the Codelists are otherwise identical.

        The hash should be the same since the important part of a `Code` is the `value` attribute. The name is not deemed to change its hash.
        """
        codelist_copy = copy.deepcopy(codelist)
        code = codelist_copy.codes.pop()
        code.name = code.name + 'with a difference'
        codelist_copy.codes.add(code)

        assert cmp_func_equal_hash(codelist, codelist_copy)

    @pytest.mark.parametrize('codelist', iati.default.codelists('2.02').values())
    def test_codelist_diff_code_value_not_equal(self, codelist, cmp_func_different_val_and_hash):
        """Check that two different Codelists are not deemed to be equal.

        One contained Code has a different value, but the Codelists are otherwise identical.
        """
        codelist_copy = copy.deepcopy(codelist)
        code = codelist_copy.codes.pop()
        code.value = code.value + 'with a difference'
        codelist_copy.codes.add(code)

        assert cmp_func_different_val_and_hash(codelist, codelist_copy)
| {
"repo_name": "IATI/iati.core",
"path": "iati/tests/test_codelists.py",
"copies": "1",
"size": "10333",
"license": "mit",
"hash": -5273858268014389000,
"line_mean": 39.36328125,
"line_max": 157,
"alpha_frac": 0.6602148456,
"autogenerated": false,
"ratio": 3.7089016511127064,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9861555513145053,
"avg_score": 0.0015121967135306319,
"num_lines": 256
} |
"""A module containing tests for the library representation of default values."""
import pytest
import iati.codelists
import iati.constants
import iati.default
import iati.schemas
import iati.tests.utilities
class TestDefault:
    """A container for tests relating to Default data."""

    @pytest.fixture(params=[
        iati.default.codelists,
        iati.default.codelist_mapping,
        iati.default.ruleset,
        iati.default.activity_schema,
        iati.default.organisation_schema
    ])
    def default_data_func_version_param(self, request):
        """Return a default data function that takes only a version as a parameter."""
        return request.param

    def test_invalid_version(self, std_ver_minor_uninst_valueerr_str_decimal, default_data_func_version_param):
        """Check that an invalid version causes an error when obtaining default data."""
        with pytest.raises(ValueError):
            default_data_func_version_param(std_ver_minor_uninst_valueerr_str_decimal)

    def test_major_version_matches_minor(self, std_ver_major_uninst_valid_known, default_data_func_version_param):
        """Check that specifying a major version returns the same info as the corresponding decimal."""
        # _decimalise_integer presumably maps a major version to its
        # corresponding decimal (minor) version -- confirm in iati.version.
        minor_version = iati.version._decimalise_integer(std_ver_major_uninst_valid_known)  # pylint: disable=protected-access

        assert default_data_func_version_param(std_ver_major_uninst_valid_known) == default_data_func_version_param(minor_version)
class TestDefaultCodelists:
"""A container for tests relating to default Codelists."""
@pytest.fixture(params=[
'Country', # Codelist that has always been Non-Embedded
'ActivityStatus', # Codelist that has always been Embedded
'ActivityScope', # Codelist migrated from Embedded to NE alongside 2.03
])
def codelist_name(self, request):
"""Return the name of a valid Codelist."""
request.applymarker(pytest.mark.latest_version('2.03'))
return request.param
@pytest.fixture
def codelists_with_no_name_codes(self):
"""Return the names of Codelists where Codes do not have names."""
return ['FileFormat', 'Version']
def test_invalid_version_single_codelist(self, std_ver_minor_uninst_valueerr_str_decimal, codelist_name):
"""Check that an invalid version causes an error when obtaining a single default Codelist.
Note:
This is a separate test since the function takes a parameter other than the `version`.
"""
with pytest.raises(ValueError):
iati.default.codelist(codelist_name, std_ver_minor_uninst_valueerr_str_decimal)
def test_default_codelist_valid_at_all_versions(self, codelist_name, std_ver_minor_mixedinst_valid_fullsupport):
"""Check that a named default Codelist may be located.
Todo:
Check internal values beyond the codelists being the correct type.
"""
codelist = iati.default.codelist(codelist_name, std_ver_minor_mixedinst_valid_fullsupport)
assert isinstance(codelist, iati.Codelist)
assert codelist.name == codelist_name
for code in codelist.codes:
assert isinstance(code, iati.Code)
@pytest.mark.parametrize("version, codelist_name, expected_type", [
('1.04', 'AidTypeFlag', iati.Codelist),
('1.05', 'AidTypeFlag', iati.Codelist),
('2.01', 'AidTypeFlag', ValueError),
('2.02', 'AidTypeFlag', ValueError),
('1.04', 'BudgetStatus', ValueError),
('1.05', 'BudgetStatus', ValueError),
('2.01', 'BudgetStatus', ValueError),
('2.02', 'BudgetStatus', iati.Codelist),
('2.03', 'BudgetStatus', iati.Codelist)
])
@pytest.mark.latest_version('2.03')
def test_default_codelist_valid_only_at_some_versions(self, codelist_name, version, expected_type):
"""Check that a codelist that is valid at some version/s is not valid in other versions.
Example:
AidTypeFlag was an embedded codelist in v1.04 and v1.05, but is not valid at any version after this.
For example, BudgetStatus was added as an embedded codelist in v2.02, so is not valid prior to this.
"""
try: # Note pytest.raises() is not used here in order to keep this test flexible for parameterization.
result = iati.default.codelist(codelist_name, version)
except ValueError as excinfo:
result = excinfo
assert isinstance(result, expected_type)
@pytest.mark.parametrize("name", iati.tests.utilities.generate_test_types(['str'], True))
def test_default_codelist_invalid_at_all_versions(self, name, std_ver_minor_mixedinst_valid_fullsupport):
"""Check that trying to find a default Codelist with an invalid name raises an error."""
with pytest.raises(ValueError) as excinfo:
iati.default.codelist(name, std_ver_minor_mixedinst_valid_fullsupport)
assert 'There is no default Codelist in version' in str(excinfo.value)
def test_default_codelists_type(self, codelist_lengths_by_version):
"""Check that the default Codelists are of the correct type.
Todo:
Switch from type-checking to behavior-checking, which is more Pythonic.
"""
codelists = iati.default.codelists(codelist_lengths_by_version.version)
assert isinstance(codelists, dict)
assert len(codelists.values()) == codelist_lengths_by_version.expected_length
for codelist in codelists.values():
assert isinstance(codelist, iati.Codelist)
for code in codelist.codes:
assert isinstance(code, iati.Code)
def test_default_codelists_codes_have_name(self, std_ver_minor_mixedinst_valid_fullsupport, codelists_with_no_name_codes):
    """Check that Codelists whose Codes should have names do have names.

    Codes in a Codelist should have a name. A small number of Codelists are
    excluded from this check because they are known not to have named Codes.
    """
    all_codelists = iati.default.codelists(std_ver_minor_mixedinst_valid_fullsupport)
    named_codelists = [
        cl for cl in all_codelists.values()
        if cl.name not in codelists_with_no_name_codes
    ]

    for cl in named_codelists:
        assert all(code.name != '' for code in cl.codes)
def test_default_codelists_no_name_codes_have_no_name(self, std_ver_minor_mixedinst_valid_fullsupport, codelists_with_no_name_codes):
    """Check that Codelists known to contain nameless Codes still have no names.

    Ideally all Codes would have a name. A couple of Codelists contain Codes
    that do not. This test is intended to identify the point in time that
    names are added upstream.
    """
    all_codelists = iati.default.codelists(std_ver_minor_mixedinst_valid_fullsupport)
    nameless_codelists = [
        cl for cl in all_codelists.values()
        if cl.name in codelists_with_no_name_codes
    ]

    for cl in nameless_codelists:
        assert all(code.name == '' for code in cl.codes)
def test_codelists_in_mapping_exist(self, std_ver_minor_inst_valid_fullsupport):
    """Check that Codelists named in a version's Codelist mapping file actually exist."""
    known_names = iati.default.codelists(std_ver_minor_inst_valid_fullsupport).keys()
    mapping = iati.default.codelist_mapping(std_ver_minor_inst_valid_fullsupport)

    # Every Codelist the mapping refers to must be one of the defaults.
    assert all(name in known_names for name in mapping.keys())
@pytest.mark.fixed_to_202
def test_codelist_mapping_condition(self):
    """Check that the Codelist mapping file is having conditions read.

    Todo:
        Split into multiple tests.

    """
    mapping = iati.default.codelist_mapping('2.02')

    # The Sector mapping carries a condition restricting it to the default
    # vocabulary, while the Version mapping has no condition at all.
    assert mapping['Sector'][0]['condition'] == "@vocabulary = '1' or not(@vocabulary)"
    assert mapping['Version'][0]['condition'] is None
def test_codelist_mapping_xpath(self, std_ver_minor_mixedinst_valid_fullsupport):
    """Check that the Codelist mapping file is being read for both org and activity mappings.

    Todo:
        Split into multiple tests.

    """
    mapping = iati.default.codelist_mapping(std_ver_minor_mixedinst_valid_fullsupport)
    currency_xpaths = [currency_mapping['xpath'] for currency_mapping in mapping['Currency']]

    # Currency attributes occur in both activity and organisation files, so
    # XPaths from both must be present in the mapping.
    expected_xpaths = [
        '//iati-activity/@default-currency',
        '//iati-activity/budget/value/@currency',
        '//iati-activity/crs-add/loan-status/@currency',
        '//iati-activity/fss/forecast/@currency',
        '//iati-activity/planned-disbursement/value/@currency',
        '//iati-activity/transaction/value/@currency',
        '//iati-organisation/@default-currency',
        '//iati-organisation/total-budget/value/@currency',
        '//iati-organisation/recipient-org-budget/value/@currency',
        '//iati-organisation/recipient-country-budget/value/@currency'
    ]

    for xpath in expected_xpaths:
        assert xpath in currency_xpaths
    # A Codelist name with no mappings must map to an empty list rather than erroring.
    assert mapping['InvalidCodelistName'] == []
def test_default_codelists_length(self, codelist_lengths_by_version):
    """Check that each version's default Codelists contain the expected number of Codelists."""
    expected_count = codelist_lengths_by_version.expected_length
    actual_count = len(iati.default.codelists(codelist_lengths_by_version.version))

    assert actual_count == expected_count
class TestDefaultRulesets:
    """A container for tests relating to default Rulesets."""

    def test_default_ruleset(self, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that the default Ruleset is correct.

        Todo:
            Check internal values beyond the Ruleset being the correct type.

        """
        ruleset = iati.default.ruleset(std_ver_minor_mixedinst_valid_fullsupport)

        assert isinstance(ruleset, iati.Ruleset)

    @pytest.mark.fixed_to_202
    def test_default_ruleset_validation_rules_valid(self, schema_ruleset):
        """Check that a fully valid IATI file does not raise any type of error (including rules/rulesets)."""
        data = iati.tests.resources.load_as_dataset('valid_std_ruleset', '2.02')
        result = iati.validator.full_validation(data, schema_ruleset)

        # Confirm the file is structurally sound before asserting Ruleset conformance.
        assert iati.validator.is_xml(data.xml_str)
        assert iati.validator.is_iati_xml(data, schema_ruleset)
        assert not result.contains_errors()

    @pytest.mark.parametrize("rule_error, invalid_dataset_name, info_text", [
        (
            'err-rule-at-least-one-conformance-fail',
            'ruleset-std/invalid_std_ruleset_missing_sector_element',
            'At least one of `sector` or `transaction/sector` must be present within each `//iati-activity`.'
        ),
        (
            'err-rule-date-order-conformance-fail',
            'ruleset-std/invalid_std_ruleset_bad_date_order',
            '`activity-date[@type=\'1\']/@iso-date` must be chronologically before `activity-date[@type=\'3\']/@iso-date` within each `//iati-activity`.'
        ),
        (
            'err-rule-regex-matches-conformance-fail',
            'ruleset-std/invalid_std_ruleset_bad_identifier',
            'Each instance of `reporting-org/@ref` and `iati-identifier` and `participating-org/@ref` and `transaction/provider-org/@ref` and `transaction/receiver-org/@ref` within each `//iati-activity` must match the regular expression `[^\\/\\&\\|\\?]+`.'  # noqa: disable=E501 # pylint: disable=line-too-long
        ),
        (
            'err-rule-sum-conformance-fail',
            'ruleset-std/invalid_std_ruleset_does_not_sum_100',
            'Within each `//iati-activity`, the sum of values matched at `recipient-country/@percentage` and `recipient-region/@percentage` must be `100`.'
        )
        # Note the Rules relating to 'dependent', 'no_more_than_one', 'regex_no_matches', 'startswith' and 'unique' are not used in the Standard Ruleset.
    ])
    @pytest.mark.fixed_to_202
    def test_default_ruleset_validation_rules_invalid(self, schema_ruleset, rule_error, invalid_dataset_name, info_text):
        """Check that the expected rule error is detected when validating files containing invalid data for that rule.

        Note:
            The fixed strings being checked here may be a tad annoying to maintain.
            `test_rule_string_output_general` and `test_rule_string_output_specific` in `test_rulesets.py` do something related for Rules. As such, something more generic may work better in the future.

        Todo:
            Consider whether this test should remove all warnings and assert that there is only the expected warning contained within the test file.
            Check that the expected missing elements appear in the help text for the given element.

        """
        data = iati.tests.resources.load_as_dataset(invalid_dataset_name, '2.02')
        result = iati.validator.full_validation(data, schema_ruleset)
        errors_for_rule_error = result.get_errors_or_warnings_by_name(rule_error)
        errors_for_ruleset = result.get_errors_or_warnings_by_name('err-ruleset-conformance-fail')

        # The file must be valid XML/IATI-XML, but fail overall validation
        # solely because of the single expected Rule failure.
        assert iati.validator.is_xml(data.xml_str)
        assert iati.validator.is_iati_xml(data, schema_ruleset)
        assert not iati.validator.is_valid(data, schema_ruleset)
        assert len(errors_for_rule_error) == 1
        assert len(errors_for_ruleset) == 1
        assert info_text in errors_for_rule_error[0].info
class TestDefaultSchemas:
    """A container for tests relating to default Schemas."""

    def test_default_activity_schemas(self, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that the default ActivitySchemas are correct.

        Todo:
            Check internal values beyond the schemas being the correct type.
            Test that unpopulated Schemas can be obtained with only partially supported versions.

        """
        schema = iati.default.activity_schema(std_ver_minor_mixedinst_valid_fullsupport)

        assert isinstance(schema, iati.ActivitySchema)

    def test_default_organisation_schemas(self, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that the default OrganisationSchemas are correct.

        Todo:
            Check internal values beyond the schemas being the correct type.
            Test that unpopulated Schemas can be obtained with only partially supported versions.

        """
        schema = iati.default.organisation_schema(std_ver_minor_mixedinst_valid_fullsupport)

        assert isinstance(schema, iati.OrganisationSchema)

    @pytest.mark.parametrize("population_status", [[], [True]])
    @pytest.mark.parametrize("schema_func", [
        iati.default.activity_schema,
        iati.default.organisation_schema
    ])
    def test_default_schemas_populated(self, population_status, schema_func, codelist_lengths_by_version):
        """Check that populated default Schemas contain the expected number of Codelists and a Ruleset."""
        # `population_status` is either empty (testing the default of a
        # populated Schema) or an explicit `True`.
        schema = schema_func(codelist_lengths_by_version.version, *population_status)

        assert len(schema.codelists) == codelist_lengths_by_version.expected_length
        assert len(schema.rulesets) == 1

    @pytest.mark.parametrize("schema_func", [
        iati.default.activity_schema,
        iati.default.organisation_schema
    ])
    def test_default_schemas_unpopulated(self, schema_func, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that unpopulated default Schemas contain no Codelists or Rulesets."""
        schema = schema_func(std_ver_minor_mixedinst_valid_fullsupport, False)

        assert schema.codelists == set()
        assert schema.rulesets == set()
class TestDefaultModifications:
    """A container for tests relating to the ability to modify defaults."""

    @pytest.fixture
    def codelist_name(self):
        """Return the name of a Codelist that exists at all versions of the Standard."""
        return 'Country'

    @pytest.fixture
    def codelist(self, request, codelist_name):
        """Return a default Codelist that is part of the IATI Standard."""
        request.applymarker(pytest.mark.fixed_to_202)

        return iati.default.codelist(codelist_name, '2.02')

    @pytest.fixture
    def codelist_non_default(self):
        """Return a Codelist that is not part of the IATI Standard."""
        return iati.Codelist('custom codelist')

    @pytest.fixture
    def new_code(self):
        """Return a Code object that has not been added to a Codelist."""
        return iati.Code('new code value', 'new code name')

    def test_default_codelist_modification(self, codelist_name, new_code, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that a default Codelist cannot be modified by adding Codes to returned lists."""
        default_codelist = iati.default.codelist(codelist_name, std_ver_minor_mixedinst_valid_fullsupport)
        base_default_codelist_length = len(default_codelist.codes)

        # Mutate the returned object, then fetch the default again: the
        # mutation must not leak into subsequent lookups.
        default_codelist.codes.add(new_code)
        unmodified_codelist = iati.default.codelist(codelist_name, std_ver_minor_mixedinst_valid_fullsupport)

        assert len(default_codelist.codes) == base_default_codelist_length + 1
        assert len(unmodified_codelist.codes) == base_default_codelist_length

    def test_default_codelists_modification(self, codelist_name, new_code, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that default Codelists cannot be modified by adding Codes to returned lists with default parameters."""
        default_codelists = iati.default.codelists(std_ver_minor_mixedinst_valid_fullsupport)
        codelist_of_interest = default_codelists[codelist_name]
        base_default_codelist_length = len(codelist_of_interest.codes)

        codelist_of_interest.codes.add(new_code)
        unmodified_codelists = iati.default.codelists(std_ver_minor_mixedinst_valid_fullsupport)
        unmodified_codelist_of_interest = unmodified_codelists[codelist_name]

        assert len(codelist_of_interest.codes) == base_default_codelist_length + 1
        assert len(unmodified_codelist_of_interest.codes) == base_default_codelist_length

    @pytest.mark.parametrize("default_call", [
        iati.default.activity_schema,
        iati.default.organisation_schema
    ])
    def test_default_x_schema_modification_unpopulated(self, default_call, codelist, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that unpopulated default Schemas cannot be modified.

        Note:
            Implementation is by attempting to add a Codelist to the Schema.

        """
        default_schema = default_call(std_ver_minor_mixedinst_valid_fullsupport, False)
        base_codelist_count = len(default_schema.codelists)

        default_schema.codelists.add(codelist)
        unmodified_schema = default_call(std_ver_minor_mixedinst_valid_fullsupport, False)

        assert len(default_schema.codelists) == base_codelist_count + 1
        assert len(unmodified_schema.codelists) == base_codelist_count

    @pytest.mark.parametrize("default_call", [
        iati.default.activity_schema,
        iati.default.organisation_schema
    ])
    def test_default_x_schema_modification_populated(self, default_call, codelist_non_default, std_ver_minor_mixedinst_valid_fullsupport):
        """Check that populated default Schemas cannot be modified.

        Note:
            Implementation is by attempting to add a Codelist to the Schema.

        """
        default_schema = default_call(std_ver_minor_mixedinst_valid_fullsupport, True)
        base_codelist_count = len(default_schema.codelists)

        default_schema.codelists.add(codelist_non_default)
        unmodified_schema = default_call(std_ver_minor_mixedinst_valid_fullsupport, True)

        assert len(default_schema.codelists) == base_codelist_count + 1
        assert len(unmodified_schema.codelists) == base_codelist_count
| {
"repo_name": "IATI/iati.core",
"path": "iati/tests/test_default.py",
"copies": "1",
"size": "20111",
"license": "mit",
"hash": -5403593243775354000,
"line_mean": 47.34375,
"line_max": 312,
"alpha_frac": 0.6844512953,
"autogenerated": false,
"ratio": 3.851944072016855,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0021877570850252904,
"num_lines": 416
} |
"""A module containing tests for the library representation of IATI data.
Todo:
Implement tests for strict checking once validation work is underway.
"""
import collections
import math
from lxml import etree
import pytest
import iati.data
import iati.default
import iati.tests.utilities
class TestDatasets:
    """A container for tests relating to Datasets."""

    @pytest.fixture
    def dataset_initialised(self):
        """Return an initialised Dataset to work from in other tests."""
        # NOTE(review): this relies on `iati.tests.resources` being importable;
        # only `iati.tests.utilities` is imported explicitly at the top of the
        # file — confirm the package makes `resources` available transitively.
        return iati.tests.resources.load_as_dataset('valid_not_iati')

    def test_dataset_no_params(self):
        """Test Dataset creation with no parameters."""
        with pytest.raises(TypeError) as excinfo:
            iati.Dataset()  # pylint: disable=E1120

        # The TypeError message differs between Python 3 and Python 2.
        assert ('__init__() missing 1 required positional argument' in str(excinfo.value)) or ('__init__() takes exactly 2 arguments' in str(excinfo.value))

    def test_dataset_empty_string(self):
        """Test Dataset creation with an empty string."""
        with pytest.raises(ValueError):
            _ = iati.Dataset('')

    def test_dataset_valid_xml_string(self):
        """Test Dataset creation with a valid XML string that is not IATI data."""
        xml_str = iati.tests.resources.load_as_string('valid_not_iati')

        data = iati.Dataset(xml_str)

        assert data.xml_str == xml_str.strip()
        assert etree.tostring(data.xml_tree) == etree.tostring(iati.tests.utilities.XML_TREE_VALID)

    def test_dataset_xml_string_leading_whitespace(self):
        """Test Dataset creation with a valid XML string that has leading whitespace."""
        xml_str = iati.tests.resources.load_as_string('leading_whitespace_xml')

        data = iati.Dataset(xml_str)
        tree = etree.fromstring(xml_str.strip())

        assert data.xml_str == xml_str.strip()
        assert etree.tostring(data.xml_tree) == etree.tostring(tree)

    def test_dataset_valid_iati_string(self):
        """Test Dataset creation with a valid IATI XML string.

        Todo:
            Implement this function.

        """
        pass

    def test_dataset_invalid_xml_string(self):
        """Test Dataset creation with a string that is not valid XML."""
        with pytest.raises(iati.exceptions.ValidationError) as excinfo:
            iati.Dataset(iati.tests.resources.load_as_string('invalid'))

        assert excinfo.value.error_log.contains_error_called('err-not-xml-empty-document')

    @pytest.mark.parametrize("not_xml", iati.tests.utilities.generate_test_types(['bytes', 'str'], True))
    def test_dataset_not_xml(self, not_xml):
        """Test Dataset creation when it's passed a type that is not a string or etree."""
        with pytest.raises(TypeError) as excinfo:
            iati.Dataset(not_xml)

        assert 'Datasets can only be ElementTrees or strings containing valid XML, using the xml_tree and xml_str attributes respectively. Actual type:' in str(excinfo.value)

    def test_dataset_tree(self):
        """Test Dataset creation with an etree that is not valid IATI data."""
        tree = iati.tests.utilities.XML_TREE_VALID

        data = iati.Dataset(tree)

        assert etree.tostring(data.xml_tree, pretty_print=True) == etree.tostring(tree, pretty_print=True)
        assert data.xml_str == etree.tostring(tree, pretty_print=True)

    def test_dataset_iati_tree(self):
        """Test Dataset creation with a valid IATI etree.

        Todo:
            Implement this function.

        """
        pass

    def test_dataset_xml_str_assignment_valid_str(self, dataset_initialised):
        """Test assignment to the xml_str property with a valid XML string.

        Todo:
            Check that the tree is updated correctly.

        """
        xml_str = iati.tests.resources.load_as_string('valid_not_iati')
        data = dataset_initialised

        data.xml_str = xml_str

        assert data.xml_str == xml_str.strip()

    def test_dataset_xml_str_assignment_invalid_str(self, dataset_initialised):
        """Test assignment to the xml_str property with an invalid XML string."""
        xml_str = iati.tests.resources.load_as_string('invalid')
        data = dataset_initialised

        with pytest.raises(iati.exceptions.ValidationError) as excinfo:
            data.xml_str = xml_str

        # Bug fix: the original called `contains_error_called()` without
        # asserting its result, so the check could never fail.
        assert excinfo.value.error_log.contains_error_called('err-not-xml-empty-document')

    def test_dataset_xml_str_assignment_tree(self, dataset_initialised):
        """Test assignment to the xml_str property with an ElementTree."""
        data = dataset_initialised

        with pytest.raises(TypeError) as excinfo:
            data.xml_str = iati.tests.utilities.XML_TREE_VALID

        assert str(excinfo.value) == 'If setting a Dataset with an ElementTree, use the xml_tree property, not the xml_str property.'

    @pytest.mark.parametrize("invalid_value", iati.tests.utilities.generate_test_types(['bytes', 'str']))
    def test_dataset_xml_str_assignment_invalid_value(self, dataset_initialised, invalid_value):
        """Test assignment to the xml_str property with a string value that is not valid XML."""
        data = dataset_initialised

        with pytest.raises(ValueError):
            data.xml_str = invalid_value

    @pytest.mark.parametrize("invalid_type", iati.tests.utilities.generate_test_types(['bytes', 'str'], True))
    def test_dataset_xml_str_assignment_invalid_type(self, dataset_initialised, invalid_type):
        """Test assignment to the xml_str property with a type that is not a string at all."""
        data = dataset_initialised

        with pytest.raises(TypeError) as excinfo:
            data.xml_str = invalid_type

        assert 'Datasets can only be ElementTrees or strings containing valid XML, using the xml_tree and xml_str attributes respectively. Actual type:' in str(excinfo.value)

    def test_dataset_xml_tree_assignment_valid_tree(self, dataset_initialised):
        """Test assignment to the xml_tree property with a valid ElementTree.

        Todo:
            Check that the xml_tree attribute is updated to the new tree.

        """
        data = dataset_initialised

        data.xml_tree = iati.tests.utilities.XML_TREE_VALID

        assert data.xml_str == etree.tostring(iati.tests.utilities.XML_TREE_VALID, pretty_print=True)

    def test_dataset_xml_tree_assignment_invalid_tree(self, dataset_initialised):
        """Test assignment to the xml_tree property with an invalid ElementTree.

        Todo:
            Create an invalid tree and test it.

        """
        pass

    def test_dataset_xml_tree_assignment_str(self, dataset_initialised):
        """Test assignment to the xml_tree property with an XML string."""
        xml_str = iati.tests.resources.load_as_string('valid_not_iati')
        data = dataset_initialised

        with pytest.raises(TypeError) as excinfo:
            data.xml_tree = xml_str

        assert 'If setting a Dataset with the xml_property, an ElementTree should be provided, not a' in str(excinfo.value)

    @pytest.mark.parametrize("invalid_value", iati.tests.utilities.generate_test_types(['str'], True))
    def test_dataset_xml_tree_assignment_invalid_value(self, dataset_initialised, invalid_value):
        """Test assignment to the xml_tree property with a value that is not an ElementTree."""
        data = dataset_initialised

        with pytest.raises(TypeError) as excinfo:
            data.xml_tree = invalid_value

        assert 'If setting a Dataset with the xml_property, an ElementTree should be provided, not a' in str(excinfo.value)
class TestDatasetWithEncoding:
    """A container for tests relating to creating a Dataset from various types of input.

    This may be files vs strings, or may revolve around character encoding.
    """

    # Template XML with a `str.format()` placeholder for the declared encoding.
    BASE_XML_NEEDING_ENCODING = """<?xml version="1.0" encoding="{}"?>
    <iati-activities version="xx">
        <iati-activity>
            <iati-identifier></iati-identifier>
        </iati-activity>
    </iati-activities>"""

    @pytest.fixture(params=[
        BASE_XML_NEEDING_ENCODING,
        BASE_XML_NEEDING_ENCODING + '\n',  # trailing newline
        BASE_XML_NEEDING_ENCODING + ' '  # trailing space
    ])
    def xml_needing_encoding(self, request):
        """An XML string with a placeholder for an encoding through use of `str.format()`."""
        return request.param

    @pytest.fixture(params=[
        BASE_XML_NEEDING_ENCODING,
        '\n' + BASE_XML_NEEDING_ENCODING,  # leading newline
        ' ' + BASE_XML_NEEDING_ENCODING,  # leading space
        BASE_XML_NEEDING_ENCODING + '\n',  # trailing newline
        BASE_XML_NEEDING_ENCODING + ' '  # trailing space
    ])
    def xml_needing_encoding_use_as_str(self, request):
        """An XML string with a placeholder for an encoding through use of `str.format()`.

        Some values work when used as a `str`, but not as `bytes` — hence the
        extra leading-whitespace variants compared to `xml_needing_encoding`.
        """
        return request.param

    def test_instantiation_dataset_from_string(self):
        """Test that a Dataset instantiated directly from a string (rather than a file) correctly creates an iati.data.Dataset and the input data is contained within the object."""
        xml_str = """<?xml version="1.0"?>
        <iati-activities version="xx">
            <iati-activity>
                <iati-identifier></iati-identifier>
            </iati-activity>
        </iati-activities>"""

        dataset = iati.data.Dataset(xml_str)

        assert isinstance(dataset, iati.data.Dataset)
        assert dataset.xml_str == xml_str

    def test_instantiation_dataset_from_string_with_encoding(self, xml_needing_encoding_use_as_str):
        """Test that a `str` carrying an XML encoding declaration is rejected.

        A declared encoding only makes sense on `bytes` input, so passing it
        within a `str` must raise a ValidationError.
        """
        xml = xml_needing_encoding_use_as_str.format('UTF-8')

        with pytest.raises(iati.exceptions.ValidationError) as validation_err:
            iati.data.Dataset(xml)

        assert len(validation_err.value.error_log) == 1
        assert validation_err.value.error_log.contains_error_called('err-encoding-in-str')

    @pytest.mark.parametrize("encoding", [
        "UTF-8",
        "utf-8",
        "UTF-16",
        "utf-16",
        "UTF-32",
        "utf-32",
        "ASCII",
        "ISO-8859-1",
        "ISO-8859-2",
        "BIG5",
        "EUC-JP"
    ])
    def test_instantiation_dataset_from_encoded_string_with_encoding(self, xml_needing_encoding, encoding):
        """Test that an encoded Dataset instantiated directly from an encoded string (rather than a file) correctly creates an iati.data.Dataset and the input data is contained within the object.

        Note:
            The use of UTF-8 and UTF-16 is strongly recommended for IATI datasets, however other encodings are specificed here to demonstrate compatibility.

        """
        xml = xml_needing_encoding.format(encoding)
        xml_encoded = xml.encode(encoding)  # Encode the whole string in line with the specified encoding

        dataset = iati.data.Dataset(xml_encoded)

        assert isinstance(dataset, iati.data.Dataset)
        assert dataset.xml_str == xml_encoded.strip()

    @pytest.mark.parametrize("encoding_declared, encoding_used", [
        ("UTF-16", "UTF-8"),
        ("UTF-16", "ISO-8859-1"),
        ("UTF-16", "ASCII"),
        ("UTF-16", "BIG5"),
        ("UTF-16", "EUC-JP")
    ])
    def test_instantiation_dataset_from_encoded_string_with_encoding_mismatch(self, xml_needing_encoding, encoding_declared, encoding_used):
        """Test that an error is raised when attempting to create a Dataset where an encoded string is encoded significantly differently from what is defined within the XML encoding declaration.

        Todo:
            Amend error message, when the todo in iati.data.Dataset.xml_str() has been resolved.

        Note:
            There are a number of other errors that may be raised with alternative encoding mismatches. These are not supported since it does not appear likely enough that they will occur and be a large issue in practice.
            This is due to a pair of issues with libxml2 (the underlying library behind lxml):
            1. It only supports a limited number of encodings out-of-the-box.
            2. Different encoding pairs (whether supported or unsupported by libxml2; byte-equivalent-subsets or distinct encodings; and more), will return different error codes in what one would expect to act as equivalent situations.

        """
        xml = xml_needing_encoding.format(encoding_declared)
        xml_encoded = xml.encode(encoding_used)  # Encode the whole string in line with the specified encoding

        with pytest.raises(iati.exceptions.ValidationError) as excinfo:
            _ = iati.data.Dataset(xml_encoded)

        assert excinfo.value.error_log.contains_error_called('err-encoding-invalid')

    @pytest.mark.parametrize("encoding", ["CP424"])
    def test_instantiation_dataset_from_encoded_string_with_unsupported_encoding(self, xml_needing_encoding, encoding):
        """Test that an error is raised when the declared encoding is one that the XML parser does not support.

        Todo:
            Amend error message, when the todo in iati.data.Dataset.xml_str() has been resolved.

        """
        xml = xml_needing_encoding.format(encoding)
        xml_encoded = xml.encode(encoding)  # Encode the whole string in line with the specified encoding

        with pytest.raises(iati.exceptions.ValidationError) as excinfo:
            _ = iati.data.Dataset(xml_encoded)

        assert excinfo.value.error_log.contains_error_called('err-encoding-unsupported')
class TestDatasetSourceFinding:
    """A container for tests relating to finding source context within a Dataset."""

    @pytest.fixture(params=[
        iati.tests.resources.load_as_dataset('valid_not_iati'),
        iati.tests.resources.load_as_dataset('valid_iati', '2.02')
    ])
    def data(self, request):
        """A Dataset to test."""
        request.applymarker(pytest.mark.fixed_to_202)

        return request.param

    @pytest.fixture
    def split_xml_str(self, data):
        """The XML from the provided Dataset, split by line.

        A leading empty string pads the list so indices are 1-based,
        matching lxml's 1-based `sourceline` numbering.
        """
        return [''] + data.xml_str.split('\n')

    @pytest.fixture
    def num_lines_xml(self, split_xml_str):
        """The number of lines in the XML string (including the 1-based padding entry)."""
        return len(split_xml_str)

    def test_dataset_xml_str_source_at_line_valid_line_number(self, data, split_xml_str):
        """Test obtaining source of a particular line. Line numbers are valid."""
        for idx, line in enumerate(split_xml_str):
            assert data.source_at_line(idx) == line.strip()

    @pytest.mark.parametrize("line_el_pair", [
        {'line': 3, 'el': '//parent'},
        {'line': 4, 'el': '//child'},
        {'line': 5, 'el': '//another-child'},
        {'line': 7, 'el': '//sub-child'}
    ])
    def test_dataset_xml_str_source_at_line_matches_tree(self, line_el_pair):
        """Test obtaining source of a particular line. Line numbers are valid.

        Ensure that the line numbers from which source is being returned are the same ones provided by the `sourceline` attribute from tree elements.
        """
        data = iati.tests.resources.load_as_dataset('valid_not_iati')
        split_xml_str = [''] + data.xml_str.split('\n')
        line_num = line_el_pair['line']
        el_from_tree = data.xml_tree.xpath(line_el_pair['el'])[0]
        # Only the first line of the serialised element is relevant: the
        # element may have children spanning further lines.
        str_from_tree = etree.tostring(el_from_tree, pretty_print=True).strip().decode('utf-8').split('\n')[0]

        assert el_from_tree.sourceline == line_num
        assert data.source_at_line(line_num) == str_from_tree
        assert data.source_at_line(line_num) == split_xml_str[line_num].strip()

    def test_dataset_xml_str_source_at_line_invalid_line_number(self, data, num_lines_xml):
        """Test obtaining source of a particular line. Line numbers are not valid."""
        with pytest.raises(ValueError):
            data.source_at_line(-1)

        with pytest.raises(ValueError):
            data.source_at_line(num_lines_xml)

    @pytest.mark.parametrize("invalid_value", iati.tests.utilities.generate_test_types(['int'], True))
    def test_dataset_xml_str_source_at_line_invalid_line_type(self, invalid_value, data):
        """Test obtaining source of a particular line. Line numbers are not integers."""
        with pytest.raises(TypeError):
            data.source_at_line(invalid_value)

    def test_dataset_xml_str_source_around_line_valid_line_number(self, data, split_xml_str, num_lines_xml):
        """Test obtaining source around a particular line.

        The line is in the middle of an XML document so that there will be full context both before and after the specified line number.
        Line numbers are valid.
        Uses the default number of surrounding context lines.
        """
        for line_num in range(2, num_lines_xml):
            # Default context is one line either side of the requested line.
            desired_source = '\n'.join(split_xml_str[line_num - 1:line_num + 2])
            actual_source = data.source_around_line(line_num)

            assert actual_source == desired_source

    def test_dataset_xml_str_source_around_line_valid_line_number_custom_context(self, data, split_xml_str, num_lines_xml):
        """Test obtaining source around a particular line.

        The lines are in the middle of an XML document so that there will be full context both before and after the specified line number.
        Line numbers are valid.
        Uses a custom number of surrounding context lines.
        """
        for context_lines in range(1, math.ceil(num_lines_xml / 2)):
            for line_num in range(context_lines, num_lines_xml - context_lines):
                # `max(..., 1)` avoids slicing into the padding entry at index 0.
                desired_source = '\n'.join(split_xml_str[max(line_num - context_lines, 1):line_num + context_lines + 1])
                actual_source = data.source_around_line(line_num, context_lines)

                assert actual_source == desired_source

    def test_dataset_xml_str_source_around_line_first_line(self, data, split_xml_str):
        """Test obtaining source around a particular line.

        The line is at the start of an XML document such that there will not be full context before the specified line, but will be afterwards.
        Line numbers are valid.
        Uses the default number of surrounding context lines.
        """
        assert data.source_around_line(0) == '\n'.join(split_xml_str[1:2])

    def test_dataset_xml_str_source_around_line_early_line_custom_context(self, data, split_xml_str, num_lines_xml):
        """Test obtaining source around a particular line.

        The lines are around the start of an XML document such that there will not be full context before the specified line, but will be afterwards.
        Line numbers are valid.
        Uses a custom number of surrounding context lines.
        """
        for context_lines in range(1, math.ceil(num_lines_xml / 2)):
            for line_num in range(0, context_lines):
                desired_source = '\n'.join(split_xml_str[1:line_num + context_lines + 1])
                actual_source = data.source_around_line(line_num, context_lines)

                assert actual_source == desired_source

    def test_dataset_xml_str_source_around_line_last_line(self, data, split_xml_str, num_lines_xml):
        """Test obtaining source around a particular line.

        The line is at the end of an XML document such that there will not be full context after the specified line, but will be before.
        Line numbers are valid.
        Uses the default number of surrounding context lines.
        """
        assert data.source_around_line(num_lines_xml - 1) == '\n'.join(split_xml_str[-2:])

    def test_dataset_xml_str_source_around_line_late_line_custom_context(self, data, split_xml_str, num_lines_xml):
        """Test obtaining source around a particular line.

        The lines are around the end of an XML document such that there will not be full context after the specified line, but will be before.
        Line numbers are valid.
        Uses a custom number of surrounding context lines.
        """
        for context_lines in range(1, math.ceil(num_lines_xml / 2)):
            for line_num in range(0, context_lines):
                desired_source = '\n'.join(split_xml_str[-(line_num + context_lines + 1):])
                actual_source = data.source_around_line(num_lines_xml - line_num - 1, context_lines)

                assert actual_source == desired_source

    def test_dataset_xml_str_source_around_line_single_line(self, data, split_xml_str, num_lines_xml):
        """Test obtaining source around a particular line.

        The context is such that only the specified line will be returned.
        """
        for line_num in range(0, num_lines_xml):
            assert data.source_around_line(line_num, 0) == split_xml_str[line_num]
            assert data.source_around_line(line_num, 0).strip() == data.source_at_line(line_num)

    def test_dataset_xml_str_source_around_line_full_file(self, data, num_lines_xml):
        """Test obtaining source around a particular line.

        The context is such that the full file will be returned.
        """
        line_num = int(num_lines_xml / 2)
        context_lines = num_lines_xml

        assert data.source_around_line(line_num, context_lines) == data.xml_str

    def test_dataset_xml_str_source_around_line_negative_context_lines(self, data, num_lines_xml):
        """Test obtaining source around a particular line.

        The number of context lines is negative.
        """
        for line_num in range(0, num_lines_xml):
            with pytest.raises(ValueError):
                data.source_around_line(line_num, -1)

    @pytest.mark.parametrize("invalid_value", iati.tests.utilities.generate_test_types(['int'], True))
    def test_dataset_xml_str_source_around_line_invalid_context_lines(self, invalid_value, data, num_lines_xml):
        """Test obtaining source of a particular line.

        The specified number of context lines is not an integer.
        """
        for line_num in range(0, num_lines_xml):
            with pytest.raises(TypeError):
                data.source_around_line(line_num, invalid_value)
class TestDatasetVersionDetection:
    """A container for tests relating to detecting the version of a Dataset."""

    @pytest.fixture(params=[
        ('iati-activities', 'iati-activity'),
        ('iati-organisations', 'iati-organisation')
    ])
    def iati_tag_names(self, request):
        """Return the tag names for an activity or organisation dataset."""
        output = collections.namedtuple('output', 'root_element child_element')
        return output(root_element=request.param[0], child_element=request.param[1])

    def test_detect_version_v1_simple(self, iati_tag_names, std_ver_minor_inst_valid_known_v1):
        """Check that a version 1 Dataset is detected correctly.

        Also checks that version numbers containing whitespace do not affect version detection.
        """
        data = iati.Dataset("""
        <{0} version="{2}">
            <{1} version="{2}"></{1}>
            <{1} version="{2} "></{1}>
            <{1} version=" {2}"></{1}>
            <{1} version=" {2} "></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element, std_ver_minor_inst_valid_known_v1))

        result = data.version

        assert result == std_ver_minor_inst_valid_known_v1

    def test_detect_version_explicit_parent_mismatch_explicit_child(self, iati_tag_names):
        """Check that no version is detected for a v1 Dataset where a version within the `iati-activities` element does not match the versions specified within all `iati-activity` child elements."""
        data = iati.Dataset("""
        <{0} version="1.02">
            <{1} version="1.02"></{1}>
            <{1} version="1.03"></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element))

        result = data.version

        assert result is None

    def test_detect_version_implicit_parent_matches_implicit_child(self, iati_tag_names):
        """Check that the default version is detected for a Dataset where no versions are declared (i.e. the default version is assumed for all `iati-activities` and `iati-activity` child elements)."""
        data = iati.Dataset("""
        <{0}>
            <{1}></{1}>
            <{1}></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element))

        result = data.version

        assert result == iati.Version('1.01')

    def test_detect_version_explicit_parent_matches_implicit_child(self, iati_tag_names):
        """Check that the default version is detected for a Dataset with the default version explicitly defined at `iati-activities` level, but where all `iati-activity` child elements are not defined (i.e. the default version is assumed)."""
        data = iati.Dataset("""
        <{0} version="1.01">
            <{1}></{1}>
            <{1}></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element))

        result = data.version

        assert result == iati.Version('1.01')

    def test_detect_version_implicit_parent_matches_explicit_and_implicit_child(self, iati_tag_names):
        """Check that the default version is detected for a Dataset with no version not defined at `iati-activities` level (i.e. the default version is assumed), but where at least one `iati-activity` child element has the default version defined."""
        data = iati.Dataset("""
        <{0}>
            <{1} version="1.01"></{1}>
            <{1}></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element))

        result = data.version

        assert result == iati.Version('1.01')

    def test_detect_version_explicit_parent_mismatch_implicit_child(self, iati_tag_names):
        """Check that no version is detected for a Dataset that has a non-default version defined at the `iati-activities` level, but no version is defined in any `iati-activity` child element (i.e. the default version is assumed)."""
        data = iati.Dataset("""
        <{0} version="1.02">
            <{1}></{1}>
            <{1}></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element))

        result = data.version

        assert result is None

    # NOTE(review): renamed from `test_detect_version_imlicit_...` to fix the
    # "imlicit" typo; pytest still collects it (the `test_` prefix is intact).
    def test_detect_version_implicit_parent_mismatch_explicit_child(self, iati_tag_names):
        """Check that no version is detected for a Dataset that has no version defined at the `iati-activities` level (i.e. the default version is assumed), but at least one non-default version is defined in any `iati-activity` child element."""
        data = iati.Dataset("""
        <{0}>
            <{1} version="1.02"></{1}>
            <{1}></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element))

        result = data.version

        assert result is None

    def test_detect_version_v2_simple(self, iati_tag_names, std_ver_minor_inst_valid_known_v2):
        """Check that a version 2 Dataset is detected correctly."""
        data = iati.Dataset("""
        <{0} version="{2}">
            <{1}></{1}>
            <{1}></{1}>
        </{0}>
        """.format(iati_tag_names.root_element, iati_tag_names.child_element, std_ver_minor_inst_valid_known_v2))

        result = data.version

        assert result == std_ver_minor_inst_valid_known_v2

    @pytest.mark.fixed_to_202
    def test_cannot_assign_to_version_property(self):
        """Check that it is not possible to assign to the `version` property."""
        data = iati.tests.resources.load_as_dataset('valid_iati', '2.02')

        with pytest.raises(AttributeError) as excinfo:
            data.version = 'test'

        assert "can't set attribute" in str(excinfo.value)
| {
"repo_name": "IATI/iati.core",
"path": "iati/tests/test_data.py",
"copies": "1",
"size": "28049",
"license": "mit",
"hash": 5162484489800089000,
"line_mean": 44.906710311,
"line_max": 250,
"alpha_frac": 0.6515383793,
"autogenerated": false,
"ratio": 3.8624345910217572,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0016489815053031787,
"num_lines": 611
} |
"""A module containing tests for the pyIATI representation of Standard metadata."""
import copy
import math
import operator
import pytest
import iati.tests.utilities
from iati.tests.fixtures.versions import iativer, semver, split_decimal, split_iativer, split_semver
class TestVersionInit:
    """A container for tests relating to initialisation of Standard Versions."""

    def test_version_no_params(self):
        """Test Version creation with no parameters."""
        with pytest.raises(TypeError):
            iati.Version()  # pylint: disable=E1120

    def test_version_not_string(self, std_ver_minor_uninst_typeerr):
        """Test Version creation with a non-string."""
        with pytest.raises(TypeError) as excinfo:
            iati.Version(std_ver_minor_uninst_typeerr)

        assert 'A Version object must be created from a string or Decimal, not a ' in str(excinfo.value)
        assert str(type(std_ver_minor_uninst_typeerr)) in str(excinfo.value)

    def test_version_supported_iati_versions(self, std_ver_minor_uninst_valid_fullsupport):
        """Test Version creation with supported IATI version numbers."""
        iati.Version(std_ver_minor_uninst_valid_fullsupport)

    def test_version_valid_decimal(self, std_ver_minor_uninst_valid_decimal_possible):
        """Test Version creations with valid decimal version numbers."""
        integer_component, decimal_component = split_decimal(std_ver_minor_uninst_valid_decimal_possible)

        version = iati.Version(std_ver_minor_uninst_valid_decimal_possible)

        # IATIver decimal components are one higher than SemVer minors.
        assert version.integer == integer_component
        assert version.major == integer_component
        assert version.decimal == decimal_component
        assert version.minor == decimal_component - 1
        assert version.patch == 0

    def test_version_invalid_float(self, std_ver_minor_uninst_valid_decimal_possible):
        """Test Version creation with a float that would be valid as a Decimal."""
        float_version = float(std_ver_minor_uninst_valid_decimal_possible)

        with pytest.raises(TypeError):
            iati.Version(float_version)

    def test_version_invalid_decimal(self, std_ver_minor_uninst_valueerr_decimal):
        """Test Version creation with a Decimal that is not a valid decimal version number."""
        with pytest.raises(ValueError) as excinfo:
            iati.Version(std_ver_minor_uninst_valueerr_decimal)

        assert str(excinfo.value) == 'A valid version number must be specified.'

    def test_version_valid_iativer(self, std_ver_minor_uninst_valid_iativer_possible):
        """Test Version creations with correctly constructed IATIver version numbers."""
        integer_component, decimal_component = split_iativer(std_ver_minor_uninst_valid_iativer_possible)

        version = iati.Version(std_ver_minor_uninst_valid_iativer_possible)

        assert version.integer == integer_component
        assert version.major == integer_component
        assert version.decimal == decimal_component
        assert version.minor == decimal_component - 1
        assert version.patch == 0

    def test_version_invalid_iativer(self, std_ver_minor_uninst_valueerr_iativer):
        """Test Version creation with a string that is not a valid IATIver version number, but looks like it could be."""
        with pytest.raises(ValueError) as excinfo:
            iati.Version(std_ver_minor_uninst_valueerr_iativer)

        assert str(excinfo.value) == 'A valid version number must be specified.'

    def test_version_valid_semver_3_part(self, std_ver_minor_uninst_valid_semver_possible):
        """Test Version creation with valid SemVer version numbers."""
        major_component, minor_component, patch_component = split_semver(std_ver_minor_uninst_valid_semver_possible)

        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        assert version.major == major_component
        assert version.integer == major_component
        assert version.minor == minor_component
        assert version.decimal == minor_component + 1
        assert version.patch == patch_component

    def test_version_invalid_major_0(self, std_ver_minor_uninst_valueerr_v0):
        """Test Version creation with a Major version of 0.

        NOTE(review): this test previously lacked the ``test_`` prefix
        (it was named ``semver_version_invalid_major_0``), so pytest never
        collected or ran it. Its fixture was also spelt
        ``str_ver_minor_uninst_valueerr_v0``, which deviates from the
        ``std_ver_*`` naming of every other fixture here — confirm
        ``std_ver_minor_uninst_valueerr_v0`` is the intended fixture name.
        """
        with pytest.raises(ValueError) as excinfo:
            iati.Version(std_ver_minor_uninst_valueerr_v0)

        assert str(excinfo.value) == 'A valid version number must be specified.'
class TestVersionComparison:
    """A container for tests relating to comparison between Standard Versions."""

    @pytest.fixture(params=[
        # with patch components of zero
        ('1.01', '1.01', '='),  # equal IATIver - zero minor
        ('1.0.0', '1.0.0', '='),  # equal SemVer - zero minor
        ('1.01', '1.0.0', '='),  # equal IATIver and SemVer - zero minor
        ('1.0.0', '1.01', '='),  # equal Semver and IATIVer - zero minor
        ('1.02', '1.02', '='),  # equal IATIver - non-zero minor
        ('1.1.0', '1.1.0', '='),  # equal SemVer - non-zero minor
        ('1.02', '1.1.0', '='),  # equal IATIver and SemVer - non-zero minor
        ('1.1.0', '1.02', '='),  # equal SemVer and IATIver - non-zero minor
        ('1.01', '1.02', '<'),  # less than IATIver - minor
        ('1.0.0', '1.1.0', '<'),  # less than SemVer - minor
        ('1.01', '1.1.0', '<'),  # less than IATIver and SemVer - minor
        ('1.0.0', '1.02', '<'),  # less than SemVer and IATIver - minor
        ('1.01', '2.01', '<'),  # less than IATIver - major
        ('1.0.0', '2.0.0', '<'),  # less than SemVer - major
        ('1.01', '2.0.0', '<'),  # less than IATIver and SemVer - major
        ('1.0.0', '2.01', '<'),  # less than SemVer and IATIVer - major
        ('1.1.0', '1.0.0', '>'),  # more than SemVer - minor
        ('1.1.0', '1.01', '>'),  # more than IATIver and SemVer - minor
        ('1.02', '1.0.0', '>'),  # more than SemVer and IATIver - minor
        ('2.01', '1.01', '>'),  # more than IATIver - major
        ('2.0.0', '1.0.0', '>'),  # more than SemVer - major
        ('2.01', '1.0.0', '>'),  # more than IATIver and SemVer - major
        ('2.0.0', '1.01', '>'),  # more than SemVer and IATIVer - major
        # non-zero patch components
        ('1.02', '1.1.7', '<'),  # less than IATIver and SemVer - different patch
        ('1.1.7', '1.02', '>'),  # more than SemVer and IATIver - different patch
        ('1.1.6', '1.1.7', '<'),  # less than SemVer - patch
        ('1.1.7', '1.1.6', '>')  # more than SemVer - patch
    ])
    def version_relationship(self, request):
        """Return a tuple containing a pair of Version Numbers and their relationships.

        The first two items in the tuple are Version Numbers.
        The third item is a string containing symbols indicating the relationship.

        * =: The two values are equal.
        * <: The first value is less than the second.
        * >: The first value is more than the second.
        """
        return request.param

    @pytest.fixture(params=[
        # Map each comparison operator to the relationship symbols under
        # which it should evaluate to True.
        (operator.eq, ['=']),
        (operator.ne, ['<', '>']),
        (operator.lt, ['<']),
        (operator.le, ['<', '=']),
        (operator.gt, ['>']),
        (operator.ge, ['>', '='])
    ])
    def comparison_op_mapping(self, request):
        """Return a tuple containing a comparison operator and a list of symbols it represents."""
        return request.param

    def test_comparisons(self, version_relationship, comparison_op_mapping):
        """Test that the relationships between two Versions are correctly detected."""
        version_1 = iati.Version(version_relationship[0])
        version_2 = iati.Version(version_relationship[1])
        expected_relationships = version_relationship[2]
        comparison_op, op_relationships = comparison_op_mapping
        # The comparison should pass iff the operator represents at least one
        # of the expected relationship symbols.
        should_pass = len([op for op in op_relationships if op in expected_relationships]) > 0

        result = comparison_op(version_1, version_2)

        assert result == should_pass
class TestVersionModification:
    """A container for tests relating to modifying Version Numbers after they are instantiated."""

    CHANGE_AMOUNT = 10
    """int: The amount that Components are modified by."""

    @pytest.fixture(params=[
        ('major', 0),
        ('integer', 0),
        ('minor', 1),
        ('decimal', 1),
        ('patch', 2)
    ])
    def modifiable_attrib(self, request):
        """Return a tuple containing the name of a component within a Version, plus the index as it appears when components are ordered from most to least major."""
        return request.param

    def test_attribute_components_writable_valid_values(self, std_ver_minor_inst_valid_possible, modifiable_attrib):
        """Test that the core Version Number Component attributes are writable."""
        attrib_name, idx = modifiable_attrib
        components = split_semver(std_ver_minor_inst_valid_possible.semver_str)
        # Build the expected Version by bumping the same SemVer component
        # directly, then compare against attribute assignment.
        components[idx] = components[idx] + self.CHANGE_AMOUNT
        version_new = iati.Version(semver(components[0], components[1], components[2]))

        setattr(std_ver_minor_inst_valid_possible, attrib_name, components[idx])

        assert std_ver_minor_inst_valid_possible == version_new

    @pytest.mark.parametrize("not_int", iati.tests.utilities.generate_test_types(['int'], True))
    def test_attribute_components_writable_invalid_values(self, std_ver_minor_inst_valid_single, modifiable_attrib, not_int):
        """Test that core Version Number Components can have invalid values set."""
        attrib_name, _ = modifiable_attrib

        # No assertion: the test passes provided the assignment does not raise.
        setattr(std_ver_minor_inst_valid_single, attrib_name, not_int)
class TestVersionRepresentation:
    """A container for tests relating to how Standard Versions are represented when output."""

    def test_iativer_string_output(self, std_ver_minor_uninst_valid_iativer_possible):
        """Test that the string output for an IATIver version is as expected."""
        integer_component, decimal_component = split_iativer(std_ver_minor_uninst_valid_iativer_possible)
        # SemVer minor is one lower than the IATIver decimal component.
        semver_str = semver(integer_component, decimal_component - 1, 0)

        version = iati.Version(std_ver_minor_uninst_valid_iativer_possible)

        assert str(version) == std_ver_minor_uninst_valid_iativer_possible
        assert repr(version) == "iati.Version('" + semver_str + "')"
        assert version.iativer_str == std_ver_minor_uninst_valid_iativer_possible
        assert version.semver_str == semver_str

    def test_semver_string_output(self, std_ver_minor_uninst_valid_semver_possible):
        """Test that the str() output for an SemVer version is in IATIver-format."""
        major_component, minor_component, _ = split_semver(std_ver_minor_uninst_valid_semver_possible)
        iativer_str = iativer(major_component, minor_component + 1)

        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        assert str(version) == iativer_str
        assert repr(version) == "iati.Version('" + std_ver_minor_uninst_valid_semver_possible + "')"
        assert version.iativer_str == iativer_str
        assert version.semver_str == std_ver_minor_uninst_valid_semver_possible
class TestVersionBumping:
    """A container for tests relating to bumping of Version Numbers."""

    def test_version_bump_major(self, std_ver_minor_uninst_valid_semver_possible):
        """Test that the next valid Major/Integer version can be located."""
        major, _, _ = split_semver(std_ver_minor_uninst_valid_semver_possible)
        expected = iati.Version(semver(major + 1, 0, 0))
        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        # `next_major` and `next_integer` are aliases and must agree.
        for bumped in (version.next_major(), version.next_integer()):
            assert isinstance(bumped, iati.Version)
            assert bumped == expected

    def test_version_bump_minor(self, std_ver_minor_uninst_valid_semver_possible):
        """Test that the next valid Minor/Decimal version can be located."""
        major, minor, _ = split_semver(std_ver_minor_uninst_valid_semver_possible)
        expected = iati.Version(semver(major, minor + 1, 0))
        version = iati.Version(std_ver_minor_uninst_valid_semver_possible)

        # `next_minor` and `next_decimal` are aliases and must agree.
        for bumped in (version.next_minor(), version.next_decimal()):
            assert isinstance(bumped, iati.Version)
            assert bumped == expected
class TestVersionImplementationDetailHiding:
    """A container for tests relating to ensuring implementation detail is hidden.

    The implementation of the Version class makes use of a Semantic Versioning library by inheriting from a base class.
    The utilised base class contains attributes that are not desired.
    Tests in this container check that attributes that are not desired have been hidden.
    """

    def test_version_bump_patch(self, std_ver_minor_inst_valid_possible):
        """Test that the next Patch version cannot be obtained."""
        # Both calling the method and merely accessing the attribute must fail.
        with pytest.raises(AttributeError):
            std_ver_minor_inst_valid_possible.next_patch()

        with pytest.raises(AttributeError):
            std_ver_minor_inst_valid_possible.next_patch  # pylint: disable=pointless-statement

    def test_version_attrib_prerelease(self, std_ver_minor_inst_valid_possible):
        """Test that the 'prerelease' attribute has been set to None on initialisation."""
        assert std_ver_minor_inst_valid_possible.prerelease is None

    def test_version_attrib_build(self, std_ver_minor_inst_valid_possible):
        """Test that the 'build' attribute has been set to None on initialisation."""
        assert std_ver_minor_inst_valid_possible.build is None

    def test_version_attrib_partial(self, std_ver_minor_inst_valid_possible):
        """Test that the 'partial' attribute has been set to True on initialisation."""
        assert std_ver_minor_inst_valid_possible.partial is True
class TestVersionConstants:
    """A container for tests relating to constants that define useful groups of IATI version numbers."""

    @pytest.fixture(params=[
        iati.version.STANDARD_VERSIONS,
        iati.version.STANDARD_VERSIONS_SUPPORTED,
        iati.version.STANDARD_VERSIONS_MINOR
    ])
    def standard_versions_list(self, request):
        """Return a list of Version Numbers."""
        return request.param

    def test_standard_versions_all_are_versions(self, standard_versions_list):
        """Check that each item in standard versions is a Version instance."""
        for version in standard_versions_list:
            assert isinstance(version, iati.Version)

    def test_standard_versions_correct_format(self, standard_versions_list):
        """Check that standard versions is in the correct format."""
        assert isinstance(standard_versions_list, list)

    # The expected counts below are pinned to Standard version 2.03 via the
    # `latest_version` marker; they must be updated when a version is added.
    @pytest.mark.latest_version('2.03')
    def test_standard_versions_correct_number(self):
        """Check that standard versions has the expected number of items."""
        assert len(iati.version.STANDARD_VERSIONS) == 8

    @pytest.mark.latest_version('2.03')
    def test_standard_versions_correct_number_supported(self):
        """Check that supported standard versions has the expected number of items."""
        assert len(iati.version.STANDARD_VERSIONS_SUPPORTED) == 5

    def test_standard_versions_major_all_are_integers(self):
        """Check that each major version is an integer."""
        for major_version in iati.version.STANDARD_VERSIONS_MAJOR:
            assert isinstance(major_version, int)

    @pytest.mark.latest_version('2.03')
    def test_standard_versions_major_correct_number(self):
        """Check that the correct number of major versions are detected."""
        assert len(iati.version.STANDARD_VERSIONS_MAJOR) == 2

    @pytest.mark.latest_version('2.03')
    def test_standard_versions_minor_correct_number(self):
        """Check that the correct number of minor versions are detected."""
        assert len(iati.version.STANDARD_VERSIONS_MINOR) == 8

    def test_standard_version_any_has_length(self):
        """Check that the value to represent any version is a value with length."""
        assert iati.version.STANDARD_VERSION_ANY != ''
class TestVersionDecorators:
    """A container for tests that cover all version decorators."""

    def func_with_no_args(self):
        """A function that takes no arguments (once bound as a method)."""
        return True

    @pytest.mark.parametrize('decorator', [
        iati.version.allow_fully_supported_version,
        iati.version.allow_known_version,
        iati.version.allow_possible_version,
        iati.version.decimalise_integer,
        iati.version.normalise_decimals
    ])
    def test_version_decorators_require_arg(self, decorator):
        """Test that decorators raise a TypeError when given a function that requires no arguments."""
        # The decorators inspect/forward the first positional argument, so a
        # zero-argument callable cannot be wrapped and called successfully.
        with pytest.raises(TypeError):
            decorator(self.func_with_no_args)()
# pylint: disable=protected-access
class VersionSupportChecksBase:
    """A container for functions and fixtures used to check version support.

    These are in their own class to reduce the number of public methods in the parent class below the linting limit of 20.
    """

    # The three functions below deliberately take `version` instead of `self`:
    # they are referenced as plain callables in the fixture params further down,
    # not called as bound methods.

    @iati.version.allow_fully_supported_version
    def return_fully_supported_version(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but only if it's fully supported by pyIATI. Check undertaken with decorator."""
        return version

    @iati.version.allow_known_version
    def return_known_version(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but only if it's known of by pyIATI. Check undertaken with decorator."""
        return version

    @iati.version.allow_possible_version
    def return_possibly_version(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but only if it's a possible representation of a version number. Check undertaken with decorator."""
        return version

    @pytest.fixture(params=[return_fully_supported_version])
    def decorated_func_full_support(self, request):
        """Return a decorated function that returns a version of the IATI Standard that is fully supported by pyIATI."""
        return request.param

    @pytest.fixture(params=[return_known_version])
    def decorated_func_known(self, request):
        """Return a decorated function that returns a version of the IATI Standard that pyIATI knows exists."""
        return request.param

    @pytest.fixture(params=[
        return_possibly_version,
        iati.version._prevent_non_version_representations
    ])
    def possibly_version_func(self, request):
        """Return a function that returns a value that represents a possible IATI Version. Other values cause an error."""
        return request.param

    @pytest.fixture(params=[
        iati.version._is_fully_supported,
        iati.version._is_known
    ])
    def truthy_func(self, request):
        """Return a function to check whether an input value is True or False based on whether it's a valid version."""
        return request.param

    @pytest.fixture(params=[
        return_fully_supported_version,
        return_known_version
    ])
    def decorated_func(self, request):
        """Return a function to restrict whether an input value is a valid version, and raise a ValueError if it is not."""
        return request.param

    @pytest.fixture(params=[
        return_fully_supported_version,
        iati.version._is_fully_supported,
        return_known_version,
        iati.version._is_known
    ])
    def func_to_test(self, request):
        """Return a function to check for TypeErrors being raised when provided values other than iati.Versions."""
        return request.param
class TestVersionSupportChecks(VersionSupportChecksBase):
    """A container for tests relating to the detection of how much pyIATI supports particular versions."""

    def test_fully_supported_version_fully_supported(self, std_ver_minor_inst_valid_fullsupport, decorated_func_full_support):
        """Check that fully supported IATI Versions are detected as such."""
        version = std_ver_minor_inst_valid_fullsupport

        assert iati.version._is_fully_supported(version) is True
        assert decorated_func_full_support(version) == version

    def test_fully_supported_version_partially_supported(self, std_ver_minor_inst_valid_partsupport, decorated_func_full_support):
        """Check that partially supported IATI Versions are detected as not fully supported."""
        assert iati.version._is_fully_supported(std_ver_minor_inst_valid_partsupport) is False

        with pytest.raises(ValueError):
            decorated_func_full_support(std_ver_minor_inst_valid_partsupport)

    def test_known_version_known(self, std_ver_minor_inst_valid_known, decorated_func_known):
        """Check that known IATI Versions are detected as such."""
        assert iati.version._is_known(std_ver_minor_inst_valid_known) is True
        assert decorated_func_known(std_ver_minor_inst_valid_known) == std_ver_minor_inst_valid_known

    def test_known_version_not_known(self, std_ver_minor_inst_valid_unknown, decorated_func_known):
        """Check that unknown IATI Versions are detected as such."""
        assert iati.version._is_known(std_ver_minor_inst_valid_unknown) is False

        with pytest.raises(ValueError):
            decorated_func_known(std_ver_minor_inst_valid_unknown)

    def test_supported_version_str(self, std_ver_minor_uninst_valid_possible, truthy_func, decorated_func):
        """Check that Version Numbers cause an error if provided as anything other than an iati.Version."""
        # Even valid version *strings* are rejected: only iati.Version
        # instances pass the support checks.
        assert truthy_func(std_ver_minor_uninst_valid_possible) is False

        with pytest.raises(ValueError):
            decorated_func(std_ver_minor_uninst_valid_possible)

    def test_supported_version_junk_value(self, std_ver_minor_uninst_typeerr, truthy_func, decorated_func):
        """Check that supported IATI Versions cause an error if a junk value is provided."""
        assert truthy_func(std_ver_minor_uninst_typeerr) is False

        with pytest.raises(ValueError):
            decorated_func(std_ver_minor_uninst_typeerr)

    def test_non_version_representation_valid_version_obj(self, std_ver_minor_inst_valid_possible, possibly_version_func):
        """Test that instantiated iati.Versions are detected as being valid representations of an IATI Version Number."""
        original_value = copy.deepcopy(std_ver_minor_inst_valid_possible)

        version = possibly_version_func(std_ver_minor_inst_valid_possible)

        # The same object must be returned unmodified.
        assert version == original_value
        assert version is std_ver_minor_inst_valid_possible

    def test_non_version_representation_valid_val_decimal(self, std_ver_minor_uninst_valid_possible, possibly_version_func):
        """Test that values that can become iati.Versions are detected as being valid representations of an IATI Version Number."""
        original_value = copy.deepcopy(std_ver_minor_uninst_valid_possible)

        version = possibly_version_func(std_ver_minor_uninst_valid_possible)

        assert version == original_value
        assert version is std_ver_minor_uninst_valid_possible

    def test_non_version_representation_valid_val_integer(self, std_ver_major_uninst_valid_possible, possibly_version_func):
        """Test that positive integers are detected as being valid representations of an IATI Version Number."""
        original_value = copy.deepcopy(std_ver_major_uninst_valid_possible)

        version = possibly_version_func(std_ver_major_uninst_valid_possible)

        assert version == original_value
        assert version is std_ver_major_uninst_valid_possible

    def test_non_version_representation_valid_val_any(self, possibly_version_func):
        """Test that the specified ANY_VERSION values are detected as being valid representations of an IATI Version Number."""
        version = possibly_version_func(iati.version.STANDARD_VERSION_ANY)

        assert version == iati.version.STANDARD_VERSION_ANY

    def test_non_version_representation_invalid_val_integer(self, std_ver_all_uninst_valueerr, possibly_version_func):
        """Test that non-positive integers are detected as not being valid representations of an IATI Version Number."""
        with pytest.raises(ValueError):
            possibly_version_func(std_ver_all_uninst_valueerr)

    def test_non_version_representation_invalid_type(self, std_ver_all_uninst_typeerr, possibly_version_func):
        """Test that values of a type that cannot represent a Version cause a TypeError."""
        with pytest.raises(TypeError):
            possibly_version_func(std_ver_all_uninst_typeerr)
class TestVersionNormalisation:
    """A container for tests relating to normalising how versions are passed into functions."""

    # The two functions below deliberately take `version` instead of `self`:
    # they are referenced as plain callables in the fixture params further down.

    @iati.version.decimalise_integer
    def return_decimalised_integer(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but converted to an iati.Version representing the newest Decimal Version in the given Integer Version if something that can be treated as an Integer Version is provided."""
        return version

    @iati.version.normalise_decimals
    def return_normalised_decimal(version):  # pylint: disable=no-self-argument
        """Return the version parameter, but converted to an iati.Version if something that can be treated as a Decimal Version is provided."""
        return version

    # Each behaviour is tested both through the decorator and through the
    # underlying private helper it wraps.
    INTEGER_TO_DECIMAL_FUNCTIONS = [
        return_decimalised_integer,
        iati.version._decimalise_integer
    ]

    @pytest.fixture(params=INTEGER_TO_DECIMAL_FUNCTIONS)
    def integer_decimalisation_func(self, request):
        """Return a function for which the return value can be checked."""
        return request.param

    DECIMAL_S13N_FUNCTIONS = [
        return_normalised_decimal,
        iati.version._normalise_decimal_version
    ]

    @pytest.fixture(params=DECIMAL_S13N_FUNCTIONS)
    def decimal_normalisation_func(self, request):
        """Return a function for which the return value can be checked."""
        return request.param

    @pytest.fixture(params=INTEGER_TO_DECIMAL_FUNCTIONS + DECIMAL_S13N_FUNCTIONS)
    def junk_ignoring_func(self, request):
        """Return a function that does not modify junk values before returning them."""
        return request.param

    # decimal normalisation

    def test_decimal_versions_normalised(self, std_ver_minor_uninst_valid_possible, decimal_normalisation_func):
        """Check that values that represent Decimal Versions of the IATI Standard are converted to iati.Versions."""
        assert decimal_normalisation_func(std_ver_minor_uninst_valid_possible) == iati.Version(std_ver_minor_uninst_valid_possible)

    def test_integer_versions_not_normalised(self, std_ver_major_uninst_valid_possible, decimal_normalisation_func):
        """Check that values that represent Integer Versions of the IATI Standard are returned as-is when normalising Decimal Versions."""
        assert decimal_normalisation_func(std_ver_major_uninst_valid_possible) == std_ver_major_uninst_valid_possible

    # integer decimalisation

    def test_decimal_version_conversion_valid_version(self, std_ver_minor_inst_valid_known, integer_decimalisation_func):
        """Check that known Decimal Versions remain unchanged."""
        assert integer_decimalisation_func(std_ver_minor_inst_valid_known) == std_ver_minor_inst_valid_known

    def test_decimal_version_conversion_valid_decimal_representation(self, std_ver_minor_uninst_valid_known, integer_decimalisation_func):
        """Check that values that can be used to create actual Decimal Versions are left alone."""
        assert integer_decimalisation_func(std_ver_minor_uninst_valid_known) == std_ver_minor_uninst_valid_known

    @pytest.mark.parametrize('integer_version, expected_decimal', [
        ('1', iati.Version('1.05')),
        ('2', iati.version.STANDARD_VERSION_LATEST),
        ('3', iati.Version('3.0.0')),
        (1, iati.Version('1.05')),
        (2, iati.version.STANDARD_VERSION_LATEST),
        (3, iati.Version('3.0.0'))
    ])
    @pytest.mark.latest_version('2.03')
    def test_integer_version_conversion_valid(self, integer_version, expected_decimal, integer_decimalisation_func):
        """Check that valid Integer Versions return the last Decimal in the Integer."""
        assert integer_decimalisation_func(integer_version) == expected_decimal

    def test_junk_values_not_modified(self, std_ver_all_uninst_mixederr, junk_ignoring_func):
        """Check that junk values are returned as-is when standardising Decimal Versions.

        An `is` check is performed to check that the same object is returned.
        An `==` check is performed to check that the value is not modified.
        """
        # Some junk values (e.g. generators) cannot be deep-copied; fall back
        # to the object itself for the equality comparison.
        try:
            original_value = copy.deepcopy(std_ver_all_uninst_mixederr)
        except TypeError:
            original_value = std_ver_all_uninst_mixederr

        result = junk_ignoring_func(std_ver_all_uninst_mixederr)

        assert result is std_ver_all_uninst_mixederr
        # Iterators and NaN are never equal to themselves by value, so those
        # cases are excused from the `==` check.
        assert (result == original_value) or isinstance(original_value, type(iter([]))) or math.isnan(original_value)
class TestVersionMajorMinorRelationship:
    """A container for tests relating to the relationship between major and minor versions."""

    def test_versions_for_integer(self, std_ver_major_uninst_valid_known):
        """Check that the each of the decimal versions returned by versions_for_integer starts with the input major version."""
        decimal_versions = iati.version.versions_for_integer(std_ver_major_uninst_valid_known)
        expected_major = int(std_ver_major_uninst_valid_known)

        assert decimal_versions != []
        assert all(ver.major == expected_major for ver in decimal_versions)
| {
"repo_name": "IATI/iati.core",
"path": "iati/tests/test_version.py",
"copies": "1",
"size": "30006",
"license": "mit",
"hash": -6450290303974735000,
"line_mean": 48.3519736842,
"line_max": 213,
"alpha_frac": 0.6863960541,
"autogenerated": false,
"ratio": 3.8802534592008278,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5066649513300827,
"avg_score": null,
"num_lines": null
} |
""" A module containing the default routes. """
from flask import Blueprint, render_template, abort, session, redirect, url_for, request
from jinja2 import TemplateNotFound
from forms import RatingForm
from datetime import datetime
import Pyro4
import requests
# Elasticsearch document endpoint that user reactions are POSTed to
ES_URL = 'https://search-net302test-hsws64h5osjjsdgl7krzl357tu.us-west-2.es.amazonaws.com/news/article'
# Blueprint holding this module's routes; templates resolved from ./templates
defaults = Blueprint('defaults', __name__, template_folder='templates')
# Remote article queue exposed over Pyro4
# NOTE(review): object id and host are hard-coded -- confirm against the current deployment
queue = Pyro4.Proxy('PYRO:obj_b432bebc7a4b4fe59aa7525ef6bb7c3c@139.59.178.220:5001')
# catch-all: matches '/' and any other otherwise-unmatched path
@defaults.route('/', defaults={'path': ''})
@defaults.route('/<path:path>')
def default_route(path):
    """ A heartbeat route.
    Responds to any unmatched path with a fixed message confirming the service is up.
    """
    return "Service is alive."
@defaults.route('view-news', methods=['GET'])
def view_news():
    """ Route to allow a user to get a news article.
    Pops the next article from the remote Pyro queue, stores it in the
    session for the subsequent reaction submission, and renders it.
    """
    article = queue.pop()  # Get an article
    # NOTE(review): the empty queue is signalled by the string 'None', presumably
    # due to Pyro serialisation of the remote return value -- confirm
    if article == 'None':
        return render_template('no_news.html')  # No articles left :(
    else:
        # stash the article so submit-reaction can reference its fields
        session['art'] = article
        return render_template('news.html', article=article, form=RatingForm())
@defaults.route('submit-reaction', methods=['POST'])
def react():
    """ A submission route for the user's reaction.
    Validates the submitted form, combines the rating/comments with the
    article previously stored in the session, and indexes the result in
    Elasticsearch before redirecting back to the next article.
    """
    form = RatingForm(request.form)
    if form.validate():  # Ensure secret token is present.
        # Generate data for ES cluster
        # NOTE(review): assumes session['art'] was set by a prior view-news
        # request; a direct POST without it would raise KeyError -- confirm intended
        sub_data = {
            'rating': int(request.form['rating']),
            'comments': request.form['comments'],
            'title': session['art']['title'],
            'description': session['art']['description'],
            'link': session['art']['link'],
            'piclink': session['art']['pic_link'],
            # incoming date is RFC-822 style ('Mon, 01 Jan 2018 00:00:00 GMT');
            # re-emitted as ISO-8601 for Elasticsearch
            'published-date-time': datetime.strptime(session['art']['published-date-time'], '%a, %d %b %Y %H:%M:%S %Z').strftime('%Y-%m-%dT%H:%M:%SZ'),
            'reaction-date-time': datetime.now().strftime('%Y-%m-%dT%H:%M:%SZ')
        }
        # Post to ES
        # NOTE(review): the response status is not checked; a failed index
        # operation is silently ignored
        requests.post(ES_URL, json=sub_data)
        return redirect(url_for('defaults.view_news'))
    else:
        abort(403)  # Refuse to respond as its an attack
| {
"repo_name": "Sciprios/RateMyNewsUI",
"path": "rate_my_news_ui/default.py",
"copies": "1",
"size": "2082",
"license": "mit",
"hash": -1677727952098270700,
"line_mean": 39.0384615385,
"line_max": 151,
"alpha_frac": 0.6325648415,
"autogenerated": false,
"ratio": 3.528813559322034,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4661378400822034,
"avg_score": null,
"num_lines": null
} |
"""A module containing validation functionality."""
import sys
from lxml import etree
import yaml
import iati.default
import iati.resources
class ValidationError:
    """A base class to encapsulate information about Validation Errors."""
    # pylint: disable=too-many-instance-attributes
    def __init__(self, err_name, calling_locals=None):
        """Create a new ValidationError.
        Args:
            err_name (str): The name of the error to use as a base.
            calling_locals (dict): The dictionary of local variables from the calling scope. Obtained by calling `locals()`. Default is an empty dictionary.
        Raises:
            ValueError: If there is no base error with the provided name.
        Todo:
            Split message formatting into a child class and raise an error when variables are missing.
            Determine what defaults for attributes should be when the appropriate values are not available.
        """
        # have to set here to ensure each ValidationError has its own dictionary
        if calling_locals is None:
            calling_locals = dict()
        # look up the base error details; TypeError covers an unhashable err_name
        try:
            err_detail = get_error_codes()[err_name]
        except (KeyError, TypeError):
            raise ValueError('{err_name} is not a known type of ValidationError.'.format(**locals()))
        # set general attributes for this type of error
        self.name = err_name
        self.actual_value = None
        # copy every attribute from the YAML-defined error detail onto this instance
        for key, val in err_detail.items():
            setattr(self, key, val)
        # error names are formatted `err-...` or `warn-...`; the prefix determines severity
        self.status = 'error' if err_name.split('-')[0] == 'err' else 'warning'
        # format error messages with context-specific info; the `help`/`info` templates
        # reference variable names from the caller's scope via str.format
        try:
            self.help = self.help.format(**calling_locals)
            self.info = self.info.format(**calling_locals)
        except KeyError:  # as missing_var_err:
            # a referenced variable was absent from the calling scope; leave the template unformatted
            # raise NameError('The calling scope must contain a `{missing_var_err.args[0]}` variable for providing information for the error message.'.format(**locals()))
            pass
        # set general attributes for this type of error that require context from the calling scope
        try:
            self.line_number = calling_locals['line_number']
            self.context = calling_locals['dataset'].source_around_line(self.line_number)
        except KeyError:
            pass
        try:
            self.column_number = calling_locals['column_number']
        except KeyError:
            pass
        # for lxml-derived errors, keep the original log entry and its lxml type name
        try:
            self.err = calling_locals['err']
            self.lxml_err_code = calling_locals['err'].type_name
        except (AttributeError, KeyError):
            pass
class ValidationErrorLog:
    """A container to keep track of a set of ValidationErrors.
    This acts as an iterable that ValidationErrors can be looped over.
    ValidationErrors may be added to the log.
    Warning:
        It is highly likely that the methods available on a `ValidationErrorLog` will change name. At present the mix of errors, warnings and the combination of the two is confusing. This needs rectifying.
    Todo:
        Make the mix of errors, warnings and both returned by functions clearer, while not being hugely long-winded (`errors_and_warnings`-esque).
    """
    def __init__(self):
        """Initialise the error log with no entries."""
        self._values = []
    def __iter__(self):
        """Return an iterator over the contained ValidationErrors."""
        return iter(self._values)
    def __len__(self):
        """Return the number of items in the ErrorLog."""
        return len(self._values)
    def __getitem__(self, key):
        """Return an item with the specified key."""
        return self._values[key]
    def __eq__(self, other):
        """Test equality with another object.
        Two logs are equal when they have the same length and every contained
        error is present in the other; ordering is ignored.
        """
        if len(self._values) != len(other):
            return False
        for val in self._values:
            if val not in other:
                return False
        return True
    def add(self, value):
        """Add a single ValidationError to the Error Log.
        Args:
            value (iati.validator.ValidationError): The ValidationError to add to the Error Log.
        Raises:
            TypeError: When attempting to set an item that is not a ValidationError.
        """
        if not isinstance(value, iati.validator.ValidationError):
            raise TypeError('Only ValidationErrors may be added to a ValidationErrorLog.')
        self._values.append(value)
    def contains_error_called(self, err_name):
        """Check the log for an error or warning with the specified name.
        Args:
            err_name (str): The name of the error to look for.
        Returns:
            bool: Whether there is an error or warning with the specified name within the log.
        """
        errors_with_name = self.get_errors_or_warnings_by_name(err_name)
        return len(errors_with_name) > 0
    def contains_error_of_type(self, err_type):
        """Check the log for an error or warning with the specified base exception type.
        Args:
            err_type (type): The type of the error to look for.
        Returns:
            bool: Whether there is an error or warning with the specified type within the log.
        """
        errors_with_type = self.get_errors_or_warnings_by_type(err_type)
        return len(errors_with_type) > 0
    def contains_errors(self):
        """Determine whether there are errors contained within the ErrorLog.
        Note:
            The error log may contain warnings, or may be empty.
        Returns:
            bool: Whether there are errors within this error log.
        """
        errors = self.get_errors()
        return len(errors) > 0
    def contains_warnings(self):
        """Determine whether there are warnings contained within the ErrorLog.
        Note:
            The error log may contain errors, or may be empty.
        Returns:
            bool: Whether there are warnings within this error log.
        """
        warnings = self.get_warnings()
        return len(warnings) > 0
    def extend(self, values):
        """Extend the ErrorLog with ValidationErrors from an iterable.
        Args:
            values (iterable): An iterable containing ValidationErrors.
        Note:
            All ValidationErrors within the iterable shall be added. Any other contents shall not, and will fail to be added silently.
        Raises:
            TypeError: When values is not an iterable.
        """
        for value in values:
            try:
                self.add(value)
            except TypeError:
                # non-ValidationError contents are skipped silently by design
                pass
    def get_errors(self):
        """Return a list of errors contained.
        Returns:
            list(ValidationError): A list of all errors (but not warnings) that are present within the log.
        Todo:
            Add explicit tests.
        """
        return [err for err in self if err.status == 'error']
    def get_errors_or_warnings_by_category(self, err_category):
        """Return a list of errors or warnings of the specified category.
        Args:
            err_category (str): The category of the error to look for.
        Returns:
            list(ValidationError): A list of errors and warnings of the specified category that are present within the log.
        Todo:
            Add explicit tests.
        """
        return [err for err in self._values if err.category == err_category]
    def get_errors_or_warnings_by_name(self, err_name):
        """Return a list of errors or warnings with the specified name.
        Args:
            err_name (str): The name of the error to look for.
        Returns:
            list(ValidationError): A list of errors and warnings with the specified name that are present within the log.
        Todo:
            Add explicit tests.
        """
        return [err for err in self._values if err.name == err_name]
    def get_errors_or_warnings_by_type(self, err_type):
        """Return a list of errors or warnings of the specified type.
        Args:
            err_type (type): The type of the error to look for.
        Returns:
            list(ValidationError): A list of errors and warnings of the specified type that are present within the log.
        Todo:
            Add explicit tests.
        """
        return [err for err in self._values if err.base_exception == err_type]
    def get_errors_or_warning_by_type(self, err_type):
        """Return a list of errors or warnings of the specified type.
        Note:
            Deprecated alias of `get_errors_or_warnings_by_type`; retained so
            existing callers of the mis-pluralised name keep working.
        Args:
            err_type (type): The type of the error to look for.
        Returns:
            list(ValidationError): A list of errors and warnings of the specified type that are present within the log.
        """
        return self.get_errors_or_warnings_by_type(err_type)
    def get_warnings(self):
        """Return a list of warnings contained.
        Returns:
            list(ValidationError): A list of all warnings (but not errors) that are present within the log.
        Todo:
            Add explicit tests.
        """
        return [err for err in self if err.status == 'warning']
def _extract_codes_from_attrib(dataset, parent_el_xpath, attr_name, condition=None):
"""Extract codes for checking from a Dataset. The codes are being extracted from attributes.
Args:
dataset (iati.data.Dataset): The Dataset to check Codelist values within.
parent_el_xpath (str): An XPath to locate the element(s) with the attribute of interest.
attr_name (str): The name of the attribute to extract a code from.
condition (str): An optional XPath expression to limit the scope of what is extracted.
Returns:
list of tuple: A tuple in the format: `(str, int)` - The `str` is a matching code from within the Dataset; The `int` is the sourceline at which the parent element is located.
"""
if condition is None:
parent_el_xpath = parent_el_xpath + '[@' + attr_name + ']'
else:
parent_el_xpath = parent_el_xpath + '[' + condition + ' and @' + attr_name + ']'
# some nasty string manipulation to make the `//@xml:lang` mapping work
while not parent_el_xpath.startswith('//'):
parent_el_xpath = '/' + parent_el_xpath
if parent_el_xpath.startswith('//['):
parent_el_xpath = '//*[' + parent_el_xpath[3:]
# provide a secondary cludge to deal with the 'xml' namespace
if attr_name == 'xml:lang':
attr_name = '{http://www.w3.org/XML/1998/namespace}lang'
parents_to_check = dataset.xml_tree.xpath(parent_el_xpath)
located_codes = list()
for parent in parents_to_check:
located_codes.append((parent.attrib[attr_name], parent.sourceline))
return located_codes
def _extract_codes_from_element_text(dataset, parent_el_xpath, condition=None): # pylint: disable=invalid-name
"""Extract codes for checking from a Dataset. The codes are being extracted from element text.
Args:
dataset (iati.data.Dataset): The Dataset to check Codelist values within.
parent_el_xpath (str): An XPath to locate the element(s) with the attribute of interest.
condition (str): An optional XPath expression to limit the scope of what is extracted.
Returns:
list of tuple: A tuple in the format: `(str, int)` - The `str` is a matching code from within the Dataset; The `int` is the sourceline at which the parent element is located.
"""
# include the condition
if condition:
parent_el_xpath = parent_el_xpath + '[' + condition + ']'
parents_to_check = dataset.xml_tree.xpath(parent_el_xpath)
located_codes = list()
for parent in parents_to_check:
located_codes.append((parent.text, parent.sourceline))
return located_codes
def _extract_codes(dataset, parent_el_xpath, last_xpath_section, condition=None):
"""Extract codes for checking from a Dataset.
Args:
dataset (iati.data.Dataset): The Dataset to check Codelist values within.
parent_el_xpath (str): An XPath to locate the element(s) with the code of interest.
last_xpath_section (str): The last section of the XPath, detailing how to find the code on the identified element(s).
condition (str): An optional XPath expression to limit the scope of what is extracted.
list of tuple: A tuple in the format: `(str, int)` - The `str` is a matching code from within the Dataset; The `int` is the sourceline at which the parent element is located.
Raises:
ValueError: When a path in a mapping is not looking for an attribute value or element text.
"""
if last_xpath_section.startswith('@'):
attr_name = last_xpath_section[1:]
return _extract_codes_from_attrib(dataset, parent_el_xpath, attr_name, condition)
elif last_xpath_section == 'text()':
return _extract_codes_from_element_text(dataset, parent_el_xpath, condition)
else:
raise ValueError('mapping path does not locate attribute value or element text')
def _check_codes(dataset, codelist):
    """Determine whether a given Dataset has values from the specified Codelist where expected.
    Args:
        dataset (iati.data.Dataset): The Dataset to check Codelist values within.
        codelist (iati.codelists.Codelist): The Codelist to check values from.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    Raises:
        ValueError: When a path in a mapping is looking for a type of information that is not supported.
    Note:
        This code assumes that the Version codelist acts as a list of all possible version numbers.
        Local variable names in this function feed ValidationError message templates via `locals()`.
    """
    error_log = ValidationErrorLog()
    # clunky workaround due to pre-#230 behavior of `iati.Dataset().version`
    if dataset.version in iati.version.STANDARD_VERSIONS:
        mappings = iati.default.codelist_mapping(dataset.version)
    else:
        # rather than attempting general checks, ensure version number errors occur
        codelist = iati.default.codelist('Version', iati.version.STANDARD_VERSION_LATEST)
        mappings = iati.default.codelist_mapping(iati.version.STANDARD_VERSION_LATEST)
    # a complete Codelist must contain every permitted value, so a mismatch is an error rather than a warning
    err_name_prefix = 'err' if codelist.complete else 'warn'
    for mapping in mappings[codelist.name]:
        # split the mapped XPath into the parent locator and the code selector (`@attr` or `text()`)
        parent_el_xpath, last_xpath_section = mapping['xpath'].rsplit('/', 1)
        located_codes = _extract_codes(dataset, parent_el_xpath, last_xpath_section, mapping['condition'])
        for (code, line_number) in located_codes:  # `line_number` used via `locals()` # pylint: disable=unused-variable
            if code not in codelist.codes:
                if last_xpath_section.startswith('@'):
                    attr_name = last_xpath_section[1:]  # used via `locals()` # pylint: disable=unused-variable
                    error = ValidationError(err_name_prefix + '-code-not-on-codelist', locals())
                else:
                    _, el_name = parent_el_xpath.rsplit('/', 1)  # used via `locals()` # pylint: disable=unused-variable
                    error = ValidationError(err_name_prefix + '-code-not-on-codelist-element-text', locals())
                error.actual_value = code
                error_log.add(error)
    return error_log
def _check_codelist_values(dataset, schema):
    """Check whether a given Dataset has values from Codelists that have been added to a Schema where expected.
    Each Codelist attached to the Schema is checked in turn; the resulting
    errors are merged into a single log.
    Args:
        dataset (iati.data.Dataset): The Dataset to check Codelist values within.
        schema (iati.schemas.Schema): The Schema to locate Codelists within.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    """
    combined_log = ValidationErrorLog()
    for codelist in schema.codelists:
        combined_log.extend(_check_codes(dataset, codelist))
    return combined_log
def _check_is_iati_xml(dataset, schema):
    """Check whether a given Dataset contains valid IATI XML.
    Args:
        dataset (iati.data.Dataset): The Dataset to check validity of.
        schema (iati.schemas.Schema): The Schema to validate the Dataset against.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    Raises:
        TypeError: Something was provided as a Dataset that is not a Dataset.
        iati.exceptions.SchemaError: An error occurred in the parsing of the Schema.
    Todo:
        Create test against a bad Schema.
    """
    error_log = ValidationErrorLog()
    # a SchemaError from a malformed Schema propagates to the caller unchanged;
    # the previous explicit catch-and-re-raise added nothing
    validator = schema.validator()
    try:
        validator.assertValid(dataset.xml_tree)
    except etree.DocumentInvalid as doc_invalid:
        # convert each lxml log entry into an IATI ValidationError
        for log_entry in doc_invalid.error_log:  # pylint: disable=no-member
            error = _create_error_for_lxml_log_entry(log_entry)
            error_log.add(error)
    except AttributeError:
        # a non-Dataset argument has no `xml_tree` attribute
        raise TypeError('Unexpected argument: {0} is not an iati.Dataset'.format(type(dataset)))
    return error_log
def _check_is_xml(maybe_xml):
    """Check whether a given parameter is valid XML.
    Args:
        maybe_xml (str / bytes): A string that may or may not contain valid XML.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    Todo:
        Consider how a Dataset may be passed when creating errors so that context can be obtained.
    """
    error_log = ValidationErrorLog()
    # a Dataset already holds its source; validate that string rather than the object
    if isinstance(maybe_xml, iati.data.Dataset):
        maybe_xml = maybe_xml.xml_str
    try:
        parser = etree.XMLParser()
        _ = etree.fromstring(maybe_xml.strip(), parser)
    except etree.XMLSyntaxError:
        # the parser's own log carries one entry per syntax problem
        for log_entry in parser.error_log:
            error = _create_error_for_lxml_log_entry(log_entry)
            error_log.add(error)
    except ValueError as err:
        # lxml signals these two situations via ValueError; distinguish them by message
        if 'can only parse strings' in err.args[0]:
            problem_var_type = type(maybe_xml)  # used via `locals()` # pylint: disable=unused-variable
            error = ValidationError('err-not-xml-not-string', locals())
            error_log.add(error)
        elif 'Unicode strings with encoding declaration are not supported.' in err.args[0]:
            error = ValidationError('err-encoding-in-str', locals())
            error_log.add(error)
    except (AttributeError, TypeError):
        # `.strip()` failed, so the argument is not string-like at all
        problem_var_type = type(maybe_xml)  # used via `locals()` # pylint: disable=unused-variable
        error = ValidationError('err-not-xml-not-string', locals())
        error_log.add(error)
    # the parser does not cause any errors when given an empty string, so this needs handling separately
    # (the empty-log guard also prevents calling `.strip()` on non-string input caught above)
    if error_log == ValidationErrorLog() and maybe_xml.strip() == '':
        err_name = 'err-not-xml-empty-document'
        err = 'A file or string containing no data is not XML.'  # used via `locals()` # pylint: disable=unused-variable
        error = ValidationError(err_name, locals())
        error_log.add(error)
    return error_log
def _check_rules(dataset, ruleset):
    """Determine whether a given Dataset conforms with a provided Ruleset.
    Args:
        dataset (iati.data.Dataset): The Dataset to check Ruleset conformance with.
        ruleset (iati.code.Ruleset): The Ruleset to check conformance with.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    Note:
        Local variable names here (e.g. `rule`) feed ValidationError message templates via `locals()`.
    """
    error_log = ValidationErrorLog()
    error_found = False
    for rule in ruleset.rules:
        validation_status = rule.is_valid_for(dataset)
        if validation_status is None:
            # A result of `None` signifies that a rule was skipped.
            error = ValidationError('warn-rule-skipped', locals())
            error_log.add(error)
        elif validation_status is False:
            # A result of `False` signifies that a rule did not pass.
            error = _create_error_for_rule(rule)
            error_log.add(error)
            error_found = True
    if error_found:
        # Add a ruleset error if at least one rule error was found.
        error = ValidationError('err-ruleset-conformance-fail', locals())
        error_log.add(error)
    return error_log
def _check_ruleset_conformance(dataset, schema):
    """Collect Ruleset-conformance errors for a Dataset.
    Every Ruleset attached to the Schema is checked in turn and the
    resulting errors are merged into a single log.
    Args:
        dataset (iati.data.Dataset): The Dataset to check Ruleset conformance with.
        schema (iati.schemas.Schema): The Schema to locate Rulesets within.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    """
    combined_log = ValidationErrorLog()
    for ruleset in schema.rulesets:
        combined_log.extend(_check_rules(dataset, ruleset))
    return combined_log
def _conforms_with_ruleset(dataset, schema):
    """Determine whether a given Dataset conforms with Rulesets that have been added to a Schema.
    Args:
        dataset (iati.data.Dataset): The Dataset to check Ruleset conformance with.
        schema (iati.schemas.Schema): The Schema to locate Rulesets within.
    Returns:
        bool: Whether the given Dataset conforms with every Ruleset attached to the given Schema.
    """
    return not _check_ruleset_conformance(dataset, schema).contains_errors()
def _correct_codelist_values(dataset, schema):
    """Determine whether a given Dataset has values from Codelists that have been added to a Schema where expected.
    Args:
        dataset (iati.data.Dataset): The Dataset to check Codelist values within.
        schema (iati.schemas.Schema): The Schema to locate Codelists within.
    Returns:
        bool: Whether the given Dataset has values from the specified Codelists where they should be.
    """
    return not _check_codelist_values(dataset, schema).contains_errors()
def _create_error_for_lxml_log_entry(log_entry):  # pylint: disable=invalid-name
    """Parse a log entry from an lxml error log and convert it to a IATI ValidationError.
    Args:
        log_entry (etree._LogEntry): A log entry from an `etree.XMLSyntaxError` or `etree.DocumentInvalid`.
    Returns:
        ValidationError: An IATI ValidationError that contains the information from the log entry.
    Todo:
        Create a small program to determine the common types of errors so that they can be handled as special cases with detailed help info.
        Determine whether there should be a range of uncategorised errors rather than just 'err-not-xml-uncategorised-xml-syntax-error' eg. IATI error vs. XML error.
    """
    # set the `err` variable so it can be used in error string formatting via locals()
    err = log_entry
    # configure local variables for the creation of the error
    line_number = err.line  # used via `locals()`# pylint: disable=unused-variable
    column_number = err.column  # used via `locals()`# pylint: disable=unused-variable
    # undertake the mapping between error name formats (lxml type names -> IATI error names)
    lxml_to_iati_error_mapping = {
        'ERR_DOCUMENT_EMPTY': 'err-not-xml-empty-document',
        'ERR_DOCUMENT_END': 'err-not-xml-content-at-end',
        'ERR_INTERNAL_ERROR': 'err-lxml-internal-error',
        'ERR_INVALID_ENCODING': 'err-encoding-invalid',
        'ERR_UNSUPPORTED_ENCODING': 'err-encoding-unsupported',
        'ERR_RESERVED_XML_NAME': 'err-not-xml-xml-text-decl-only-at-doc-start',
        'SCHEMAV_CVC_COMPLEX_TYPE_2_3': 'err-not-iati-xml-non-whitespace-in-element-only',
        'SCHEMAV_CVC_COMPLEX_TYPE_3_2_1': 'err-not-iati-xml-forbidden-attribute',
        'SCHEMAV_CVC_COMPLEX_TYPE_3_2_2': 'err-not-iati-xml-forbidden-attribute',
        'SCHEMAV_CVC_COMPLEX_TYPE_4': 'err-not-iati-xml-missing-attribute',
        'SCHEMAV_CVC_DATATYPE_VALID_1_2_1': 'err-not-iati-xml-incorrect-datatype',
        'SCHEMAV_CVC_ELT_1': 'err-not-iati-xml-root-element-undeclared',
        'SCHEMAV_ELEMENT_CONTENT': 'err-not-iati-xml-missing-required-element'
    }
    # unmapped lxml error types fall back to a generic uncategorised error
    try:
        err_name = lxml_to_iati_error_mapping[err.type_name]
    except KeyError:
        err_name = 'err-not-xml-uncategorised-xml-syntax-error'
    error = ValidationError(err_name, locals())
    return error
def _create_error_for_rule(rule):
    """Parse a Rule skip or failure and convert it into an IATI ValidationError.
    Args:
        rule (iati.rulesets.Rule): The Rule which has either skipped or failed.
    Returns:
        ValidationError: An IATI ValidationError that contains information about the Rule that has failed.
    Todo:
        Determine whether there should be a range of uncategorised errors for various ways Ruleset validation may fail, rather than just 'err-rule-uncategorised-conformance-fail'.
    """
    # undertake the mapping between Rule subclass and error name formats
    rule_to_iati_error_mapping = {
        'atleast_one': 'err-rule-at-least-one-conformance-fail',
        'date_order': 'err-rule-date-order-conformance-fail',
        'dependent': 'err-rule-dependent-conformance-fail',
        'no_more_than_one': 'err-rule-no-more-than-one-conformance-fail',
        'regex_matches': 'err-rule-regex-matches-conformance-fail',
        'regex_no_matches': 'err-rule-regex-no-matches-conformance-fail',
        'startswith': 'err-rule-starts-with-conformance-fail',
        'sum': 'err-rule-sum-conformance-fail',
        'unique': 'err-rule-unique-conformance-fail'
    }
    # an unrecognised Rule name falls back to a generic conformance-failure error
    try:
        err_name = rule_to_iati_error_mapping[rule.name]
    except KeyError:
        err_name = 'err-rule-uncategorised-conformance-fail'
    # `rule` remains in scope so error message templates can reference it via locals()
    error = ValidationError(err_name, locals())
    return error
def full_validation(dataset, schema):
    """Perform full validation on a Dataset against the provided Schema.
    Args:
        dataset (iati.Dataset): The Dataset to check validity of.
        schema (iati.Schema): The Schema to validate the Dataset against.
    Warning:
        Parameters are likely to change in some manner.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    Todo:
        Create test against a bad Schema.
    """
    log = ValidationErrorLog()
    log.extend(_check_is_xml(dataset))
    try:
        log.extend(_check_is_iati_xml(dataset, schema))
    except TypeError:
        # the argument is not a Dataset at all, so no further checks are possible
        return log
    for additional_check in (_check_codelist_values, _check_ruleset_conformance):
        log.extend(additional_check(dataset, schema))
    return log
def get_error_codes():
    """Return a dictionary of the possible error codes and their information.
    The codes are loaded from `validation_err_codes.yaml` within the library
    data; each entry's `base_exception` name is swapped for the corresponding
    builtin exception class.
    Returns:
        dict: A dictionary of error codes.
    Raises:
        KeyError: When a specified base_exception is not a valid type of exception.
    Todo:
        Raise the correct error for incorrect base_exception values.
        Raise an error when there is a problem with non-base_exception-related errors.
    """
    yaml_path = iati.resources.create_lib_data_path('validation_err_codes.yaml')
    raw_code_entries = yaml.safe_load(iati.utilities.load_as_string(yaml_path))
    # yaml parses the values into a list of single-entry dicts; merge them into one dict
    err_codes = dict()
    for code_entry in raw_code_entries:
        err_codes.update(code_entry)
    # convert name of exception into reference to the relevant class
    for err_detail in err_codes.values():
        err_detail['base_exception'] = getattr(sys.modules['builtins'], err_detail['base_exception'])
    return err_codes
def is_iati_xml(dataset, schema):
    """Determine whether a given Dataset's XML is valid against the specified Schema.
    Args:
        dataset (iati.data.Dataset): The Dataset to check validity of.
        schema (iati.schemas.Schema): The Schema to validate the Dataset against.
    Warning:
        Parameters are likely to change in some manner.
    Returns:
        bool: Whether the given Dataset is valid XML against the given Schema.
    Raises:
        iati.exceptions.SchemaError: An error occurred in the parsing of the Schema.
    Todo:
        Create test against a bad Schema.
    """
    iati_xml_errors = _check_is_iati_xml(dataset, schema)
    return not iati_xml_errors.contains_errors()
def is_valid(dataset, schema):
    """Determine whether a given Dataset is valid against the specified Schema.
    Args:
        dataset (iati.Dataset): The Dataset to check validity of.
        schema (iati.Schema): The Schema to validate the Dataset against.
    Warning:
        Parameters are likely to change in some manner.
    Returns:
        bool: Whether the given Dataset is valid against the given Schema.
    Todo:
        Create test against a bad Schema.
    """
    # a Dataset that is not valid IATI XML (or an unparseable Schema) fails immediately
    try:
        iati_xml = is_iati_xml(dataset, schema)
    except iati.exceptions.SchemaError:
        return False
    if not iati_xml:
        return False
    # both checks are always evaluated, mirroring the original behaviour
    codelists_ok = _correct_codelist_values(dataset, schema)
    rulesets_ok = _conforms_with_ruleset(dataset, schema)
    return codelists_ok and rulesets_ok
def is_xml(maybe_xml):
    """Determine whether a given parameter is XML.
    Args:
        maybe_xml (str): An string that may or may not be valid XML.
    Returns:
        bool: Whether the given value is valid XML.
    """
    return not _check_is_xml(maybe_xml).contains_errors()
def validate_is_iati_xml(dataset, schema):
    """Check whether a Dataset contains valid IATI XML.
    Args:
        dataset (iati.Dataset): The Dataset to check validity of.
        schema (iati.Schema): The Schema to validate the Dataset against.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    """
    return _check_is_iati_xml(dataset, schema)
def validate_is_xml(maybe_xml):
    """Check whether a Dataset contains valid XML.
    Args:
        maybe_xml (str): An string that may or may not be valid XML.
    Returns:
        iati.validator.ValidationErrorLog: A log of the errors that occurred.
    """
    xml_error_log = _check_is_xml(maybe_xml)
    return xml_error_log
| {
"repo_name": "IATI/iati.core",
"path": "iati/validator.py",
"copies": "1",
"size": "29558",
"license": "mit",
"hash": -4691988290062403000,
"line_mean": 34.5264423077,
"line_max": 205,
"alpha_frac": 0.656167535,
"autogenerated": false,
"ratio": 4.133407914976926,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0012912592043115394,
"num_lines": 832
} |
""" A module designed to hold onto encode.* decorated functions. Each
function decorated by encode is returned untouched. However, they
are registered within this module for retrieval by factories when
encoding from a pymm element.
This module (and it's decorations) should be used in-place of
writing a specific factory, since the type of decorations
allowed is limited but powerful enough to allow custom exporting
of mindmaps
"""
# Registry of decorated functions. Maps each function object to the
# decoration keyword it was registered under, e.g. {some_fxn: 'post_encode'}.
# Factories later claim these entries when encoding a pymm element.
unclaimed = {}
def pre_encode(fxn):
    """Register *fxn* to run before any other encode functions.
    pre_encode functions fire top-down from the root through all
    subchildren, in the order they appear in the tree, using
    breadth-first search. Decorate an element's function with
    pre_encode to apply custom modifications just before the element
    is encoded to file; pair it with a post_encode function if those
    modifications need undoing once encoding has finished.
    """
    unclaimed[fxn] = 'pre_encode'
    return fxn
def post_encode(fxn):
    """Register *fxn* to run after encoding has completed.
    Because a post_encode function executes once the encoded tree /
    file has already been produced, nothing it does can influence the
    output. That makes it the natural place to undo any temporary
    modification applied by a matching pre_encode function.
    """
    unclaimed[fxn] = 'post_encode'
    return fxn
def get_children(fxn):
    """Register *fxn* as the supplier of an element's child list.
    During encoding, the decorated function is used when getting the
    children list from the element. Override it to include additional
    children or to remove children that should not appear in the
    exported file.
    """
    unclaimed[fxn] = 'encode_getchildren'
    return fxn
def get_attrib(fxn):
    """the function decorated by get_attrib will be used when getting
    the attrib dictionary from a pymm element. Use this if you wish to
    modify the attrib dictionary, such as including or excluding attrib
    key/values. The attrib returned by this function will be used
    in exporting
    """
    unclaimed[fxn] = 'encode_getattrib'
    return fxn
| {
"repo_name": "lancekindle/pymm",
"path": "pymm/encode.py",
"copies": "1",
"size": "2293",
"license": "mit",
"hash": 1937283328937140200,
"line_mean": 37.2166666667,
"line_max": 71,
"alpha_frac": 0.7296118622,
"autogenerated": false,
"ratio": 4.326415094339622,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.016666666666666666,
"num_lines": 60
} |
"""A module designed to interact with SCSGate.
See:
https://github.com/flavio/scsgate
"""
# Always prefer setuptools over distutils
from setuptools import setup, find_packages
# To use a consistent encoding
from codecs import open
from os import path
here = path.abspath(path.dirname(__file__))

# Get the long description from the README file
with open(path.join(here, 'README.rst'), encoding='utf-8') as f:
    long_description = f.read()

setup(
    name='scsgate',

    # Versions should comply with PEP440. For a discussion on single-sourcing
    # the version across setup.py and the project code, see
    # https://packaging.python.org/en/latest/single_source_version.html
    version='0.1.0',

    description='A Python module to interact with SCSGate',
    long_description=long_description,

    # The project's main homepage.
    url='https://github.com/flavio/scsgate',
    download_url='https://github.com/flavio/scsgate/archive/0.1.0.tar.gz',

    # Author details
    author='Flavio Castelli',
    author_email='flavio@castelli.me',

    # Choose your license
    license='MIT',

    # See https://pypi.python.org/pypi?%3Aaction=list_classifiers
    classifiers=[
        # How mature is this project? Common values are
        #   3 - Alpha
        #   4 - Beta
        #   5 - Production/Stable
        'Development Status :: 3 - Alpha',

        # Indicate who your project is intended for
        'Intended Audience :: Developers',

        # Pick your license as you wish (should match "license" above)
        'License :: OSI Approved :: MIT License',

        # Specify the Python versions you support here.
        'Programming Language :: Python :: 3',
        'Programming Language :: Python :: 3.2',
        'Programming Language :: Python :: 3.3',
        'Programming Language :: Python :: 3.4',
        'Programming Language :: Python :: 3.5',
    ],

    # What does your project relate to?
    keywords='scsgate home-automation development',

    # You can just specify the packages manually here if your project is
    # simple. Or you can use find_packages().
    packages=find_packages(),

    # List run-time dependencies here. These will be installed by pip when
    # your project is installed. For an analysis of "install_requires" vs pip's
    # requirements files see:
    # https://packaging.python.org/en/latest/requirements.html
    install_requires=['pyserial', 'pyyaml'],

    # List additional groups of dependencies here (e.g. development
    # dependencies). You can install these using the following syntax,
    # for example:
    # $ pip install -e .[dev,test]
    extras_require={
        'dev': [],
        # NOTE(review): 'nosetest' looks like a typo -- the PyPI package is
        # named 'nose' (its CLI is 'nosetests'). Confirm and fix.
        'test': ['nosetest'],
    },

    # To provide executable scripts, use entry points in preference to the
    # "scripts" keyword. Entry points provide cross-platform support and allow
    # pip to create the appropriate form of executable for the target platform.
    entry_points={
        'console_scripts': [
            'scs-monitor=scsgate.monitor:main',
        ],
    },
)
| {
"repo_name": "flavio/scsgate",
"path": "setup.py",
"copies": "1",
"size": "3044",
"license": "mit",
"hash": 6304226218097423000,
"line_mean": 31.7311827957,
"line_max": 79,
"alpha_frac": 0.6596583443,
"autogenerated": false,
"ratio": 4.037135278514588,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5196793622814588,
"avg_score": null,
"num_lines": null
} |
''' A module used by maxent.py and phlearn.py to find the ideal weights for a tableau.
'''
import megatableau
import scipy, scipy.optimize
import math
import numpy as np
### HELPER FUNCTIONS FOR CALCULATING PROBABILITY ###
def maxent_value(weights, tableau, ur, sr):
    """Return the maxent potential P* = exp(harmony) for one UR/SR pair.

    Harmony is the weighted sum of the candidate's constraint violations.
    A tiny positive constant (the smallest positive normal double, about
    2.2e-308) is added so that exponent underflow can never produce an
    exact zero, which would break later log/division steps.
    """
    violations = tableau[ur][sr][1]
    harmony = sum(weights[c] * v for c, v in violations.items())
    return math.exp(harmony) + np.finfo(np.double).tiny
def z_score(tableau, ur):
    """Return Z(ur): the sum of the cached maxent values of every SR
    candidate stored under *ur* (slot [2] of each candidate record).
    """
    return sum(candidate[2] for candidate in tableau[ur].values())
def update_maxent_values(weights, tableau):
    """Recompute exp(harmony) for every UR/SR pair in *tableau*.

    Each candidate's cached value (slot [2]) is overwritten in place with
    its fresh maxent value under the supplied weight vector.
    """
    for ur, candidates in tableau.items():
        for sr in candidates:
            candidates[sr][2] = maxent_value(weights, tableau, ur, sr)
### OBJECTIVE FUNCTION(S) ###
def neg_log_probability_with_gradient(weights, tableau, l1_mult=0.0, l2_mult=1.0, gaussian_priors=None):
    """ Returns the negative log probability of the data AND a gradient vector.
    This is the objective function used in learn_weights().

    weights         -- constraint weight vector (indexable by constraint id)
    tableau         -- nested dict; tableau[ur][sr] = [observed count,
                       {constraint: violations}, cached maxent value]
    l1_mult/l2_mult -- multipliers for the L1/L2 regularization terms
    gaussian_priors -- optional (mus, sigmas) arrays; when given they
                       override the L1/L2 priors entirely
    """
    # Refresh the cached exp(harmony) values before computing probabilities.
    update_maxent_values(weights, tableau)
    logProbDat = 0
    observed = [0 for i in range(len(weights))] # Vector of observed violations
    expected = [0 for i in range(len(weights))] # Vector of expected violations
    # Gaussian priors override L1/L2 priors
    if gaussian_priors:
        mus, sigmas = gaussian_priors[0], gaussian_priors[1]
        normalized = (weights-mus)/sigmas
        prob_prior = -(0.5*sum(normalized*normalized))
        grad_prior = -(normalized/sigmas)
    else:
        # NOTE(review): the L1 term is negated at definition while the L2
        # term is negated only in the combination below -- confirm the
        # intended signs (weights are nonpositive in this model).
        l1_prob_prior = -(l1_mult * sum(weights))
        l2_prob_prior = l2_mult * sum(weights*weights)
        l1_grad_prior = -(l1_mult * scipy.ones(len(weights)))
        l2_grad_prior = 2 * l2_mult * weights
        prob_prior = -(l1_prob_prior + l2_prob_prior)
        grad_prior = -(l1_grad_prior + l2_grad_prior)
    for ur in tableau:
        ur_count = 0 # Total observed for this UR
        z = z_score(tableau, ur)
        new_expected = [0 for i in range(len(weights))]
        for sr in tableau[ur]:
            ur_count += tableau[ur][sr][0]
            # Conditional probability of this SR given its UR.
            prob = tableau[ur][sr][2] / z
            logProbDat += math.log(prob) * tableau[ur][sr][0]
            for c in tableau[ur][sr][1]:
                observed[c] += tableau[ur][sr][1][c] * tableau[ur][sr][0]
                new_expected[c] += tableau[ur][sr][1][c] * prob
        # Expected violations are weighted by how often this UR was observed.
        for i in range(0,len(expected)):
            expected[i] += new_expected[i] * ur_count
    logProbDat += prob_prior
    gradient = [e-o-p for e, o, p in zip(expected, observed, grad_prior)] # i.e. -(observed minus expected)
    return (-logProbDat, np.array(gradient))

nlpwg = neg_log_probability_with_gradient # So you don't get carpal tunnel syndrome.
def neg_log_probability(weights, tableau, l1_mult=0.0, l2_mult=1.0):
    """Return only the negative log probability of the data, discarding
    the gradient computed alongside it.
    """
    neg_log_prob, _gradient = nlpwg(weights, tableau, l1_mult, l2_mult)
    return neg_log_prob
def probability(weights, tableau, l1_mult=0.0, l2_mult=1.0):
    """Return the probability of the data: exp of the (negated) negative
    log probability computed by nlpwg.
    """
    neg_log_prob = nlpwg(weights, tableau, l1_mult, l2_mult)[0]
    return math.exp(-neg_log_prob)
### OPTIMIZATION FUNCTION
def learn_weights(mt, l1_mult = 0.0, l2_mult = 1.0, precision = 10000000):
    """ Given a filled-in megatableau, return the optimal weight vector.

    mt        -- megatableau whose .tableau, .weights, .constraints_abbrev
                 and .gaussian_priors are read; .weights is updated in
                 place with the learned values
    l1_mult   -- L1 regularization multiplier
    l2_mult   -- L2 regularization multiplier
    precision -- passed to L-BFGS-B as `factr` (smaller means more precise)
    """
    # Set up the initial weights and weight bounds (nonpositive reals)
    # NOTE(review): scipy.rand is a deprecated alias of numpy.random.rand;
    # consider calling numpy directly.
    w_0 = -scipy.rand(len(mt.weights)) # Random initial weights
    #w_0 = [0 for w in mt.weights] # 0 initial weights
    nonpos_reals = [(-50,0) for wt in mt.weights]
    # Find the best weights via bounded quasi-Newton optimization.
    learned_weights, fneval, rc = scipy.optimize.fmin_l_bfgs_b(nlpwg, w_0, \
        args = (mt.tableau,l1_mult,l2_mult, mt.gaussian_priors), bounds=nonpos_reals, factr=precision)
    # Update the mt in place with the new weights
    mt.weights = learned_weights
    # Be sociable: print each constraint abbreviation with its weight.
    print("\nBoom! Weights have been updated:")
    for i in range(0,len(learned_weights)):
        print("{}\t{}".format(mt.constraints_abbrev[i], str(learned_weights[i])))
    print("\nLog probability of data: {}".format(str(-(nlpwg(learned_weights, mt.tableau))[0])))
    print("")
    # Return
    return learned_weights
| {
"repo_name": "rdaland/PhoMEnt",
"path": "optimizer.py",
"copies": "1",
"size": "4662",
"license": "bsd-3-clause",
"hash": 2370169338653923300,
"line_mean": 37.85,
"line_max": 112,
"alpha_frac": 0.6314886315,
"autogenerated": false,
"ratio": 3.190965092402464,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9284165828925088,
"avg_score": 0.00765757899547498,
"num_lines": 120
} |
# Copyright 2015 Steven G. Decker
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import gempakf as gp
import matplotlib.pyplot as plt
from mpl_toolkits.basemap import Basemap
class Dataset:
    """Reads grid metadata from a GEMPAK file via the gemread extension.

    On construction the file is scanned once and per-grid metadata is
    stored in self.datainfo as dicts with keys gdattim/glevel/gvcord/gfunc.
    """
    def __init__(self, gemfile):
        self.gemfile = gemfile
        n = gp.gemread.get_num_grids(gemfile)
        # Local variables -- Fortran-ordered buffers filled in by ggi below.
        gdattm = np.zeros((20,n,2), np.int8, 'F')
        level = np.zeros((n,2), np.int32, 'F')
        ivcord = np.zeros(n, np.int32)
        vcord = np.zeros((4,n), np.int8, 'F')
        parm = np.zeros((12,n), np.int8, 'F')
        self.max_grids, self.num_grids, self.nx, self.ny, self.proj, \
        self.ang, self.lllat, self.lllon, self.urlat, self.urlon = \
        gp.gemread.ggi(gemfile, gdattm, level, ivcord, vcord, parm)
        self.proj = self.proj.strip()
        self.datainfo = []
        for i in range(self.num_grids):
            # The int8 columns are fixed-width character data; the .view()
            # calls reinterpret them as byte strings before stripping.
            dattim = [gdattm[:,i,0].view('a20')[0].strip(), \
                      gdattm[:,i,1].view('a20')[0].strip()]
            lev = [level[i,0], level[i,1]]
            vc = vcord[:,i].view('a4')[0].strip()
            fun = parm[:,i].view('a12')[0].strip()
            datarow = {'gdattim': dattim, 'glevel': lev, 'gvcord': vc,
                       'gfunc': fun}
            self.datainfo.append(datarow)
    def grid_from_num(self, num):
        """Read grid number *num* and return it transposed to (ny, nx)."""
        grid = np.zeros((self.nx,self.ny), np.float32, 'F')
        gp.gemread.read_grid(self.gemfile, \
                             self.datainfo[num]['gdattim'][0], \
                             self.datainfo[num]['gdattim'][1], \
                             self.datainfo[num]['glevel'][0], \
                             self.datainfo[num]['glevel'][1], \
                             self.datainfo[num]['gvcord'], \
                             self.datainfo[num]['gfunc'], grid)
        return grid.transpose()
    def grid_from_dict(self, d):
        """Read the grid described by metadata dict *d* (same keys as the
        entries of self.datainfo) and return it transposed to (ny, nx)."""
        grid = np.zeros((self.nx,self.ny), np.float32, 'F')
        # (note: "self. gemfile" is valid Python, just oddly spaced)
        gp.gemread.read_grid(self. gemfile, d['gdattim'][0], d['gdattim'][1],
                             d['glevel'][0], d['glevel'][1], d['gvcord'],
                             d['gfunc'], grid)
        return grid.transpose()
def map_for_dataset(dset, res='l'):
    """Return a Basemap matching *dset*'s grid, or 0 if the projection is
    unsupported.

    dset -- Dataset whose proj / corner / angle attributes describe the grid
    res  -- Basemap coastline resolution ('l' = low)
    """
    if dset.proj=='LCC':
        # Lambert conformal: ang holds (lat_1, lon_0, lat_2).
        m = Basemap(llcrnrlon=dset.lllon, llcrnrlat=dset.lllat,
                    urcrnrlon=dset.urlon, urcrnrlat = dset.urlat,
                    projection='lcc', lat_1=dset.ang[0], lat_2=dset.ang[2],
                    lon_0=dset.ang[1], resolution=res)
    else:
        print 'Sorry, this projection is not yet supported. :-('
        m = 0
    return m
if __name__ == "__main__":
    # Smoke test: open a sample GEMPAK file, print the first grid's
    # metadata, and contour-plot grid 10 on its native map projection.
    gemdata = Dataset('nam211.gem')
    print gemdata.datainfo[0]
    arr = gemdata.grid_from_dict(gemdata.datainfo[10])
    m = map_for_dataset(gemdata)
    # Draw the map background.
    m.drawcountries()
    m.drawcoastlines()
    m.drawstates()
    # Build a coordinate mesh spanning the map in projection space.
    x = np.linspace(m.xmin,m.xmax,gemdata.nx)
    y = np.linspace(m.ymin,m.ymax,gemdata.ny)
    xmesh, ymesh = np.meshgrid(x, y)
    m.contourf(xmesh,ymesh,arr)
    plt.show()
| {
"repo_name": "sgdecker/pygempak",
"path": "gempak.py",
"copies": "1",
"size": "3626",
"license": "apache-2.0",
"hash": -2628176378530501000,
"line_mean": 37.1684210526,
"line_max": 77,
"alpha_frac": 0.5579150579,
"autogenerated": false,
"ratio": 3.1668122270742356,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9168459308162265,
"avg_score": 0.011253595362394193,
"num_lines": 95
} |
""" A module for command parsing
The command module deals with parsing a cli command properly. It supports
abbreviated commands ("a" is parsed as the command "add" if there is no
other command beginning with "a" on that level) and has methods useful for
tab completion of both commands and external values.
"""
class Command:
    """ A command parser and handler

        The Command class can be used to extract a command from a list of
        strings (such as sys.argv).

        The main method is the :func:`parse_cmd` which handles all the parsing
        and sets appropriate instance variables. It is automatically called
        from the constructor :func:`__init__`.
    """
    # NOTE(review): these mutable class-level defaults ({} / []) are shared
    # across instances until parse_cmd() rebinds them on the instance --
    # confirm no caller relies on class-level access before moving them
    # into __init__.
    key = {}
    """ Contains the current value
    """
    key_complete = True
    children = {}
    """ Contains the next valid values
    """
    exe = None
    """ Pointer to function to execute
    """
    arg = None
    """ Function argument - a single argument passed to the function
    """
    exe_options = {}
    """ Function options - a dict of options passed to the function
    """
    inp_cmd = []
    """ List of the commands inputed
    """
    _scoop_rest_arguments = False
    """ Set when we're scooping up all unknown arguments
    """
    def __init__(self, tree, inp_cmd):
        """ Create instance of the Command class

            The tree argument should contain a specifically formatted dict
            which describes the available commands, options, arguments and
            callbacks to methods for completion of arguments.

            TODO: document dict format

            The inp_cmd argument should contain a list of strings containing
            the complete command to parse, such as sys.argv (without the first
            element which specified the command itself).
        """
        self.inp_cmd = inp_cmd
        self.parse_cmd(tree)
    def _examine_key(self, key_name, key_val, p, i, option_parsing):
        """ Examine the current matching key

            Extracts information, such as function to execute and command
            options, from the current key (passed to function as 'key_name' and
            'key_val'). Returns the (possibly advanced) element index and the
            updated option_parsing flag.
        """
        # if the element we reached has an executable registered, save it!
        if 'exec' in key_val:
            self.exe = key_val['exec']
        # simple bool options, save value
        if 'type' in key_val and key_val['type'] == 'bool':
            self.exe_options[key_name] = True
        # Elements which take arguments need special attention
        if 'argument' in key_val:
            # is there an argument (the next element)?
            if len(self.inp_cmd) > i+1:
                self.key = { 'argument': key_val['argument'] }
                # there is - save it
                if key_val['type'] == 'option':
                    # if argument is of type multiple, store result in a list
                    if 'multiple' in key_val and key_val['multiple'] == True:
                        if key_name not in self.exe_options:
                            self.exe_options[key_name] = []
                        self.exe_options[key_name].append(self.inp_cmd[i+1])
                    else:
                        self.exe_options[key_name] = self.inp_cmd[i+1]
                else:
                    self.arg = self.inp_cmd[i+1]
                # Validate the argument if possible
                if 'validator' in key_val['argument']:
                    self.key_complete = key_val['argument']['validator'](self.inp_cmd[i+1])
                else:
                    self.key_complete = True
                # if there are sub parameters, add them
                if 'children' in key_val:
                    self.children = key_val['children']
                # If we reached a command without parameters (which
                # should be the end of the command), unset the children
                # dict.
                elif key_val['type'] == 'command':
                    self.children = None
                # if the command is finished (there is an element after the argument) and
                # there is an exec_immediately-function, execute it now
                if 'exec_immediately' in key_val and len(self.inp_cmd) > i+2:
                    key_val['exec_immediately'](self.inp_cmd[i+1], self.exe_options)
                    # clear exe_options as these were options for exec_immediately
                    self.exe_options = {}
                # the argument consumed one extra input element
                i += 1
            else:
                # if there is no next element, let key_complete be true
                # and set children to the option argument
                self.children = { 'argument': key_val['argument'] }
            # remove option from further tab completion as it has been filled in,
            # unless it has the 'multiple' key set, which means it can be filled
            # in multiple times and will return a list of all values
            if option_parsing and p == key_name and key_name in self.children:
                # if multiple, then pass
                if 'multiple' in self.children[key_name] and self.children[key_name]['multiple'] == True:
                    pass
                else:
                    del self.children[key_name]
        # otherwise we are handling a command without arguments
        else:
            # Rest arguments?
            if 'rest_argument' in key_val:
                self._scoop_rest_arguments = True
                self.arg = []
            self.children = key_val.get('children')
        # once an executable has been found, subsequent elements are options
        if self.exe is not None:
            option_parsing = True
        return i, option_parsing
    def parse_cmd(self, tree, inp_cmd = None):
        """ Extract command and options from string.

            The tree argument should contain a specifically formatted dict
            which describes the available commands, options, arguments and
            callbacks to methods for completion of arguments.

            TODO: document dict format

            The inp_cmd argument should contain a list of strings containing
            the complete command to parse, such as sys.argv (without the first
            element which specified the command itself).
        """
        # reset state from previous execution
        self.exe = None
        self.arg = None
        self.exe_options = {}
        self.children = tree['children']
        self.key = tree['children']
        option_parsing = False
        self._scoop_rest_arguments = False
        if inp_cmd is not None:
            self.inp_cmd = inp_cmd
        # iterate the list of inputted commands
        i = 0
        while i < len(self.inp_cmd):
            p = self.inp_cmd[i]
            self.key = {}
            # Find which of the valid commands matches the current element of inp_cmd
            if self.children is not None:
                self.key_complete = False
                match = False
                for param, content in self.children.items():
                    # match string to command; find() == 0 means p is a
                    # prefix of param, which is what enables abbreviations
                    if param.find(p) == 0:
                        self.key[param] = content
                        match = True
                        # If we have an exact match, make sure that
                        # is the only element in self.key
                        if p == param and len(self.inp_cmd) > i+1:
                            self.key_complete = True
                            self.key = { param: content }
                            break
                # if we are in scoop-rest-mode, place elements not matching
                # anything in argument-array
                if not match:
                    if self._scoop_rest_arguments:
                        self.arg.append(p)
                    else:
                        raise InvalidCommand("Invalid argument: " + p)
            else:
                raise InvalidCommand('ran out of parameters; command too long')
            # Note that there are two reasons self.key can contain entries:
            # 1) The current string (p) contained something and matched a param
            # 2) The current string (p) is empty and matches all children
            # If p is empty we don't really have a match but still need to
            # have data in self.key to show all possible completions at this
            # level. Therefore, we skip the command matching stuff when
            # len(p) == 0
            if len(p) != 0 and len(self.key) == 1:
                key, val = list(self.key.items())[0]
                i, option_parsing = self._examine_key(key, val, p, i, option_parsing)
            i += 1
    def complete(self):
        """ Return list of valid completions

            Returns a list of valid completions on the current level in the
            tree. If an element of type 'value' is found, its complete callback
            function is called (if set) with the last input element.
        """
        comp = []
        for k, v in self.key.items():
            # if we have reached a value, try to fetch valid completions
            if v['type'] == 'value':
                if 'complete' in v:
                    comp += v['complete'](self.inp_cmd[-1])
            # otherwise, k is our valid completion
            else:
                comp.append(k)
        return comp
    def next_values(self):
        """ Return list of valid next values

            Like complete(), but works on self.children and calls value
            completers with an empty string (i.e. all possibilities).
        """
        nval = []
        for k, v in self.children.items():
            # if we have reached a value, try to fetch valid completions
            if v['type'] == 'value':
                if 'complete' in v:
                    nval += v['complete']('')
            # otherwise, k is our valid completion
            else:
                nval.append(k)
        return nval
class CommandError(Exception):
    """Base class for all errors raised by the command module."""
    pass
class InvalidCommand(CommandError):
    """Raised when the input cannot be parsed as a valid command."""
    pass
| {
"repo_name": "fredsod/NIPAP",
"path": "nipap-cli/nipap_cli/command.py",
"copies": "4",
"size": "9996",
"license": "mit",
"hash": 5797237855453652000,
"line_mean": 34.0736842105,
"line_max": 105,
"alpha_frac": 0.5423169268,
"autogenerated": false,
"ratio": 4.730714623757691,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.7273031550557691,
"avg_score": null,
"num_lines": null
} |
"""A module for computing regret and social welfare of profiles"""
import itertools
import multiprocessing
import numpy as np
from scipy import optimize
def pure_strategy_deviation_gains(game, prof):
    """Returns the pure strategy deviations gains

    The result is a compact array of deviation gains. Each element corresponds
    to the deviation from strategy i to strategy j ordered by (i, j) for all
    valid deviations."""
    prof = np.asarray(prof, int)
    supp = prof > 0
    # Number of supported strategies per role.
    num_supp = game.role_reduce(supp)
    from_inds = np.arange(game.num_role_strats)[supp]
    # Each supported source strategy can deviate within its own role.
    reps = game.num_strategies[game.role_indices[from_inds]]
    num_devs = np.sum(num_supp * (game.num_strategies - 1))
    # Build target indices: a run of consecutive strategy indices per
    # source, constructed via cumulative sums over a mostly-ones array.
    to_inds = np.ones(reps.sum(), int)
    to_inds[0] = 0
    to_inds[reps[:-1].cumsum()] -= reps[:-1]
    role_inds = (num_supp * game.num_strategies)[:-1].cumsum()
    to_inds[role_inds] += game.num_strategies[:-1]
    to_inds = to_inds.cumsum()
    # Drop self-deviations (target equals source).
    to_inds = to_inds[to_inds != from_inds.repeat(reps)]
    from_inds = from_inds.repeat(reps - 1)
    # Payoff each deviator currently earns at its source strategy.
    pays = game.get_payoffs(prof)[from_inds]
    # One deviating profile per deviation: move one player from -> to.
    dev_profs = prof[None].repeat(num_devs, 0)
    dev_profs[np.arange(num_devs), from_inds] -= 1
    dev_profs[np.arange(num_devs), to_inds] += 1
    dev_pays = np.array([game.get_payoffs(dprof)[to]
                         for dprof, to in zip(dev_profs, to_inds)])
    return dev_pays - pays
def pure_strategy_regret(game, prof):
    """Return the regret of a pure strategy profile, floored at zero.

    Regret is the largest gain any single player could obtain through a
    unilateral deviation; profiles with no profitable deviation get 0.
    """
    gains = pure_strategy_deviation_gains(game, np.asarray(prof, int))
    return max(gains.max(), 0)
def mixture_deviation_gains(game, mix, assume_complete=False):
    """Return every strategy's gain from deviating out of mixture *mix*.

    Result is ordered by role, then strategy. Deviation payoffs may be nan
    where data is missing; strategies outside the mixture's support are
    zeroed before computing role expectations so missing data cannot
    contaminate the role EVs.
    """
    mix = np.asarray(mix, float)
    dev_pays = game.deviation_payoffs(mix, assume_complete=assume_complete)
    supported = dev_pays.copy()
    supported[mix == 0] = 0
    role_pays = game.role_reduce(supported * mix, keepdims=True)
    return dev_pays - role_pays
def mixture_regret(game, mix):
    """Return the regret of a mixture: the largest deviation gain."""
    return mixture_deviation_gains(game, np.asarray(mix, float)).max()
def pure_social_welfare(game, profile):
    """Return the total payoff earned by all players in a pure profile."""
    counts = np.asarray(profile, int)
    return game.get_payoffs(counts).dot(counts)
def mixed_social_welfare(game, mix):
    """Return the expected total payoff of *mix*, summed over players."""
    expected = game.get_expected_payoffs(mix)
    return expected.dot(game.num_players)
class SocialWelfareOptimizer(object):
    """A pickleable callable that searches for a welfare-maximizing mixture

    Uses penalized gradient-based optimization (CG) as a proxy for the
    constrained, nonconvex welfare-maximization problem; simplex
    feasibility is enforced with an increasing quadratic penalty plus a
    final projection."""
    def __init__(self, game, gtol=1e-8):
        self.game = game
        # Per-strategy payoff range used to normalize payoffs into [0, 1].
        self.scale = game.max_payoffs() - game.min_payoffs()
        self.scale[self.scale == 0] = 1  # In case payoffs are the same
        self.offset = game.min_payoffs()
        # Gradient-norm tolerance passed to scipy's CG minimizer.
        self.gtol = gtol
    def obj_func(self, mix, penalty): # pragma: no cover
        # We assume that the initial point is in a constant sum subspace, and
        # so project the gradient so that any gradient step maintains that
        # constant step. Thus, sum to 1 is not one of the penalty terms
        # Because deviation payoffs uses log space, we max with 0 just for the
        # payoff calculation
        ep, ep_jac = self.game.get_expected_payoffs(
            np.maximum(0, mix), assume_complete=True, jacobian=True)
        # Normalize so payoffs are effectively in [0, 1]
        ep = (ep - self.offset) / self.scale
        ep_jac /= self.scale[:, None]
        # Compute normalized negative welfare (minimization)
        welfare = -self.game.num_players.dot(ep)
        dwelfare = -self.game.num_players.dot(ep_jac)
        # Add penalty for negative mixtures
        welfare += penalty * np.sum(np.minimum(mix, 0) ** 2) / 2
        dwelfare += penalty * np.minimum(mix, 0)
        # Project grad so steps stay in the simplex (more or less)
        dwelfare -= self.game.role_repeat(self.game.role_reduce(dwelfare) /
                                          self.game.num_strategies)
        return welfare, dwelfare
    def __call__(self, mix): # pragma: no cover
        # Pass in lambda, and make penalty not a member
        # Returns None if optimization overflowed on every attempt.
        result = None
        penalty = np.sum(self.game.num_players)
        for _ in range(30):
            # First get an unconstrained result from the optimization
            with np.errstate(over='raise', invalid='raise'):
                try:
                    opt = optimize.minimize(
                        lambda m: self.obj_func(m, penalty), mix, method='CG',
                        jac=True, options={'gtol': self.gtol})
                except FloatingPointError: # pragma: no cover
                    penalty *= 2
                    continue
            mix = opt.x
            # Project it onto the simplex, it might not be due to the penalty
            result = self.game.simplex_project(mix)
            if np.allclose(mix, result):
                break
            # Increase constraint penalty
            penalty *= 2
        return result
def max_mixed_social_welfare(game, grid_points=2, random_restarts=0,
                             processes=None, **swopt_args):
    """Returns the maximum role symmetric mixed social welfare profile

    Returns a (welfare, mixture) pair from the best local optimum found.

    Arguments
    ---------
    grid_points : int > 1, optional
        The number of grid points to use for mixture seeds. two implies just
        pure mixtures, more will be denser, but scales exponentially with the
        dimension.
    random_restarts : int, optional
        The number of random initializations.
    processes : int, optional
        Number of processes to use when finding Nash equilibria. The game needs
        to be pickleable.
    """
    # NOTE(review): the trailing '""' below concatenates an empty string
    # onto the assertion message -- likely a stray triple-quote typo.
    assert game.is_complete(), \
        "Max welfare finding only works on complete games"""
    # Seed the optimizer from several deterministic and random mixtures.
    initial_points = list(itertools.chain(
        [game.uniform_mixture()],
        game.grid_mixtures(grid_points),
        game.biased_mixtures(),
        game.role_biased_mixtures(),
        game.random_mixtures(random_restarts)))
    chunksize = len(initial_points) if processes == 1 else 4
    # best is ordered (welfare, seed index, mixture) so max() breaks
    # welfare ties on the seed index.
    best = (-np.inf, -1, None)
    opt = SocialWelfareOptimizer(game, **swopt_args)
    with multiprocessing.Pool(processes) as pool:
        for i, mix in enumerate(pool.imap_unordered(
                opt, initial_points, chunksize=chunksize)):
            welfare = mixed_social_welfare(game, mix)
            best = max(best, (welfare, i, mix))
    return best[0], best[2]
def max_pure_social_welfare(game, by_role=False):
    """Returns the maximum social welfare over the known profiles.

    If by_role is specified, then max social welfare applies to each role
    independently. Returns a (welfare, profile) pair; per-role arrays of
    each when by_role is set."""
    if by_role:
        if game.num_complete_profiles:
            # TODO technically you could have no complete profiles, but full
            # payoff data for all roles
            # Per-role welfare of every profile: payoff*count summed in role.
            welfares = game.role_reduce(game.profiles * game.payoffs)
            prof_inds = np.nanargmax(welfares, 0)
            return (welfares[prof_inds, np.arange(game.num_roles)],
                    game.profiles[prof_inds])
        else:
            # No data: nan welfare and no profile for every role.
            welfares = np.empty(game.num_roles)
            welfares.fill(np.nan)
            profiles = np.empty(game.num_roles, dtype=object)
            profiles.fill(None)
            return welfares, profiles
    else:
        if game.num_complete_profiles:
            # Total welfare of each profile across all strategies.
            welfares = np.sum(game.profiles * game.payoffs, 1)
            prof_ind = np.nanargmax(welfares)
            return welfares[prof_ind], game.profiles[prof_ind]
        else:
            return np.nan, None
| {
"repo_name": "yackj/GameAnalysis",
"path": "gameanalysis/regret.py",
"copies": "1",
"size": "8072",
"license": "apache-2.0",
"hash": 6019105604549840000,
"line_mean": 37.2559241706,
"line_max": 79,
"alpha_frac": 0.6278493558,
"autogenerated": false,
"ratio": 3.664094416704494,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47919437725044944,
"avg_score": null,
"num_lines": null
} |
"""A module for computing regret and social welfare of profiles"""
import numpy as np
def pure_strategy_deviation_pays(game, profile):
    """Returns the pure strategy deviation payoffs

    The result is a compact array of deviation payoffs. Each element
    corresponds to the payoff of deviating to strategy i from strategy j for
    all valid deviations."""
    profile = np.asarray(profile, int)
    pays = game.get_payoffs(profile)
    devs = np.empty(game.num_devs)
    for dev_ind, (from_ind, to_ind) in enumerate(zip(
            game.dev_from_indices, game.dev_to_indices)):
        if profile[from_ind] == 0:
            # Nobody plays the source strategy, so the deviation is
            # undefined; fill with zero.
            devs[dev_ind] = 0
        elif from_ind == to_ind:
            # "Deviating" to the same strategy keeps the current payoff.
            devs[dev_ind] = pays[from_ind]
        else:
            # Move one player from from_ind to to_ind and record what the
            # deviator earns in the resulting profile.
            prof_copy = profile.copy()
            prof_copy[from_ind] -= 1
            prof_copy[to_ind] += 1
            devs[dev_ind] = game.get_payoffs(prof_copy)[to_ind]
    return devs
def pure_strategy_deviation_gains(game, profile):
    """Return deviation payoffs minus each deviator's current payoff."""
    profile = np.asarray(profile, int)
    base_pays = game.get_payoffs(profile).repeat(game.num_strat_devs)
    return pure_strategy_deviation_pays(game, profile) - base_pays
def pure_strategy_regret(game, prof):
    """Return the regret of a pure-strategy profile.

    Nan gains from missing payoff data propagate into the result instead
    of being dropped.
    """
    with np.errstate(invalid='ignore'):  # keep nans
        gains = pure_strategy_deviation_gains(game, np.asarray(prof, int))
        return gains.max()
def mixture_deviation_gains(game, mix):
    """Return the gain of every pure deviation from mixture *mix*.

    Ordered by role, then strategy. Nan payoffs of strategies outside the
    mixture's support are zeroed before forming the per-role expected
    values, so missing data cannot contaminate them.
    """
    mix = np.asarray(mix, float)
    dev_pays = game.deviation_payoffs(mix)
    supported = dev_pays.copy()
    supported[mix == 0] = 0
    role_pays = np.add.reduceat(supported * mix, game.role_starts)
    return dev_pays - role_pays.repeat(game.num_role_strats)
def mixture_regret(game, mix):
    """Return the regret of a mixture: its largest deviation gain."""
    gains = mixture_deviation_gains(game, np.asarray(mix, float))
    return gains.max()
def pure_social_welfare(game, profile):
    """Return total welfare: each strategy's payoff times its player count."""
    prof = np.asarray(profile, int)
    payoffs = game.get_payoffs(prof)
    return payoffs.dot(prof)
def mixed_social_welfare(game, mix):
    """Return the expected social welfare of the mixture *mix*."""
    expected = game.expected_payoffs(mix)
    return expected.dot(game.num_role_players)
def max_pure_social_welfare(game, *, by_role=False):
    """Returns the maximum social welfare over the known profiles.

    If by_role is specified, then max social welfare applies to each role
    independently. If there are no profiles with full payoff data for a role,
    an arbitrary profile will be returned. Returns a (welfare, profile)
    pair; per-role arrays of each when by_role is set."""
    if by_role: # pylint: disable=no-else-return
        if game.num_profiles: # pylint: disable=no-else-return
            # Per-role welfare of every profile: payoff*count summed in role.
            welfares = np.add.reduceat(
                game.profiles() * game.payoffs(), game.role_starts, 1)
            prof_inds = np.nanargmax(welfares, 0)
            return (welfares[prof_inds, np.arange(game.num_roles)],
                    game.profiles()[prof_inds])
        else:
            # No profiles at all: nan welfare and no profile per role.
            welfares = np.full(game.num_roles, np.nan)
            profiles = np.full(game.num_roles, None)
            return welfares, profiles
    else:
        if game.num_complete_profiles: # pylint: disable=no-else-return
            # Total welfare of each profile across all strategies.
            welfares = np.einsum('ij,ij->i', game.profiles(), game.payoffs())
            prof_ind = np.nanargmax(welfares)
            return welfares[prof_ind], game.profiles()[prof_ind]
        else:
            return np.nan, None
| {
"repo_name": "egtaonline/GameAnalysis",
"path": "gameanalysis/regret.py",
"copies": "1",
"size": "3845",
"license": "apache-2.0",
"hash": -7474672418785447000,
"line_mean": 36.6960784314,
"line_max": 79,
"alpha_frac": 0.6520156047,
"autogenerated": false,
"ratio": 3.4986351228389445,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4650650727538944,
"avg_score": null,
"num_lines": null
} |
"""A module for consuming the Penn Dining API"""
import datetime
from .base import WrapperBase
BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/dining/"
V2_BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/dining/v2/?service="

# Legacy (v1) API endpoints.
ENDPOINTS = {
    'MENUS': BASE_URL + 'menus',
    'VENUES': BASE_URL + 'venues',
}

# v2 API endpoints; HOURS/MENUS/ITEMS expect an id appended by the caller.
V2_ENDPOINTS = {
    'VENUES': V2_BASE_URL + 'venues',
    'HOURS': V2_BASE_URL + 'cafes&cafe=',
    'MENUS': V2_BASE_URL + 'menus&cafe=',
    'ITEMS': V2_BASE_URL + 'items&item='
}

# Human-readable names substituted for these venue ids in venues().
VENUE_NAMES = {
    '593': '1920 Commons',
    '636': 'Hill House',
    '637': 'Kings Court English House',
    '638': 'Kosher Dining at Falk'
}
def normalize_weekly(data):
    """Coerce a weekly menu response into a uniform list-based shape.

    The upstream XML-to-JSON conversion collapses single-element lists
    into bare dicts and can omit empty ones entirely; this wraps the
    tblMenu / tblDayPart / tblStation / tblItem levels back into lists so
    consumers can iterate uniformly. Mutates *data* and returns it.
    """
    document = data["result_data"]["Document"]
    if "tblMenu" not in document:
        document["tblMenu"] = []
    if isinstance(document["tblMenu"], dict):
        document["tblMenu"] = [document["tblMenu"]]
    for day in document["tblMenu"]:
        if "tblDayPart" not in day:
            continue
        if isinstance(day["tblDayPart"], dict):
            day["tblDayPart"] = [day["tblDayPart"]]
        for meal in day["tblDayPart"]:
            if isinstance(meal["tblStation"], dict):
                meal["tblStation"] = [meal["tblStation"]]
            for station in meal["tblStation"]:
                if isinstance(station["tblItem"], dict):
                    station["tblItem"] = [station["tblItem"]]
    return data
def _legacy_item(item):
    """Convert one v2 item record into the legacy item dict.

    Folds cor_icon entries into tblAttributes and the last option value
    into tblSide. Note: normalizes item["options"] in place when it is an
    empty list (matching the original behavior).
    """
    legacy = {
        "txtTitle": item["label"],
        "txtPrice": "",
        "txtNutritionInfo": "",
        "txtDescription": item["description"],
        "tblSide": "",
        "tblFarmToFork": "",
    }
    attrs = [{"description": item["cor_icon"][attr]} for attr in item["cor_icon"]]
    if len(attrs) == 1:
        legacy["tblAttributes"] = {"txtAttribute": attrs[0]}
    elif len(attrs) > 1:
        legacy["tblAttributes"] = {"txtAttribute": attrs}
    else:
        legacy["tblAttributes"] = ""
    if isinstance(item["options"], list):
        item["options"] = {}
    if "values" in item["options"]:
        for side in item["options"]["values"]:
            legacy["tblSide"] = {"txtSideName": side["label"]}
    return legacy

def get_meals(v2_response, building_id):
    """Translate a DiningV2 menu response into the legacy meal format.

    Only the first day's entry for *building_id* is converted; items are
    looked up by id in the response's shared item table.
    """
    result_data = v2_response["result_data"]
    item_table = result_data["items"]
    meals = []
    for meal in result_data["days"][0]["cafes"][building_id]["dayparts"][0]:
        stations = []
        for station in meal["stations"]:
            converted = [_legacy_item(item_table[item_id])
                         for item_id in station["items"]]
            stations.append({"tblItem": converted,
                             "txtStationDescription": station["label"]})
        meals.append({"tblStation": stations,
                      "txtDayPartDescription": meal["label"]})
    return meals
class DiningV2(WrapperBase):
    """Thin client for the DiningV2 API endpoints.

    :param bearer: The user code for the API
    :param token: The password code for the API

    Usage::

      >>> from penn import DiningV2
      >>> din = DiningV2('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    """
    def venues(self):
        """Get a list of all venue objects.

        >>> venues = din.venues()
        """
        return self._request(V2_ENDPOINTS['VENUES'])

    def hours(self, venue_id):
        """Get the list of hours for the venue corresponding to venue_id.

        :param venue_id:
            A string representing the id of a venue, e.g. "abc".

        >>> commons_hours = din.hours("593")
        """
        return self._request(V2_ENDPOINTS['HOURS'] + venue_id)

    def menu(self, venue_id, date):
        """Get the menu for the venue corresponding to venue_id, on date.

        :param venue_id:
            A string representing the id of a venue, e.g. "abc".
        :param date:
            A string representing the date of a venue's menu, e.g. "2015-09-20".

        >>> commons_menu = din.menu("593", "2015-09-20")
        """
        return self._request(V2_ENDPOINTS['MENUS'] + venue_id + "&date=" + date)

    def item(self, item_id):
        """Get a description of the food item corresponding to item_id.

        :param item_id:
            A string representing the id of an item, e.g. "3899220".

        >>> tomato_sauce = din.item("3899220")
        """
        return self._request(V2_ENDPOINTS['ITEMS'] + item_id)
class Dining(WrapperBase):
    """Legacy-format dining client, backed by the DiningV2 API.

    :param bearer: The user code for the API
    :param token: The password code for the API

    Usage::

      >>> from penn import Dining
      >>> din = Dining('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    """
    def venues(self):
        """Get a list of all venue objects, normalized to the legacy shape.

        >>> venues = din.venues()
        """
        response = self._request(V2_ENDPOINTS['VENUES'])
        # Normalize `dateHours` to array
        for venue in response["result_data"]["document"]["venue"]:
            # Prefer the locally maintained human-readable venue names.
            if venue.get("id") in VENUE_NAMES:
                venue["name"] = VENUE_NAMES[venue.get("id")]
            if isinstance(venue.get("dateHours"), dict):
                venue["dateHours"] = [venue["dateHours"]]
            if "dateHours" in venue:
                for dh in venue["dateHours"]:
                    # A single meal arrives as a bare dict; wrap it in a list.
                    if isinstance(dh.get("meal"), dict):
                        dh["meal"] = [dh["meal"]]
        return response
    def menu_daily(self, building_id):
        """Get a legacy-format menu object for today for `building_id`.

        :param building_id:
            A string representing the id of a building, e.g. "593".

        >>> commons_today = din.menu_daily("593")
        """
        today = str(datetime.date.today())
        v2_response = DiningV2(self.bearer, self.token).menu(building_id, today)
        response = {'result_data': {'Document': {}}}
        # NOTE(review): '%-m' (unpadded month) is a glibc strftime extension;
        # it fails on Windows -- confirm the deployment target.
        response["result_data"]["Document"]["menudate"] = datetime.datetime.strptime(today, '%Y-%m-%d').strftime('%-m/%d/%Y')
        if building_id in VENUE_NAMES:
            response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
        else:
            # Fall back to the venue name reported by the V2 response.
            response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
        response["result_data"]["Document"]["tblMenu"] = {"tblDayPart": get_meals(v2_response, building_id)}
        return response
    def menu_weekly(self, building_id):
        """Get a legacy-format menu covering the next 7 days for `building_id`.

        :param building_id:
            A string representing the id of a building, e.g. "593".

        >>> commons_week = din.menu_weekly("593")
        """
        din = DiningV2(self.bearer, self.token)
        response = {'result_data': {'Document': {}}}
        days = []
        for i in range(7):
            date = str(datetime.date.today() + datetime.timedelta(days=i))
            v2_response = din.menu(building_id, date)
            # Re-assigned every iteration; all 7 days report the same venue,
            # so the last assignment is equivalent to the first.
            if building_id in VENUE_NAMES:
                response["result_data"]["Document"]["location"] = VENUE_NAMES[building_id]
            else:
                response["result_data"]["Document"]["location"] = v2_response["result_data"]["days"][0]["cafes"][building_id]["name"]
            # NOTE(review): '%-m' is a glibc extension; not portable to Windows.
            formatted_date = datetime.datetime.strptime(date, '%Y-%m-%d').strftime('%-m/%d/%Y')
            days.append({"tblDayPart": get_meals(v2_response, building_id), "menudate": formatted_date})
        response["result_data"]["Document"]["tblMenu"] = days
        return normalize_weekly(response)
| {
"repo_name": "pennlabs/penn-sdk-python",
"path": "penn/dining.py",
"copies": "1",
"size": "8193",
"license": "mit",
"hash": 6292493504977225000,
"line_mean": 36.5825688073,
"line_max": 133,
"alpha_frac": 0.5612107897,
"autogenerated": false,
"ratio": 3.6364846870838883,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.46976954767838885,
"avg_score": null,
"num_lines": null
} |
"""A module for consuming the Penn Registrar API"""
from os import path
import requests
from base import WrapperBase
# Root of the Penn transit open-data service.
BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/transit/"
# Absolute URL for each transit data feed.
ENDPOINTS = {
    'APC': BASE_URL + 'apc',
    'MDT': BASE_URL + 'mdt',
    'TRANSAPC': BASE_URL + 'transapc',
    'STOPINVENTORY': BASE_URL + 'stopinventory',
    'STOPTIMES': BASE_URL + 'stoptimes'
}
class Transit(WrapperBase):
    """The client for Transit. Used to make requests to the API.

    :param bearer: The user code for the API
    :param token: The password code for the API

    Usage::

      >>> from penn.transit import Transit
      >>> trans = Transit('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    """
    def formatDate(self, date):
        """Format a datetime as the 'MM/DD/YYYY HH:MM:SS' string the API expects."""
        return date.strftime("%m/%d/%Y") + " " + date.strftime("%H:%M:%S")

    def _range_params(self, start_date, end_date):
        """Build the start/end query-parameter dict shared by every endpoint."""
        return {
            'start': self.formatDate(start_date),
            'end': self.formatDate(end_date)
        }

    def apc(self, start_date, end_date):
        """Return the APC (automatic passenger counter) data in the date range.

        :param start_date: datetime lower bound of the query.
        :param end_date: datetime upper bound of the query.
        """
        return self._request(ENDPOINTS['APC'],
                             self._range_params(start_date, end_date))

    def mdt(self, start_date, end_date):
        """Return the MDT (mobile data terminal) data in the date range.

        :param start_date: datetime lower bound of the query.
        :param end_date: datetime upper bound of the query.
        """
        return self._request(ENDPOINTS['MDT'],
                             self._range_params(start_date, end_date))

    def transapc(self, start_date, end_date):
        """Return the transapc feed data in the date range.

        :param start_date: datetime lower bound of the query.
        :param end_date: datetime upper bound of the query.
        """
        return self._request(ENDPOINTS['TRANSAPC'],
                             self._range_params(start_date, end_date))

    def stopinventory(self, start_date, end_date):
        """Return the stop-inventory feed data.

        The date parameters are kept for backward compatibility and are
        forwarded to the endpoint as before.
        """
        return self._request(ENDPOINTS['STOPINVENTORY'],
                             self._range_params(start_date, end_date))

    def stoptimes(self, start_date, end_date):
        """Return the stop-times data in the date range.

        :param start_date: datetime lower bound of the query.
        :param end_date: datetime upper bound of the query.
        """
        return self._request(ENDPOINTS['STOPTIMES'],
                             self._range_params(start_date, end_date))
| {
"repo_name": "parkerh/freeclassrooms",
"path": "penn/transit.py",
"copies": "1",
"size": "2640",
"license": "mit",
"hash": 3841105784985343000,
"line_mean": 27.085106383,
"line_max": 77,
"alpha_frac": 0.5598484848,
"autogenerated": false,
"ratio": 3.5246995994659547,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9570609344263385,
"avg_score": 0.002787748000513958,
"num_lines": 94
} |
"""A module for consuming the Penn Registrar API"""
from os import path
import requests
from base import WrapperBase
# Root of the Penn dining open-data service.
BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/dining/"
# Absolute URL for each dining data feed.
ENDPOINTS = {
    'MENUS': BASE_URL + 'menus',
    'VENUES': BASE_URL + 'venues',
}
class Dining(WrapperBase):
    """Client for the dining menu/venue API.

    :param bearer: The user code for the API
    :param token: The password code for the API

    Usage::

      >>> from penn.dining import Dining
      >>> din = Dining('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    """
    def venues(self):
        """Get a list of all venue objects.

        >>> venues = din.venues()
        """
        return self._request(ENDPOINTS['VENUES'])

    def menu_daily(self, building_id):
        """Get a menu object corresponding to the daily menu for the
        venue with building_id.

        :param building_id:
            A string representing the id of a building, e.g. "abc".

        >>> commons_today = din.menu_daily("593")
        """
        url = path.join(ENDPOINTS['MENUS'], 'daily', str(building_id))
        return self._request(url)

    def menu_weekly(self, building_id):
        """Get an array of menu objects corresponding to the weekly menu
        for the venue with building_id.

        :param building_id:
            A string representing the id of a building, e.g. "abc".

        >>> commons_week = din.menu_weekly("593")
        """
        url = path.join(ENDPOINTS['MENUS'], 'weekly', str(building_id))
        return self._request(url)
| {
"repo_name": "parkerh/freeclassrooms",
"path": "penn/dining.py",
"copies": "1",
"size": "1652",
"license": "mit",
"hash": 8990904504336362000,
"line_mean": 27,
"line_max": 91,
"alpha_frac": 0.6047215496,
"autogenerated": false,
"ratio": 3.6792873051224944,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47840088547224946,
"avg_score": null,
"num_lines": null
} |
"""A module for consuming the Penn Transit API"""
from .base import WrapperBase
# Root of the Penn transit open-data service.
BASE_URL = "https://esb.isc-seo.upenn.edu/8091/open_data/transit/"
# Absolute URL for each transit data feed.
ENDPOINTS = {
    'APC': BASE_URL + 'apc',
    'MDT': BASE_URL + 'mdt',
    'TRANSAPC': BASE_URL + 'transapc',
    'STOPINVENTORY': BASE_URL + 'stopinventory',
    'STOPTIMES': BASE_URL + 'stoptimes',
    'PREDICTION': BASE_URL + '511/Prediction',
    'CONFIGURATION': BASE_URL + '511/Configuration'
}
class Transit(WrapperBase):
    """The client for Transit. Used to make requests to the API.

    :param bearer: The user code for the API
    :param token: The password code for the API

    Usage::

      >>> from penn import Transit
      >>> trans = Transit('MY_USERNAME_TOKEN', 'MY_PASSWORD_TOKEN')
    """
    @staticmethod
    def format_date(date):
        """Render a datetime as the 'MM/DD/YYYY HH:MM:SS' string the API expects."""
        return "{} {}".format(date.strftime("%m/%d/%Y"),
                              date.strftime("%H:%M:%S"))

    def apc(self, start_date, end_date):
        """Return all APC data packets in the date range.

        :param start_date: The starting date for the query.
        :param end_date: The end date for the query.

        >>> import datetime
        >>> today = datetime.date.today()
        >>> trans.apc(today - datetime.timedelta(days=1), today)
        """
        window = dict(start=self.format_date(start_date),
                      end=self.format_date(end_date))
        return self._request(ENDPOINTS['APC'], window)

    def mdt(self, start_date, end_date):
        """Return all MDT data packets in the date range.

        :param start_date: The starting date for the query.
        :param end_date: The end date for the query.

        >>> import datetime
        >>> today = datetime.date.today()
        >>> trans.mdt(today - datetime.timedelta(days=1), today)
        """
        window = dict(start=self.format_date(start_date),
                      end=self.format_date(end_date))
        return self._request(ENDPOINTS['MDT'], window)

    def transapc(self, start_date, end_date):
        """Return detail of boardings, alightings, by vehicle and stop,
        including the passenger load leaving the stop (this is only for
        vehicles equipped with APC hardware).

        :param start_date: The starting date for the query.
        :param end_date: The end date for the query.

        >>> import datetime
        >>> today = datetime.date.today()
        >>> trans.transapc(today - datetime.timedelta(days=1), today)
        """
        window = dict(start=self.format_date(start_date),
                      end=self.format_date(end_date))
        return self._request(ENDPOINTS['TRANSAPC'], window)

    def stopinventory(self):
        """Return a list of all transit stops.

        >>> stops = trans.stopinventory()
        """
        return self._request(ENDPOINTS['STOPINVENTORY'])

    def prediction(self):
        """Return route data and time predictions.

        >>> predictions = trans.prediction()
        """
        return self._request(ENDPOINTS['PREDICTION'])

    def configuration(self):
        """Return route configuration info.

        >>> route_config = trans.configuration()
        """
        return self._request(ENDPOINTS['CONFIGURATION'])

    def stoptimes(self, start_date, end_date):
        """Return all stop times in the date range.

        :param start_date: The starting date for the query.
        :param end_date: The end date for the query.

        >>> import datetime
        >>> today = datetime.date.today()
        >>> trans.stoptimes(today - datetime.timedelta(days=1), today)
        """
        window = dict(start=self.format_date(start_date),
                      end=self.format_date(end_date))
        return self._request(ENDPOINTS['STOPTIMES'], window)
| {
"repo_name": "pennlabs/penn-sdk-python",
"path": "penn/transit.py",
"copies": "1",
"size": "4065",
"license": "mit",
"hash": 5326973518133533000,
"line_mean": 30.0305343511,
"line_max": 74,
"alpha_frac": 0.5761377614,
"autogenerated": false,
"ratio": 4.012833168805528,
"config_test": true,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 131
} |
"""A module for creating docstrings for sphinx ``data`` domains."""
import re
import textwrap
# Accumulates (name, value, doc) triples until _parse_docstrings() renders them.
_docstrings_list = []
def add_newdoc(name, value, doc):
    """Append ``_docstrings_list`` with a docstring for `name`.

    Parameters
    ----------
    name : str
        The name of the object.
    value : str
        A string-representation of the object.
    doc : str
        The docstring of the object.
    """
    _docstrings_list.append((name, value, doc))
def _parse_docstrings():
    """Convert all docstrings in ``_docstrings_list`` into a single
    sphinx-legible text block.
    """
    type_list_ret = []
    for name, value, doc in _docstrings_list:
        # Indent the docstring body so it nests under its ``.. data::`` entry.
        s = textwrap.dedent(doc).replace("\n", "\n ")
        # Replace sections by rubrics
        lines = s.split("\n")
        new_lines = []
        indent = ""
        for line in lines:
            # A numpydoc section underline ("----"/"====") follows its title.
            m = re.match(r'^(\s+)[-=]+\s*$', line)
            if m and new_lines:
                # The previous line is the section title; replace the pair
                # with a sphinx directive.
                prev = textwrap.dedent(new_lines.pop())
                if prev == "Examples":
                    # Examples keep doctest formatting, so use a flat rubric.
                    indent = ""
                    new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
                else:
                    # Other sections become indented admonition blocks.
                    indent = 4 * " "
                    new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
                new_lines.append("")
            else:
                new_lines.append(f"{indent}{line}")
        s = "\n".join(new_lines)
        # Done.
        type_list_ret.append(f""".. data:: {name}\n :value: {value}\n {s}""")
    return "\n".join(type_list_ret)
add_newdoc('ArrayLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced into an `~numpy.ndarray`.
Among others this includes the likes of:
* Scalars.
* (Nested) sequences.
* Objects implementing the `~class.__array__` protocol.
See Also
--------
:term:`array_like`:
Any scalar or sequence that can be interpreted as an ndarray.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_array(a: npt.ArrayLike) -> np.ndarray:
... return np.array(a)
""")
add_newdoc('DTypeLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced into a `~numpy.dtype`.
Among others this includes the likes of:
* :class:`type` objects.
* Character codes or the names of :class:`type` objects.
* Objects with the ``.dtype`` attribute.
See Also
--------
:ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
A comprehensive overview of all objects that can be coerced into data types.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
... return np.dtype(d)
""")
_docstrings = _parse_docstrings()
| {
"repo_name": "anntzer/numpy",
"path": "numpy/typing/_add_docstring.py",
"copies": "4",
"size": "2605",
"license": "bsd-3-clause",
"hash": -864408812808923100,
"line_mean": 26.1354166667,
"line_max": 88,
"alpha_frac": 0.5393474088,
"autogenerated": false,
"ratio": 3.847858197932053,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6387205606732054,
"avg_score": null,
"num_lines": null
} |
"""A module for creating docstrings for sphinx ``data`` domains."""
import re
import textwrap
from ._generic_alias import NDArray
# Accumulates (name, value, doc) triples until _parse_docstrings() renders them.
_docstrings_list = []


def add_newdoc(name: str, value: str, doc: str) -> None:
    """Append ``_docstrings_list`` with a docstring for `name`.

    Parameters
    ----------
    name : str
        The name of the object.
    value : str
        A string-representation of the object.
    doc : str
        The docstring of the object.
    """
    entry = (name, value, doc)
    _docstrings_list.append(entry)
def _parse_docstrings() -> str:
    """Convert all docstrings in ``_docstrings_list`` into a single
    sphinx-legible text block.
    """
    type_list_ret = []
    for name, value, doc in _docstrings_list:
        # Indent the docstring body so it nests under its ``.. data::`` entry.
        s = textwrap.dedent(doc).replace("\n", "\n ")
        # Replace sections by rubrics
        lines = s.split("\n")
        new_lines = []
        indent = ""
        for line in lines:
            # A numpydoc section underline ("----"/"====") follows its title.
            m = re.match(r'^(\s+)[-=]+\s*$', line)
            if m and new_lines:
                # The previous line is the section title; replace the pair
                # with a sphinx directive.
                prev = textwrap.dedent(new_lines.pop())
                if prev == "Examples":
                    # Examples keep doctest formatting, so use a flat rubric.
                    indent = ""
                    new_lines.append(f'{m.group(1)}.. rubric:: {prev}')
                else:
                    # Other sections become indented admonition blocks.
                    indent = 4 * " "
                    new_lines.append(f'{m.group(1)}.. admonition:: {prev}')
                new_lines.append("")
            else:
                new_lines.append(f"{indent}{line}")
        s = "\n".join(new_lines)
        # Done.
        type_list_ret.append(f""".. data:: {name}\n :value: {value}\n {s}""")
    return "\n".join(type_list_ret)
add_newdoc('ArrayLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced into an `~numpy.ndarray`.
Among others this includes the likes of:
* Scalars.
* (Nested) sequences.
* Objects implementing the `~class.__array__` protocol.
See Also
--------
:term:`array_like`:
Any scalar or sequence that can be interpreted as an ndarray.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_array(a: npt.ArrayLike) -> np.ndarray:
... return np.array(a)
""")
add_newdoc('DTypeLike', 'typing.Union[...]',
"""
A `~typing.Union` representing objects that can be coerced into a `~numpy.dtype`.
Among others this includes the likes of:
* :class:`type` objects.
* Character codes or the names of :class:`type` objects.
* Objects with the ``.dtype`` attribute.
See Also
--------
:ref:`Specifying and constructing data types <arrays.dtypes.constructing>`
A comprehensive overview of all objects that can be coerced into data types.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> def as_dtype(d: npt.DTypeLike) -> np.dtype:
... return np.dtype(d)
""")
add_newdoc('NDArray', repr(NDArray),
"""
A :term:`generic <generic type>` version of
`np.ndarray[Any, np.dtype[+ScalarType]] <numpy.ndarray>`.
Can be used during runtime for typing arrays with a given dtype
and unspecified shape.
Examples
--------
.. code-block:: python
>>> import numpy as np
>>> import numpy.typing as npt
>>> print(npt.NDArray)
numpy.ndarray[typing.Any, numpy.dtype[+ScalarType]]
>>> print(npt.NDArray[np.float64])
numpy.ndarray[typing.Any, numpy.dtype[numpy.float64]]
>>> NDArrayInt = npt.NDArray[np.int_]
>>> a: NDArrayInt = np.arange(10)
>>> def func(a: npt.ArrayLike) -> npt.NDArray[Any]:
... return np.array(a)
""")
_docstrings = _parse_docstrings()
| {
"repo_name": "numpy/numpy",
"path": "numpy/typing/_add_docstring.py",
"copies": "2",
"size": "3815",
"license": "bsd-3-clause",
"hash": -3897951223405970000,
"line_mean": 25.6783216783,
"line_max": 88,
"alpha_frac": 0.5533420708,
"autogenerated": false,
"ratio": 3.8380281690140845,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0029477855232170544,
"num_lines": 143
} |
"""A module for dealing with BMP bitmap image files."""
def write_grayscale(filename, pixels):
    """Create and write a grayscale BMP file.

    Args:
        filename: The name of the BMP file to be created.
        pixels: A rectangular image stored as a sequence of rows.
            Each row must be an iterable series of integers in the
            range 0-255.

    Raises:
        OSError: If the file couldn't be written.
    """
    # NOTE: the original had the Args/Raises text in a *second* string
    # literal, which is a no-op expression rather than part of the
    # docstring; the two have been merged into one docstring.
    height = len(pixels)
    width = len(pixels[0])

    def int32_le(i):
        # Serialize a value as a 32-bit little-endian field (low 32 bits).
        return (i & 0xFFFFFFFF).to_bytes(4, 'little')

    with open(filename, 'wb') as bmp:
        # --- BMP file header (14 bytes) ---
        bmp.write(b'BM')
        # Remember where the 32-bit little-endian file size belongs;
        # write a placeholder now and patch it once the file is complete.
        size_bookmark = bmp.tell()
        bmp.write(b'\x00\x00\x00\x00')
        bmp.write(b'\x00\x00')  # Reserved 16-bit field - must be zero.
        bmp.write(b'\x00\x00')  # Reserved 16-bit field - must be zero.
        # Placeholder for the offset to the pixel data, patched later.
        pixel_offset_bookmark = bmp.tell()
        bmp.write(b'\x00\x00\x00\x00')

        # --- Image header (BITMAPINFOHEADER, 40 bytes) ---
        bmp.write(b'\x28\x00\x00\x00')  # Header size in bytes - 40 decimal.
        bmp.write(int32_le(width))      # Image width in pixels.
        bmp.write(int32_le(height))     # Image height in pixels.
        bmp.write(b'\x01\x00')          # Number of image planes.
        bmp.write(b'\x08\x00')          # Bits per pixel: 8 for grayscale.
        bmp.write(b'\x00\x00\x00\x00')  # No compression.
        bmp.write(b'\x00\x00\x00\x00')  # Zero for uncompressed images.
        bmp.write(b'\x00\x00\x00\x00')  # Unused pixels per meter.
        bmp.write(b'\x00\x00\x00\x00')  # Unused pixels per meter.
        bmp.write(b'\x00\x00\x00\x00')  # Use whole color table.
        bmp.write(b'\x00\x00\x00\x00')  # All colors are important.

        # Color palette - a linear grayscale (Blue, Green, Red, Zero).
        for c in range(256):
            bmp.write(bytes((c, c, c, 0)))

        # Pixel data - BMP files store rows bottom to top.
        pixel_data_bookmark = bmp.tell()
        for row in reversed(pixels):
            row_data = bytes(row)
            bmp.write(row_data)
            # Each row must be padded to a multiple of four bytes.
            bmp.write(b'\x00' * ((4 - (len(row_data) % 4)) % 4))

        # Patch the file-size and pixel-offset placeholders.
        eof_bookmark = bmp.tell()
        bmp.seek(size_bookmark)
        bmp.write(int32_le(eof_bookmark))
        bmp.seek(pixel_offset_bookmark)
        bmp.write(int32_le(pixel_data_bookmark))
def _int32_to_bytes(i):
    """Convert an integer to four bytes in little-endian format."""
    # Mask to the low 32 bits first so the result always fits in four
    # bytes, matching the bytewise shift-and-mask of the original form.
    return (i & 0xFFFFFFFF).to_bytes(4, 'little')
def _bytes_to_int32(b):
    """Convert a byte object containing four bytes into an integer."""
    # Little-endian: byte k contributes its value shifted left by 8*k bits.
    # Byte values never overlap across shifts, so summing equals OR-ing.
    return sum(b[k] << (8 * k) for k in range(4))
def dimensions(filename):
    """Determine the dimensions in pixels of a BMP image.

    Args:
        filename: The filename of a BMP file.

    Returns:
        A tuple containing two integers with the width and height in pixels.

    Raises:
        ValueError: If the file was not a BMP file.
        OSError: If there was a problem reading the file.
    """
    # NOTE: the original placed the Args/Returns/Raises text in a second,
    # no-op string literal; it is now part of the real docstring.
    with open(filename, 'rb') as f:
        # Every BMP file starts with the two magic bytes 'BM'.
        if f.read(2) != b'BM':
            raise ValueError("{} is not a BMP file".format(filename))
        # Width and height are stored as consecutive 32-bit little-endian
        # integers at byte offset 18.
        f.seek(18)
        width = int.from_bytes(f.read(4), 'little')
        height = int.from_bytes(f.read(4), 'little')
        return (width, height)
| {
"repo_name": "carlb15/Python",
"path": "bmp.py",
"copies": "1",
"size": "4090",
"license": "mit",
"hash": 261625247587111260,
"line_mean": 30.4615384615,
"line_max": 76,
"alpha_frac": 0.5655256724,
"autogenerated": false,
"ratio": 3.513745704467354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4579271376867354,
"avg_score": null,
"num_lines": null
} |
"""A module for dealing with BMP bitmap image files."""
def write_grayscale(filename, pixels):
    """Create and write a grayscale BMP file.

    Args:
        filename: The name of the BMP file to be created.
        pixels: A rectangular image stored as a sequence of rows.
            Each row must be an iterable series of integers in the
            range 0-255.

    Raises:
        OSError: If the file couldn't be written.
    """
    height = len(pixels)
    width = len(pixels[0])

    def int32_le(i):
        # Serialize a value as a 32-bit little-endian field (low 32 bits).
        return (i & 0xFFFFFFFF).to_bytes(4, 'little')

    with open(filename, 'wb') as bmp:
        # BMP file header (14 bytes).
        bmp.write(b'BM')
        size_bookmark = bmp.tell()          # File size goes here as 32-bit
        bmp.write(b'\x00\x00\x00\x00')      # little-endian; placeholder for now.
        bmp.write(b'\x00\x00')              # Reserved - must be zero.
        bmp.write(b'\x00\x00')              # Reserved - must be zero.
        pixel_offset_bookmark = bmp.tell()  # Offset to pixel data goes here;
        bmp.write(b'\x00\x00\x00\x00')      # placeholder patched at the end.

        # Image header (BITMAPINFOHEADER, 40 bytes).
        bmp.write(b'\x28\x00\x00\x00')      # Header size in bytes - 40 decimal.
        bmp.write(int32_le(width))          # Image width in pixels.
        bmp.write(int32_le(height))         # Image height in pixels.
        bmp.write(b'\x01\x00')              # Number of image planes.
        bmp.write(b'\x08\x00')              # Bits per pixel: 8 for grayscale.
        bmp.write(b'\x00\x00\x00\x00')      # No compression.
        bmp.write(b'\x00\x00\x00\x00')      # Zero for uncompressed images.
        bmp.write(b'\x00\x00\x00\x00')      # Unused pixels per meter.
        bmp.write(b'\x00\x00\x00\x00')      # Unused pixels per meter.
        bmp.write(b'\x00\x00\x00\x00')      # Use whole color table.
        bmp.write(b'\x00\x00\x00\x00')      # All colors are important.

        # Color palette - a linear grayscale (Blue, Green, Red, Zero).
        for c in range(256):
            bmp.write(bytes((c, c, c, 0)))

        # Pixel data - BMP files store rows bottom to top.
        pixel_data_bookmark = bmp.tell()
        for row in reversed(pixels):
            row_data = bytes(row)
            bmp.write(row_data)
            # Pad each row to a multiple of four bytes.
            bmp.write(b'\x00' * ((4 - (len(row_data) % 4)) % 4))

        # Patch the file-size and pixel-offset placeholders.
        eof_bookmark = bmp.tell()
        bmp.seek(size_bookmark)
        bmp.write(int32_le(eof_bookmark))
        bmp.seek(pixel_offset_bookmark)
        bmp.write(int32_le(pixel_data_bookmark))
def _int32_to_bytes(i):
    """Convert an integer to four bytes in little-endian format."""
    # Masking to the low 32 bits reproduces the bytewise shift-and-mask
    # of the original formulation for negative and oversized inputs too.
    return (i & 0xFFFFFFFF).to_bytes(4, 'little')
def dimensions(filename):
    """Determine the dimensions in pixels of a BMP image.

    Args:
        filename: The filename of a BMP file.

    Returns:
        A tuple containing two integers with the width
        and height in pixels.

    Raises:
        ValueError: If the file was not a BMP file.
        OSError: If there was a problem reading the file.
    """
    with open(filename, 'rb') as f:
        # Every BMP file starts with the two magic bytes 'BM'.
        magic = f.read(2)
        if magic != b'BM':
            raise ValueError("{} is not a BMP file".format(filename))
        # Width and height are stored as consecutive 32-bit little-endian
        # integers at byte offset 18.
        f.seek(18)
        width = int.from_bytes(f.read(4), 'little')
        height = int.from_bytes(f.read(4), 'little')
        return (width, height)
def _bytes_to_int32(b):
    """Convert a byte object containing four bytes into an integer.

    The bytes are interpreted as an unsigned little-endian 32-bit value.
    The original parenthesization, ``(b[2] << 16 | (b[3] << 24))``, was
    only correct because ``<<`` binds tighter than ``|``; the grouping
    below makes the intent explicit without changing the result.
    """
    return b[0] | (b[1] << 8) | (b[2] << 16) | (b[3] << 24)
| {
"repo_name": "kentoj/python-fundamentals",
"path": "bmp.py",
"copies": "1",
"size": "3588",
"license": "mit",
"hash": 929341682898686500,
"line_mean": 33.1714285714,
"line_max": 95,
"alpha_frac": 0.5847268673,
"autogenerated": false,
"ratio": 3.3160813308687613,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9397590588647085,
"avg_score": 0.0006435219043353129,
"num_lines": 105
} |
"""A module for defining structured request species.
:author: Matthew Gidden <matthew.gidden _at_ gmail.com>
"""
import itertools
import numpy as np
import random
import math
from collections import OrderedDict, defaultdict, Iterable
from cyclopts import tools as cyctools
from cyclopts import cyclopts_io as cycio
from cyclopts import io_tools as io_tools
import cyclopts.exchange_instance as exinst
from cyclopts.problems import ProblemSpecies
from cyclopts.exchange_family import ResourceExchange
from cyclopts.structured_species import data
from cyclopts.structured_species import tools as strtools
def rxtr_commods(kind, fidelity):
    """Return the list of commodities a reactor of `kind` may request at
    the given fuel-cycle fidelity level."""
    commods = [data.Commodities.uox]
    if fidelity > 0:
        commods.extend([data.Commodities.th_mox, data.Commodities.f_mox])
    # Thermal reactors never request f_thox, even at full fidelity.
    if fidelity > 1 and kind != data.Reactors.th:
        commods.append(data.Commodities.f_thox)
    return commods
class Point(strtools.Point):
    """A container class representing a point in parameter space"""

    # Ordered mapping from input parameters to default values and
    # np.dtypes; see the theory manual for further explanation of the
    # parameter names.  (The original held this text in a bare string
    # literal, which is a no-op statement, not documentation.)
    parameters = OrderedDict(sorted({
        "f_rxtr": strtools.Param(0, np.int8),
        "f_fc": strtools.Param(0, np.int8),
        "f_loc": strtools.Param(0, np.int8),
        # use a different tool for more than 4294967295 rxtrs!
        "n_rxtr": strtools.Param(1, np.uint32),
        "r_t_f": strtools.Param(1.0, np.float32),
        "r_th_pu": strtools.Param(0.0, np.float32),
        "r_s_th": strtools.Param(1.0 / 2, np.float32),
        "r_s_mox_uox": strtools.Param(1.0, np.float32),
        "r_s_mox": strtools.Param(1.0 / 2, np.float32),
        "r_s_thox": strtools.Param(1.0 / 2, np.float32),
        "f_mox": strtools.Param(1.0, np.float32),
        "r_inv_proc": strtools.Param(1.0, np.float32),
        # use a different tool for more than 4294967295 regions!
        "n_reg": strtools.Param(10, np.uint32),
        "r_l_c": strtools.Param(1.0, np.float32),
        "seed": strtools.Param(-1.0, np.int64),  # default is negative
        }.items(), key=lambda t: t[0]))

    def __init__(self, d=None):
        """Parameters
        ----------
        d : dict, optional
            a dictionary with key value pairs of parameter name, parameter
            value
        """
        super(Point, self).__init__(d)
        # Seed the module-level RNG for reproducibility; non-positive
        # seeds (including the default) leave the RNG state untouched.
        if self.seed > 0:
            random.seed(self.seed)

    def _parameters(self):
        # Hook used by the strtools.Point base class to discover the schema.
        return Point.parameters
class Reactor(strtools.Reactor):
    """An extension reactor model for Structured Request Species"""
    def __init__(self, kind, point, gids, nids):
        # kind/point drive quantities; gids/nids are id generators
        # (Python 2 style iterators exposing .next()).
        super(Reactor, self).__init__(kind, point)
        req = True
        # Total core fuel quantity for this reactor kind.
        qty = data.fuel_unit * data.core_vol_frac[self.kind]
        # Per-assembly request quantity (n_assems set by the base class).
        self.base_req_qty = qty / self.n_assems
        gid = gids.next()
        # A request group capped at the full core quantity.
        grp = exinst.ExGroup(gid, req, qty)
        grp.AddCap(qty)
        self.group = grp
        self._gen_nodes(point, gid, nids)
    def _gen_nodes(self, point, gid, nids):
        # Build one exclusive request node per assembly per commodity.
        self.nodes = []
        self.commod_to_nodes = defaultdict(list)
        req = True
        excl = True
        for commod in rxtr_commods(self.kind, point.f_fc):
            nreq = self.n_assems
            # account for less mox requests
            if self.kind == data.Reactors.th:
                if commod == data.Commodities.f_mox or \
                        commod == data.Commodities.th_mox:
                    nreq = int(math.ceil(nreq * point.f_mox))
            for i in range(nreq):
                node = exinst.ExNode(nids.next(), gid, req,
                                     self.req_qty(commod), excl)
                self.nodes.append(node)
                self.commod_to_nodes[commod].append(node)
    def req_qty(self, commod):
        # Requested quantity scaled by the commodity's relative density.
        return self.base_req_qty * data.relative_qtys[self.kind][commod]
class Supplier(object):
    """A simplified supplier model for Structured Request Species"""
    def __init__(self, kind, point, gids):
        self.kind = kind
        self.nodes = []
        req = True
        # process then inventory
        rhs = [data.sup_rhs[kind],
               data.sup_rhs[kind] * point.r_inv_proc * strtools.conv_ratio(kind)]
        # Supply group (not a request group), with one capacity per rhs.
        grp = exinst.ExGroup(gids.next(), not req)
        for cap in rhs:
            grp.AddCap(cap)
        self.group = grp
        # Random-ish location assigned by the data module.
        self.loc = data.loc()
    def coeffs(self, qty, enr):
        # Per-unit capacity coefficients, in the same (process, inventory)
        # order as the group's capacities above.
        return [data.converters[self.kind][k](
            qty, enr, data.sup_to_commod[self.kind]) / qty \
            for k in ['proc', 'inv']]
class PathMap(io_tools.PathMap):
    """A simple container class for mapping columns to Hdf5 paths
    implemented for the StructuredRequest problem species"""

    def __init__(self, col):
        super(PathMap, self).__init__(col)

    @property
    def path(self):
        # this is an approx. heuristic, it might need to be updated
        inst = StructuredRequest()
        col = self.col
        if col.startswith('n_') and not col.endswith(('_rxtr', '_reg')):
            tbl = inst.sum_tbl_name
        elif col.endswith(('pref_flow', 'cost_flow')):
            tbl = strtools.pp_tbl_name
        else:
            tbl = inst.param_tbl_name
        return '/'.join([inst.io_prefix, tbl])
class StructuredRequest(ProblemSpecies):
"""A class representing structured request-based exchanges species."""
    # NOTE(review): these instance properties name their receiver `cls`
    # although they are accessed through instances; renaming to `self`
    # would clarify, but is left unchanged here.
    @property
    def family(cls):
        """Returns
        -------
        family : ResourceExchange
            An instance of this species' family
        """
        return ResourceExchange()
    @property
    def name(cls):
        """Returns
        -------
        name : string
            The name of this species
        """
        return 'StructuredRequest'
    @property
    def param_tbl_name(cls):
        """Returns
        -------
        name : string
            The name of parameter space output table
        """
        return 'Points'
    @property
    def sum_tbl_name(cls):
        """Returns
        -------
        name : string
            The name of summary output table
        """
        return 'Summary'
    @property
    def summary_tbls(cls):
        """
        Returns
        -------
        name : list
            A list of cyclopts_io.TblDesc for summary tables.
        """
        # Family-level descriptors plus this species' two tables, all
        # keyed by the 'paramid' column.
        return strtools.tbl_descs(cls.io_prefix) + [
            cycio.TblDesc('/'.join([cls.io_prefix, cls.sum_tbl_name]),
                          'param', 'paramid'),
            cycio.TblDesc('/'.join([cls.io_prefix, cls.param_tbl_name]),
                          'param', 'paramid'),
            ]
    def __init__(self):
        """Initialize id generators, output dtypes, and I/O handles."""
        super(StructuredRequest, self).__init__()
        self.space = None
        self._n_points = None
        # 16 bytes for uuid
        self._param_dtype = np.dtype(
            [('paramid', ('str', 16)), ('family', ('str', 30))] + \
            [(name, param.dtype) for name, param in Point.parameters.items()])
        # Facility-count columns for the summary table.
        facs = ['n_r_th', 'n_r_f_mox', 'n_r_f_thox', 'n_s_uox', 'n_s_th_mox',
                'n_s_f_mox', 'n_s_f_thox']
        self._sum_dtype = np.dtype(
            [('paramid', ('str', 16)), ('family', ('str', 30))] + \
            [(name, np.uint32) for name in facs])
        # Monotonic id generators for nodes, groups, and arcs.
        self.nids = cyctools.Incrementer()
        self.gids = cyctools.Incrementer()
        self.arcids = cyctools.Incrementer()
        self.instid = None
        # I/O handles; populated when tables/groups are registered.
        self.tables = None
        self.groups = None
        self.arc_tbl = None
def register_tables(self, h5file, prefix):
"""Parameters
----------
h5file : PyTables File
the hdf5 file
prefix : string
the absolute path to the group for tables of this species
Returns
-------
tables : list of cyclopts_io.Tables
All tables that could be written to by this species.
"""
return [cycio.Table(h5file, '/'.join([prefix, self.param_tbl_name]),
self._param_dtype),
cycio.Table(h5file, '/'.join([prefix, self.sum_tbl_name]),
self._sum_dtype),
cycio.Table(h5file, '/'.join([prefix, strtools.pp_tbl_name]),
strtools.pp_tbl_dtype),]
def register_groups(self, h5file, prefix):
    """Parameters
    ----------
    h5file : PyTables File
        the hdf5 file
    prefix : string
        the absolute path to the group for tables of this family

    Returns
    -------
    groups : list of cyclopts_io.Groups
        All groups that could be written to by this species.
    """
    # only the arc-IO group is used by this species
    arc_path = '/'.join([prefix, strtools.arc_io_name])
    return [cycio.Group(h5file, arc_path)]
def read_space(self, space_dict):
    """Parameters
    ----------
    space_dict : dict
        A dictionary container resulting from the reading in of a run
        control file
    """
    # keep only known parameters; promote scalars to one-element lists so
    # every entry is iterable
    space = {}
    for key, val in space_dict.items():
        if key not in Point.parameters:
            continue
        space[key] = val if isinstance(val, Iterable) else [val]
    self.space = space
@property
def n_points(self):
    """Returns
    -------
    n : int
        The total number of points in the parameter space
    """
    # delegates counting to cyctools; assumes read_space() has been called
    return cyctools.n_permutations(self.space)
def points(self):
    """Generate every point in this species' parameter space.

    Returns
    -------
    point_generator : generator
        A generator for representation of a point in parameter space to be
        used by this species
    """
    # keys()/values() come from the same un-mutated dict, so their
    # ordering corresponds
    param_names = self.space.keys()
    for combo in cyctools.expand_args(self.space.values()):
        yield Point(dict(zip(param_names, combo)))
def record_point(self, point, param_uuid, io_manager):
    """Record a sampled point into the parameter and summary tables.

    Parameters
    ----------
    point : tuple or other
        A representation of a point in parameter space
    param_uuid : uuid
        The uuid of the point in parameter space
    io_manager : cyclopts_io.IOManager, optional
        IOManager that gives access to tables/groups for writing
    """
    tables = io_manager.tables
    # pad the id out to the table's 16-byte 'paramid' column width.
    # Bug fix: uid was previously computed but never used -- rows wrote
    # the raw bytes.  For standard UUIDs (always 16 bytes) the values are
    # identical, so this only realizes the original padding intent.
    uid = param_uuid.bytes if len(param_uuid.bytes) == 16 \
        else param_uuid.bytes + '\0'
    data = [uid, self.family.name]
    data += [getattr(point, k) for k in Point.parameters.keys()]
    tables[self.param_tbl_name].append_data([tuple(data)])
    data = [uid, self.family.name]
    data += strtools.reactor_breakdown(point)
    # drop the trailing element of the support breakdown; only the
    # per-kind counts belong in the summary dtype
    data += strtools.support_breakdown(point)[:-1]
    tables[self.sum_tbl_name].append_data([tuple(data)])
def _get_reactors(self, point):
    """Build all reactor objects for a point.

    Returns a mapping from reactor kind (data.Reactors.*) to a fixed-size
    numpy object array of Reactor instances; counts come from
    strtools.reactor_breakdown(point).
    """
    n_uox, n_mox, n_thox = strtools.reactor_breakdown(point)
    # np.ndarray with an object buffer yields a typed, fixed-shape array
    # of the Reactor instances
    uox_th_r = np.ndarray(
        shape=(n_uox,),
        buffer=np.array([Reactor(data.Reactors.th, point,
                                 self.gids, self.nids) \
                             for i in range(n_uox)]),
        dtype=Reactor)
    mox_f_r = np.ndarray(
        shape=(n_mox,),
        buffer=np.array([Reactor(data.Reactors.f_mox, point,
                                 self.gids, self.nids) \
                             for i in range(n_mox)]),
        dtype=Reactor)
    thox_f_r = np.ndarray(
        shape=(n_thox,),
        buffer=np.array([Reactor(data.Reactors.f_thox, point,
                                 self.gids, self.nids) \
                             for i in range(n_thox)]),
        dtype=Reactor)
    reactors = {
        data.Reactors.th: uox_th_r,
        data.Reactors.f_mox: mox_f_r,
        data.Reactors.f_thox: thox_f_r,
        }
    return reactors
def _get_suppliers(self, point):
    """Build all supplier objects for a point.

    Returns a mapping from support kind (data.Supports.*) to a fixed-size
    numpy object array of Supplier instances; counts come from
    strtools.support_breakdown(point) (the trailing element is unused here).
    """
    n_uox, n_t_mox, n_f_mox, n_f_thox, _ = strtools.support_breakdown(point)
    uox_s = np.ndarray(
        shape=(n_uox,),
        buffer=np.array([Supplier(data.Supports.uox, point, self.gids) \
                             for i in range(n_uox)]),
        dtype=Supplier)
    mox_th_s = np.ndarray(
        shape=(n_t_mox,),
        buffer=np.array([Supplier(data.Supports.th_mox, point, self.gids) \
                             for i in range(n_t_mox)]),
        dtype=Supplier)
    mox_f_s = np.ndarray(
        shape=(n_f_mox,),
        buffer=np.array([Supplier(data.Supports.f_mox, point, self.gids) \
                             for i in range(n_f_mox)]),
        dtype=Supplier)
    thox_s = np.ndarray(
        shape=(n_f_thox,),
        buffer=np.array([Supplier(data.Supports.f_thox, point, self.gids) \
                             for i in range(n_f_thox)]),
        dtype=Supplier)
    suppliers = {
        data.Supports.uox: uox_s,
        data.Supports.th_mox: mox_th_s,
        data.Supports.f_mox: mox_f_s,
        data.Supports.f_thox: thox_s,
        }
    return suppliers
def _generate_supply(self, point, commod, requester, supplier):
    """Create one supplier node plus one arc for every request node the
    requester has for ``commod``; returns the list of new ExArcs.

    Side effects: appends nodes to ``supplier.nodes``, consumes node/arc
    ids from self.nids/self.arcids, and (when enabled) logs each arc to
    self.arc_tbl.
    """
    r = requester
    s = supplier
    commod_pref = data.rxtr_pref_basis[r.kind][commod]
    loc_pref = strtools.loc_pref(r.loc, s.loc, point.f_loc, point.n_reg)
    # total preference combines commodity and location terms
    pref = commod_pref + loc_pref * point.r_l_c
    rnodes = r.commod_to_nodes[commod]
    arcs = []
    enr = r.enr(commod)
    # req coeffs have full orders take into relative fissile material
    req_coeffs = r.coeffs(commod)
    # sup coeffs act on the quantity of fissile material
    qty = r.req_qty(commod)
    sup_coeffs = s.coeffs(qty, enr)
    for i in range(len(rnodes)):
        req = True
        nid = self.nids.next()
        # supplier-side node (not a request node, hence ``not req``)
        node = exinst.ExNode(nid, s.group.id, not req, qty)
        s.nodes.append(node)
        arcid = self.arcids.next()
        if self.arc_tbl is not None:
            self.arc_tbl.append_data([(arcid, commod, commod_pref, loc_pref)])
        arcs.append(exinst.ExArc(
            arcid,
            rnodes[i].id, req_coeffs,
            nid, sup_coeffs,
            pref))
    return arcs
def _get_arcs(self, point, reactors, suppliers):
    """Generate every arc of the exchange: for each reactor, each
    commodity it trades at this fuel-cycle fidelity, and each supplier of
    that commodity, delegate to _generate_supply() and concatenate the
    per-pair arc lists into a single numpy array."""
    arcs = []
    for r_kind, r_ary in reactors.items():
        for r in r_ary:
            for commod in rxtr_commods(r.kind, point.f_fc):
                for s in suppliers[data.commod_to_sup[commod]]:
                    # supply is a list of ExArcs for this (reactor, supplier) pair
                    supply = self._generate_supply(point, commod, r, s)
                    arcs.append(supply)
    return np.concatenate(arcs)
def gen_inst(self, point, instid=None, io_manager=None):
    """Parameters
    ----------
    point : structured_species.Point
        A representation of a point in parameter space
    instid : uuid
        the id for the instance
    io_manager : cyclopts_io.IOManager, optional
        IOManager that gives access to tables/groups for writing

    Returns
    -------
    inst : tuple of lists of ExGroups, ExNodes, and ExArgs
        A representation of a problem instance to be used by this species'
        family
    """
    # reset id generation
    self.nids = cyctools.Incrementer()
    self.gids = cyctools.Incrementer()
    self.arcids = cyctools.Incrementer()
    self.instid = instid
    # set up IO; everything stays None (no writes) when no manager is given
    self.tables = None if io_manager is None else io_manager.tables
    self.groups = None if io_manager is None else io_manager.groups
    self.arc_tbl = None
    if self.groups is not None:
        # per-instance arc table keyed by the instance uuid
        arc_grp = self.groups[strtools.arc_io_name]
        arc_tbl_path = '/'.join([arc_grp.path,
                                 'id_' + self.instid.hex])
        self.arc_tbl = cycio.Table(arc_grp.h5file, arc_tbl_path, strtools.arc_tbl_dtype)
        self.arc_tbl.cond_create()
    # species objects
    reactors = self._get_reactors(point)
    suppliers = self._get_suppliers(point)
    # create arcs
    arcs = self._get_arcs(point, reactors, suppliers)
    if self.arc_tbl is not None:
        self.arc_tbl.flush()
    # collect nodes
    r_nodes = np.concatenate([x.nodes for ary in reactors.values() \
                                  for x in ary])
    s_nodes = np.concatenate([x.nodes for ary in suppliers.values() \
                                  for x in ary])
    nodes = np.concatenate((r_nodes, s_nodes))
    # collect groups
    r_groups = [x.group for ary in reactors.values() for x in ary]
    s_groups = [x.group for ary in suppliers.values() for x in ary]
    groups = np.concatenate((r_groups, s_groups))
    return groups, nodes, arcs
def post_process(self, instid, solnids, props, io_managers):
    """Perform any post processing on input and output.

    Parameters
    ----------
    instid : UUID
        UUID of the instance to post process
    solnids : tuple of UUIDs
        a collection of solution UUIDs corresponding the instid
    props : tuple, other
        as defined by cyclopts.exchange_family
    io_managers : tuple of cyclopts.cyclopts_io.IOManager
        iomanager from an input file, iomanager from an output file,
        and iomanager from a post-processed file
    """
    # delegate to the shared structured-species post processing routine
    strtools.post_process(instid, solnids, props, io_managers, self.name)
| {
"repo_name": "gidden/cyclopts",
"path": "cyclopts/structured_species/request.py",
"copies": "1",
"size": "18085",
"license": "bsd-3-clause",
"hash": -5246458175752097000,
"line_mean": 35.9836400818,
"line_max": 92,
"alpha_frac": 0.540945535,
"autogenerated": false,
"ratio": 3.786641541038526,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9786627123416396,
"avg_score": 0.008191990524425994,
"num_lines": 489
} |
"""A module for defining structured supply species.
:author: Matthew Gidden <matthew.gidden _at_ gmail.com>
"""
import itertools
import numpy as np
import random
import math
from collections import OrderedDict, defaultdict, Iterable, namedtuple
from cyclopts import tools as cyctools
from cyclopts import cyclopts_io as cycio
from cyclopts import io_tools as io_tools
import cyclopts.exchange_instance as exinst
from cyclopts.problems import ProblemSpecies
from cyclopts.exchange_family import ResourceExchange
from cyclopts.structured_species import data
from cyclopts.structured_species import tools as strtools
from cyclopts.structured_species import request
def commod_to_reqrs(fidelity):
    """Return a mapping of commodities to requesters of those commodities."""
    # minimum fuel-cycle fidelity at which each requester kind participates
    min_fidelities = {
        data.Supports.th_mox: 0,
        data.Supports.f_mox: 1,
        data.Supports.f_thox: 2,
        data.Supports.repo: 0,
    }
    mapping = defaultdict(list)
    for reqr, prefs in data.sup_pref_basis.items():
        if fidelity < min_fidelities[reqr]:
            continue
        for commod in prefs.keys():
            mapping[commod].append(reqr)
    return mapping
class Point(strtools.Point):
    """A container class representing a point in parameter space"""
    """ordered mapping from input parameters to default values and np.dtypes, see
    the theory manual for further explanation of the parameter names"""
    # extends the request species' parameter set with supply-side
    # distribution parameters; the ``+`` of items() relies on Python 2
    # dict.items() returning lists
    parameters = OrderedDict(sorted(request.Point.parameters.items() + {
        "d_th": strtools.Param([0.67, 0.33, 0], (np.float64, 3)),
        "d_f_mox": strtools.Param([0., 0., 1., 0.], (np.float64, 4)),
        "d_f_thox": strtools.Param([0., 0., 0., 1.], (np.float64, 4)),
        "r_repo": strtools.Param(0.1, np.float32),
    }.items(), key=lambda t: t[0]))

    def __init__(self, d=None):
        """Parameters
        ----------
        d : dict, optional
            a dictionary with key value pairs of parameter name, parameter
            value
        """
        super(Point, self).__init__(d)
        # seed the module RNG deterministically when a positive seed is given
        if self.seed > 0:
            random.seed(self.seed)

    def _parameters(self):
        """ordered mapping from input parameters to default values and np.dtypes, see
        the theory manual for further explanation of the parameter names"""
        return Point.parameters
class Reactor(strtools.Reactor):
    """A reactor model extended for the Structured Supply species.

    Adds a per-assembly fuel quantity and factory methods for the
    exchange group/node objects a reactor contributes to an instance.
    """

    def __init__(self, kind, point=None, n_assems=None):
        super(Reactor, self).__init__(kind, point, n_assems)
        # fuel quantity of one assembly for this reactor kind
        core_qty = data.fuel_unit * data.core_vol_frac[self.kind]
        self.assem_qty = core_qty / self.n_assems

    def gen_group(self, gid):
        """Return a new exchange group (supply flag False) capped at one
        assembly's quantity."""
        grp = exinst.ExGroup(gid, False)  # False mirrors the supply flag
        grp.AddCap(self.assem_qty)
        return grp

    def gen_node(self, nid, gid, excl_id):
        """Return a new exclusive exchange node for one assembly."""
        # args: id, group id, supply flag (False), qty, exclusive (True), excl id
        return exinst.ExNode(nid, gid, False, self.assem_qty, True, excl_id)
class Requester(object):
    """A simplified requester model for Structured Supply Species"""

    def __init__(self, kind, gids, nids):
        """Parameters
        ----------
        kind : data.Supports member
            the kind of support facility this requester models
        gids, nids : cyclopts.tools.Incrementer
            group and node id generators (consumed during construction)
        """
        self.kind = kind
        self.req_qty = data.sup_rhs[self.kind]
        gid = gids.next()
        req = True
        self.group = grp = exinst.ExGroup(gid, req, self.req_qty)
        grp.AddCap(self.req_qty)
        if self.kind != data.Supports.repo:
            # non-repository requesters add a second capacity scaled by
            # mean enrichment and relative quantity for their commodity
            commod = data.sup_to_commod[self.kind]
            rxtr = data.sup_to_rxtr[self.kind]
            grp.AddCap(self.req_qty * strtools.mean_enr(rxtr, commod) / 100. \
                           * data.relative_qtys[rxtr][commod])
        self._gen_nodes(gid, nids)
        self.loc = data.loc()

    def _gen_nodes(self, gid, nids):
        # one request node per commodity this requester kind accepts
        self.nodes = []
        self.commod_to_nodes = {}
        req = True
        for commod in data.sup_pref_basis[self.kind].keys():
            nid = nids.next()
            node = exinst.ExNode(nid, gid, req, self.req_qty)
            self.nodes.append(node)
            self.commod_to_nodes[commod] = node

    def coeff(self, enr, rkind, commod):
        """Return the enrichment-scaled coefficient for a reactor kind and
        commodity; repositories have no such coefficient (RuntimeError)."""
        if self.kind == data.Supports.repo:
            raise RuntimeError('Coeff not supported for repos')
        return enr / 100. * data.relative_qtys[rkind][commod]
class PathMap(io_tools.PathMap):
    """Maps a result column to its HDF5 table path for the
    StructuredSupply problem species."""

    def __init__(self, col):
        super(PathMap, self).__init__(col)

    @property
    def path(self):
        """HDF5 path of the table expected to hold this column."""
        # approximate heuristic -- may need updating as tables evolve
        species = StructuredSupply()
        col = self.col
        if col.startswith('n_'):
            tbl = species.sum_tbl_name
        elif col.endswith(('pref_flow', 'cost_flow')):
            tbl = strtools.pp_tbl_name
        else:
            tbl = species.param_tbl_name
        return '/'.join([species.io_prefix, tbl])
class StructuredSupply(ProblemSpecies):
    """A class representing structured supply-based exchanges species."""

    @property
    def family(cls):
        """Returns
        -------
        family : ResourceExchange
            An instance of this species' family
        """
        return ResourceExchange()

    @property
    def name(cls):
        """Returns
        -------
        name : string
            The name of this species
        """
        return 'StructuredSupply'

    @property
    def param_tbl_name(cls):
        """Returns
        -------
        name : string
            The name of parameter space output table
        """
        return 'Points'

    @property
    def sum_tbl_name(cls):
        """Returns
        -------
        name : string
            The name of summary output table
        """
        return 'Summary'

    @property
    def summary_tbls(cls):
        """
        Returns
        -------
        name : list
            A list of cyclopts_io.TblDesc for summary tables.
        """
        return strtools.tbl_descs(cls.io_prefix) + [
            cycio.TblDesc('/'.join([cls.io_prefix, cls.sum_tbl_name]),
                          'param', 'paramid'),
            cycio.TblDesc('/'.join([cls.io_prefix, cls.param_tbl_name]),
                          'param', 'paramid'),
            ]

    @staticmethod
    def pnt_to_realization(point):
        """Returns a realization of a structured supply instance given a point
        in parameter space.

        A realization is a namedtuple of :
        * reqrs: a dictionary of the kind and number of each requester
        * rxtrs: a dictionary of the kind and number of each reactor
        * assem_dists: a dictionary of the kind of reactor to a dictionary
        of Commodity type to the number of assemblies of that Commodity type
        """
        # skip uox support facilities
        reqrs = {data.Supports[i]: n \
                     for i, n in enumerate(strtools.support_breakdown(point)) \
                     if i in data.sup_pref_basis.keys()}
        rxtrs = {data.Reactors[i]: n \
                     for i, n in enumerate(strtools.reactor_breakdown(point))}
        dists = {k: strtools.assembly_breakdown(point, k) \
                     for k in data.Reactors}
        keys = ['n_reqrs', 'n_rxtrs', 'assem_dists']
        return namedtuple('Realization', keys)(reqrs, rxtrs, dists)

    @staticmethod
    def gen_arc(aid, point, commod, rx_node_id, rxtr, reqr, instid=None, arc_tbl=None):
        """generate an arc connecting a reactor's assembly node to the
        matching request node of a requester; optionally logs the arc"""
        commod_pref = data.sup_pref_basis[reqr.kind][commod]
        loc_pref = strtools.loc_pref(rxtr.loc, reqr.loc, point.f_loc, point.n_reg)
        pref = commod_pref + loc_pref * point.r_l_c
        if arc_tbl is not None:
            arc_tbl.append_data([(aid, commod, commod_pref, loc_pref)])
        # unit capacity for total mass constraint first
        rq_coeffs = [1., reqr.coeff(rxtr.enr(commod), rxtr.kind, commod)] \
            if not reqr.kind == data.Supports.repo else [1.]
        arc = exinst.ExArc(aid,
                           reqr.commod_to_nodes[commod].id, rq_coeffs,
                           rx_node_id, [1],
                           pref)
        return arc

    def __init__(self):
        """Initialize an empty StructuredSupply species and build the
        numpy dtypes for its output tables."""
        super(StructuredSupply, self).__init__()
        self.space = None
        self._n_points = None
        # 16 bytes for uuid
        self._param_dtype = np.dtype(
            [('paramid', ('str', 16)), ('family', ('str', 30))] + \
            [(name, param.dtype) for name, param in Point.parameters.items()])
        facs = ['n_r_th', 'n_r_f_mox', 'n_r_f_thox', 'n_s_uox', 'n_s_th_mox',
                'n_s_f_mox', 'n_s_f_thox', 'n_s_repo']
        # parameters whose values are themselves sequences (distributions)
        self.iter_params = ['d_th', 'd_f_mox', 'd_f_thox']
        self._sum_dtype = np.dtype(
            [('paramid', ('str', 16)), ('family', ('str', 30))] + \
            [(name, np.uint32) for name in facs])
        # reset id generation
        self.nids = cyctools.Incrementer()
        self.excl_ids = cyctools.Incrementer()
        self.gids = cyctools.Incrementer()
        self.arcids = cyctools.Incrementer()
        self.instid = None
        self.tables = None
        # default realization is None
        self._rlztn = None

    def register_tables(self, h5file, prefix):
        """Parameters
        ----------
        h5file : PyTables File
            the hdf5 file
        prefix : string
            the absolute path to the group for tables of this species

        Returns
        -------
        tables : list of cyclopts_io.Tables
            All tables that could be written to by this species.
        """
        return [cycio.Table(h5file, '/'.join([prefix, self.param_tbl_name]),
                            self._param_dtype),
                cycio.Table(h5file, '/'.join([prefix, self.sum_tbl_name]),
                            self._sum_dtype),
                cycio.Table(h5file, '/'.join([prefix, strtools.pp_tbl_name]),
                            strtools.pp_tbl_dtype),]

    def register_groups(self, h5file, prefix):
        """Parameters
        ----------
        h5file : PyTables File
            the hdf5 file
        prefix : string
            the absolute path to the group for tables of this family

        Returns
        -------
        groups : list of cyclopts_io.Groups
            All groups that could be written to by this species.
        """
        return [cycio.Group(h5file, '/'.join([prefix, strtools.arc_io_name]))]

    def read_space(self, space_dict):
        """Parameters
        ----------
        space_dict : dict
            A dictionary container resulting from the reading in of a run
            control file
        """
        # keep known parameters only; promote scalars to 1-element lists
        self.space = {k: v if isinstance(v, Iterable) else [v] \
                          for k, v in space_dict.items() \
                          if k in Point.parameters}

    @property
    def n_points(self):
        """Returns
        -------
        n : int
            The total number of points in the parameter space
        """
        # sequence-valued params are counted via iter_keys
        return cyctools.n_permutations(self.space, iter_keys=self.iter_params)

    def points(self):
        """Derived classes must implement this function returning a
        representation of a point in its parameter space to be used by other
        class member functions.

        Returns
        -------
        point_generator : generator
            A generator for representation of a point in parameter space to be
            used by this species
        """
        keys = self.space.keys()
        for k in keys:
            if k in self.iter_params:
                # iterable params must be iterable
                if not cyctools.seq_not_str(self.space[k]):
                    raise RuntimeError('{0} entry must be a Sequence'.format(k))
                # if they are defined as a single value, make them a sequence
                if not cyctools.seq_not_str(self.space[k][0]):
                    self.space[k] = [self.space[k]]
        vals = self.space.values()
        for args in cyctools.expand_args(vals):
            d = {keys[i]: args[i] for i in range(len(args))}
            yield Point(d)

    def record_point(self, point, param_uuid, io_manager):
        """Parameters
        ----------
        point : tuple or other
            A representation of a point in parameter space
        param_uuid : uuid
            The uuid of the point in parameter space
        io_manager : cyclopts_io.IOManager, optional
            IOManager that gives access to tables/groups for writing
        """
        tables = io_manager.tables
        # NOTE(review): uid (the 16-byte-padded id) is computed but never
        # used below; rows write param_uuid.bytes directly -- confirm intent
        uid = param_uuid.bytes if len(param_uuid.bytes) == 16 \
            else param_uuid.bytes + '\0'
        data = [param_uuid.bytes, self.family.name]
        data += [getattr(point, k) for k in Point.parameters.keys()]
        tables[self.param_tbl_name].append_data([tuple(data)])
        data = [param_uuid.bytes, self.family.name]
        data += strtools.reactor_breakdown(point)
        data += strtools.support_breakdown(point)
        tables[self.sum_tbl_name].append_data([tuple(data)])

    def _get_reactors(self):
        # requires self._rlztn to be set
        rkinds = self._rlztn.n_rxtrs.keys()
        # each reactor of a kind gets the total assembly count of that kind
        n_assems = {k: sum(v.values()) \
                        for k, v in self._rlztn.assem_dists.items()}
        gen_ary = lambda kind, num, n_assems: \
            np.ndarray(
            shape=(num,),
            buffer=np.array([Reactor(kind, n_assems=n_assems) \
                                 for i in range(num)]),
            dtype=Reactor)
        return {k: gen_ary(k, self._rlztn.n_rxtrs[k], n_assems[k]) \
                    for k in rkinds}

    def _get_requesters(self):
        # requires self._rlztn to be set
        gen_ary = lambda kind, num: \
            np.ndarray(
            shape=(num,),
            buffer=np.array([Requester(kind, self.gids, self.nids) \
                                 for i in range(num)]),
            dtype=Requester)
        return {k: gen_ary(k, v) for k, v in self._rlztn.n_reqrs.items()}

    def _gen_structure(self, point, reactors, requesters):
        # requires self._rlztn to be set
        # one exclusive group per assembly; one node+arc per candidate requester
        grps, nodes, arcs = [], [], []
        for rx_kind, rx_ary in reactors.items():
            for rxtr in rx_ary:
                for commod, nassems in self._rlztn.assem_dists[rx_kind].items():
                    for i in range(nassems):
                        excl_id = self.excl_ids.next()
                        gid = self.gids.next()
                        grp = rxtr.gen_group(gid)
                        grps.append(grp)
                        for rq_kind in self.commod_to_reqrs[commod]:
                            if rq_kind not in requesters:
                                continue
                            for reqr in requesters[rq_kind]:
                                nid = self.nids.next()
                                node = rxtr.gen_node(nid, gid, excl_id)
                                arc = StructuredSupply.gen_arc(
                                    self.arcids.next(), point, commod,
                                    nid, rxtr, reqr, self.instid, self.arc_tbl)
                                nodes.append(node)
                                arcs.append(arc)
        return grps, nodes, arcs

    def gen_inst(self, point, instid=None, io_manager=None, reset_rlztn=True):
        """Parameters
        ----------
        point : structured_species.Point
            A representation of a point in parameter space
        instid : uuid, optional
            the id for the instance
        io_manager : cyclopts_io.IOManager, optional
            IOManager that gives access to tables/groups for writing
        reset_rltzn : bool, optional
            Reset the internal realization

        Returns
        -------
        inst : tuple of lists of ExGroups, ExNodes, and ExArgs
            A representation of a problem instance to be used by this species'
            family
        """
        # reset id generation
        self.nids = cyctools.Incrementer()
        self.excl_ids = cyctools.Incrementer()
        self.gids = cyctools.Incrementer()
        self.arcids = cyctools.Incrementer()
        self.instid = instid
        # set up IO; all handles stay None when no manager is given
        self.tables = None if io_manager is None else io_manager.tables
        self.groups = None if io_manager is None else io_manager.groups
        self.arc_tbl = None
        if self.groups is not None:
            arc_grp = self.groups[strtools.arc_io_name]
            arc_tbl_path = '/'.join([arc_grp.path,
                                     'id_' + self.instid.hex])
            self.arc_tbl = cycio.Table(arc_grp.h5file, arc_tbl_path, strtools.arc_tbl_dtype)
            self.arc_tbl.cond_create()
        self.commod_to_reqrs = commod_to_reqrs(point.f_fc)
        # species objects
        if self._rlztn is None or reset_rlztn:
            # this could have been set before calling gen_inst, e.g., for
            # testing
            self._rlztn = StructuredSupply.pnt_to_realization(point)
        reactors = self._get_reactors()
        requesters = self._get_requesters()
        # structure
        rx_groups, rx_nodes, arcs = self._gen_structure(point, reactors, requesters)
        if self.arc_tbl is not None:
            self.arc_tbl.flush()
        # combine groups, nodes
        groups = np.concatenate(
            (rx_groups,
             [x.group for ary in requesters.values() for x in ary]))
        nodes = np.concatenate(
            (rx_nodes,
             [n for ary in requesters.values() for x in ary for n in x.nodes]))
        return groups, nodes, arcs

    def post_process(self, instid, solnids, props, io_managers):
        """Perform any post processing on input and output.

        Parameters
        ----------
        instid : UUID
            UUID of the instance to post process
        solnids : tuple of UUIDs
            a collection of solution UUIDs corresponding the instid
        props : tuple, other
            as defined by cyclopts.exchange_family
        io_managers : tuple of cyclopts.cyclopts_io.IOManager
            iomanager from an input file, iomanager from an output file,
            and iomanager from a post-processed file
        """
        strtools.post_process(instid, solnids, props, io_managers, self.name)
| {
"repo_name": "gidden/cyclopts",
"path": "cyclopts/structured_species/supply.py",
"copies": "1",
"size": "18552",
"license": "bsd-3-clause",
"hash": -1422654497562031600,
"line_mean": 37.4099378882,
"line_max": 92,
"alpha_frac": 0.5495903407,
"autogenerated": false,
"ratio": 3.860978147762747,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9869745724004462,
"avg_score": 0.008164552891656917,
"num_lines": 483
} |
"""A module for displaying tabular data to humans.
This is currently a work in progress.
Example:
--------
from ... import table
...
t = t.Table(padding=1)
t.SetColumns(['Country', 'Total Population', 'Populous Cities'])
t.AppendRow(['China', '1,354,040,000',
['Shanghai', 'Beijing', 'Tianjin', 'Guangzhou']])
t.AppendRow(['India', '1,210,569,573',
['Mumbai', 'Delhi', 'Bangalore', 'Hyderabad']])
t.Write()
The snippet above will print the following table to stdout:
+---------+------------------+-----------------+
| Country | Total Population | Populous Cities |
+---------+------------------+-----------------+
| China | 1,354,040,000 | Shanghai |
| | | Beijing |
| | | Tianjin |
| | | Guangzhou |
+---------+------------------+-----------------+
| India | 1,210,569,573 | Mumbai |
| | | Delhi |
| | | Bangalore |
| | | Hyderabad |
+---------+------------------+-----------------+
It's also possible to get a detailed format by using the DetailedTable
class:
+------------+---------------+
| Country | China |
| Population | 1,354,040,000 |
| Cities | Shanghai |
| | Beijing |
| | Tianjin |
| | Guangzhou |
+------------+---------------+
| Country | India |
| Population | 1,210,569,573 |
| Cities | Mumbai |
| | Delhi |
| | Bangalore |
| | Hyderabad |
+------------+---------------+
"""
import csv
import itertools
import numbers
import os
import re
import StringIO
import subprocess
import sys
import textwrap
__all__ = [
'Alignment',
'Format',
'Column',
'Table',
'DetailedTable',
'Csv',
'CreateTable',
]
# The absolute minimum width that will be allocated to cells.
_MIN_CELL_WIDTH = 5
# Control characters that need to be escaped before they are printed.
_CONTROL_CHARS = set(unichr(c) for c in range(32) + [127])
def _GetTerminalWidth():
"""Returns the terminal width or None if width cannot be determined."""
if sys.platform == 'win32':
try:
# Redirect stderr to stdout which is ignored anyway if cmd or mode fail.
output = subprocess.check_output(['cmd', '/R', 'mode', 'con:'],
stderr=subprocess.STDOUT)
# The second integer value is the console window width. Literal strings
# are avoided in the parse in case they are localized.
width = int(re.sub(r'\D+\d+\D+(\d+).*', r'\1', output,
count=1, flags=re.DOTALL))
return width
except BaseException:
pass
else:
try:
# Redirect stderr to stdout which is ignored anyway if stty fails.
output = subprocess.check_output(['stty', 'size'],
stderr=subprocess.STDOUT)
width = int(output.split()[1])
return width
except BaseException:
pass
# ``stty size'' is non-standard -- try ``stty -a'' and hope its not
# localized.
try:
# Redirect stderr to stdout which is ignored anyway if stty fails.
output = subprocess.check_output(['stty', '-a'], stderr=subprocess.STDOUT)
width = int(re.sub(r'.*columns *(\d+).*', r'\1', output,
count=1, flags=re.DOTALL))
return width
except BaseException:
pass
# Native commands failed, default to COLUMNS.
return os.environ.get('COLUMNS', None)
class Alignment(object):
    """Alignment policies for columns.

    LEFT, CENTER, and RIGHT are self-explanatory.  AUTO will right-align
    numerical values and left-align everything else.  Alignment does not
    have an effect on the CSV format.
    """
    LEFT = 'left'
    CENTER = 'center'
    RIGHT = 'right'
    AUTO = 'auto'
    POLICIES = [LEFT, CENTER, RIGHT, AUTO]
class Format(object):
    """Defines the available table formats."""
    TABLE = 'table'
    DETAILED = 'detailed'
    CSV = 'csv'
class Column(object):
    """A class for representing a table column."""

    def __init__(self, name, priority=0, alignment=Alignment.AUTO):
        """Returns a new column descriptor.

        Args:
          name: The name of the column.
          priority: A numerical value that defines this column's
            priority. A higher number means a higher priority.
            Priorities are relative. When there is a terminal column
            length constraint that cannot be met by reducing column
            widths, columns with lower priorities are dropped.
            Priorities are ignored for the CSV format.
          alignment: The alignment policy. See Alignment for more
            details.
        """
        self.__name = name
        self.__priority = priority
        self.__alignment = alignment

    @property
    def name(self):
        return self.__name

    @property
    def priority(self):
        return self.__priority

    @property
    def alignment(self):
        return self.__alignment

    def __eq__(self, other):
        # Equality compares all three descriptor fields.
        return (self.name == other.name and
                self.priority == other.priority and
                self.alignment == other.alignment)

    def __ne__(self, other):
        return not self.__eq__(other)

    # Ordering comparisons are by priority only.
    def __lt__(self, other):
        return self.priority < other.priority

    def __le__(self, other):
        return self.priority <= other.priority

    def __gt__(self, other):
        return self.priority > other.priority

    def __ge__(self, other):
        return self.priority >= other.priority

    def __repr__(self):
        # Reconstructs the constructor call from __init__'s argument names.
        # Bug fix: use __code__ (the portable alias, available since Python
        # 2.6 and the only spelling on Python 3) instead of the Python-2-only
        # func_code attribute.  Behavior is identical on Python 2.
        args = ', '.join(
            arg + '=' + str(repr(getattr(self, arg)))
            for arg in self.__init__.__code__.co_varnames[1:])
        return '{0}({1})'.format(self.__class__.__name__, args)
class _TabularData(object):
    """A base class for holding tabular data.

    This class takes care of holding the tabular data. Subclasses are
    responsible for implementing a Write() method which will display the
    tabular data.
    """

    def __init__(self):
        """Constructs a new _TabularData object."""
        # columns are set exactly once via SetColumns(); rows accumulate
        self.__cols = None
        self.__rows = []

    @property
    def columns(self):
        """Returns the normalized columns."""
        if self.__cols is None:
            raise ValueError('SetColumns() must be called before accessing columns.')
        return tuple(self.__cols)

    @property
    def rows(self):
        """Returns an immutable copy of the rows."""
        return tuple(self.__rows)

    def SetColumns(self, cols):
        """Sets the columns. This method must be called exactly once.

        Args:
          cols: A list of columns. Each element can either be a string
            representing the column's name or an instance of Column.
            Strings are promoted to Column.

        Raises:
          ValueError: If the cols preconditions are violated or if SetColumns()
            has already been called.
        """
        def Normalize(col):
            # promote bare strings to Column descriptors
            if isinstance(col, basestring):
                return Column(col)
            elif isinstance(col, Column):
                return col
            raise ValueError(
                'Columns must be strings or instances of Column. Received: {0}'
                .format(col))
        if self.__cols is not None:
            raise ValueError('The header has already been set.')
        self.__cols = [Normalize(col) for col in cols]

    def AppendRow(self, row):
        """Appends a single row to the table.

        Args:
          row: A list of row values. Elements other than lists and dicts
            are serialized to strings using the built-in str(). For CSV
            output, lists and dicts are also converted to strings. For
            non-CSV output, each list element is converted into a string
            and printed on its own line inside the cell. Similarly, each
            dict mapping is output as 'key: value' on its own line.

        Raises:
          ValueError: If SetColumns() has not been called or if len(row) >
            the number of columns.
        """
        if self.__cols is None:
            raise ValueError('SetColumns() must be called before appending rows.')
        row = tuple(row)
        if len(row) > len(self.columns):
            raise ValueError(
                'Expected length of row ({0}) to be <= the number of columns ({1})'
                .format(len(row), len(self.columns)))
        # Pads the right side of the row with Nones until the length of
        # the row is equal to the length of the columns. It's useful to do
        # this now because jagged tables are hard to deal with for
        # subclasses.
        row = tuple(itertools.chain(
            row,
            (None for _ in xrange(len(self.columns) - len(row)))))
        # Normalization is left for the subclasses since different
        # subclasses will have different normalization semantics.
        self.__rows.append(row)

    def AppendRows(self, rows):
        """Appends many rows to the table.

        The semantics of how each row is handled is similar to
        AppendRow().

        Args:
          rows: A list of rows.
        """
        for row in rows:
            self.AppendRow(row)

    def Write(self, out=None):
        """Writes the table to out.

        Assumes SetColumns() has been called.

        Args:
          out: Any object with a write() method. The output is
            written to this object. If None, sys.stdout is used.
        """
        raise NotImplementedError('Write() should be implemented by subclasses.')

    @staticmethod
    def _Stringify(value):
        """Formats the given value so it's appropriate for inclusion in a table.

        The given value is coerced to a unicode and all control characters
        are escaped. For example, '\n' is transformed to '\\n'. '\0' is
        transformed to '\\x00'.

        Args:
          value: The value to transform. This can be any type.

        Returns:
          A unicode string transformed according to the rules above.
        """
        # only characters in _CONTROL_CHARS are escaped; the rest pass through
        return u''.join(
            c.encode('unicode_escape') if c in _CONTROL_CHARS else c
            for c in unicode(value))
class _Cell(object):
"""A single cell for the tabular display formats.
This class holds the data associated with a cell and provides
functionality for outputting the cell into the "tabular" table
formats.
A cell can span multiple lines due to print width limitations or
data requirements (list of items or dicts). Most of the logic here
is for dealing with multi-line cells.
"""
__STRING_ALIGNMENT_METHODS = {
Alignment.LEFT: 'ljust',
Alignment.CENTER: 'center',
Alignment.RIGHT: 'rjust',
}
def __init__(self, data, alignment=None):
    """Constructs a new _Cell.

    Args:
      data: The data that should be displayed in this cell.
        NOTE(review): _UpdateDimensions() treats this as a sequence of
        lines (len() is the height, max line length the width), while the
        class docstring example passes a raw string -- confirm the
        expected input type.
      alignment: The alignment rule for this cell.
    """
    # keep the original data so AdjustWidth() can re-wrap from scratch
    self.__original_data = data
    self.__data = data
    self.__alignment = alignment
    self._UpdateDimensions()
def _UpdateDimensions(self):
    """Computes and sets the height and width for this cell."""
    # height = number of lines; width = longest line (0 for empty data)
    self.__height = len(self.__data)
    self.__width = max(len(line) for line in self.__data) if self.__data else 0

@property
def data(self):
    # immutable view of the cell's lines
    return tuple(self.__data)

@property
def height(self):
    return self.__height

@property
def width(self):
    return self.__width

@property
def alignment(self):
    return self.__alignment

@property
def is_numeric(self):
    # NOTE(review): __is_numeric is never assigned anywhere in the visible
    # code (__init__ does not set it), so accessing this property raises
    # AttributeError -- confirm intended initialization.
    return self.__is_numeric
def EmitLine(self, line, width, out):
"""Writes one line of this cell's data to out.
Examples:
>>> import StringIO
>>> cell = _Cell('hello\nworld')
>>> out = StringIO.StringIO()
>>> cell.EmitLine(0, 10, out)
>>> out.getvalue()
'hello '
>>> out = StringIO.StringIO()
>>> cell.EmitLine(1, 10, out)
>>> out.getvalue()
'world '
>>> out = StringIO.StringIO()
>>> cell.EmitLine(1, 5, out)
>>> out.getvalue()
'world'
>>> out = StringIO.StringIO()
>>> cell.EmitLine(2, 10, out)
>>> out.getvalue()
' '
Args:
line: An index into data. data[line] will be written to out. If
line >= len(data), data[line] will be assumed to be the empty
string.
width: The space allocated to this cell.
out: An object with a write() method.
Raises:
ValueError: If any of the parameters are non-sane values (e.g.,
negative width).
"""
if line < 0:
raise ValueError('line must be non-negative: {0}'.format(line))
if line < len(self.data):
value_at_line = self.data[line]
else:
value_at_line = ''
if len(value_at_line) > width:
raise ValueError(
'Line {0} of {1} does not fit in width {2}. '
'Given width must be >= the width of the cell.'
.format(line, repr(self.data), width))
out.write(self.Align(value_at_line, width, alignment=self.alignment))
def Align(self, string, width, alignment=Alignment.LEFT):
"""Returns the given string aligned in the allotted space."""
if alignment is None:
alignment = Alignment.LEFT
alignment_method = self.__STRING_ALIGNMENT_METHODS.get(alignment)
if not alignment_method:
raise ValueError(
'Alignment value must be one of {{{0}}}; Received: {1}.'.format(
', '.join(sorted(self.__STRING_ALIGNMENT_METHODS)),
alignment))
return getattr(string, alignment_method)(width)
def AdjustWidth(self, allotted_width):
"""Shrinks the width of this cell.
Args:
allotted_width: The new width. All the lines in the cell will
coerced into having lengths that are <= allotted_width.
"""
self.__data = []
for line in self.__original_data:
if len(line) > allotted_width:
self.__data += textwrap.wrap(line, allotted_width)
else:
self.__data.append(line)
self._UpdateDimensions()
class _TableBase(_TabularData):
  """Base class for the human-readable table formats.

  Handles terminal-width discovery, per-column width allocation under an
  optional total-width constraint, padding, and normalization of raw
  values into _Cell instances.
  """

  def __init__(self, width=None, padding=1,
               get_terminal_width_fn=_GetTerminalWidth):
    """Creates a new Table.

    Args:
      width: The maximum width that the table should occupy. If
        non-positive, no width constraint will be exacted. If None and
        the output is destined for a tty device, get_terminal_width_fn
        will be invoked to figure out the terminal's width.
      padding: The amount of whitespace to add before and after each
        cell value.
      get_terminal_width_fn: A function that can return the terminal's
        width if the output is destined for a tty device. This argument
        is used for testing and should not be set by the client.

    Raises:
      ValueError: If padding is negative.
    """
    super(_TableBase, self).__init__()
    self.__width = width
    self.__get_terminal_width_fn = get_terminal_width_fn
    if padding < 0:
      raise ValueError('padding must be non-negative. Received: {0}'
                       .format(padding))
    self.__padding = padding

  def _UpdateWidth(self, out):
    """Updates the allotted table width, if necessary.

    If no explicit width was specified and the output destination is a
    tty device, this method will attempt to discover the width of the
    device and, if successful, will overwrite the width with the
    device's width.

    Args:
      out: A file-like object to which the table is written. If out
        represents a tty device, it is expected that it will have an
        'isatty' method that returns True.
    """
    if self.__width is not None:
      return
    isatty_method = getattr(out, 'isatty', None)
    if isatty_method is None:
      return
    if isatty_method() and self.__get_terminal_width_fn is not None:
      self.__width = self.__get_terminal_width_fn()

  @property
  def has_width_constraint(self):
    # A non-positive width means "unconstrained".
    return self.__width is not None and self.__width > 0

  @property
  def padding(self):
    return self.__padding

  @staticmethod
  def _MakeWidthMatrix(cell_matrix):
    """Calculates the width for each cell in cell_matrix.

    Returns one list per column, each containing that column's cell
    widths (one entry per row).
    """
    width_matrix = []
    for row in cell_matrix:
      for i, cell in enumerate(row):
        if i == len(width_matrix):
          width_matrix.append([cell.width])
        else:
          width_matrix[i].append(cell.width)
    return width_matrix

  def _CalculateColumnWidths(self, cell_matrix, num_columns, percentile=1):
    """Calculates the amount of characters each column can have.

    If a width constraint is specified by the client, this method will
    attempt to find a "fair" allocation of widths for the columns that
    meets the constraint (best-effort). If no width constraint is
    specified, this method simply returns the widths of the maximum
    cells in each column.

    Args:
      cell_matrix: A list where each element is a list corresponding to
        a single row of data to be printed.
      num_columns: The number of columns for the final table (2 for the
        detailed table, where the left column holds headers and the
        right holds values).
      percentile: A number in [0.0, 1.0] used to pick a "representative"
        width from each column's sorted cell widths; values near 0.5
        keep a few very long cells from skewing the calculations.

    Raises:
      ValueError: If the percentile is not in [0.0, 1.0].

    Returns:
      A list where the element at index i specifies the amount of
      characters the content of column i can have. Content does not
      include the padding or cell separators ('|').
    """
    if percentile < 0 or percentile > 1:
      raise ValueError('percentile must be in range [0.0, 1.0]; received: {0}'
                       .format(percentile))
    width_matrix = self._MakeWidthMatrix(cell_matrix)
    # The maximum content widths for all the columns — the ideal widths
    # in the absence of width constraints.
    ideal_col_widths = [max(widths) for widths in width_matrix]
    # If no width constraints exist, returns the ideal widths.
    if not self.has_width_constraint:
      return ideal_col_widths
    # TODO(user): Add logic to degrade padding if necessary. (Or
    # maybe even make padding not be configurable and always use 1?)
    # Selects the representative content widths at the given percentile.
    widths = []
    for column_widths in width_matrix:
      column_widths.sort()
      index = int(percentile * (len(column_widths) - 1))
      # NOTE(review): min() CAPS each representative width at
      # _MIN_CELL_WIDTH; max() (a floor) may have been intended here —
      # confirm before changing.
      widths.append(min(column_widths[index], _MIN_CELL_WIDTH))
    total = sum(widths)  # The total content width at the given percentile.
    normalized_widths = [float(width) / total for width in widths]
    # The amount of width available for content: the overall width minus
    # padding and '|' separators, capped at the ideal total.
    width_budget = min(
        self.__width - (num_columns * (2 * self.padding + 1) + 1),
        sum(ideal_col_widths))
    allowances = [max(int(width_budget * allowance), _MIN_CELL_WIDTH)
                  for allowance in normalized_widths]
    allowances = [min(allowance, ideal_width) for allowance, ideal_width
                  in zip(allowances, ideal_col_widths)]
    # Due to errors arising from casting floats to ints, we could end up
    # with unallocated characters. Sprinkle any leftovers onto columns
    # that want extra space, one character at a time, round-robin style.
    unallocated = width_budget - sum(allowances)
    while unallocated > 0:
      for i, allowance in enumerate(allowances):
        if unallocated <= 0:
          break
        wanted = ideal_col_widths[i] - allowance
        if wanted > 0:
          unallocated -= 1
          allowances[i] += 1
    return allowances

  def _EmitSeparator(self, widths, out):
    """Writes the separator between two rows.

    A separator looks like: '+-----+----+----+\n'

    Args:
      widths: A list containing the widths of the columns.
      out: A file-like object with a write() method.
    """
    out.write('+')
    for width in widths:
      # Each segment covers the cell content plus its padding.
      out.write('-' * (2 * self.padding + width))
      out.write('+')
    out.write('\n')

  def _EmitPadding(self, out):
    """Writes padding to out."""
    out.write(' ' * self.padding)

  @staticmethod
  def _AdjustCellWidths(cell_matrix, widths):
    """Shrinks all cells in cell_matrix based on values in widths."""
    for row in cell_matrix:
      for cell, allotted_width in zip(row, widths):
        cell.AdjustWidth(allotted_width)

  @staticmethod
  def _IsAssociativeList(data):
    """Returns True if data is a dict, or a sequence accepted by dict()."""
    if isinstance(data, dict):
      return True
    elif isinstance(data, (list, tuple)):
      try:
        dict(data)
        return True
      except BaseException:
        # Deliberately broad: any failure just means "not associative".
        pass
    return False

  @staticmethod
  def _IsNumeric(data):
    """Returns True if data is numeric (convertible to float)."""
    try:
      float(data)
      return True
    except BaseException:
      return False

  def _MakeCell(self, data, alignment):
    """Returns a new _Cell for the given data.

    Normalizes data into a tuple of display lines: None becomes no
    lines; numbers and strings become their (line-split) string forms;
    dicts and key/value pair sequences become 'key: value' lines; other
    lists/tuples become one line per item.

    Args:
      data: The data for the cell.
      alignment: The alignment policy. AUTO resolves to RIGHT for
        numeric data and LEFT for everything else.

    Returns:
      A _Cell holding the normalized lines.

    Raises:
      ValueError: If the data type is not supported.
    """
    normalized_data = None
    if data is None:
      normalized_data = tuple()
    elif isinstance(data, numbers.Number):
      normalized_data = (self._Stringify(data),)
    elif isinstance(data, basestring):
      normalized_data = tuple(data.splitlines())
    elif self._IsAssociativeList(data):
      # Sorts the dictionary, so we get consistent results across
      # different versions of Python.
      if isinstance(data, dict):
        data = sorted(data.iteritems())
      normalized_data = tuple(
          self._Stringify(key) + ': ' + self._Stringify(value)
          for key, value in data)
    elif isinstance(data, (list, tuple)):
      normalized_data = tuple(self._Stringify(item) for item in data)
    if normalized_data is None:
      # We have failed to identify the value as a supported type.
      raise ValueError(
          'Unexpected data type. Type: {0}; value: {1}; '
          'one of numbers.Number, basestring, list, tuple, dict is required.'
          .format(type(data), data))
    if alignment == Alignment.AUTO:
      alignment = Alignment.RIGHT if self._IsNumeric(data) else Alignment.LEFT
    return _Cell(normalized_data, alignment=alignment)
class Table(_TableBase):
  """A class that can be used for displaying tabular data.

  This class can produce tables like the following:

    +------+-------------+--------------+------------+------+
    | Rank | Country     | Capital City | Population | Year |
    +------+-------------+--------------+------------+------+
    | 1    | Japan       | Tokyo        | 13,189,000 | 2011 |
    +------+-------------+--------------+------------+------+
    | 2    | Russia      | Moscow       | 11,541,000 | 2011 |
    +------+-------------+--------------+------------+------+
  """

  def _MakeCellMatrix(self):
    """Creates a matrix containing the column headers and rows as _Cells.

    The result is placed in the property __cell_matrix.
    """
    self.__cell_matrix = []
    # The first row of the matrix holds the column headers.
    cells = []
    for col in self.columns:
      cells.append(self._MakeCell(col.name, alignment=col.alignment))
    self.__cell_matrix.append(tuple(cells))
    for row in self.rows:
      cells = []
      for cell, col in zip(row, self.columns):
        cell = self._MakeCell(cell, alignment=col.alignment)
        cells.append(cell)
      self.__cell_matrix.append(tuple(cells))

  def Write(self, out=None):
    """Writes the table to out.

    Assumes SetColumns() has been called.

    Args:
      out: Any object with a write() method. The output is
        written to this object. If None, sys.stdout is used.
    """
    out = out or sys.stdout
    self._UpdateWidth(out)
    self._MakeCellMatrix()
    # percentile=0.5 (median) keeps a few very long cells from skewing
    # the per-column width allocation.
    widths = self._CalculateColumnWidths(
        self.__cell_matrix,
        num_columns=len(self.columns),
        percentile=0.5)
    self._AdjustCellWidths(self.__cell_matrix, widths)
    self._EmitSeparator(widths, out)
    for row in self.__cell_matrix:
      # A row's height is that of its tallest (multi-line) cell; shorter
      # cells are padded with blank lines by EmitLine.
      row_height = max(cell.height for cell in row)
      for line in xrange(row_height):
        for col, cell in enumerate(row):
          out.write('|')
          self._EmitPadding(out)
          cell.EmitLine(line, widths[col], out)
          self._EmitPadding(out)
        out.write('|\n')
      self._EmitSeparator(widths, out)
class DetailedTable(_TableBase):
  """A class that can be used for displaying tabular data in detailed format.

  This class can produce tables like the following:

    +------------+---------------+
    | Country    | China         |
    | Population | 1,354,040,000 |
    | Cities     | Shanghai      |
    |            | Beijing       |
    +------------+---------------+
    | Country    | India         |
    | Population | 1,210,569,573 |
    +------------+---------------+
  """

  def _MakeCellsForDetailValue(self, data, alignment):
    """Returns _Cell instances for non-column header data.

    Args:
      data: The data for the _Cell.
      alignment: The alignment policy.

    Returns:
      A list of _Cell instances. For associative data, the list will
      contain (key, value) _Cell tuples. For all other data, the list
      will contain exactly one _Cell.

    Raises:
      ValueError: If the data type is not supported.
    """
    if not self._IsAssociativeList(data):
      return [self._MakeCell(data, alignment=alignment)]
    # Sorts the dictionary, so we get consistent results across
    # different versions of Python.
    if isinstance(data, dict):
      data = sorted(data.iteritems())
    if alignment == Alignment.AUTO:
      alignment = Alignment.LEFT
    # Keys are indented one space to set them off from the row headers.
    return [
        (_Cell([' ' + self._Stringify(key)], alignment=Alignment.LEFT),
         _Cell([self._Stringify(value)], alignment=alignment))
        for key, value in data]

  def _MakeCellMatrix(self):
    """Creates a matrix of _Cells corresponding to the final table cells.

    The result is placed in the property __cell_matrix.

    __cell_matrix is a list of lists. Each inner list ("section")
    corresponds to a single row of data and is comprised of tuples where
    the first element is a key (i.e., column header) and the second
    element is the value for that header in the current row.
    """
    self.__cell_matrix = []
    for row in self.rows:
      # A section is a single row. We have sections so that Write()
      # can tell where lines separating each "row" should be written.
      section = []
      for key, value in zip(self.columns, row):
        if value is None:
          # Missing values are omitted entirely from the section.
          continue
        key_cell = self._MakeCell(key.name, alignment=Alignment.LEFT)
        # NOTE(review): key_cell is always created with LEFT alignment,
        # so this AUTO branch looks unreachable — confirm intent.
        if key_cell.alignment == Alignment.AUTO:
          key_alignment = Alignment.LEFT
        else:
          key_alignment = key_cell.alignment
        value_cells = self._MakeCellsForDetailValue(
            value, alignment=key_alignment)
        # NOTE(review): subtle — for associative data value_cells is a
        # list of (_Cell, _Cell) 2-tuples, which dict() accepts, so
        # _IsAssociativeList returns True; for scalar data it is a
        # one-element [_Cell], which dict() rejects. Works, but fragile.
        if self._IsAssociativeList(value_cells):
          # Header line for the nested mapping, with an empty value cell.
          section.append((key_cell, _Cell(tuple())))
          for left, right in value_cells:
            section.append((left, right))
        else:
          section.append((key_cell, value_cells[0]))
      self.__cell_matrix.append(tuple(section))

  def Write(self, out=None):
    """Writes the table to out.

    Assumes SetColumns() has been called.

    Args:
      out: Any object with a write() method. The output is
        written to this object. If None, sys.stdout is used.
    """
    out = out or sys.stdout
    self._UpdateWidth(out)
    self._MakeCellMatrix()
    # Width calculation treats every (key, value) pair as a 2-column row.
    flattened_cell_matrix = tuple(itertools.chain(*self.__cell_matrix))
    widths = self._CalculateColumnWidths(
        flattened_cell_matrix,
        num_columns=2,
        percentile=0.5)
    self._AdjustCellWidths(flattened_cell_matrix, widths)
    self._EmitSeparator(widths, out)
    for section in self.__cell_matrix:
      for key, value in section:
        row_height = max(key.height, value.height)
        for line in xrange(row_height):
          for i, cell in enumerate((key, value)):
            out.write('|')
            self._EmitPadding(out)
            cell.EmitLine(line, widths[i], out)
            self._EmitPadding(out)
          out.write('|\n')
      # Separator lines only between sections ("rows"), not key lines.
      self._EmitSeparator(widths, out)
class Csv(_TabularData):
  """A class that can be used for displaying data in CSV format.

  It is recommended that cell values only be simple types such as
  strings and numbers. More complicated types like lists are handled
  by outputting their Pythonic representations.
  """

  # TODO(user): Add customizability to how the CSV is outputted.

  @staticmethod
  def _UnicodeEncode(row):
    """utf-8 encodes all values in iterable row; None becomes ''."""
    return ['' if cell is None else unicode(cell).encode('utf-8')
            for cell in row]

  def Write(self, out=None):
    """Writes the table to out.

    Assumes SetColumns() has been called.

    Args:
      out: Any object with a write() method. The output is
        written to this object. If None, sys.stdout is used.
    """
    out = out or sys.stdout
    # The csv module does not support Unicode, so we have to manually
    # shepherd Unicode values in and out of the csv module using the
    # StringIO file-like object.
    buf = StringIO.StringIO()
    writer = csv.writer(
        buf, delimiter=',',
        lineterminator='\n', quoting=csv.QUOTE_MINIMAL)
    writer.writerow(self._UnicodeEncode(col.name for col in self.columns))
    for row in self.rows:
      row = self._UnicodeEncode(row)
      writer.writerow(row)
    # Decode the utf-8 buffer back to unicode for the caller's stream.
    out.write(buf.getvalue().decode('utf-8'))
def CreateTable(table_format, width=None, padding=1):
  """Returns a table of the given format."""
  if table_format == Format.CSV:
    # CSV output ignores width and padding.
    return Csv()
  if table_format in (Format.TABLE, Format.DETAILED):
    table_class = Table if table_format == Format.TABLE else DetailedTable
    return table_class(
        width=width, padding=padding, get_terminal_width_fn=_GetTerminalWidth)
  raise ValueError(
      'Table format not recognized: {0}; expected one of {{{1}}}.'
      .format(table_format,
              ', '.join([Format.TABLE, Format.DETAILED, Format.CSV])))
| {
"repo_name": "ychen820/microblog",
"path": "y/google-cloud-sdk/.install/.backup/platform/gcutil/lib/google_compute_engine/gcutil_lib/table/table.py",
"copies": "4",
"size": "32076",
"license": "bsd-3-clause",
"hash": 6348894764527780000,
"line_mean": 30.8846918489,
"line_max": 80,
"alpha_frac": 0.6122957975,
"autogenerated": false,
"ratio": 4.039289761994711,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007045851964594501,
"num_lines": 1006
} |
"""A module for dynamic, incremental ctypes code generation.
See the 'include' function for usage information.
"""
import sys
import os
import time
import bz2
import cPickle
import tempfile
try:
# md5 is deprecated in Python 2.5, so use hashlib if available
from hashlib import md5
except ImportError:
from md5 import new as md5
import ctypes
import ctypeslib
from ctypeslib.codegen import gccxmlparser, codegenerator, typedesc
# Per-user temp directory that caches gccxml artifacts (.h, .xml,
# .typedesc.bz2) keyed by the md5 of the included code.
gen_dir = os.path.join(tempfile.gettempdir(), "gccxml_cache")
if not os.path.exists(gen_dir):
    # NOTE(review): exists-then-mkdir is racy if two processes import at
    # once; os.makedirs with error handling would be safer — confirm.
    os.mkdir(gen_dir)
# TODO:
#
# Clean up the names Generator and CodeGenerator.
#
def include(code, persist=True, compilerflags=["-c"]):
    """Replaces the *calling module* with a dynamic module.

    The dynamic module generates ctypes code on demand, from type
    descriptions that are created by gccxml compiling the C code 'code'.

    If <persist> is True, generated code is appended to the module's
    source code, otherwise the generated code is executed and then
    thrown away.

    The calling module must load all the shared libraries that it uses
    *BEFORE* this function is called.

    NOTE:
     - the calling module MUST contain 'from ctypes import *',
       and, on windows, also 'from ctypes.wintypes import *'.

    Args:
      code: C source text handed to gccxml.
      persist: append generated code to the caller's source file.
      compilerflags: extra flags passed to gccxml. (The mutable default
        is harmless here because the list is never mutated.)
    """
    # Create a hash for the code, and use that as basename for the
    # cache files (header, gccxml XML, pickled type descriptions).
    fullcode = "/* compilerflags: %r */\n%s" % (compilerflags, code)
    hashval = md5(fullcode).hexdigest()
    fnm = os.path.abspath(os.path.join(gen_dir, hashval))
    h_file = fnm + ".h"
    xml_file = fnm + ".xml"
    tdesc_file = fnm + ".typedesc.bz2"
    if not os.path.exists(h_file):
        open(h_file, "w").write(fullcode)
    # Make-style staging: rebuild each artifact only if its input is newer.
    if is_newer(h_file, tdesc_file):
        if is_newer(h_file, xml_file):
            print >> sys.stderr, "# Compiling into...", xml_file
            from ctypeslib import h2xml
            h2xml.compile_to_xml(["h2xml",
                                  "-I", os.path.dirname(fnm), "-q",
                                  h_file,
                                  "-o", xml_file] + list(compilerflags))
        if is_newer(xml_file, tdesc_file):
            print >> sys.stderr, "# Parsing XML file and compressing type descriptions..."
            decls = gccxmlparser.parse(xml_file)
            ofi = bz2.BZ2File(tdesc_file, "w")
            try:
                # BUG FIX: the BZ2File was never closed (and cPickle.dump's
                # None return value was pointlessly stored). An unflushed
                # stream can leave a truncated .typedesc.bz2 on interpreters
                # without refcounting GC.
                cPickle.dump(decls, ofi, -1)
            finally:
                ofi.close()
            os.remove(xml_file)  # not needed any longer.
    # Swap the caller's entry in sys.modules for the lazy proxy module.
    frame = sys._getframe(1)
    glob = frame.f_globals
    name = glob["__name__"]
    mod = sys.modules[name]
    sys.modules[name] = DynamicModule(mod, tdesc_file, persist=persist)
def is_newer(source, target):
    """Return true if 'source' exists and is more recently modified than
    'target', or if 'source' exists and 'target' doesn't. Return false if
    both exist and 'target' is the same age or younger than 'source'.

    Raise ValueError if 'source' does not exist.
    """
    if not os.path.exists(source):
        raise ValueError("file '%s' does not exist" % source)
    if not os.path.exists(target):
        return 1
    import stat as _stat
    source_mtime = os.stat(source)[_stat.ST_MTIME]
    target_mtime = os.stat(target)[_stat.ST_MTIME]
    return source_mtime > target_mtime
################################################################
class DynamicModule(object):
    """Module proxy that generates ctypes code for attributes on first access."""

    def __init__(self, mod, tdesc_file, persist):
        # We need to keep 'mod' alive, otherwise it would set the
        # values of its __dict__ to None when it's deleted. Sharing
        # __dict__ makes this proxy a drop-in replacement for the module.
        self.__dict__ = mod.__dict__
        self.__orig_module__ = mod
        fnm = os.path.abspath(self.__file__)
        # Map a compiled .pyc/.pyo path back to the .py source, since
        # generated code may be appended to the source when persisting.
        if fnm.endswith(".pyc") or fnm.endswith(".pyo"):
            fnm = fnm[:-1]
        if persist and not os.path.exists(fnm):
            raise ValueError("source file %r does not exist" % fnm)
        # CodeGenerator construction is deferred until the first lookup.
        self.__code_generator_args = (fnm, tdesc_file, mod.__dict__, persist)
        self.__code_generator = None
        self.__tdesc_file = tdesc_file

    @property
    def _code_generator(self):
        # Lazily builds (and caches) the CodeGenerator.
        if not self.__code_generator:
            self.__code_generator = CodeGenerator(*self.__code_generator_args)
        return self.__code_generator

    def __repr__(self):
        return "<DynamicModule(%r) %r from %r>" % (
            self.__tdesc_file, self.__name__, self.__file__)

    def __getattr__(self, name):
        # Only non-dunder names are generated; dunder lookups must fail
        # normally so Python protocols keep working on the proxy.
        if not name.startswith("__") and not name.endswith("__"):
            val = self._code_generator.generate(name)
            # print "# Generating", name
            self.__dict__[name] = val
            return val
        raise AttributeError(name)
################
class UnknownSymbol(Exception):
    """Raised when a requested name is not found in the type descriptions."""
class Generator(codegenerator.Generator):
    """A subclass of codegenerator, specialized for our requirements:

    - libraries are already loaded in the module, won't be loaded by
      the code we generate.
    - no need to generate symbols that are already present in
      self.namespace
    """

    def need_CLibraries(self):
        # Libraries are already loaded in the module, no code needed.
        pass

    # Windows DLLs need no loading code either.
    need_WinLibraries = need_CLibraries

    def generate(self, item):
        """Generates code for item unless its name is already in namespace."""
        if isinstance(item, typedesc.StructureHead):
            # A StructureHead's name lives on its owning struct.
            name = getattr(item.struct, "name", None)
        else:
            name = getattr(item, "name", None)
        if name in self.namespace:
            return
        super(Generator, self).generate(item)

    def get_sharedlib(self, dllname, cc):
        """Returns the library name for generated code to reference.

        XXX This should assert that the correct calling convention
        is used.
        """
        dll = self.searched_dlls[dllname]
        if os.name == "nt":
            # Calling convention only matters on Windows.
            if cc == "stdcall":
                assert isinstance(
                    dll, ctypes.WinDLL), "wrong calling convention"
            else:
                assert not isinstance(
                    dll, ctypes.WinDLL), "wrong calling convention"
        return dllname

    def find_dllname(self, func):
        """Finds which of the libraries in 'searched_dlls' exports func.

        Returns the name of the library, or None.
        """
        name = func.name
        for dllname, dll in self.searched_dlls.items():
            try:
                getattr(dll, name)
            except AttributeError:
                pass
            else:
                return dllname
        return None

    def Function(self, func):
        """Generates the function, hooking up an errcheck if one exists.

        XXX Not sure this approach makes sense.
        """
        super(Generator, self).Function(func)
        restype = self.type_name(func.returns)
        # A namespace entry named '<restype>_errcheck' becomes the
        # generated function's errcheck handler.
        errcheck = self.namespace.get("%s_errcheck" % restype, None)
        if errcheck is not None:
            print >> self.stream, "%s.errcheck = %s_errcheck" % (
                func.name, restype)
class CodeGenerator(object):
    """Dynamic, incremental code generation. The generated code is
    executed in the dictionary <ns>, and appended to the file
    specified by <src_path>, if <persist> is True."""

    # File object for the persisted source, or None when persist is False.
    output = None

    def __init__(self, src_path, tdesc_file, ns, persist):
        """Loads the pickled type descriptions and prepares the generator.

        Args:
          src_path: path of the module's source file (append target).
          tdesc_file: bz2-compressed pickle of gccxml type descriptions.
          ns: the module namespace that generated code is exec'd into.
          persist: if true, generated code is appended to src_path.
        """
        # We should do lazy initialization, so that all this stuff is
        # only done when really needed because we have to generate
        # something.
        if persist:
            # We open the file in universal newline mode, read the
            # contents to determine the line endings. All this to
            # avoid creating files with mixed line endings!
            ifi = open(src_path, "U")
            ifi.read()
            ifi.close()
            # .newlines is a plain attribute; reading it after close() is ok.
            self._newlines = ifi.newlines or "\n"
            # NOTE(review): opened for append and never explicitly closed;
            # relies on interpreter shutdown/GC to flush — confirm.
            self.output = open(src_path, "ab")
        data = open(tdesc_file, "rb").read()
        decls = cPickle.loads(bz2.decompress(data))
        names = {}
        self.namespace = ns
        done = set()
        # Mark declarations whose names already live in the namespace as
        # done so they will not be generated again.
        for i in decls:
            try:
                name = i.name
            except AttributeError:
                continue
            if name in ns:
                done.add(i)
                if isinstance(i, typedesc.Structure):
                    done.add(i.get_head())
                    done.add(i.get_body())
            names[name] = i
        self.decls = names
        # Restrict symbol search to real C libraries (PyDLLs excluded).
        dlls = dict([o for o in ns.items()
                     if isinstance(o[1], ctypes.CDLL)
                     and not isinstance(o[1], ctypes.PyDLL)])
        self.codegenerator = Generator(output=None,
                                       known_symbols=None,
                                       searched_dlls=dlls)
        self.codegenerator.errcheck = ns.get("errcheck")
        self.codegenerator.done |= done
        self.codegenerator.namespace = self.namespace
        # Tallies of already-exec'd text; generate() slices the generator's
        # streams against these to extract only newly generated code.
        self.imports = ""
        self.code = ""

    def generate(self, name):
        """Generates, executes, and returns the object named 'name'.

        Raises UnknownSymbol if the name is not in the type descriptions
        or did not end up in the namespace after execution.
        """
        # Incremental code generation for one name.
        try:
            item = self.decls[name]
        except KeyError:
            raise UnknownSymbol(name)
        self.codegenerator.generate_items([item])
        # Could as well call getvalue(), and create a new StringIO
        # instance for .imports and .stream.
        imports = self.codegenerator.imports.getvalue()[len(self.imports):]
        self.imports += imports
        code = self.codegenerator.stream.getvalue()[len(self.code):]
        self.code += code
        code = imports + code
        exec code in self.namespace
        # I guess when this fails, it means that the dll exporting
        # this function is not in searched_dlls. So we should
        # probably raise a different exception.
        if self.output is not None:
            # Rewrite newlines to match the source file's existing style.
            code = code.replace("\n", self._newlines)
            self.output.write(code)
        try:
            return self.namespace[name]
        except KeyError:
            raise UnknownSymbol(name)
################################################################
| {
"repo_name": "luzfcb/ctypeslib",
"path": "ctypeslib/dynamic_module.py",
"copies": "1",
"size": "9763",
"license": "mit",
"hash": -4425632373919867000,
"line_mean": 33.1363636364,
"line_max": 90,
"alpha_frac": 0.5813786746,
"autogenerated": false,
"ratio": 4.110736842105263,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.000038423115346192275,
"num_lines": 286
} |
"""A module for dynamic, incremental ctypes code generation.
See the 'include' function for usage information.
"""
import sys, os, time, bz2, cPickle, tempfile
try:
# md5 is deprecated in Python 2.5, so use hashlib if available
from hashlib import md5
except ImportError:
from md5 import new as md5
import ctypes
import ctypeslib
from ctypeslib.codegen import gccxmlparser, codegenerator, typedesc
# Per-user temp directory that caches gccxml artifacts (.h, .xml,
# .typedesc.bz2) keyed by the md5 of the included code.
gen_dir = os.path.join(tempfile.gettempdir(), "gccxml_cache")
if not os.path.exists(gen_dir):
    # NOTE(review): exists-then-mkdir is racy if two processes import at
    # once; os.makedirs with error handling would be safer — confirm.
    os.mkdir(gen_dir)
# TODO:
#
# Clean up the names Generator and CodeGenerator.
#
def include(code, persist=True, compilerflags=["-c"]):
    """Replaces the *calling module* with a dynamic module.

    The dynamic module generates ctypes code on demand, from type
    descriptions that are created by gccxml compiling the C code 'code'.

    If <persist> is True, generated code is appended to the module's
    source code, otherwise the generated code is executed and then
    thrown away.

    The calling module must load all the shared libraries that it uses
    *BEFORE* this function is called.

    NOTE:
     - the calling module MUST contain 'from ctypes import *',
       and, on windows, also 'from ctypes.wintypes import *'.

    Args:
      code: C source text handed to gccxml.
      persist: append generated code to the caller's source file.
      compilerflags: extra flags passed to gccxml. (The mutable default
        is harmless here because the list is never mutated.)
    """
    # Create a hash for the code, and use that as basename for the
    # cache files (header, gccxml XML, pickled type descriptions).
    fullcode = "/* compilerflags: %r */\n%s" % (compilerflags, code)
    hashval = md5(fullcode).hexdigest()
    fnm = os.path.abspath(os.path.join(gen_dir, hashval))
    h_file = fnm + ".h"
    xml_file = fnm + ".xml"
    tdesc_file = fnm + ".typedesc.bz2"
    if not os.path.exists(h_file):
        open(h_file, "w").write(fullcode)
    # Make-style staging: rebuild each artifact only if its input is newer.
    if is_newer(h_file, tdesc_file):
        if is_newer(h_file, xml_file):
            print >> sys.stderr, "# Compiling into...", xml_file
            from ctypeslib import h2xml
            h2xml.compile_to_xml(["h2xml",
                                  "-I", os.path.dirname(fnm), "-q",
                                  h_file,
                                  "-o", xml_file] + list(compilerflags))
        if is_newer(xml_file, tdesc_file):
            print >> sys.stderr, "# Parsing XML file and compressing type descriptions..."
            decls = gccxmlparser.parse(xml_file)
            ofi = bz2.BZ2File(tdesc_file, "w")
            try:
                # BUG FIX: the BZ2File was never closed (and cPickle.dump's
                # None return value was pointlessly stored). An unflushed
                # stream can leave a truncated .typedesc.bz2 on interpreters
                # without refcounting GC.
                cPickle.dump(decls, ofi, -1)
            finally:
                ofi.close()
            os.remove(xml_file)  # not needed any longer.
    # Swap the caller's entry in sys.modules for the lazy proxy module.
    frame = sys._getframe(1)
    glob = frame.f_globals
    name = glob["__name__"]
    mod = sys.modules[name]
    sys.modules[name] = DynamicModule(mod, tdesc_file, persist=persist)
def is_newer(source, target):
    """Return true if 'source' exists and is more recently modified than
    'target', or if 'source' exists and 'target' doesn't. Return false if
    both exist and 'target' is the same age or younger than 'source'.

    Raise ValueError if 'source' does not exist.
    """
    if not os.path.exists(source):
        raise ValueError("file '%s' does not exist" % source)
    if not os.path.exists(target):
        return 1
    import stat as _stat
    source_mtime = os.stat(source)[_stat.ST_MTIME]
    target_mtime = os.stat(target)[_stat.ST_MTIME]
    return source_mtime > target_mtime
################################################################
class DynamicModule(object):
    """Module proxy that generates ctypes code for attributes on first access."""

    def __init__(self, mod, tdesc_file, persist):
        # We need to keep 'mod' alive, otherwise it would set the
        # values of its __dict__ to None when it's deleted. Sharing
        # __dict__ makes this proxy a drop-in replacement for the module.
        self.__dict__ = mod.__dict__
        self.__orig_module__ = mod
        fnm = os.path.abspath(self.__file__)
        # Map a compiled .pyc/.pyo path back to the .py source, since
        # generated code may be appended to the source when persisting.
        if fnm.endswith(".pyc") or fnm.endswith(".pyo"):
            fnm = fnm[:-1]
        if persist and not os.path.exists(fnm):
            raise ValueError("source file %r does not exist" % fnm)
        # CodeGenerator construction is deferred until the first lookup.
        self.__code_generator_args = (fnm, tdesc_file, mod.__dict__, persist)
        self.__code_generator = None
        self.__tdesc_file = tdesc_file

    @property
    def _code_generator(self):
        # Lazily builds (and caches) the CodeGenerator.
        if not self.__code_generator:
            self.__code_generator = CodeGenerator(*self.__code_generator_args)
        return self.__code_generator

    def __repr__(self):
        return "<DynamicModule(%r) %r from %r>" % (self.__tdesc_file, self.__name__, self.__file__)

    def __getattr__(self, name):
        # Only non-dunder names are generated; dunder lookups must fail
        # normally so Python protocols keep working on the proxy.
        if not name.startswith("__") and not name.endswith("__"):
            val = self._code_generator.generate(name)
            ## print "# Generating", name
            self.__dict__[name] = val
            return val
        raise AttributeError(name)
################
class UnknownSymbol(Exception):
    """Raised when a requested name is not found in the type descriptions."""
class Generator(codegenerator.Generator):
    """A subclass of codegenerator, specialized for our requirements:

    - libraries are already loaded in the module, won't be loaded by
      the code we generate.
    - no need to generate symbols that are already present in
      self.namespace
    """

    def need_CLibraries(self): pass
    # Libraries are already loaded in the module, no code needed.
    # Windows DLLs need no loading code either.
    need_WinLibraries = need_CLibraries

    def generate(self, item):
        """Generates code for item unless its name is already in namespace."""
        if isinstance(item, typedesc.StructureHead):
            # A StructureHead's name lives on its owning struct.
            name = getattr(item.struct, "name", None)
        else:
            name = getattr(item, "name", None)
        if name in self.namespace:
            return
        super(Generator, self).generate(item)

    def get_sharedlib(self, dllname, cc):
        """Returns the library name for generated code to reference.

        XXX This should assert that the correct calling convention
        is used.
        """
        dll = self.searched_dlls[dllname]
        if os.name == "nt":
            # Calling convention only matters on Windows.
            if cc == "stdcall":
                assert isinstance(dll, ctypes.WinDLL), "wrong calling convention"
            else:
                assert not isinstance(dll, ctypes.WinDLL), "wrong calling convention"
        return dllname

    def find_dllname(self, func):
        """Finds which of the libraries in 'searched_dlls' exports func.

        Returns the name of the library, or None.
        """
        name = func.name
        for dllname, dll in self.searched_dlls.items():
            try:
                getattr(dll, name)
            except AttributeError:
                pass
            else:
                return dllname
        return None

    def Function(self, func):
        """Generates the function, hooking up an errcheck if one exists.

        XXX Not sure this approach makes sense.
        """
        super(Generator, self).Function(func)
        restype = self.type_name(func.returns)
        # A namespace entry named '<restype>_errcheck' becomes the
        # generated function's errcheck handler.
        errcheck = self.namespace.get("%s_errcheck" % restype, None)
        if errcheck is not None:
            print >> self.stream, "%s.errcheck = %s_errcheck" % (func.name, restype)
class CodeGenerator(object):
    """Dynamic, incremental code generation. The generated code is
    executed in the dictionary <ns>, and appended to the file
    specified by <src_path>, if <persist> is True."""
    output = None
    def __init__(self, src_path, tdesc_file, ns, persist):
        # We should do lazy initialization, so that all this stuff is
        # only done when really needed because we have to generate
        # something.
        if persist:
            # We open the file in universal newline mode, read the
            # contents to determine the line endings. All this to
            # avoid creating files with mixed line endings!
            ifi = open(src_path, "U")
            ifi.read()
            ifi.close()
            # .newlines remains readable after close
            self._newlines = ifi.newlines or "\n"
            self.output = open(src_path, "ab")
        # Type descriptions are stored as a bz2-compressed pickle
        data = open(tdesc_file, "rb").read()
        decls = cPickle.loads(bz2.decompress(data))
        names = {}
        self.namespace = ns
        done = set()
        # Mark declarations already present in the namespace as done so no
        # code is generated for them again.
        for i in decls:
            try:
                name = i.name
            except AttributeError:
                continue
            if name in ns:
                done.add(i)
                if isinstance(i, typedesc.Structure):
                    done.add(i.get_head())
                    done.add(i.get_body())
            names[name] = i
        self.decls = names
        # Collect the ctypes libraries already loaded into the namespace
        dlls = dict([o for o in ns.items()
                     if isinstance(o[1], ctypes.CDLL)
                     and not isinstance(o[1], ctypes.PyDLL)])
        self.codegenerator = Generator(output=None,
                                       known_symbols=None,
                                       searched_dlls=dlls)
        self.codegenerator.errcheck = ns.get("errcheck")
        self.codegenerator.done |= done
        self.codegenerator.namespace = self.namespace
        # Text generated so far; used to compute per-call increments below.
        self.imports = ""
        self.code = ""
    def generate(self, name):
        # Incremental code generation for one name.
        try:
            item = self.decls[name]
        except KeyError:
            raise UnknownSymbol(name)
        self.codegenerator.generate_items([item])
        # Could as well call getvalue(), and create a new StringIO
        # instance for .imports and .stream.
        # Slice off only the newly generated text.
        imports = self.codegenerator.imports.getvalue()[len(self.imports):]
        self.imports += imports
        code = self.codegenerator.stream.getvalue()[len(self.code):]
        self.code += code
        code = imports + code
        exec code in self.namespace
        # I guess when this fails, it means that the dll exporting
        # this function is not in searched_dlls. So we should
        # probably raise a different exception.
        if self.output is not None:
            # Normalize to the source file's original line endings.
            code = code.replace("\n", self._newlines)
            self.output.write(code)
        try:
            return self.namespace[name]
        except KeyError:
            raise UnknownSymbol(name)
################################################################
| {
"repo_name": "sugarmanz/ctypeslib",
"path": "ctypeslib/dynamic_module.py",
"copies": "1",
"size": "9680",
"license": "mit",
"hash": 3817550622574953000,
"line_mean": 35.1194029851,
"line_max": 99,
"alpha_frac": 0.5832644628,
"autogenerated": false,
"ratio": 4.091293322062553,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0026257410714840656,
"num_lines": 268
} |
"""A module for extracting information from lists of dicts for creating
geodatabase fields.
"""
from __future__ import (
absolute_import, print_function, unicode_literals, division)
import re
from datetime import date, time, datetime
# Geodatabase field type name constants.
FIELD_TYPE_TEXT = "TEXT"
FIELD_TYPE_FLOAT = "FLOAT"
FIELD_TYPE_DOUBLE = "DOUBLE"
FIELD_TYPE_SHORT = "SHORT"
FIELD_TYPE_LONG = "LONG"
FIELD_TYPE_DATE = "DATE"
FIELD_TYPE_BLOB = "BLOB"
FIELD_TYPE_RASTER = "RASTER"
FIELD_TYPE_GUID = "GUID"
# Defines a ranking of field types.
# When two observations of the same field disagree, the higher-ranked type
# wins (see _compare_types). None (no value seen yet) ranks below all types;
# types sharing a rank (GUID/DATE/RASTER) are considered incompatible.
_TYPE_RANKS = {
    FIELD_TYPE_GUID: 6,
    FIELD_TYPE_DATE: 6,
    FIELD_TYPE_RASTER: 6,
    FIELD_TYPE_BLOB: 5,
    FIELD_TYPE_DOUBLE: 4,
    FIELD_TYPE_FLOAT: 3,
    FIELD_TYPE_LONG: 2,
    FIELD_TYPE_SHORT: 1,
    FIELD_TYPE_TEXT: 0,
    None: -1
}
def _compare_types(name1, name2):
    """Compares two field type names.
    Returns an integer:
        1 if name1 should be used
        -1 if name2 should be used
        0 if the two names are the same
    Raises a ValueError if name1 and name2 are different but have the same
    rank.
    """
    if name1 == name2:
        return 0
    rank1 = _TYPE_RANKS[name1]
    rank2 = _TYPE_RANKS[name2]
    if rank1 == rank2:
        raise ValueError("Incompatible types: %s & %s" % (name1, name2))
    # None (unknown) always loses to a concrete type.
    if name1 is None:
        return -1
    if name2 is None:
        return 1
    return 1 if rank1 > rank2 else -1
def _get_field_type(value):
    """Determines a field type based on a value's type.

    Returns one of the FIELD_TYPE_* constants, or None when the value is
    None (i.e. the type cannot be determined from this value).
    """
    if value is None:
        return None
    field_type = None
    if isinstance(value, float):
        field_type = FIELD_TYPE_DOUBLE
    elif isinstance(value, int):
        # NOTE: bool is a subclass of int, so True/False also map to LONG.
        field_type = FIELD_TYPE_LONG
    elif isinstance(value, (date, time, datetime)):
        field_type = FIELD_TYPE_DATE
    elif isinstance(value, str):
        # BUGFIX: also accept hyphens so standard, hyphen-delimited GUIDs
        # such as "{01234567-89ab-cdef-0123-456789abcdef}" are detected as
        # GUID; previously they fell through to TEXT. Every string matched
        # by the old pattern still matches.
        guid_re = re.compile(r"^\{[a-f\d-]+\}$", re.IGNORECASE)
        if guid_re.match(value):
            field_type = FIELD_TYPE_GUID
        else:
            field_type = FIELD_TYPE_TEXT
    return field_type
class FieldInfo(object):
    """Represents parameters for creating fields.
    Attributes:
        field_name: name of the field
        field_length: length of field. Only applicable to certain data types.
        field_type: data type of field
        field_is_nullable: indicates if the field is nullable.
    """
    def __init__(self, name, value, template=None):
        """Creates a new FieldInfo instance.
        Args:
            name: field name
            value: value used to determine the data type of the field
            template: Another FieldInfo object to be used as a template
        """
        self.field_name = None
        self.field_length = None
        self.field_type = None
        self.field_is_nullable = None
        if template and isinstance(template, FieldInfo):
            # Merge `value` into the template: keep the template's name and
            # widen type / length / nullability as needed.
            self.field_name = template.field_name
            # Get the field type of value
            new_field_type = _get_field_type(value)
            if template.field_type is None:
                self.field_type = new_field_type
            elif template.field_type == new_field_type:
                self.field_type = new_field_type
            else:
                # Make sure type is floating point
                compare_result = _compare_types(
                    new_field_type, template.field_type)
                if compare_result < 0:
                    self.field_type = template.field_type
                elif compare_result > 0:
                    self.field_type = new_field_type
            # A None value ever observed makes the field nullable.
            self.field_is_nullable = (
                template.field_is_nullable or value is None)
            if isinstance(value, str):
                # Keep the longest text length seen so far.
                new_len = len(value)
                if (
                    template.field_length is None or
                    template.field_length < new_len
                ):
                    self.field_length = new_len
                else:
                    self.field_length = template.field_length
            elif template.field_length is not None:
                self.field_length = template.field_length
        else:
            # No template: derive everything from the single value.
            self.field_name = name
            self.field_type = _get_field_type(value)
            self.field_is_nullable = False
            self.field_length = None
            if self.field_type is None:
                # Type unknown (value was None): field must allow nulls.
                self.field_is_nullable = True
            elif self.field_type == FIELD_TYPE_TEXT:
                self.field_length = len(value)
    @staticmethod
    def from_features(features):
        """Extracts a list of FieldInfos from a list of dicts representing
        GDB features
        Args:
            features: a list of dicts that define features
        Returns:
            A dict of field infos keyed by field_name.
        """
        # NOTE(review): each feature overwrites master[field_key] using the
        # *current* feature's FieldInfo as the template (with value=None,
        # which also forces field_is_nullable True), so information from
        # earlier features appears to be discarded rather than merged —
        # confirm whether the template should be master.get(field_key).
        master = {}
        for feature in features:
            for field_key, field_info in _iter_field_infos(feature):
                new_fi = FieldInfo(
                    field_key,
                    None,
                    field_info
                )
                master[field_key] = new_fi
        return master
def _iter_field_infos(feature_dict):
    """Yield (key, FieldInfo) pairs for each attribute of a feature dict."""
    for field_name, field_value in feature_dict.items():
        yield field_name, FieldInfo(field_name, field_value)
| {
"repo_name": "WSDOT-GIS/wsdot-traffic-gp",
"path": "wsdottraffic/fielddetection.py",
"copies": "1",
"size": "5368",
"license": "unlicense",
"hash": -2328973715128310000,
"line_mean": 29.5,
"line_max": 77,
"alpha_frac": 0.5702309985,
"autogenerated": false,
"ratio": 4.005970149253732,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 176
} |
"""A module for forms used"""
from wtforms import StringField, PasswordField, validators, SubmitField, TextAreaField
from flask_wtf import FlaskForm
class SignupForm(FlaskForm):
    """Form for registering a new user account.

    Validates email format, username characters, and password length and
    confirmation before the data reaches the view.
    """
    # NOTE: lowercase validator aliases (data_required, email, ...) are
    # deprecated and removed in WTForms 3; the CamelCase classes used here
    # work on all WTForms versions.
    email = StringField(
        'Email Address',
        [
            validators.DataRequired(),
            validators.Length(min=6, max=35),
            validators.Email(message="Invalid email address"),
            validators.Regexp(
                r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)",
                flags=0, message="Invalid email address"
            )
        ]
    )
    username = StringField(
        'Username', [
            validators.DataRequired(),
            validators.Length(min=3, max=25),
            validators.Regexp(
                r"(^[a-zA-Z _.+-]+$)",
                message="Only text characters allowed for username."
            )
        ]
    )
    password = PasswordField('New Password', [
        validators.InputRequired(),
        validators.EqualTo('confirm', message='Passwords must match'),
        # Fixed message typo: "atleast" -> "at least"
        validators.Length(min=8, message='Password needs to be at least 8 characters long')
    ])
    confirm = PasswordField('Confirm Password', [validators.DataRequired()])
    submit = SubmitField("Submit")
class LoginForm(FlaskForm):
    """Form for authenticating an existing user (docstring previously
    copy-pasted from SignupForm)."""
    email = StringField('Email Address', [
        validators.DataRequired(),
        validators.Email(message="Invalid email address"),
        validators.Regexp(
            r"(^[a-zA-Z0-9_.+-]+@[a-zA-Z0-9-]+\.[a-zA-Z0-9-.]+$)", message="Invalid email address"
        )
    ])
    password = PasswordField('Password', [
        validators.DataRequired()
    ])
    login = SubmitField("Login")
class CategoryForm(FlaskForm):
    """Form for creating or editing a recipe category."""
    name = StringField('Name', [
        validators.InputRequired('Please name your category'),
        validators.Length(min=4, max=10, message='Name should be 4-10 characters long'),
        validators.Regexp(
            r"(^[a-zA-Z _.+-]+$)",
            message="Only text characters allowed for category name."
        )
    ])
    description = TextAreaField('Description', [
        validators.DataRequired('A description would be nice.'),
        validators.Length(max=50, message='Description should be less than 50 characters long')
    ])
class RecipeForm(FlaskForm):
    """Form for creating or editing a recipe."""
    name = StringField('Name', [
        validators.DataRequired('A name for your recipe would be nice'),
        validators.Length(min=4, message="The name should be more than 4 characters long"),
        validators.Regexp(
            r"(^[a-zA-Z _.+-]+$)",
            message="Only text characters allowed for recipe name."
        )
    ])
    # Optional field: no validators.
    fun_fact = StringField('Fun Fact')
    ingredients = TextAreaField('Ingredients', [
        validators.DataRequired('Some ingredients please')
    ])
    description = TextAreaField('Directions and Serving', [
        validators.DataRequired('How can I prepare this?')
    ])
| {
"repo_name": "indungu/yummy-recipes",
"path": "app/forms.py",
"copies": "1",
"size": "3058",
"license": "mit",
"hash": 5007774018050441000,
"line_mean": 36.2926829268,
"line_max": 98,
"alpha_frac": 0.5977763244,
"autogenerated": false,
"ratio": 4.149253731343284,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5247030055743284,
"avg_score": null,
"num_lines": null
} |
"""A module for function minimization."""
# pylint: disable=too-many-arguments, too-many-instance-attributes
from average import EWMA
import numpy as np
from num_utils import saxpy, BILINEAR, sdot, EPS, resize, roll2
class AdamOptimizer:
    """Implements the Adam gradient descent optimizer [4] with iterate averaging."""
    def __init__(self, params, step_size=1, b1=0.9, b2=0.999, bp1=0, decay=0, power=1,
                 biased_g1=False):
        """Initializes the optimizer.

        params: parameter array, updated in place by update().
        step_size: base step size before decay.
        b1, b2: EWMA decay factors for the first and second gradient moments.
        bp1: EWMA decay factor for the averaged iterate.
        decay, power: step-size schedule; the effective step size is
            step_size / i**power, with i incremented by `decay` each update.
        biased_g1: when True, the first-moment EWMA skips bias correction.
        """
        self.params = params
        self.step_size = step_size
        self.decay, self.power = decay, power
        self.i = 1
        # Accumulated (x, y) roll offset applied to the internal state; the
        # averaged iterate returned by update() is rolled back by it.
        self.xy = np.zeros(2, dtype=np.int32)
        self.g1 = EWMA.like(params, b1, correct_bias=not biased_g1)
        self.g2 = EWMA.like(params, b2)
        self.p1 = EWMA.like(params, bp1)
    def update(self, opfunc):
        """Returns a step's parameter update given a loss/gradient evaluation function.

        opfunc: callable taking the parameter array, returning (loss, grad).
        Returns (averaged_params, loss).
        """
        # Step size decay
        step_size = self.step_size / self.i**self.power
        self.i += self.decay
        loss, grad = opfunc(self.params)
        # Adam
        self.g1.update(grad)
        self.g2.update(grad**2)
        step = self.g1.get() / (np.sqrt(self.g2.get()) + EPS)
        # In-place: params -= step_size * step
        saxpy(-step_size, step, self.params)
        # Iterate averaging
        self.p1.update(self.params)
        return roll2(self.p1.get(), -self.xy), loss
    def roll(self, xy):
        """Rolls the optimizer's internal state by the (x, y) offset."""
        if (xy == 0).all():
            return
        self.xy += xy
        roll2(self.g1.value, xy)
        roll2(self.g2.value, xy)
        roll2(self.p1.value, xy)
    def set_params(self, last_iterate):
        """Sets params to the supplied array (a possibly-resized or altered last non-averaged
        iterate), resampling the optimizer's internal state if the shape has changed."""
        self.i = 1
        self.params = last_iterate
        hw = self.params.shape[-2:]
        self.g1.value = resize(self.g1.value, hw)
        # Bilinear resampling can undershoot; clamp second moment to >= 0.
        self.g2.value = np.maximum(0, resize(self.g2.value, hw, method=BILINEAR))
        self.p1.value = resize(self.p1.value, hw)
class LBFGSOptimizer:
    """L-BFGS [2] for function minimization, with fixed size steps (no line search)."""
    def __init__(self, params, initial_step=0.1, n_corr=10):
        """params: parameter array, updated in place by update().
        initial_step: mean absolute step size for the first iteration,
            before any curvature information exists.
        n_corr: number of curvature pairs kept in the L-BFGS memory.
        """
        self.params = params
        self.initial_step = initial_step
        self.n_corr = n_corr
        # Accumulated (x, y) roll offset applied to the internal state.
        self.xy = np.zeros(2, dtype=np.int32)
        # Last evaluated loss/gradient; None until the first update().
        self.loss, self.grad = None, None
        # Curvature memory: parameter deltas, gradient deltas, their dots.
        self.sk, self.yk, self.syk = [], [], []
    def update(self, opfunc):
        """Take an L-BFGS step. Returns the new parameters and the loss after the step."""
        if self.loss is None:
            self.loss, self.grad = opfunc(self.params)
        # Compute and take a step, being cautious if the L-BFGS memory is not full
        s = -self.inv_hv(self.grad)
        if not self.sk:
            s *= self.initial_step / np.mean(abs(s))
        elif len(self.sk) < self.n_corr:
            s *= len(self.sk) / self.n_corr
        self.params += s
        # Compute a curvature pair and store parameters for the next step
        loss, grad = opfunc(self.params)
        y = grad - self.grad
        self.store_curvature_pair(s, y)
        self.loss, self.grad = loss, grad
        return self.params, loss
    def store_curvature_pair(self, s, y):
        """Updates the L-BFGS memory with a new curvature pair."""
        sy = sdot(s, y)
        # Only keep positive-curvature pairs, keeping the implicit inverse
        # Hessian approximation positive definite.
        if sy > 1e-10:
            self.sk.append(s)
            self.yk.append(y)
            self.syk.append(sy)
            if len(self.sk) > self.n_corr:
                self.sk, self.yk, self.syk = self.sk[1:], self.yk[1:], self.syk[1:]
    def inv_hv(self, p):
        """Computes the product of a vector with an approximation of the inverse Hessian."""
        # Standard L-BFGS two-loop recursion over the stored pairs.
        p = p.copy()
        alphas = []
        for s, y, sy in zip(reversed(self.sk), reversed(self.yk), reversed(self.syk)):
            alphas.append(sdot(s, p) / sy)
            saxpy(-alphas[-1], y, p)
        if self.sk:
            # Initial Hessian scaling from the most recent pair.
            sy, y = self.syk[-1], self.yk[-1]
            p *= sy / sdot(y, y)
        for s, y, sy, alpha in zip(self.sk, self.yk, self.syk, reversed(alphas)):
            beta = sdot(y, p) / sy
            saxpy(alpha - beta, s, p)
        return p
    def roll(self, xy):
        """Rolls the optimizer's internal state."""
        if (xy == 0).all():
            return
        self.xy += xy
        if self.grad is not None:
            roll2(self.grad, xy)
        for s, y in zip(self.sk, self.yk):
            roll2(s, xy)
            roll2(y, xy)
    def set_params(self, last_iterate):
        """Sets params to the supplied array and clears the L-BFGS memory."""
        self.params = last_iterate
        self.loss, self.grad = None, None
        self.sk, self.yk, self.syk = [], [], []
| {
"repo_name": "crowsonkb/style_transfer",
"path": "optimizers.py",
"copies": "1",
"size": "4835",
"license": "mit",
"hash": 1740575003854315300,
"line_mean": 34.0362318841,
"line_max": 93,
"alpha_frac": 0.5677352637,
"autogenerated": false,
"ratio": 3.2602832097100474,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.932304889921638,
"avg_score": 0.0009939148387335093,
"num_lines": 138
} |
""" A module for handling HiSeq-specific files and folders
"""
import os
import glob
import csv
import scilifelab.illumina as illumina
class HiSeqRun(illumina.IlluminaRun):
    """Represents a HiSeq run folder and its samplesheet."""
    def __init__(self, run_dir, samplesheet=None):
        illumina.IlluminaRun.__init__(self, run_dir, samplesheet)
        if self.samplesheet_file is not None:
            self.samplesheet = HiSeqSampleSheet(self.samplesheet_file)
    @staticmethod
    def _samplesheet_header():
        """Return a list of columns in the HiSeq samplesheet
        """
        return ["FCID",
                "Lane",
                "SampleID",
                "SampleRef",
                "Index",
                "Description",
                "Control",
                "Recipe",
                "Operator",
                "SampleProject"]
    @staticmethod
    def parse_samplesheet(samplesheet, lane=None, sample_project=None, index=None):
        """Parse a .csv samplesheet and return a list of dictionaries with
        elements corresponding to rows of the samplesheet and keys
        corresponding to the columns in the header. Optionally filter by lane
        and/or sample_project and/or index.
        """
        entries = []
        # NOTE(review): "rU" mode was removed in Python 3.11; fine under
        # Python 2 but needs changing to "r" for modern Python.
        with open(samplesheet,"rU") as fh:
            csvr = csv.DictReader(fh, dialect='excel')
            entries = [row for row in csvr \
                       if (lane is None or row["Lane"] == lane) \
                       and (sample_project is None or row["SampleProject"] == sample_project) \
                       and (index is None or row["Index"] == index)]
        return entries
    @staticmethod
    def write_samplesheet(sdata, samplesheet):
        """Write a .csv samplesheet from a list of entries
        """
        with open(samplesheet,"w") as outh:
            csvw = csv.writer(outh)
            csvw.writerow(HiSeqRun._samplesheet_header())
            csvw.writerows(sdata)
        return samplesheet
    @staticmethod
    def get_project_names(samplesheet):
        """List the projects available in the samplesheet. Optionally filter by project name.
        """
        # "__" is used as an escaped "." in project names
        return sorted(list(set([e['SampleProject'].replace("__", ".") for e in HiSeqRun.parse_samplesheet(samplesheet)])))
    @staticmethod
    def get_project_sample_ids(samplesheet, project):
        """Return the samples listed in the samplesheet for a project
        """
        ids = []
        for e in HiSeqRun.parse_samplesheet(samplesheet):
            if e['SampleProject'].replace('__','.') == project:
                ids.append(e['SampleID'])
        return ids
class HiSeqSampleSheet(list):
    """A list of samplesheet rows (dicts keyed by column name) parsed from
    a HiSeq SampleSheet.csv, or wrapped from an existing list of rows.
    """
    def __init__(self, samplesheet, lane=None, sample_project=None, index=None):
        """samplesheet: path to a SampleSheet.csv, or a list of row dicts.
        lane / sample_project / index: optional filters; when given, only
        matching rows are kept.
        """
        self.header = ["FCID",
                       "Lane",
                       "SampleID",
                       "SampleRef",
                       "Index",
                       "Description",
                       "Control",
                       "Recipe",
                       "Operator",
                       "SampleProject"]
        if isinstance(samplesheet, list):
            self.extend(samplesheet)
        else:
            self.samplesheet = samplesheet
            # BUGFIX: forward the caller's filter arguments; previously
            # hard-coded None was passed, so the filters were ignored.
            self._parse_sample_sheet(
                lane=lane, sample_project=sample_project, index=index)
    def _parse_sample_sheet(self, lane=None, sample_project=None, index=None):
        """Parse a .csv samplesheet and append each row (as a dict keyed by
        header column) that passes the optional lane/sample_project/index
        filters.
        """
        # "r" instead of the deprecated "rU" mode ("U" was removed in
        # Python 3.11; universal newlines are the default in Python 3).
        with open(self.samplesheet, "r") as fh:
            csvr = csv.DictReader(fh, dialect='excel')
            for row in csvr:
                if (lane is None or row["Lane"] == lane) \
                        and (sample_project is None or row["SampleProject"] == sample_project) \
                        and (index is None or row["Index"] == index):
                    self.append(row)
    def write(self, samplesheet):
        """Write samplesheet to .csv file
        """
        with open(samplesheet, "w") as outh:
            csvw = csv.writer(outh)
            # Use the first row's keys if we have data, else the default header
            if len(self) > 0:
                csvw.writerow(self[0].keys())
            else:
                csvw.writerow(self.header)
            csvw.writerows([row.values() for row in self])
| {
"repo_name": "kate-v-stepanova/scilifelab",
"path": "scilifelab/illumina/hiseq.py",
"copies": "4",
"size": "4389",
"license": "mit",
"hash": 3364563665902037500,
"line_mean": 35.8823529412,
"line_max": 122,
"alpha_frac": 0.5532011848,
"autogenerated": false,
"ratio": 4.31988188976378,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.6873083074563779,
"avg_score": null,
"num_lines": null
} |
# a module for handling interaction with the Flickr photo-sharing service
import hashlib
import mimetools
import mimetypes
import os
import random
import socket
import urllib
import urllib2
import urlparse
import xml.dom.minidom

from fetch import GET_CMD, Request
API_KEY="f29c6d8fa5d950131c4ae13adc55700d"
SECRET="037ec6eec0e91cab"
BASE_URL="http://www.flickr.com/services/rest/"
AUTH_URL="http://www.flickr.com/services/auth/"
UPLOAD_URL="http://www.flickr.com/services/upload/"
GF4D_GROUP="46555832@N00"
class FlickrError(Exception):
    """Error raised for failures reported by the Flickr REST API.

    `code` holds Flickr's numeric error code (0 when none was supplied).
    """
    def __init__(self, msg, code=0):
        super(FlickrError, self).__init__(msg)
        self.code = int(code)
def makeRequest(base_url, is_post,
                is_signed=False, input="",
                content_type="application/x-binary",
                **kwds):
    """Build a Request object for a Flickr REST call.

    base_url: endpoint (REST, auth or upload URL).
    is_post: True for POST, False for GET.
    is_signed: when True, an api_sig computed over kwds is appended.
    input: request body (used for uploads).
    content_type: MIME type of `input`; only sent for POST requests.
    kwds: become URL query parameters.
    """
    if is_signed:
        kwds["api_sig"] = createSig(**kwds)
    query = urllib.urlencode(kwds)
    url = "%s?%s" % (base_url, query)
    cmd = GET_CMD
    # Python 2 and/or conditional-expression idiom
    method = is_post and "POST" or "GET"
    args = [method, url]
    if is_post:
        args.append(content_type)
    return Request(cmd, args, input)
def parseResponse(resp):
    """Parse a raw Flickr REST reply into a DOM document.

    Raises FlickrError if the reply is not XML, is not an <rsp> document,
    or carries a stat other than "ok" (in which case the Flickr error
    message and code are propagated).
    """
    try:
        dom = xml.dom.minidom.parseString(resp)
    except xml.parsers.expat.ExpatError:
        raise FlickrError("Unexpected response:not an xml file")
    root = dom.documentElement
    if root.nodeName != "rsp":
        raise FlickrError("Unexpected response: %s" % resp)
    if root.getAttribute("stat") != "ok":
        # error encountered
        err_node = dom.getElementsByTagName("err")[0]
        error_code = err_node.getAttribute("code")
        error_msg = err_node.getAttribute("msg")
        raise FlickrError(
            "Error returned: %s [%s]" % (error_msg, error_code), error_code)
    return dom
def createSig(**kwds):
    """Compute the Flickr API signature for the given parameters.

    Per the Flickr signing scheme: MD5 hex digest of the shared secret
    followed by every key and value, with keys in sorted order.
    """
    parts = [SECRET]
    for key in sorted(kwds):
        parts.append(key)
        parts.append(kwds[key])
    return hashlib.md5("".join(parts)).hexdigest()
def getSignedUrl(url, **kwds):
    """Return `url` with the query parameters plus their api_sig appended."""
    # Sign the parameters first, then add the signature itself.
    kwds["api_sig"] = createSig(**kwds)
    return "%s?%s" % (url, urllib.urlencode(kwds))
def getAuthUrl(frob_):
    """Return the signed URL the user must visit to grant write access."""
    return getSignedUrl(
        AUTH_URL, api_key=API_KEY, perms="write", frob=frob_)
def requestFrob():
    """Build the signed request that asks Flickr for a login frob."""
    return makeRequest(
        BASE_URL, is_post=False, is_signed=True,
        api_key=API_KEY, method="flickr.auth.getFrob")
def parseFrob(resp):
    """Extract the frob string from a flickr.auth.getFrob response DOM."""
    frob_node = resp.getElementsByTagName("frob")[0]
    return frob_node.firstChild.nodeValue
def requestToken(frob_):
    """Build the signed request exchanging a frob for an auth token."""
    return makeRequest(
        BASE_URL, is_post=False, is_signed=True,
        api_key=API_KEY, method="flickr.auth.getToken", frob=frob_)
def parseToken(resp):
    """Wrap a flickr.auth.getToken response DOM in a Token object."""
    return Token(resp)
def requestCheckToken(token):
    """Build the signed request verifying an auth token is still valid."""
    return makeRequest(
        BASE_URL, is_post=False, is_signed=True,
        method="flickr.auth.checkToken",
        api_key=API_KEY, auth_token=token)
def parseCheckToken(resp):
    """Wrap a checkToken response in a Token.

    Validity was already established upstream: parseResponse raises for a
    non-ok reply, so reaching this point means the token is valid.
    """
    return Token(resp)
def requestUpload(photo, token, **kwds):
    """Build the signed multipart POST that uploads `photo` (a file path).

    Extra keyword arguments become additional upload meta-data fields.
    """
    kwds["api_key"] = API_KEY
    kwds["auth_token"] = token
    # Sign over the meta-data fields (the photo bytes are not signed).
    kwds["api_sig"] = createSig(**kwds)
    upload_files = [("photo", os.path.basename(photo), open(photo).read())]
    content_type, body = encode_multipart_formdata(kwds, upload_files)
    return makeRequest(UPLOAD_URL, True, True, body, content_type)
def parseUpload(resp):
    """Extract the new photo's id from an upload response DOM."""
    photoid_node = resp.getElementsByTagName("photoid")[0]
    return photoid_node.firstChild.nodeValue
def requestGroupsSearch(query):
    """Build the (unsigned) request searching Flickr groups for `query`."""
    return makeRequest(
        BASE_URL, is_post=False,
        api_key=API_KEY, method="flickr.groups.search", text=query)
def parseGroupsSearch(resp):
    """Return a Group object for each <group> element in the response."""
    return [Group(node) for node in resp.getElementsByTagName("group")]
def requestGroupsPoolsAdd(photo, token, group=GF4D_GROUP):
    """Build the signed POST adding a photo to a group pool.

    The response carries no payload, so no matching parse function exists.
    """
    return makeRequest(
        BASE_URL, is_post=True, is_signed=True,
        api_key=API_KEY, method="flickr.groups.pools.add",
        auth_token=token, data="", photo_id=photo, group_id=group)
def requestPeopleGetPublicGroups(nsid):
    """Build the (unsigned) request listing a user's public groups."""
    return makeRequest(
        BASE_URL, is_post=False,
        api_key=API_KEY, method="flickr.people.getPublicGroups",
        user_id=nsid)
def parsePeopleGetPublicGroups(resp):
    """Return a Group object for each <group> element in the response."""
    return [Group(node) for node in resp.getElementsByTagName("group")]
def requestUrlsGetUserPhotos(nsid):
    """Build the signed request asking for a user's photos URL.

    BUGFIX: the request was assigned to a local variable but never
    returned, so this function always returned None.
    """
    return makeRequest(
        BASE_URL,
        False,
        True,
        api_key=API_KEY,
        method="flickr.urls.getUserPhotos",
        user_id=nsid)
def parseUrlsGetUserPhotos(resp):
    """Extract the user's photos URL from the response DOM."""
    user_node = resp.getElementsByTagName("user")[0]
    return user_node.getAttribute("url")
def requestBlogsGetList(token):
    """Build the signed request listing the user's configured blogs."""
    return makeRequest(
        BASE_URL, is_post=False, is_signed=True,
        api_key=API_KEY, auth_token=token,
        method="flickr.blogs.getList")
def parseBlogsGetList(resp):
    """Return a Blog object for each <blog> element in the response."""
    return [Blog(node) for node in resp.getElementsByTagName("blog")]
def requestBlogsPostPhoto(blog, photo, title_, description_, token):
    """Build the signed POST publishing a photo to one of the user's blogs.

    The response carries no payload, so no matching parse function exists.
    """
    return makeRequest(
        BASE_URL, is_post=True, is_signed=True,
        api_key=API_KEY, method="flickr.blogs.postPhoto",
        auth_token=token, blog_id=blog.id, photo_id=photo,
        title=title_, description=description_)
class Blog:
    """A weblog configured on the user's Flickr account, built from a
    <blog> DOM element."""
    def __init__(self, element):
        for attr in ("id", "name", "needspassword", "url"):
            setattr(self, attr, element.getAttribute(attr))
class Token:
    """An auth token plus the User it was issued for, built from a
    getToken/checkToken response DOM."""
    def __init__(self, resp):
        token_node = resp.getElementsByTagName("token")[0]
        self.token = token_node.firstChild.nodeValue.encode("ascii")
        self.user = User(resp.getElementsByTagName("user")[0])
class User:
    """A Flickr user, built from a <user> DOM element."""
    def __init__(self, element):
        for attr in ("nsid", "username", "fullname"):
            setattr(self, attr, element.getAttribute(attr))
class Group:
    """A Flickr group, built from a <group> DOM element."""
    def __init__(self, element):
        for attr in ("nsid", "name"):
            setattr(self, attr, element.getAttribute(attr))
# This code is from www.voidspace.org.uk/atlantibots/pythonutils.html
def encode_multipart_formdata(fields, files):
""" Encodes fields and files for uploading.
fields is a sequence of (name, value) elements for regular form fields - or a dictionary.
files is a sequence of (name, filename, value) elements for data to be uploaded as files.
Return (content_type, body) ready for urllib2.Request instance
You can optionally pass in a boundary string to use or we'll let mimetools provide one.
"""
try:
BOUNDARY = '-----'+mimetools.choose_boundary()+'-----'
except socket.gaierror:
# occurs on some peoples' computers, appears to be due to subtle
# misconfiguration. But since we don't really need it...
BOUNDARY = '-----'+int(random.uniform(1000000000))+'-----'
CRLF = '\r\n'
L = []
if isinstance(fields, dict):
fields = fields.items()
for (key, value) in fields:
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"' % key)
L.append('')
L.append(value)
for (key, filename, value) in files:
filetype = mimetypes.guess_type(filename)[0] or 'application/octet-stream'
L.append('--' + BOUNDARY)
L.append('Content-Disposition: form-data; name="%s"; filename="%s"' % (key, filename))
L.append('Content-Type: %s' % filetype)
L.append('')
L.append(value)
L.append('--' + BOUNDARY + '--')
L.append('')
try:
body = CRLF.join(L)
except UnicodeDecodeError, err:
print "unicode error", str(err)
for x in L:
print x.__class__
if len(x) > 0:
print "%x" % ord(x[0])
print x[:100]
raise
content_type = 'multipart/form-data; boundary=%s' % BOUNDARY
return content_type, body
def build_request(theurl, fields, files, txheaders=None):
    """Given the fields to set and the files to encode it returns a fully formed urllib2.Request object.
    You can optionally pass in additional headers to encode into the object.
    (Content-type and Content-length will be overridden if they are set).
    fields is a sequence of (name, value) elements for regular form fields - or a dictionary.
    files is a sequence of (name, filename, value) elements for data to be uploaded as files.
    """
    content_type, body = encode_multipart_formdata(fields, files)
    if not txheaders: txheaders = {}
    # NOTE: a caller-supplied non-empty txheaders dict is mutated in place.
    txheaders['Content-type'] = content_type
    txheaders['Content-length'] = str(len(body))
    return urllib2.Request(theurl, body, txheaders)
| {
"repo_name": "ericchill/gnofract4d",
"path": "fractutils/flickr.py",
"copies": "1",
"size": "8915",
"license": "bsd-3-clause",
"hash": 2372287830133748700,
"line_mean": 29.1182432432,
"line_max": 104,
"alpha_frac": 0.6259113853,
"autogenerated": false,
"ratio": 3.629885993485342,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.47557973787853414,
"avg_score": null,
"num_lines": null
} |
""" A module for handling MiSeq-specific files and folders
"""
import os
import re
import glob
from collections import OrderedDict, defaultdict
import scilifelab.illumina as illumina
from scilifelab.illumina.hiseq import HiSeqSampleSheet
def group_fastq_files(fastq_files):
    """Divide the input fastq files into batches based on lane and read, ignoring set"""
    # File names are expected to contain "_L<lane>_R<read>_" (or I for index
    # reads), e.g. "sample_L001_R1_001.fastq".
    regexp = r'_(L\d+)_([RI]\d+)_'
    batches = {}
    for fastq_file in fastq_files:
        m = re.search(regexp, fastq_file)
        if not m or len(m.groups()) < 2:
            # Python 2 print statement
            print "WARNING: Could not determine lane and read from input file %s" % fastq_file
            continue
        batch = "%s%s" % (m.group(1).strip(),m.group(2).strip())
        if batch not in batches:
            batches[batch] = []
        batches[batch].append(fastq_file)
    return batches.values()
class MiSeqRun(illumina.IlluminaRun):
    """Represents a MiSeq run folder; knows the MiSeq directory layout and
    can export its samplesheet in HiSeq format."""
    def __init__(self, run_dir, samplesheet=None):
        illumina.IlluminaRun.__init__(self, run_dir, samplesheet)
        # MiSeq may keep the samplesheet under Data/Intensities/BaseCalls
        if self.samplesheet_file is None:
            self.samplesheet_file = illumina.IlluminaRun.get_samplesheet(self._basecalls_dir())
        if self.samplesheet_file is not None:
            self.samplesheet = MiSeqSampleSheet(self.samplesheet_file)
        self._fastq = self._fastq_files()
    def write_hiseq_samplesheet(self, samplesheet):
        """Export the metadata for this run in a HiSeq samplesheet format
        """
        hs_ssheet = HiSeqSampleSheet(self.samplesheet.to_hiseq(self.run_info))
        hs_ssheet.write(samplesheet)
    # Path helpers for the standard MiSeq folder layout
    def _data_dir(self):
        return os.path.join(self._run_dir,"Data")
    def _intensities_dir(self):
        return os.path.join(self._data_dir(),"Intensities")
    def _basecalls_dir(self):
        return os.path.join(self._intensities_dir(),"BaseCalls")
    def _multiplex_dir(self):
        return os.path.join(self._basecalls_dir(),"Multiplex")
    def _alignment_dir(self):
        return os.path.join(self._basecalls_dir(),"Alignment")
    def _runParameters(self):
        return os.path.join(self._run_dir,"runParameters.xml")
    def _runInfo(self):
        return os.path.join(self._run_dir,"RunInfo.xml")
    def _fastq_files(self, fastq_dir=None):
        # Collect the run's fastq files grouped into (lane, read) batches
        if fastq_dir is None:
            fastq_dir = self._basecalls_dir()
        fastq_files = group_fastq_files(glob.glob(os.path.join(fastq_dir,"*.fastq*")))
        return fastq_files
    def _find_samplesheet(self):
        # Look for SampleSheet.csv in the run root, then under BaseCalls
        dirs = [self._run_dir,
                self._basecalls_dir()]
        for dir in dirs:
            ss = os.path.join(dir,"SampleSheet.csv")
            if os.path.exists(ss):
                return ss
        return None
    def _split_fastq(self):
        # Split demultiplexed fastq batches into per-sample files; index 0
        # is reserved for reads not matching any barcode.
        samples = self.samplesheet.sample_names()
        samples.insert(0,"unmatched")
        sample_names = {}
        for i,name in enumerate(samples):
            sample_names[str(i)] = name
        out_dir = self._multiplex_dir()
        import split_demultiplexed
        split_demultiplexed._split_fastq_batches(self._fastq,out_dir,sample_names)
class MiSeqSampleSheet:
    """Parses a MiSeq-format SampleSheet.csv ([Header]/[Settings]/[Data]
    sections) and exposes its sample rows; can convert the sheet to the
    HiSeq samplesheet layout."""
    def __init__(self, ss_file):
        assert os.path.exists(ss_file), \
            "Samplesheet %s does not exist" % ss_file
        setattr(self, "samplesheet", ss_file)
        # Default [Data] columns, used when the sheet has no header row
        self.data_header = ["Sample_ID",
                           "Sample_Name",
                           "Sample_Plate",
                           "Sample_Well",
                           "Sample_Project",
                           "index",
                           "I7_Index_ID",
                           "index2",
                           "I5_Index_ID",
                           "Description",
                           "Manifest",
                           "GenomeFolder"]
        self._parse_sample_sheet()
    def _parse_sample_sheet(self):
        # Parse the samplesheet file into a data structure:
        # {section: {first_csv_field: rest_of_line}}
        data = defaultdict(dict)
        with open(self.samplesheet,"rU") as fh:
            current = None
            for line in fh:
                line = line.strip()
                if line.startswith("["):
                    # Section marker, e.g. "[Data]"
                    current = line.strip("[], ")
                else:
                    if current is None:
                        current = "NoSection"
                    s = line.split(",",1)
                    if len(s) > 1:
                        data[current][s[0]] = s[1]
                    else:
                        data[current][line] = ''
        # Assign the parsed attributes to class attributes
        for option, value in data.get("Header",{}).items():
            setattr(self, option.replace(" ", ""), value)
        for option, value in data.get("Settings",{}).items():
            setattr(self, option, value)
        if "Data" not in data:
            # No [Data] section: synthesize one from the default header and
            # any section-less lines.
            data["Data"] = {}
            data["Data"][self.data_header[0]] = ",".join(self.data_header[1:])
            for option, value in data.get("NoSection",{}).items():
                data["Data"][option] = value
        # Parse sample data
        first_data_col = "Sample_ID"
        if "Data" in data and first_data_col in data["Data"]:
            # Column names are lower-cased for lookup via sample_field()
            self.data_header = [s.lower() for s in data["Data"][first_data_col].split(",")]
            samples = {}
            for sample_id, sample_data in data["Data"].items():
                if sample_id == first_data_col:
                    continue
                samples[sample_id] = dict(zip(self.data_header,sample_data.split(",")))
                samples[sample_id][first_data_col.lower()] = sample_id
            setattr(self, "samples", samples)
    def sample_names(self):
        """Return the name of the samples in the same order as they are listed in
        the samplesheet.
        """
        samples = getattr(self,"samples",{})
        # Lazily computed and cached; the dict above loses file order, so
        # re-read the [Data] section to recover it.
        if getattr(self, "_sample_names", None) is None:
            sample_names = []
            with open(self.samplesheet,"rU") as fh:
                for line in fh:
                    if line.startswith("[Data]"):
                        for line in fh:
                            data = line.split(",")
                            if len(data) == 0 or data[0].startswith("["):
                                break
                            if data[0] in samples:
                                sample_names.append(data[0])
            self._sample_names = sample_names
        return self._sample_names
    def sample_field(self, sample_id, sample_field=None):
        """Return one field (lower-cased column name) for a sample, or the
        whole row dict when sample_field is None."""
        samples = getattr(self,"samples",{})
        assert sample_id in samples, \
            "The sample '%s' was not found in samplesheet %s" % (sample_id,self.samplesheet)
        if sample_field is None:
            return samples[sample_id]
        assert sample_field in samples[sample_id], \
            "The sample field '%s' was not found in samplesheet %s" % (sample_field,self.samplesheet)
        return samples[sample_id][sample_field]
    def to_hiseq(self, run_config={}):
        """Convert Miseq SampleSheet to HiSeq formatted Samplesheet.

        run_config: optional dict; 'Flowcell' is used for the FCID column.
        Returns a list of OrderedDicts, one per sample (MiSeq is single-lane,
        so Lane is always "1").
        """
        FCID = run_config.get('Flowcell','NA')
        Lane = "1"
        SampleRef = "NA"
        Description = "NA"
        Control = "N"
        Recipe = "NA"
        Operator = "NA"
        rows = []
        # Python 2 iteritems()
        for sampleID, info in self.samples.iteritems():
            row = OrderedDict()
            row["FCID"] = FCID
            row["Lane"] = Lane
            row["SampleID"] = sampleID
            row["SampleRef"] = self._extract_reference_from_path(info.get('genomefolder',''))
            row["Index"] = info.get('index','')
            # Dual indexes are joined with "-"
            if 'index2' in info and len(info['index2']) > 0:
                row["Index"] = "{}-{}".format(row["Index"],info["index2"])
            row["Description"] = info.get('description','')
            row["Control"] = Control
            row["Recipe"] = Recipe
            row["Operator"] = Operator
            row["SampleProject"] = info.get('sample_project','Unknown')
            rows.append(row)
        return rows
    def _extract_reference_from_path(self, path):
        """Attempts to extract a name of a reference assembly from a path
        """
        # Walk the path from the leaf upwards, returning the first component
        # that looks like an assembly name (letters followed by digits/dots,
        # e.g. "hg19"); fall back to the whole path.
        head = path
        regexp = r'[a-zA-Z]+[0-9\.]+$'
        while head is not None and len(head) > 0:
            head, tail = os.path.split(head.replace('\\','/'))
            if re.match(regexp, tail) is not None:
                return tail
        return path
| {
"repo_name": "senthil10/scilifelab",
"path": "scilifelab/illumina/miseq.py",
"copies": "4",
"size": "8622",
"license": "mit",
"hash": -7939207240046575000,
"line_mean": 35.8504273504,
"line_max": 101,
"alpha_frac": 0.5213407562,
"autogenerated": false,
"ratio": 4.03085553997195,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.655219629617195,
"avg_score": null,
"num_lines": null
} |
"""A module for housing the Cursor class.
Exported Classes:
Cursor -- Class for representing a database cursor.
"""
from collections import deque
from .statement import Statement, PreparedStatement
from .exception import Error, NotSupportedError, ProgrammingError
class Cursor(object):
    """Class for representing a database cursor.

    Public Functions:
    close -- Closes the cursor into the database.
    callproc -- Currently not supported.
    execute -- Executes an SQL operation.
    executemany -- Executes the operation for each list of paramaters passed in.
    fetchone -- Fetches the next row of results generated by the previous execute.
    fetchmany -- Fetches the number of rows that are passed in.
    fetchall -- Fetches everything generated by the previous execute.
    nextset -- Currently not supported.
    setinputsizes -- Currently not supported.
    setoutputsize -- Currently not supported.

    Private Functions:
    __init__ -- Constructor for the Cursor class.
    _check_closed -- Checks if the cursor is closed.
    _reset -- Resets SQL transaction variables.
    _execute -- Handles operations without parameters.
    _executeprepared -- Handles operations with parameters.
    """
    def __init__(self, session, prepared_statement_cache_size):
        """
        Constructor for the Cursor class.

        :type session EncodedSession
        :type prepared_statement_cache_size int
        """
        self.session = session
        """ :type : EncodedSession """
        self._statement_cache = StatementCache(session, prepared_statement_cache_size)
        """ :type : StatementCache """
        self._result_set = None
        """ :type : result_set.ResultSet """
        # Standard DB-API 2.0 cursor attributes.
        self.closed = False
        self.arraysize = 1
        self.description = None
        self.rowcount = -1
        self.colcount = -1
        self.rownumber = 0
        # Most recent SQL text passed to execute() (name-mangled private).
        self.__query = None
    @property
    def query(self):
        """Return the most recent query"""
        return self.__query
    def close(self):
        """Closes the cursor into the database.

        Also closes every statement held by this cursor's statement cache.
        """
        self._check_closed()
        self._statement_cache.shutdown()
        self.closed = True
    def _check_closed(self):
        """Checks if the cursor is closed.

        Raises:
            Error: if this cursor, or its underlying connection, is closed.
        """
        if self.closed:
            raise Error("cursor is closed")
        if self.session.closed:
            raise Error("connection is closed")
    def _reset(self):
        """Resets SQL transaction variables.

        Also drops the reference to any open result set.
        """
        self.description = None
        self.rowcount = -1
        self.colcount = -1
        self._result_set = None
    def callproc(self, procname, parameters=None):
        """Currently not supported."""
        # Calling with no arguments at all is tolerated as a no-op.
        if(procname is not None or parameters is not None):
            raise NotSupportedError("Currently unsupported")
    def execute(self, operation, parameters=None):
        """Executes an SQL operation.

        The SQL operations can be with or without parameters, if parameters are included
        then _executeprepared is invoked to prepare and execute the operation.

        Arguments:
        operation -- SQL operation to be performed.
        parameters -- Additional parameters for the operation may be supplied, but these
                      are optional.

        Returns:
        None
        """
        self._check_closed()
        self._reset()
        self.__query = operation
        if parameters is None:
            exec_result = self._execute(operation)
        else:
            exec_result = self._executeprepared(operation, parameters)
        self.rowcount = exec_result.row_count
        # A positive result code means the statement produced a result set.
        if exec_result.result > 0:
            self._result_set = self.session.fetch_result_set(exec_result.statement)
            self.description = self.session.fetch_result_set_description(self._result_set)
        # Normalize any negative server-reported count to the DB-API
        # "unknown" value of -1.
        if self.rowcount < 0:
            self.rowcount = -1
        self.rownumber = 0
    def _execute(self, operation):
        """Handles operations without parameters.

        Uses the single shared (non-prepared) statement from the cache.
        """
        # Use handle to query
        return self.session.execute_statement(self._statement_cache.get_statement(), operation)
    def _executeprepared(self, operation, parameters):
        """Handles operations with parameters.

        Raises:
            ProgrammingError: if the number of supplied parameters does not
                match the statement's placeholder count.
        """
        # Create a statement handle
        p_statement = self._statement_cache.get_prepared_statement(operation)
        if p_statement.parameter_count != len(parameters):
            raise ProgrammingError("Incorrect number of parameters specified, expected %d, got %d" %
                                   (p_statement.parameter_count, len(parameters)))
        # Use handle to query
        return self.session.execute_prepared_statement(p_statement, parameters)
    def executemany(self, operation, seq_of_parameters):
        """Executes the operation for each list of paramaters passed in."""
        # NOTE(review): unlike execute(), this does not call _reset() or
        # update rowcount/description afterwards.
        self._check_closed()
        p_statement = self._statement_cache.get_prepared_statement(operation)
        self.session.execute_batch_prepared_statement(p_statement, seq_of_parameters)
    def fetchone(self):
        """Fetches the next row of the results generated by the previous execute.

        Raises:
            Error: if there is no open result set to fetch from.
        """
        self._check_closed()
        if self._result_set is None:
            raise Error("Previous execute did not produce any results or no call was issued yet")
        self.rownumber += 1
        return self._result_set.fetchone(self.session)
    def fetchmany(self, size=None):
        """Fetches the number of rows that are passed in.

        Defaults to `self.arraysize` rows; may return fewer when the result
        set is exhausted.
        """
        self._check_closed()
        if size is None:
            size = self.arraysize
        fetched_rows = []
        num_fetched_rows = 0
        while num_fetched_rows < size:
            row = self.fetchone()
            if row is None:
                break
            else:
                fetched_rows.append(row)
                num_fetched_rows += 1
        return fetched_rows
    def fetchall(self):
        """Fetches everything generated by the previous execute."""
        self._check_closed()
        fetched_rows = []
        while True:
            row = self.fetchone()
            if row is None:
                break
            else:
                fetched_rows.append(row)
        return fetched_rows
    def nextset(self):
        """Currently not supported."""
        raise NotSupportedError("Currently unsupported")
    def setinputsizes(self, sizes):
        """Currently not supported."""
        pass
    def setoutputsize(self, size, column=None):
        """Currently not supported."""
        pass
class StatementCache(object):
    """LRU cache of PreparedStatements plus one shared plain Statement.

    At most `prepared_statement_cache_size` prepared statements are kept
    alive; when the cache overflows, the least-recently-used statement is
    closed on the server and dropped.
    """
    def __init__(self, session, prepared_statement_cache_size):
        self._session = session
        """ :type : EncodedSession """
        self._statement = self._session.create_statement()
        """ :type : Statement """
        self._ps_cache = dict()
        """ :type : dict[str,PreparedStatement] """
        # Keys ordered least- to most-recently used.
        self._ps_key_queue = deque()
        """ :type : deque[str] """
        self._ps_cache_size = prepared_statement_cache_size
        """ :type : int """
    def get_statement(self):
        """Return the shared (non-prepared) Statement.

        :rtype : Statement
        """
        return self._statement
    def get_prepared_statement(self, query):
        """Return a PreparedStatement for `query`, creating and caching it on
        a miss and refreshing its LRU position on a hit.

        :type query str
        :rtype : PreparedStatement
        """
        statement = self._ps_cache.get(query)
        if statement is not None:
            # Cache hit: move the key to the most-recently-used end.
            self._ps_key_queue.remove(query)
            self._ps_key_queue.append(query)
            return statement
        statement = self._session.create_prepared_statement(query)
        # Evict least-recently-used statements until there is room. The
        # queue-emptiness guard prevents an IndexError from popleft() when
        # the configured cache size is zero.
        while self._ps_key_queue and len(self._ps_cache) >= self._ps_cache_size:
            lru_statement_key = self._ps_key_queue.popleft()
            statement_to_remove = self._ps_cache[lru_statement_key]
            self._session.close_statement(statement_to_remove)
            del self._ps_cache[lru_statement_key]
        self._ps_key_queue.append(query)
        self._ps_cache[query] = statement
        return statement
    def shutdown(self):
        """Close the shared statement and every cached prepared statement."""
        self._session.close_statement(self._statement)
        for key in self._ps_cache:
            statement_to_remove = self._ps_cache[key]
            self._session.close_statement(statement_to_remove)
        self._ps_cache.clear()
        self._ps_key_queue.clear()
| {
"repo_name": "tvincentNuoDB/nuodb-python",
"path": "pynuodb/cursor.py",
"copies": "1",
"size": "8518",
"license": "bsd-3-clause",
"hash": -8210587714487926000,
"line_mean": 31.3878326996,
"line_max": 100,
"alpha_frac": 0.6021366518,
"autogenerated": false,
"ratio": 4.586968228325256,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.011137601469320592,
"num_lines": 263
} |
"""A module for housing the datatype classes.
Exported Classes:
Binary -- Class for a Binary object
Exported Functions:
DateFromTicks -- Converts ticks to a Date object.
TimeFromTicks -- Converts ticks to a Time object.
TimestampFromTicks -- Converts ticks to a Timestamp object.
DateToTicks -- Converts a Date object to ticks.
TimeToTicks -- Converts a Time object to ticks.
TimestampToTicks -- Converts a Timestamp object to ticks.
TypeObjectFromNuodb -- Converts a Nuodb column type name to a TypeObject variable.
TypeObject Variables:
STRING -- TypeObject(str)
BINARY -- TypeObject(str)
NUMBER -- TypeObject(int, decimal.Decimal)
DATETIME -- TypeObject(datetime.datetime, datetime.date, datetime.time)
ROWID -- TypeObject()
"""
__all__ = [ 'Date', 'Time', 'Timestamp', 'DateFromTicks', 'TimeFromTicks',
'TimestampFromTicks', 'DateToTicks', 'TimeToTicks', 'TimestampToTicks',
'Binary', 'STRING', 'BINARY', 'NUMBER', 'DATETIME', 'ROWID', 'TypeObjectFromNuodb' ]
from datetime import datetime as Timestamp, date as Date, time as Time, timedelta as TimeDelta
import decimal, time
from .exception import DataError
class Binary(object):
    """Class for a Binary object.

    Wraps a raw string so binary parameters and results can be told apart
    from plain text.

    Private Functions:
    __init__ -- Constructor for the Binary class.
    __str__ -- Stringifies the Binary object.
    __eq__ -- Checks equality of two Binary objects.
    __ne__ -- Checks inequality of two Binary objects.
    __hash__ -- Hashes consistently with __eq__.
    """
    def __init__(self, string):
        """Constructor for the Binary class."""
        self.string = string
    def __str__(self):
        """Stringifies the Binary object."""
        return self.string
    def __eq__(self, other):
        """Checks equality of two Binary objects."""
        if isinstance(other, Binary):
            return self.string == other.string
        else:
            return False
    def __ne__(self, other):
        """Checks inequality of two Binary objects.

        Needed on Python 2, where defining __eq__ alone leaves != at the
        default identity comparison.
        """
        return not self.__eq__(other)
    def __hash__(self):
        # Hash on the wrapped string so equal Binary objects hash equally
        # (Python 3 would otherwise make the class unhashable once __eq__
        # is defined).
        return hash(self.string)
def DateFromTicks(ticks):
    """Build a Date from a seconds-since-epoch value, in local time."""
    year, month, day = time.localtime(ticks)[:3]
    return Date(year, month, day)
def TimeFromTicks(ticks, micro = 0):
    """Build a Time from a seconds-since-epoch value, in local time.

    `micro` supplies the microsecond component, which ticks cannot carry.
    """
    hour, minute, second = time.localtime(ticks)[3:6]
    return Time(hour, minute, second, micro)
def TimestampFromTicks(ticks, micro = 0):
    """Build a Timestamp (datetime) from seconds-since-epoch, in local time.

    `micro` supplies the microsecond component, which ticks cannot carry.
    """
    year, month, day, hour, minute, second = time.localtime(ticks)[:6]
    return Timestamp(year, month, day, hour, minute, second, micro)
def DateToTicks(value):
    """Converts a Date object to ticks (seconds since the epoch, local time).

    Raises:
        DataError: when the date is outside the platform's mktime() range.
    """
    timeStruct = Date(value.year, value.month, value.day).timetuple()
    try:
        return int(time.mktime(timeStruct))
    except (ValueError, OverflowError):
        # Catch only what mktime() actually raises; the original bare
        # except also swallowed KeyboardInterrupt/SystemExit.
        raise DataError("Year out of range")
def TimeToTicks(value):
    """Converts a Time object to ticks.

    Returns a (ticks, scale) pair where ticks is the time of day expressed
    in units of 10**-scale seconds, shifted by the local timezone offset.
    """
    as_delta = TimeDelta(hours = value.hour, minutes = value.minute,
                         seconds = value.second, microseconds = value.microsecond)
    seconds = decimal.Decimal(str(as_delta.total_seconds()))
    scale = abs(seconds.as_tuple().exponent)
    return (int((seconds + time.timezone) * 10 ** scale), scale)
def TimestampToTicks(value):
    """Converts a Timestamp object to ticks.

    Returns a (ticks, scale) pair: ticks is the timestamp expressed in units
    of 10**-scale seconds (local time); scale is 0 for whole seconds.
    """
    timeStruct = Timestamp(value.year, value.month, value.day, value.hour, value.minute, value.second).timetuple()
    try:
        if value.microsecond:
            # Express the microseconds as a decimal fraction of a second.
            # len(str(micro)) - 2 strips the leading "0." to recover the
            # number of fractional digits, which becomes the scale.
            micro = decimal.Decimal(value.microsecond) / decimal.Decimal(1000000)
            return (int((decimal.Decimal(int(time.mktime(timeStruct))) + micro) * decimal.Decimal(int(10**(len(str(micro)) - 2)))), len(str(micro)) - 2)
        else:
            return (int(time.mktime(timeStruct)), 0)
    except:
        # NOTE(review): bare except also hides KeyboardInterrupt/SystemExit;
        # mktime() raises ValueError/OverflowError for out-of-range dates.
        raise DataError("Year out of range")
class TypeObject(object):
    """DB-API type object: compares equal to any of its member types.

    __eq__/__ne__ are defined so the comparison also works on Python 3,
    where the legacy __cmp__ protocol (kept below for Python 2) is ignored.
    """
    def __init__(self, *values):
        self.values = values
    def __cmp__(self, other):
        # Python 2 comparison fallback, retained for compatibility.
        if other in self.values:
            return 0
        if other < self.values:
            return 1
        return -1
    def __eq__(self, other):
        return other is self or other in self.values
    def __ne__(self, other):
        return not self.__eq__(other)
    # Defining __eq__ would otherwise clear the default hash on Python 3;
    # these singletons were hashable by identity before, so restore that.
    __hash__ = object.__hash__
# DB-API 2.0 type singletons: a result column's description type compares
# equal to the corresponding Python type(s).
STRING = TypeObject(str)
BINARY = TypeObject(str)
NUMBER = TypeObject(int, decimal.Decimal)
DATETIME = TypeObject(Timestamp, Date, Time)
ROWID = TypeObject()
# Maps the (stripped) NuoDB column type name to a singleton above; None
# marks the SQL null type. Consumed by TypeObjectFromNuodb.
TYPEMAP={"<null>":None,
         "string":STRING,
         "char":STRING,
         "varchar":STRING,
         "text":STRING,
         "smallint":NUMBER,
         "integer":NUMBER,
         "bigint":NUMBER,
         "float":NUMBER,
         "double":NUMBER,
         "decimal":NUMBER,
         "double precision":NUMBER,
         "date":DATETIME,
         "timestamp":DATETIME,
         "datetime":DATETIME,
         "time":DATETIME,
         "clob":BINARY,
         "blob":BINARY,
         "numeric":NUMBER,
         "number":NUMBER,
         "bytes":BINARY,
         "binarystring":BINARY,
         "binaryvaryingstring":BINARY,
         "boolean":NUMBER,
         "binary":BINARY
         }
def TypeObjectFromNuodb(nuodb_type_name):
    """Returns one of STRING, BINARY, NUMBER, DATETIME, ROWID based on the
    supplied NuoDB column type name.

    Raises:
        DataError: when the (whitespace-stripped) name is not in TYPEMAP.
    """
    nuodb_type_name=nuodb_type_name.strip()
    try:
        return TYPEMAP[nuodb_type_name]
    except KeyError:
        # Only a missing key means an unknown type; the original bare
        # except would also have masked unrelated programming errors.
        raise DataError('received unknown column type from the database "%s"'%(nuodb_type_name,))
| {
"repo_name": "tvincentNuoDB/nuodb-python",
"path": "pynuodb/datatype.py",
"copies": "2",
"size": "4870",
"license": "bsd-3-clause",
"hash": -5951563940907047000,
"line_mean": 32.3561643836,
"line_max": 152,
"alpha_frac": 0.6406570842,
"autogenerated": false,
"ratio": 3.9116465863453813,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5552303670545382,
"avg_score": null,
"num_lines": null
} |
"""A module for implementing interaction between MIDI and SequenceGenerators."""
import abc
import threading
import time
# internal imports
import tensorflow as tf
from magenta.protobuf import generator_pb2
from magenta.protobuf import music_pb2
class MidiInteractionException(Exception):
  """Base class for exceptions raised by this module."""
# TODO(adarob): Move to sequence_utils.
def merge_sequence_notes(sequence_1, sequence_2):
  """Returns a new NoteSequence combining the notes from both inputs.

  All fields aside from `notes` and `total_time` are copied from the first
  input.

  Args:
    sequence_1: A NoteSequence to merge. All fields aside from `notes` and
        `total_time` are copied directly from this sequence in the merged
        sequence.
    sequence_2: A NoteSequence to merge.

  Returns:
    A new NoteSequence combining the notes from the input sequences.
  """
  combined = music_pb2.NoteSequence()
  combined.CopyFrom(sequence_1)
  combined.notes.extend(sequence_2.notes)
  combined.total_time = max(sequence_1.total_time, sequence_2.total_time)
  return combined
# TODO(adarob): Move to sequence_utils.
def filter_instrument(sequence, instrument, from_time=0):
  """Returns a new NoteSequence with notes from the given instrument removed.

  Only notes that start on or after `from_time` will be completely removed.
  Those that start before and end after `from_time` will be truncated to end
  at `from_time` in the returned copy. The input sequence is not modified.

  Args:
    sequence: The NoteSequence to created the filtered sequence from.
    instrument: The instrument number to remove notes of.
    from_time: The time on or after which to remove or truncate notes.

  Returns:
    A new NoteSequence with notes from the given instrument removed or
    truncated after `from_time`.
  """
  filtered_sequence = music_pb2.NoteSequence()
  filtered_sequence.CopyFrom(sequence)
  del filtered_sequence.notes[:]
  for note in sequence.notes:
    if note.instrument == instrument:
      if note.start_time >= from_time:
        continue
      # Copy first and truncate the copy: the original code assigned
      # note.end_time on the input sequence's note, silently mutating the
      # caller's data.
      new_note = filtered_sequence.notes.add()
      new_note.CopyFrom(note)
      if new_note.end_time >= from_time:
        new_note.end_time = from_time
    else:
      filtered_sequence.notes.add().CopyFrom(note)
  return filtered_sequence
# TODO(adarob): Move to sequence_utils.
def adjust_times(sequence, delta_seconds):
  """Shifts every note's start/end time and the sequence total, in place."""
  for shifted_note in sequence.notes:
    shifted_note.start_time = shifted_note.start_time + delta_seconds
    shifted_note.end_time = shifted_note.end_time + delta_seconds
  sequence.total_time = sequence.total_time + delta_seconds
class MidiInteraction(threading.Thread):
  """Base class for handling interaction between MIDI and SequenceGenerator.

  Child classes will provide the "main loop" of an interactive session between
  a MidiHub used for MIDI I/O and sequences generated by a SequenceGenerator in
  their `run` methods.

  Should be started by calling `start` to launch in a separate thread.

  Args:
    midi_hub: The MidiHub to use for MIDI I/O.
    qpm: The quarters per minute to use for this interaction.
  """
  # Fix: the original declared `_metaclass__` (single leading underscore),
  # a typo that left ABCMeta unused and @abstractmethod unenforced on
  # Python 2.
  __metaclass__ = abc.ABCMeta

  def __init__(self, midi_hub, qpm):
    self._midi_hub = midi_hub
    self._qpm = qpm
    # A signal to tell the main loop when to stop.
    self._stop_signal = threading.Event()
    super(MidiInteraction, self).__init__()

  @abc.abstractmethod
  def run(self):
    """The main loop for the interaction.

    Must exit shortly after `self._stop_signal` is set.
    """
    pass

  def stop(self):
    """Stops the main loop, and blocks until the interaction is stopped."""
    self._stop_signal.set()
    self.join()
class CallAndResponseMidiInteraction(MidiInteraction):
  """Implementation of a MidiInteraction for real-time "call and response".

  Alternates between receiving input from the MidiHub ("call") and playing
  generated sequences ("response"). During the call stage, the input is captured
  and used to generate the response, which is then played back during the
  response stage.

  Args:
    midi_hub: The MidiHub to use for MIDI I/O.
    qpm: The quarters per minute to use for this interaction.
    sequence_generator: The SequenceGenerator to use to generate the responses
        in this interaction.
    quarters_per_bar: The number of quarter notes in each bar/measure.
    phrase_bars: The optional number of bars in each phrase. `end_call_signal`
        must be provided if None.
    end_call_signal: The optional midi_hub.MidiSignal to use as a signal to stop
        the call phrase at the end of the current bar. `phrase_bars` must be
        provided if None.
  """
  def __init__(self,
               midi_hub,
               qpm,
               sequence_generator,
               quarters_per_bar=4,
               phrase_bars=None,
               end_call_signal=None):
    """Stores the generator and phrasing configuration; see class docstring."""
    super(CallAndResponseMidiInteraction, self).__init__(midi_hub, qpm)
    self._sequence_generator = sequence_generator
    self._quarters_per_bar = quarters_per_bar
    self._phrase_bars = phrase_bars
    self._end_call_signal = end_call_signal
  def run(self):
    """The main loop for a real-time call and response interaction."""
    # We measure time in units of quarter notes.
    quarter_duration = 60.0 / self._qpm
    # Start time in quarter notes from the epoch, quantized to the next
    # quarter boundary at least one second in the future.
    start_quarters = (time.time() + 1.0) // quarter_duration
    # The number of notes before call stage ends to start generation of
    # response. Will be automatically adjusted to be as small as possible while
    # avoiding late response starts.
    predictahead_quarters = 1
    # Offset to beginning of call phrase from start_quarters.
    call_offset_quarters = 0
    while not self._stop_signal.is_set():
      # Call stage.
      # Call stage start in quarter notes from the epoch.
      call_start_quarters = start_quarters + call_offset_quarters
      # Start the metronome at the beginning of the call stage.
      self._midi_hub.start_metronome(
          self._qpm, call_start_quarters * quarter_duration)
      # Start a captor at the beginning of the call stage.
      captor = self._midi_hub.start_capture(
          self._qpm, call_start_quarters * quarter_duration)
      if self._phrase_bars is not None:
        # Fixed-length phrase: the call ends after the configured bar count.
        # The duration of the call stage in quarter notes.
        call_quarters = self._phrase_bars * self._quarters_per_bar
        # The duration of the call capture in quarter notes.
        capture_quarters = call_quarters - predictahead_quarters
      else:
        # Wait for end signal.
        self._midi_hub.wait_for_event(self._end_call_signal)
        # The duration of the call stage in quarter notes.
        # We end the call stage at the end of the next bar that is at least
        # `predictahead_quarters` in the future.
        call_quarters = time.time() // quarter_duration - call_start_quarters
        remaining_call_quarters = -call_quarters % self._quarters_per_bar
        if remaining_call_quarters < predictahead_quarters:
          remaining_call_quarters += self._quarters_per_bar
        call_quarters += remaining_call_quarters
        # The duration of the call capture in quarter notes.
        capture_quarters = call_quarters - predictahead_quarters
      # Set the metronome to stop at the appropriate time.
      self._midi_hub.stop_metronome(
          (call_quarters + call_start_quarters) * quarter_duration,
          block=False)
      # Stop the captor at the appropriate time.
      captor.stop(stop_time=(
          (call_start_quarters + capture_quarters) * quarter_duration))
      captured_sequence = captor.captured_sequence()
      # Check to see if a stop has been requested during capture.
      if self._stop_signal.is_set():
        break
      # Set times in `captured_sequence` so that the call start is at 0.
      adjust_times(captured_sequence, -(call_start_quarters * quarter_duration))
      # Generate sequence.
      # The response occupies the same number of quarters as the call,
      # starting immediately after it.
      response_start_quarters = call_quarters
      response_end_quarters = 2 * call_quarters
      generator_options = generator_pb2.GeneratorOptions()
      generator_options.generate_sections.add(
          start_time_seconds=response_start_quarters * quarter_duration,
          end_time_seconds=response_end_quarters * quarter_duration)
      # Generate response.
      response_sequence = self._sequence_generator.generate(
          captured_sequence, generator_options)
      # Set times in `response_sequence` back to the wall times.
      adjust_times(response_sequence, call_start_quarters * quarter_duration)
      # Check to see if a stop has been requested during generation.
      if self._stop_signal.is_set():
        break
      # Response stage.
      # Start response playback.
      self._midi_hub.start_playback(response_sequence)
      # Compute remaining time after generation before the response stage
      # starts, updating `predictahead_quarters` appropriately.
      # NOTE(review): there is no lower bound; repeated early generations
      # can drive predictahead_quarters to zero or below.
      remaining_time = (
          (response_start_quarters + call_start_quarters) * quarter_duration -
          time.time())
      if remaining_time > (predictahead_quarters * quarter_duration):
        predictahead_quarters -= 1
        tf.logging.info('Generator is ahead by %.3f seconds. '
                        'Decreasing predictahead_quarters to %d.',
                        remaining_time, predictahead_quarters)
      elif remaining_time < 0:
        predictahead_quarters += 1
        tf.logging.info('Generator is lagging by %.3f seconds. '
                        'Increasing predictahead_quarters to %d.',
                        -remaining_time, predictahead_quarters)
      call_offset_quarters += response_end_quarters
  def stop(self):
    # Wake any thread blocked in wait_for_event so run() can observe the
    # stop signal promptly.
    if self._end_call_signal is not None:
      self._midi_hub.wake_signal_waiters(self._end_call_signal)
    super(CallAndResponseMidiInteraction, self).stop()
| {
"repo_name": "hanzorama/magenta",
"path": "magenta/interfaces/midi/midi_interaction.py",
"copies": "1",
"size": "9737",
"license": "apache-2.0",
"hash": -3038283948923979300,
"line_mean": 36.30651341,
"line_max": 80,
"alpha_frac": 0.686042929,
"autogenerated": false,
"ratio": 3.954914703493095,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007764559262341072,
"num_lines": 261
} |
"""A module for implementing interaction between MIDI and SequenceGenerators."""
import abc
import threading
import time
# internal imports
import tensorflow as tf
import magenta
from magenta.protobuf import generator_pb2
from magenta.protobuf import music_pb2
class MidiInteractionException(Exception):
  """Base class for exceptions raised by this module."""
def adjust_sequence_times(sequence, delta_time):
  """Returns a copy of `sequence` with note and total times shifted by
  `delta_time`."""
  shifted = music_pb2.NoteSequence()
  shifted.CopyFrom(sequence)
  for note in shifted.notes:
    note.start_time += delta_time
    note.end_time += delta_time
  shifted.total_time += delta_time
  return shifted
class MidiInteraction(threading.Thread):
  """Base class for handling interaction between MIDI and SequenceGenerator.

  Child classes will provide the "main loop" of an interactive session between
  a MidiHub used for MIDI I/O and sequences generated by a SequenceGenerator in
  their `run` methods.

  Should be started by calling `start` to launch in a separate thread.

  Args:
    midi_hub: The MidiHub to use for MIDI I/O.
    sequence_generators: A collection of SequenceGenerator objects.
    qpm: The quarters per minute to use for this interaction. May be overriden
        by control changes sent to `tempo_control_number`.
    generator_select_control_number: An optional MIDI control number whose
        value to use for selection a sequence generator from the collection.
        Must be provided if `sequence_generators` contains multiple
        SequenceGenerators.
    tempo_control_number: An optional MIDI control number whose value to use to
        determine the qpm for this interaction. On receipt of a control change,
        the qpm will be set to 60 more than the control change value.
    temperature_control_number: The optional control change number to use for
        controlling generation softmax temperature.

  Raises:
    ValueError: If `generator_select_control_number` is None and
        `sequence_generators` contains multiple SequenceGenerators.
  """
  # Fix: the original declared `_metaclass__` (single leading underscore),
  # a typo that left ABCMeta unused and @abstractmethod unenforced on
  # Python 2.
  __metaclass__ = abc.ABCMeta

  # Base QPM when set by a tempo control change.
  _BASE_QPM = 60

  def __init__(self,
               midi_hub,
               sequence_generators,
               qpm,
               generator_select_control_number=None,
               tempo_control_number=None,
               temperature_control_number=None):
    if generator_select_control_number is None and len(sequence_generators) > 1:
      raise ValueError(
          '`generator_select_control_number` cannot be None if there are '
          'multiple SequenceGenerators.')
    self._midi_hub = midi_hub
    self._sequence_generators = sequence_generators
    self._default_qpm = qpm
    self._generator_select_control_number = generator_select_control_number
    self._tempo_control_number = tempo_control_number
    self._temperature_control_number = temperature_control_number
    # A signal to tell the main loop when to stop.
    self._stop_signal = threading.Event()
    super(MidiInteraction, self).__init__()

  @property
  def _sequence_generator(self):
    """Returns the SequenceGenerator selected by the current control value."""
    if len(self._sequence_generators) == 1:
      return self._sequence_generators[0]
    val = self._midi_hub.control_value(self._generator_select_control_number)
    # An unset control selects the first generator; modulo keeps any value
    # in range.
    val = 0 if val is None else val
    return self._sequence_generators[val % len(self._sequence_generators)]

  @property
  def _qpm(self):
    """Returns the qpm based on the current tempo control value."""
    val = self._midi_hub.control_value(self._tempo_control_number)
    return self._default_qpm if val is None else val + self._BASE_QPM

  @property
  def _temperature(self, min_temp=0.1, max_temp=2.0, default=1.0):
    """Returns the temperature based on the current control value.

    Linearly interpolates between `min_temp` and `max_temp`.

    Args:
      min_temp: The minimum temperature, which will be returned when value is 0.
      max_temp: The maximum temperature, which will be returned when value is
         127.
      default: The temperature to return if control value is None.

    Returns:
      A float temperature value based on the 8-bit MIDI control value.
    """
    # NOTE(review): parameters on a @property can never be supplied by
    # callers, so only the defaults above are ever used.
    val = self._midi_hub.control_value(self._temperature_control_number)
    if val is None:
      return default
    return min_temp + (val / 127.) * (max_temp - min_temp)

  @abc.abstractmethod
  def run(self):
    """The main loop for the interaction.

    Must exit shortly after `self._stop_signal` is set.
    """
    pass

  def stop(self):
    """Stops the main loop, and blocks until the interaction is stopped."""
    self._stop_signal.set()
    self.join()
class CallAndResponseMidiInteraction(MidiInteraction):
"""Implementation of a MidiInteraction for interactive "call and response".
Alternates between receiving input from the MidiHub ("call") and playing
generated sequences ("response"). During the call stage, the input is captured
and used to generate the response, which is then played back during the
response stage.
The call phrase is started when notes are received and ended by an external
signal (`end_call_signal`) or after receiving no note events for a full tick.
The response phrase is immediately generated and played. Its length is
optionally determined by a control value set for
`response_ticks_control_number` or by the length of the call.
Args:
midi_hub: The MidiHub to use for MIDI I/O.
sequence_generators: A collection of SequenceGenerator objects.
qpm: The quarters per minute to use for this interaction. May be overriden
by control changes sent to `tempo_control_number`.
generator_select_control_number: An optional MIDI control number whose
value to use for selection a sequence generator from the collection.
Must be provided if `sequence_generators` contains multiple
SequenceGenerators.
clock_signal: An optional midi_hub.MidiSignal to use as a clock. Each tick
period should have the same duration. No other assumptions are made
about the duration, but is typically equivalent to a bar length. Either
this or `tick_duration` must be specified.be
tick_duration: An optional float specifying the duration of a tick period in
seconds. No assumptions are made about the duration, but is typically
equivalent to a bar length. Either this or `clock_signal` must be
specified.
end_call_signal: The optional midi_hub.MidiSignal to use as a signal to stop
the call phrase at the end of the current tick.
panic_signal: The optional midi_hub.MidiSignal to use as a signal to end
all open notes and clear the playback sequence.
mutate_signal: The optional midi_hub.MidiSignal to use as a signal to
generate a new response sequence using the current response as the
input.
allow_overlap: A boolean specifying whether to allow the call to overlap
with the response.
enable_metronome: A boolean specifying whether to enable the metronome.
min_listen_ticks_control_number: The optional control change number to use
for controlling the minimum call phrase length in clock ticks.
max_listen_ticks_control_number: The optional control change number to use
for controlling the maximum call phrase length in clock ticks. Call
phrases will automatically be ended and responses generated when this
length is reached.
response_ticks_control_number: The optional control change number to use for
controlling the length of the response in clock ticks.
tempo_control_number: An optional MIDI control number whose value to use to
determine the qpm for this interaction. On receipt of a control change,
the qpm will be set to 60 more than the control change value.
temperature_control_number: The optional control change number to use for
controlling generation softmax temperature.
loop_control_number: The optional control change number to use for
determining whether the response should be looped. Looping is enabled
when the value is 127 and disabled otherwise.
state_control_number: The optinal control change number to use for sending
state update control changes. The values are 0 for `IDLE`, 1 for
`LISTENING`, and 2 for `RESPONDING`.
Raises:
ValueError: If exactly one of `clock_signal` or `tick_duration` is not
specified.
"""
  class State(object):
    """Class holding state value representations."""
    # Integer codes sent over the state control change (see
    # `state_control_number` on the enclosing class).
    IDLE = 0
    LISTENING = 1
    RESPONDING = 2
    _STATE_NAMES = {
        IDLE: 'Idle', LISTENING: 'Listening', RESPONDING: 'Responding'}
    @classmethod
    def to_string(cls, state):
      """Returns the human-readable name for a state code."""
      return cls._STATE_NAMES[state]
  def __init__(self,
               midi_hub,
               sequence_generators,
               qpm,
               generator_select_control_number,
               clock_signal=None,
               tick_duration=None,
               end_call_signal=None,
               panic_signal=None,
               mutate_signal=None,
               allow_overlap=False,
               enable_metronome=False,
               min_listen_ticks_control_number=None,
               max_listen_ticks_control_number=None,
               response_ticks_control_number=None,
               tempo_control_number=None,
               temperature_control_number=None,
               loop_control_number=None,
               state_control_number=None):
    """Stores the configuration; see the class docstring for argument
    semantics.

    Raises:
      ValueError: If exactly one of `clock_signal` or `tick_duration` is not
          specified.
    """
    super(CallAndResponseMidiInteraction, self).__init__(
        midi_hub, sequence_generators, qpm, generator_select_control_number,
        tempo_control_number, temperature_control_number)
    # Exactly one timing source (signal-driven or fixed-duration ticks)
    # must be provided.
    if [clock_signal, tick_duration].count(None) != 1:
      raise ValueError(
          'Exactly one of `clock_signal` or `tick_duration` must be specified.')
    self._clock_signal = clock_signal
    self._tick_duration = tick_duration
    self._end_call_signal = end_call_signal
    self._panic_signal = panic_signal
    self._mutate_signal = mutate_signal
    self._allow_overlap = allow_overlap
    self._enable_metronome = enable_metronome
    self._min_listen_ticks_control_number = min_listen_ticks_control_number
    self._max_listen_ticks_control_number = max_listen_ticks_control_number
    self._response_ticks_control_number = response_ticks_control_number
    self._loop_control_number = loop_control_number
    self._state_control_number = state_control_number
    # Event for signalling when to end a call.
    self._end_call = threading.Event()
    # Event for signalling when to flush playback sequence.
    self._panic = threading.Event()
    # Event for signalling when to mutate response.
    self._mutate = threading.Event()
  def _update_state(self, state):
    """Logs and sends a control change with the state."""
    # The control change is only emitted when a state control number was
    # configured; logging happens unconditionally.
    if self._state_control_number is not None:
      self._midi_hub.send_control_change(self._state_control_number, state)
    tf.logging.info('State: %s', self.State.to_string(state))
  def _end_call_callback(self, unused_captured_seq):
    """Method to use as a callback for setting the end call signal.

    The captured-sequence argument supplied by the hub is deliberately
    ignored; only the event is set.
    """
    self._end_call.set()
    tf.logging.info('End call signal received.')
  def _panic_callback(self, unused_captured_seq):
    """Method to use as a callback for setting the panic signal.

    The captured-sequence argument supplied by the hub is deliberately
    ignored; only the event is set.
    """
    self._panic.set()
    tf.logging.info('Panic signal received.')
  def _mutate_callback(self, unused_captured_seq):
    """Method to use as a callback for setting the mutate signal.

    The captured sequence is required by the captor callback interface but
    is not used here.
    """
    self._mutate.set()
    tf.logging.info('Mutate signal received.')
@property
def _min_listen_ticks(self):
"""Returns the min listen ticks based on the current control value."""
val = self._midi_hub.control_value(
self._min_listen_ticks_control_number)
return 0 if val is None else val
@property
def _max_listen_ticks(self):
"""Returns the max listen ticks based on the current control value."""
val = self._midi_hub.control_value(
self._max_listen_ticks_control_number)
return float('inf') if not val else val
@property
def _should_loop(self):
return (self._loop_control_number and
self._midi_hub.control_value(self._loop_control_number) == 127)
  def _generate(self, input_sequence, zero_time, response_start_time,
                response_end_time):
    """Generates a response sequence with the currently-selected generator.

    Args:
      input_sequence: The NoteSequence to use as a generation seed.
      zero_time: The float time in seconds to treat as the start of the input.
      response_start_time: The float time in seconds for the start of
          generation.
      response_end_time: The float time in seconds for the end of generation.

    Returns:
      The generated NoteSequence, with times shifted back to wall clock.
    """
    # Generation is simplified if we always start at 0 time.
    response_start_time -= zero_time
    response_end_time -= zero_time

    generator_options = generator_pb2.GeneratorOptions()
    # Condition on everything before the response window...
    generator_options.input_sections.add(
        start_time=0,
        end_time=response_start_time)
    # ...and generate only inside the response window.
    generator_options.generate_sections.add(
        start_time=response_start_time,
        end_time=response_end_time)

    # Get current temperature setting.
    generator_options.args['temperature'].float_value = self._temperature

    # Generate response.
    tf.logging.info(
        "Generating sequence using '%s' generator.",
        self._sequence_generator.details.id)
    tf.logging.debug('Generator Details: %s',
                     self._sequence_generator.details)
    tf.logging.debug('Bundle Details: %s',
                     self._sequence_generator.bundle_details)
    tf.logging.debug('Generator Options: %s', generator_options)
    response_sequence = self._sequence_generator.generate(
        adjust_sequence_times(input_sequence, -zero_time), generator_options)
    # Trim stray events outside the window, then shift back to wall time.
    response_sequence = magenta.music.trim_note_sequence(
        response_sequence, response_start_time, response_end_time)
    return adjust_sequence_times(response_sequence, zero_time)
  def run(self):
    """The main loop for a real-time call and response interaction.

    Each iteration of the loop handles one clock tick: depending on the
    captured input it transitions between IDLE, LISTENING, and RESPONDING
    states, generates responses, and (optionally) loops or mutates the
    previous response.
    """
    start_time = time.time()
    self._captor = self._midi_hub.start_capture(self._qpm, start_time)

    if not self._clock_signal and self._enable_metronome:
      self._midi_hub.start_metronome(self._qpm, start_time)

    # Set callback for end call signal.
    if self._end_call_signal is not None:
      self._captor.register_callback(self._end_call_callback,
                                     signal=self._end_call_signal)
    if self._panic_signal is not None:
      self._captor.register_callback(self._panic_callback,
                                     signal=self._panic_signal)
    if self._mutate_signal is not None:
      self._captor.register_callback(self._mutate_callback,
                                     signal=self._mutate_signal)

    # Keep track of the end of the previous tick time.
    last_tick_time = time.time()

    # Keep track of the duration of a listen state.
    listen_ticks = 0

    # Start with an empty response sequence.
    response_sequence = music_pb2.NoteSequence()
    response_start_time = 0
    response_duration = 0
    player = self._midi_hub.start_playback(
        response_sequence, allow_updates=True)

    # Enter loop at each clock tick.
    for captured_sequence in self._captor.iterate(signal=self._clock_signal,
                                                  period=self._tick_duration):
      if self._stop_signal.is_set():
        break
      if self._panic.is_set():
        # Flush the current response entirely.
        response_sequence = music_pb2.NoteSequence()
        player.update_sequence(response_sequence)
        self._panic.clear()

      tick_time = captured_sequence.total_time

      # Set to current QPM, since it might have changed.
      if self._enable_metronome:
        self._midi_hub.start_metronome(self._qpm, tick_time)
      captured_sequence.tempos[0].qpm = self._qpm

      tick_duration = tick_time - last_tick_time
      last_end_time = (max(note.end_time for note in captured_sequence.notes)
                       if captured_sequence.notes else 0.0)

      # True iff there was no input captured during the last tick.
      silent_tick = last_end_time <= last_tick_time

      if not silent_tick:
        listen_ticks += 1

      if not captured_sequence.notes:
        # Reset captured sequence since we are still idling.
        if response_sequence.total_time <= tick_time:
          self._update_state(self.State.IDLE)
        if self._captor.start_time < tick_time:
          self._captor.start_time = tick_time
        self._end_call.clear()
        listen_ticks = 0
      elif (self._end_call.is_set() or
            silent_tick or
            listen_ticks >= self._max_listen_ticks):
        if listen_ticks < self._min_listen_ticks:
          tf.logging.info(
              'Input too short (%d vs %d). Skipping.',
              listen_ticks,
              self._min_listen_ticks)
          self._captor.start_time = tick_time
        else:
          # Create response and start playback.
          self._update_state(self.State.RESPONDING)
          capture_start_time = self._captor.start_time
          if silent_tick:
            # Move the sequence forward one tick in time.
            captured_sequence = adjust_sequence_times(
                captured_sequence, tick_duration)
            captured_sequence.total_time = tick_time
            capture_start_time += tick_duration

          # Compute duration of response.
          num_ticks = self._midi_hub.control_value(
              self._response_ticks_control_number)

          if num_ticks:
            response_duration = num_ticks * tick_duration
          else:
            # Use capture duration.
            response_duration = tick_time - capture_start_time

          response_start_time = tick_time
          response_sequence = self._generate(
              captured_sequence,
              capture_start_time,
              response_start_time,
              response_start_time + response_duration)

          # If it took too long to generate, push response to next tick.
          if (time.time() - response_start_time) >= tick_duration / 4:
            push_ticks = (
                (time.time() - response_start_time) // tick_duration + 1)
            response_start_time += push_ticks * tick_duration
            response_sequence = adjust_sequence_times(
                response_sequence, push_ticks * tick_duration)
            tf.logging.warn(
                'Response too late. Pushing back %d ticks.', push_ticks)

          # Start response playback. Specify the start_time to avoid stripping
          # initial events due to generation lag.
          player.update_sequence(
              response_sequence, start_time=response_start_time)

          # Optionally capture during playback.
          if self._allow_overlap:
            self._captor.start_time = response_start_time
          else:
            self._captor.start_time = response_start_time + response_duration

        # Clear end signal and reset listen_ticks.
        self._end_call.clear()
        listen_ticks = 0
      else:
        # Continue listening.
        self._update_state(self.State.LISTENING)

      # Potentially loop or mutate previous response.
      if self._mutate.is_set() and not response_sequence.notes:
        self._mutate.clear()
        tf.logging.warn('Ignoring mutate request with nothing to mutate.')
      if (response_sequence.total_time <= tick_time and
          (self._should_loop or self._mutate.is_set())):
        if self._mutate.is_set():
          # Regenerate from the previous response, shifted one response
          # duration forward.
          new_start_time = response_start_time + response_duration
          new_end_time = new_start_time + response_duration
          response_sequence = self._generate(
              response_sequence,
              response_start_time,
              new_start_time,
              new_end_time)
          response_start_time = new_start_time
          self._mutate.clear()
        response_sequence = adjust_sequence_times(
            response_sequence, tick_time - response_start_time)
        response_start_time = tick_time
        player.update_sequence(
            response_sequence, start_time=tick_time)

      last_tick_time = tick_time

    player.stop()
  def stop(self):
    """Stops the interaction: halts capture and metronome, joins threads."""
    self._stop_signal.set()
    self._captor.stop()
    self._midi_hub.stop_metronome()
    super(CallAndResponseMidiInteraction, self).stop()
| {
"repo_name": "YoshikawaMasashi/magenta",
"path": "magenta/interfaces/midi/midi_interaction.py",
"copies": "2",
"size": "20665",
"license": "apache-2.0",
"hash": -1082898153736650000,
"line_mean": 39.5992141454,
"line_max": 80,
"alpha_frac": 0.66682797,
"autogenerated": false,
"ratio": 4.13630904723779,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.006330895734310765,
"num_lines": 509
} |
"""A module for inferring objects
For more information see the documentation in `rope.base.oi`
package.
"""
import rope.base.builtins
import rope.base.pynames
import rope.base.pyobjects
from rope.base import evaluate, utils, arguments
from rope.base.oi.type_hinting.factory import get_type_hinting_factory
# Decorator that swallows IsBeingInferredError, so recursive inference
# cycles quietly produce None ("unknown") instead of raising.
_ignore_inferred = utils.ignore_exception(
    rope.base.pyobjects.IsBeingInferredError)
@_ignore_inferred
def infer_returned_object(pyfunction, args):
    """Infer the `PyObject` this `PyFunction` returns after calling.

    Tries, in order: the exact recorded return for these arguments, static
    inference (recording the observation when possible), any recorded
    return, and finally type hints. Returns None when nothing is known.
    """
    info = pyfunction.pycore.object_info
    exact = info.get_exact_returned(pyfunction, args)
    if exact is not None:
        return exact
    inferred = _infer_returned(pyfunction, args)
    if inferred is not None:
        # Remember the observation, but only for functions backed by a
        # real resource.
        if args and pyfunction.get_module().get_resource() is not None:
            passed = args.get_arguments(
                pyfunction.get_param_names(special_args=False))
            info.function_called(pyfunction, passed, inferred)
        return inferred
    recorded = info.get_returned(pyfunction, args)
    if recorded is not None:
        return recorded
    provider = get_type_hinting_factory(
        pyfunction.pycore.project).make_return_provider()
    hinted = provider(pyfunction)
    if hinted is not None:
        return rope.base.pyobjects.PyObject(hinted)
@_ignore_inferred
def infer_parameter_objects(pyfunction):
    r"""Infer the `PyObject`\s of parameters of this `PyFunction`.

    Prefers recorded call information; falls back to static/hinted
    inference, then patches up the implicit first parameter of methods.
    """
    params = pyfunction.pycore.object_info.get_parameter_objects(pyfunction)
    if params is None:
        params = _parameter_objects(pyfunction)
    _handle_first_parameter(pyfunction, params)
    return params
def _handle_first_parameter(pyobject, parameters):
    """Fix up the implicit first parameter of methods and classmethods.

    Mutates `parameters` in place so that index 0 holds the instance (for
    methods) or the class (for classmethods). Does nothing for plain
    functions or when `parameters` is None.

    Args:
        pyobject: The `PyFunction` whose parameters are being inferred.
        parameters: The (possibly empty) list of inferred parameter
            objects, or None.
    """
    kind = pyobject.get_kind()
    if parameters is None or kind not in ['method', 'classmethod']:
        # Bug fix: this guard used to be a no-op `pass`, so a None
        # `parameters` could reach `parameters.append` below and plain
        # functions fell through the guard.
        return
    if not parameters:
        if not pyobject.get_param_names(special_args=False):
            return
        parameters.append(rope.base.pyobjects.get_unknown())
    if kind == 'method':
        parameters[0] = rope.base.pyobjects.PyObject(pyobject.parent)
    if kind == 'classmethod':
        parameters[0] = pyobject.parent
@_ignore_inferred
def infer_assigned_object(pyname):
    """Infer the object assigned to `pyname`.

    Walks the assignments newest-to-oldest and returns the first useful
    inference. Falls back to type hints, and finally to whatever
    (possibly useless) result the walk stopped on.
    """
    if not pyname.assignments:
        return
    result = None
    for assignment in reversed(pyname.assignments):
        result = _infer_assignment(assignment, pyname.module)
        not_implemented = (
            isinstance(result, rope.base.builtins.BuiltinUnknown) and
            result.get_name() == 'NotImplementedType')
        if not_implemented or result == rope.base.pyobjects.get_unknown():
            # A useless result; stop walking and try type hints instead.
            break
        if result is not None:
            return result
    provider = get_type_hinting_factory(
        pyname.module.pycore.project).make_assignment_provider()
    hinted = provider(pyname)
    if hinted is not None:
        return rope.base.pyobjects.PyObject(hinted)
    return result
def get_passed_objects(pyfunction, parameter_index):
    """Return the objects that have been passed as a given parameter.

    Uses recorded call information when available; otherwise falls back to
    the single statically-inferred object for that parameter (if any).
    """
    objects = pyfunction.pycore.object_info.get_passed_objects(
        pyfunction, parameter_index)
    if not objects:
        inferred = _parameter_objects(pyfunction)
        if len(inferred) > parameter_index:
            objects.append(inferred[parameter_index])
    return objects
def _infer_returned(pyobject, args):
    """Statically infer the object returned by a function scope.

    Evaluates (up to) the last three return/yield expressions of the
    function body and returns the first that resolves to a known object;
    generator functions are wrapped in a generator object. Returns None
    when nothing can be inferred.
    """
    if args:
        # HACK: Setting parameter objects manually
        # This is not thread safe and might cause problems if `args`
        # does not come from a good call site
        pyobject.get_scope().invalidate_data()
        pyobject._set_parameter_pyobjects(
            args.get_arguments(pyobject.get_param_names(special_args=False)))
    scope = pyobject.get_scope()
    if not scope._get_returned_asts():
        return
    maxtries = 3
    # Prefer the last returns; earlier ones are often guard clauses.
    for returned_node in reversed(scope._get_returned_asts()[-maxtries:]):
        try:
            resulting_pyname = evaluate.eval_node(scope, returned_node)
            if resulting_pyname is None:
                continue
            pyobject = resulting_pyname.get_object()
            if pyobject == rope.base.pyobjects.get_unknown():
                continue
            if not scope._is_generator():
                return pyobject
            else:
                return rope.base.builtins.get_generator(pyobject)
        except rope.base.pyobjects.IsBeingInferredError:
            pass
def _parameter_objects(pyobject):
    """Return one inferred object per (non-special) parameter.

    Uses a type-hint provider where a hint exists and the unknown object
    otherwise.
    """
    hint_param = get_type_hinting_factory(
        pyobject.pycore.project).make_param_provider()

    def _object_for(name):
        # One object per parameter: hinted type if available, else unknown.
        hinted = hint_param(pyobject, name)
        if hinted is None:
            return rope.base.pyobjects.get_unknown()
        return rope.base.pyobjects.PyObject(hinted)

    return [_object_for(name)
            for name in pyobject.get_param_names(special_args=False)]
# handling `rope.base.pynames.AssignmentValue`
@_ignore_inferred
def _infer_assignment(assignment, pymodule):
    """Infer the object produced by a single assignment.

    Resolves the assigned expression, applies any trailing evaluations
    (attribute accesses / calls), and finally any tuple/list unpacking
    levels. Returns None when any step fails.
    """
    resolved = _follow_pyname(assignment, pymodule)
    if resolved is None:
        return None
    pyname, pyobject = resolved
    pyobject = _follow_evaluations(assignment, pyname, pyobject)
    if pyobject is None:
        return None
    return _follow_levels(assignment, pyobject)
def _follow_levels(assignment, pyobject):
    """Drill into tuples/lists, one step per unpacking level."""
    for index in assignment.levels:
        type_ = pyobject.get_type()
        if isinstance(type_, rope.base.builtins.Tuple):
            held = type_.get_holding_objects()
            # Clamp the index so mismatched unpackings do not fall off
            # the end of the tuple.
            pyobject = held[min(len(held) - 1, index)] if held else None
        elif isinstance(type_, rope.base.builtins.List):
            pyobject = type_.holding
        else:
            pyobject = None
        if pyobject is None:
            break
    return pyobject
@_ignore_inferred
def _follow_pyname(assignment, pymodule, lineno=None):
    """Evaluate the assigned AST node; return its (pyname, pyobject).

    Returns None when the node cannot be evaluated. Properties accessed
    on a class body are resolved to what the property getter returns.
    """
    assign_node = assignment.ast_node
    if lineno is None:
        lineno = _get_lineno_for_node(assign_node)
    holding_scope = pymodule.get_scope().get_inner_scope_for_line(lineno)
    pyname = evaluate.eval_node(holding_scope, assign_node)
    if pyname is not None:
        result = pyname.get_object()
        # A property defined on a class body yields the getter's result,
        # evaluated against an unbound instance of the enclosing class.
        if isinstance(result.get_type(), rope.base.builtins.Property) and \
           holding_scope.get_kind() == 'Class':
            arg = rope.base.pynames.UnboundName(
                rope.base.pyobjects.PyObject(holding_scope.pyobject))
            return pyname, result.get_type().get_property_object(
                arguments.ObjectArguments([arg]))
        return pyname, result
@_ignore_inferred
def _follow_evaluations(assignment, pyname, pyobject):
    """Apply the assignment's dotted evaluation chain to `pyobject`.

    `assignment.evaluation` is a string such as 'attr.method()': each
    token is an attribute access, optionally followed by a call. Returns
    the resulting object (wrapped as an instance when
    `assignment.assign_type` is set) or None when a step fails.
    """
    new_pyname = pyname
    tokens = assignment.evaluation.split('.')
    for token in tokens:
        call = token.endswith('()')
        if call:
            token = token[:-2]
        if token:
            pyname = new_pyname
            new_pyname = _get_attribute(pyobject, token)
            if new_pyname is not None:
                pyobject = new_pyname.get_object()
        if pyobject is not None and call:
            # Only function-like objects can be called.
            if isinstance(pyobject, rope.base.pyobjects.AbstractFunction):
                args = arguments.ObjectArguments([pyname])
                pyobject = pyobject.get_returned_object(args)
            else:
                pyobject = None
        if pyobject is None:
            break
    if pyobject is not None and assignment.assign_type:
        return rope.base.pyobjects.PyObject(pyobject)
    return pyobject
def _get_lineno_for_node(assign_node):
    """Return the node's line number, defaulting to 1 when unavailable."""
    lineno = getattr(assign_node, 'lineno', None)
    return 1 if lineno is None else lineno
def _get_attribute(pyobject, name):
    """Look up `name` on `pyobject`, returning None when absent."""
    if pyobject is None:
        return None
    if name in pyobject:
        return pyobject[name]
    return None
| {
"repo_name": "zrzka/blackmamba",
"path": "blackmamba/lib/rope/base/oi/soi.py",
"copies": "1",
"size": "7894",
"license": "mit",
"hash": -4675897874265190000,
"line_mean": 34.5585585586,
"line_max": 111,
"alpha_frac": 0.6513807955,
"autogenerated": false,
"ratio": 3.8848425196850394,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00024882198930234234,
"num_lines": 222
} |
# a module for interacting with Amazon SimpleDB using Python pandas (and boto of course)
import pandas as pd
import boto.sdb
import datetime as dt
import dataframe_utils
from datetime_utils import *
class Creds:
    """Simple container for an AWS credential pair.

    Attributes:
        ID: the AWS access key ID.
        secret: the AWS secret access key.
    """

    def __init__(self, ID, secret):
        self.ID = ID
        self.secret = secret
# a domain is equivalent to a resultset from a select query.
def make_records_from_resultsets(resultsets):
    """Flatten SDB result sets into record dicts, adding each item's name.

    Each item's attributes become dict entries, plus an 'itemName' key
    holding the item name (which overrides any same-named attribute).
    """
    records = []
    for itemlist in resultsets:
        for item in itemlist:
            record = dict(item.items())
            record['itemName'] = item.name
            records.append(record)
    return records
def df_force_numerics(df):
    """Coerce each column of `df`, in place, to the narrowest useful type.

    For every column, tries int, then float, then datetime, keeping the
    first conversion that succeeds; a column where all three fail is left
    unchanged.

    Args:
        df: the pandas DataFrame to modify in place.
    """
    converters = (
        lambda series: series.astype(int),
        lambda series: series.astype(float),
        pd.to_datetime,
    )
    for col in df.columns:
        for convert in converters:
            try:
                df[col] = convert(df[col])
                break
            # Narrowed from a bare `except:`, which also swallowed
            # KeyboardInterrupt/SystemExit; a failed conversion only
            # raises ordinary exceptions.
            except Exception:
                continue
# this turns a SDB domain into a dataframe, and converts columns to be datetime objects
def make_dataframe_from_records_with_dates(records, force_numerics=True):
    """Build a DataFrame indexed by SDB itemName from a list of record dicts."""
    if not records:
        return pd.DataFrame()  # empty dataframe
    frame = pd.DataFrame.from_records(records, index='itemName')  # SDB always indexes by itemName
    if force_numerics:
        df_force_numerics(frame)
    return frame
# this is a convenience function
# rename to read_sdb
def make_df_from_sdb(resultsets):
    """Convenience wrapper: SDB result sets straight to a typed DataFrame."""
    records = make_records_from_resultsets(resultsets)
    return make_dataframe_from_records_with_dates(records)
# Sentinel so an explicitly-passed None ("no lower bound") can be told
# apart from "use the default".
_UNSET = object()


def build_sdb_datarange_query(domain_name, datetime_col=None,
                              date_start=_UNSET, date_end=None,
                              select_columns=None):
    """Build a SimpleDB select query, optionally bounded by a date range.

    Args:
        domain_name: the SDB domain to select from.
        datetime_col: column holding ISO-format datetimes; date bounds are
            ignored unless this is given.
        date_start: exclusive lower bound (defaults to yesterday, computed
            at call time); pass None for no lower bound.
        date_end: exclusive upper bound, or None for no upper bound.
        select_columns: iterable of column names, or None for `*`.

    Returns:
        The query string.
    """
    if date_start is _UNSET:
        # Bug fix: the old default `date_start=yesterday()` was evaluated
        # once at import time, freezing "yesterday" for the life of the
        # process. Compute it per call instead.
        date_start = yesterday()
    query = 'select '
    if select_columns:
        query += '`' + '`,`'.join(select_columns) + '` '
    else:
        query += '* '
    query += 'from `' + domain_name + '` '
    if (date_start or date_end) and datetime_col:
        query += 'where '
        if date_start:
            query += '`' + datetime_col + '` > "' + date_start.isoformat() + '" '
            if date_end:
                query += 'AND '
        if date_end:
            query += '`' + datetime_col + '` < "' + date_end.isoformat() + '" '
    return query
def download_dtrange_from_domain(domain, datetime_col=None,
                                 date_start=yesterday(), date_end=None,
                                 select_columns=None):
    """Select a date range from an SDB domain and return its result sets.

    NOTE(review): `date_start=yesterday()` is evaluated once at import
    time, so a long-lived process keeps a stale default -- confirm intent.

    Args:
        domain: the boto SDB domain object to query.
        datetime_col: column holding ISO-format datetimes, or None.
        date_start: exclusive lower bound; None for no lower bound.
        date_end: exclusive upper bound; None for no upper bound.
        select_columns: iterable of column names, or None for `*`.

    Returns:
        The list of boto result sets (one per page).
    """
    # Bug fix: date_end and select_columns were previously hard-coded to
    # None in this call, silently discarding the caller's arguments.
    query = build_sdb_datarange_query(domain.name, datetime_col=datetime_col,
                                      date_start=date_start, date_end=date_end,
                                      select_columns=select_columns)
    return from_sdb_query(domain, query)
def from_sdb_query(domain, query):
    """Run a select against SDB, following pagination tokens to the end.

    Returns the list of boto result sets, one per page.
    """
    print('Performing SDB query: ' + query)
    resultset = domain.connection.select(domain, query=query)
    pages = [resultset]
    # boto signals further pages with a non-empty next_token.
    while resultset.next_token:
        resultset = domain.connection.select(
            domain, query=query, next_token=resultset.next_token)
        pages.append(resultset)
    return pages
| {
"repo_name": "petergaultney/dataframe-browser",
"path": "sdb_utils.py",
"copies": "1",
"size": "3031",
"license": "bsd-3-clause",
"hash": -7388053325372506000,
"line_mean": 35.0833333333,
"line_max": 98,
"alpha_frac": 0.5856153085,
"autogenerated": false,
"ratio": 3.8367088607594937,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9881165459679082,
"avg_score": 0.008231741916082369,
"num_lines": 84
} |
"""A module for interactive testing.
Use:
$ python
>>> from csrv.model import setup_game
>>> setup_game.play()
this will prompt you for responses to choices if you break out at any point you
can access the game object as 'setup_game.g' and can resume the interactive
play with setup_game.play(setup_game.g)
"""
from csrv.model import game
from csrv.model import deck
from csrv.model import errors
from csrv.model import parameters
from csrv.model import premade_decks
from csrv.model import json_serializer
from csrv.model import read_o8d
def new_game(corp_deck_file=None, runner_deck_file=None):
  """Create a Game from optional .o8d deck files, falling back to premades.

  Args:
    corp_deck_file: path to a corp .o8d deck file, or None to use the first
        premade corp deck.
    runner_deck_file: path to a runner .o8d deck file, or None to use the
        first premade runner deck.

  Returns:
    A new game.Game.
  """
  if corp_deck_file:
    corp_deck = deck.CorpDeck(*read_o8d.read_file(corp_deck_file))
  else:
    corp_deck = deck.CorpDeck(
        premade_decks.corp_decks[0]['identity'],
        premade_decks.corp_decks[0]['cards'])
  if runner_deck_file:
    # Bug fix: this previously constructed a CorpDeck from the runner's
    # deck file.
    runner_deck = deck.RunnerDeck(*read_o8d.read_file(runner_deck_file))
  else:
    runner_deck = deck.RunnerDeck(
        premade_decks.runner_decks[0]['identity'],
        premade_decks.runner_decks[0]['cards'])
  return game.Game(corp_deck, runner_deck)
# Module-level handle to the most recently created Game, so an interactive
# session can inspect it as setup_game.g after breaking out of play().
g = None
def play(game_obj=None):
  """Run the interactive command-line loop for a game.

  Args:
    game_obj: an existing game.Game to resume, or None to start a new one.
  """
  global g
  if game_obj:
    g = game_obj
  else:
    g = new_game()
  g.current_phase()
  while True:
    try:
      phase = g.current_phase()
      # Dump a corp-view snapshot of the game state for external tools.
      with open('game_state.json', 'w') as json_out:
        json_out.write(json_serializer.JsonSerializer(g).serialize_game_corp())
      player = phase.player
      if player == g.corp:
        hand = g.corp.hq
      else:
        hand = g.runner.grip
      choices = phase.choices()
      if choices:
        for i, choice in enumerate(choices):
          print '%d) %s <%s>' % (i, choice, choice.cost)
        print '\n%s has %d credits, %d cards in hand, %d agenda points, %d clicks' % (
            player, player.credits.value, hand.size, player.agenda_points, player.clicks.value)
        chosen = raw_input('(%s) %s\'s Choice? : ' % (phase, player))
        if chosen:
          choice = choices[int(chosen)]
          req = choice.request()
          # Installation choices additionally require picking a server.
          if (isinstance(req, parameters.InstallIceRequest) or
              isinstance(req, parameters.InstallAgendaAssetRequest) or
              isinstance(req, parameters.InstallUpgradeRequest)):
            resp = req.new_response()
            if isinstance(req, parameters.InstallAgendaAssetRequest):
              servers = g.corp.remotes
            else:
              servers = (
                  [g.corp.archives, g.corp.rnd, g.corp.hq] + g.corp.remotes)
            for x, server in enumerate(servers):
              print '%d) %s' % (x, server)
            server_choice = raw_input('Install in which server?: ')
            if server_choice:
              resp.server = servers[int(server_choice)]
            g.resolve_current_phase(choice, resp)
          else:
            g.resolve_current_phase(choices[int(chosen)], None)
        else:
          # No selection entered: advance without a choice if allowed.
          try:
            g.resolve_current_phase(None, None)
          except errors.ChoiceRequiredError:
            print 'You must choose one of the options.'
            continue
      else:
        g.resolve_current_phase(None, None)
    except errors.CostNotSatisfied, err:
      print '\033[31m%s\033[37m' % err
| {
"repo_name": "mrroach/CentralServer",
"path": "csrv/model/setup_game.py",
"copies": "1",
"size": "3216",
"license": "apache-2.0",
"hash": 1096238321573087900,
"line_mean": 32.8526315789,
"line_max": 95,
"alpha_frac": 0.6203358209,
"autogenerated": false,
"ratio": 3.403174603174603,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.45235104240746027,
"avg_score": null,
"num_lines": null
} |
"""A module for interfacing with the MIDI environment."""
import abc
from collections import defaultdict
from collections import deque
import Queue
import re
import threading
import time
# internal imports
import mido
import tensorflow as tf
# TODO(adarob): Use flattened imports.
from magenta.common import concurrency
from magenta.protobuf import music_pb2
# Timing/appearance defaults for the metronome.
_DEFAULT_METRONOME_TICK_DURATION = 0.05
_DEFAULT_METRONOME_PROGRAM = 117  # Melodic Tom
_DEFAULT_METRONOME_PITCHES = [44, 35, 35, 35]
_DEFAULT_METRONOME_VELOCITY = 64
_METRONOME_CHANNEL = 1

# 0-indexed.
_DRUM_CHANNEL = 8

try:
  # The RtMidi backend is easier to install and has support for virtual ports.
  import rtmidi  # pylint: disable=unused-import,g-import-not-at-top
  mido.set_backend('mido.backends.rtmidi')
except ImportError:
  # Tries to use PortMidi backend by default.
  tf.logging.warn('Could not import RtMidi. Virtual ports are disabled.')
class MidiHubException(Exception):
  """Base class for exceptions in this module."""
def get_available_input_ports():
  """Returns a list of available input MIDI port names, as reported by mido."""
  return mido.get_input_names()
def get_available_output_ports():
  """Returns a list of available output MIDI port names, as reported by mido."""
  return mido.get_output_names()
class MidiSignal(object):
  """A class for representing a MIDI-based event signal.

  Provides a `__str__` method to return a regular expression pattern for
  matching against the string representation of a mido.Message with wildcards
  for unspecified values.

  Supports matching for message types 'note_on', 'note_off', and
  'control_change'. If a mido.Message is given as the `msg` argument, matches
  against the exact message, ignoring the time attribute. If a `msg` is
  not given, keyword arguments must be provided matching some non-empty subset
  of those listed as a value for at least one key in `_VALID_ARGS`.

  Examples:
    # A signal that matches any 'note_on' message.
    note_on_signal = MidiSignal(type='note_on')

    # A signal that matches any 'note_on' or 'note_off' message with a pitch
    # value of 4 and a velocity of 127.
    note_signal = MidiSignal(note=4, velocity=127)

    # A signal that matches a specific mido.Message exactly (ignoring time).
    msg = mido.Message(type='control_change', control=1, value=127)
    control_1_127_signal = MidiSignal(msg=msg)

  Args:
    msg: A mido.Message that should be matched exactly (excluding the time
        attribute) or None if wildcards are to be used.
    **kwargs: Valid mido.Message arguments. Those that are not provided will be
        treated as wildcards.

  Raises:
    MidiHubException: If the message type is unsupported or the arguments are
        not in the valid set for the given or inferred type.
  """
  # Argument names accepted per message type; used to validate kwargs and
  # to infer a type when none is given.
  _NOTE_ARGS = set(['type', 'note', 'program_number', 'velocity'])
  _CONTROL_ARGS = set(['type', 'control', 'value'])
  _VALID_ARGS = {
      'note_on': _NOTE_ARGS,
      'note_off': _NOTE_ARGS,
      'control_change': _CONTROL_ARGS,
  }

  def __init__(self, msg=None, **kwargs):
    if msg is not None and kwargs:
      raise MidiHubException(
          'Either a mido.Message should be provided or arguments. Not both.')

    type_ = msg.type if msg is not None else kwargs.get('type')
    if type_ is not None and type_ not in self._VALID_ARGS:
      raise MidiHubException(
          "The type of a MidiSignal must be either 'note_on', 'note_off', "
          "'control_change' or None for wildcard matching. Got '%s'." % type_)

    # The compatible mido.Message types.
    inferred_types = [type_] if type_ is not None else []
    # If msg is not provided, check that the given arguments are valid for some
    # message type.
    if msg is None:
      if type_ is not None:
        for arg_name in kwargs:
          if arg_name not in self._VALID_ARGS[type_]:
            raise MidiHubException(
                "Invalid argument for type '%s': %s" % (type_, arg_name))
      else:
        if kwargs:
          for name, args in self._VALID_ARGS.iteritems():
            if set(kwargs) <= args:
              inferred_types.append(name)
        if not inferred_types:
          raise MidiHubException(
              'Could not infer a message type for set of given arguments: %s'
              % ', '.join(kwargs))
        # If there is only a single valid inferred type, use it.
        if len(inferred_types) == 1:
          type_ = inferred_types[0]

    if msg is not None:
      # Exact match on the message string, with only the time wildcarded.
      self._regex_pattern = '^' + mido.messages.format_as_string(
          msg, include_time=False) + r' time=\d+.\d+$'
    else:
      # Generate regex pattern.
      parts = ['.*' if type_ is None else type_]
      for name in mido.messages.get_spec(inferred_types[0]).arguments:
        if name in kwargs:
          parts.append('%s=%d' % (name, kwargs[name]))
        else:
          parts.append(r'%s=\d+' % name)
      self._regex_pattern = '^' + ' '.join(parts) + r' time=\d+.\d+$'

  def __str__(self):
    """Returns a regex pattern for matching against a mido.Message string."""
    return self._regex_pattern
class Metronome(threading.Thread):
  """A thread implementing a MIDI metronome.

  Args:
    outport: The Mido port for sending messages.
    qpm: The integer quarters per minute to signal on.
    start_time: The float wall time in seconds to treat as the first beat
        for alignment. If in the future, the first tick will not start until
        after this time.
    stop_time: The float wall time in seconds after which the metronome should
        stop, or None if it should continue until `stop` is called.
    velocity: The velocity of the metronome's tick `note_on` message.
    program: The MIDI program number to use for metronome ticks.
    pitches: An ordered collection of integers representing MIDI pitches of the
        metronome's tick, which will be cycled through.
    duration: The duration of the metronome's tick.
  """
  daemon = True

  def __init__(self,
               outport,
               qpm,
               start_time,
               stop_time=None,
               velocity=_DEFAULT_METRONOME_VELOCITY,
               program=_DEFAULT_METRONOME_PROGRAM,
               pitches=None,
               duration=_DEFAULT_METRONOME_TICK_DURATION):
    self._outport = outport
    self.update(
        qpm, start_time, stop_time, velocity, program, pitches, duration)
    super(Metronome, self).__init__()

  def update(self,
             qpm,
             start_time,
             stop_time=None,
             velocity=_DEFAULT_METRONOME_VELOCITY,
             program=_DEFAULT_METRONOME_PROGRAM,
             pitches=None,
             duration=_DEFAULT_METRONOME_TICK_DURATION):
    """Updates Metronome options."""
    # Locking is not required since variables are independent and assignment is
    # atomic.
    # Set the program number for the channel.
    self._outport.send(
        mido.Message(type='program_change', program=program,
                     channel=_METRONOME_CHANNEL))
    self._period = 60. / qpm
    self._start_time = start_time
    self._stop_time = stop_time
    self._velocity = velocity
    self._pitches = pitches or _DEFAULT_METRONOME_PITCHES
    self._duration = duration

  def run(self):
    """Outputs metronome tone on the qpm interval until stop signal received."""
    sleeper = concurrency.Sleeper()
    while True:
      now = time.time()
      # Index of the next tick at or after `now` (tick 0 falls on start_time).
      tick_number = max(0, int((now - self._start_time) // self._period) + 1)
      tick_time = tick_number * self._period + self._start_time
      if self._stop_time is not None and self._stop_time < tick_time:
        break
      sleeper.sleep_until(tick_time)
      # Cycle through the configured pitches, one per tick.
      metric_position = tick_number % len(self._pitches)
      self._outport.send(
          mido.Message(
              type='note_on',
              note=self._pitches[metric_position],
              channel=_METRONOME_CHANNEL,
              velocity=self._velocity))
      sleeper.sleep(self._duration)
      self._outport.send(
          mido.Message(
              type='note_off',
              note=self._pitches[metric_position],
              channel=_METRONOME_CHANNEL))

  def stop(self, stop_time=0, block=True):
    """Signals for the metronome to stop.

    Args:
      stop_time: The float wall time in seconds after which the metronome should
          stop. By default, stops at next tick.
      block: If true, blocks until thread terminates.
    """
    self._stop_time = stop_time
    if block:
      self.join()
class MidiPlayer(threading.Thread):
"""A thread for playing back a NoteSequence proto via MIDI.
The NoteSequence times must be based on the wall time. The playhead matches
the wall clock. The playback sequence may be updated at any time if
`allow_updates` is set to True.
Args:
outport: The Mido port for sending messages.
sequence: The NoteSequence to play.
start_time: The float time before which to strip events. Defaults to
construction time. Events before this time will be sent immediately on
start.
allow_updates: If False, the thread will terminate after playback of
`sequence` completes and calling `update_sequence` will result in an
exception. Otherwise, the the thread will stay alive until `stop` is
called, allowing for additional updates via `update_sequence`.
channel: The MIDI channel to send playback events.
offset: The float time in seconds to adjust the playback event times by.
"""
def __init__(self, outport, sequence, start_time=time.time(),
allow_updates=False, channel=0, offset=0.0):
self._outport = outport
self._channel = channel
self._offset = offset
# Set of notes (pitches) that are currently on.
self._open_notes = set()
# Lock for serialization.
self._lock = threading.RLock()
# A control variable to signal when the sequence has been updated.
self._update_cv = threading.Condition(self._lock)
# The queue of mido.Message objects to send, sorted by ascending time.
self._message_queue = deque()
# An event that is set when `stop` has been called.
self._stop_signal = threading.Event()
# Initialize message queue.
# We first have to allow "updates" to set the initial sequence.
self._allow_updates = True
self.update_sequence(sequence, start_time=start_time)
# We now make whether we allow updates dependent on the argument.
self._allow_updates = allow_updates
super(MidiPlayer, self).__init__()
@concurrency.serialized
def update_sequence(self, sequence, start_time=None):
"""Updates sequence being played by the MidiPlayer.
Adds events to close any notes that are no longer being closed by the
new sequence using the times when they would have been closed by the
previous sequence.
Args:
sequence: The NoteSequence to play back.
start_time: The float time before which to strip events. Defaults to call
time.
Raises:
MidiHubException: If called when _allow_updates is False.
"""
if start_time is None:
start_time = time.time()
if not self._allow_updates:
raise MidiHubException(
'Attempted to update a MidiPlayer sequence with updates disabled.')
new_message_list = []
# The set of pitches that are already playing and will be closed without
# first being reopened in in the new sequence.
closed_notes = set()
for note in sequence.notes:
if note.start_time >= start_time:
new_message_list.append(
mido.Message(type='note_on', note=note.pitch,
velocity=note.velocity, time=note.start_time))
new_message_list.append(
mido.Message(type='note_off', note=note.pitch, time=note.end_time))
elif note.end_time >= start_time and note.pitch in self._open_notes:
new_message_list.append(
mido.Message(type='note_off', note=note.pitch, time=note.end_time))
closed_notes.add(note.pitch)
# Close remaining open notes at the next event time to avoid abruptly ending
# notes.
notes_to_close = self._open_notes - closed_notes
if notes_to_close:
next_event_time = (
min(msg.time for msg in new_message_list) if new_message_list else 0)
for note in notes_to_close:
new_message_list.append(
mido.Message(type='note_off', note=note, time=next_event_time))
for msg in new_message_list:
msg.channel = self._channel
msg.time += self._offset
self._message_queue = deque(
sorted(new_message_list, key=lambda msg: (msg.time, msg.note)))
self._update_cv.notify()
  @concurrency.serialized
  def run(self):
    """Plays messages in the queue until empty and _allow_updates is False."""
    # Assumes model where NoteSequence is time-stamped with wall time.
    # TODO(hanzorama): Argument to allow initial start not at sequence start?

    # Drop any events whose scheduled time is already in the past.
    while self._message_queue and self._message_queue[0].time < time.time():
      self._message_queue.popleft()

    while True:
      while self._message_queue:
        delta = self._message_queue[0].time - time.time()
        if delta > 0:
          # Sleep until the next event is due. Waiting on the condition
          # variable (rather than plain sleep) lets `update_sequence` wake us
          # early when the queue is replaced.
          self._update_cv.wait(timeout=delta)
        else:
          msg = self._message_queue.popleft()
          # Track open pitches so `stop`/`update_sequence` can close them.
          if msg.type == 'note_on':
            self._open_notes.add(msg.note)
          elif msg.type == 'note_off':
            self._open_notes.discard(msg.note)
          self._outport.send(msg)
      # Either keep player alive and wait for sequence update, or return.
      if self._allow_updates:
        self._update_cv.wait()
      else:
        break
  def stop(self, block=True):
    """Signals for the playback to stop and ends all open notes.

    Args:
      block: If true, blocks until thread terminates.
    """
    with self._lock:
      if not self._stop_signal.is_set():
        self._stop_signal.set()
        self._allow_updates = False

        # Replace message queue with immediate end of open notes.
        self._message_queue.clear()
        for note in self._open_notes:
          self._message_queue.append(
              mido.Message(type='note_off', note=note, time=time.time()))
        # Wake the run loop so the note_off events are sent promptly.
        self._update_cv.notify()
    if block:
      self.join()
class MidiCaptor(threading.Thread):
  """Base class for thread that captures MIDI into a NoteSequence proto.

  If neither `stop_time` nor `stop_signal` are provided as arguments, the
  capture will continue until the `stop` method is called.

  Args:
    qpm: The quarters per minute to use for the captured sequence.
    start_time: The float wall time in seconds when the capture begins. Events
        occurring before this time are ignored.
    stop_time: The float wall time in seconds when the capture is to be stopped
        or None.
    stop_signal: A MidiSignal to use as a signal to stop capture.
  """
  # Bug fix: this was misspelled `_metaclass__`, which Python treats as an
  # ordinary class attribute, so ABCMeta was silently never applied and
  # `@abc.abstractmethod` was not enforced. (Python 2 metaclass declaration.)
  __metaclass__ = abc.ABCMeta

  # A message that is used to wake the consumer thread.
  _WAKE_MESSAGE = None

  def __init__(self, qpm, start_time=0, stop_time=None, stop_signal=None):
    # A lock for synchronization.
    self._lock = threading.RLock()
    # Queue of incoming mido.Message objects awaiting capture.
    self._receive_queue = Queue.Queue()
    self._captured_sequence = music_pb2.NoteSequence()
    self._captured_sequence.tempos.add(qpm=qpm)
    self._start_time = start_time
    self._stop_time = stop_time
    self._stop_regex = re.compile(str(stop_signal))
    # A set of active MidiSignals being used by iterators.
    self._iter_signals = []
    # An event that is set when `stop` has been called.
    self._stop_signal = threading.Event()
    # Active callback threads keyed by unique thread name.
    self._callbacks = {}
    super(MidiCaptor, self).__init__()

  @property
  @concurrency.serialized
  def start_time(self):
    return self._start_time

  @start_time.setter
  @concurrency.serialized
  def start_time(self, value):
    """Updates the start time, removing any notes that started before it."""
    self._start_time = value
    i = 0
    for note in self._captured_sequence.notes:
      if note.start_time >= self._start_time:
        break
      i += 1
    del self._captured_sequence.notes[:i]

  @property
  @concurrency.serialized
  def _stop_time(self):
    return self._stop_time_unsafe

  @_stop_time.setter
  @concurrency.serialized
  def _stop_time(self, value):
    self._stop_time_unsafe = value

  def receive(self, msg):
    """Adds received mido.Message to the queue for capture.

    Args:
      msg: The incoming mido.Message object to add to the queue for capture. The
          time attribute is assumed to be pre-set with the wall time when the
          message was received.

    Raises:
      MidiHubException: When the received message has an empty time attribute.
    """
    if not msg.time:
      raise MidiHubException(
          'MidiCaptor received message with empty time attribute: %s' % msg)
    self._receive_queue.put(msg)

  @abc.abstractmethod
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during capture.

    Must be serialized in children.

    Args:
      msg: The incoming mido.Message object to capture. The time field is
          assumed to be pre-filled with the wall time when the message was
          received.
    """
    pass

  def _add_note(self, msg):
    """Adds and returns a new open note based on the MIDI message."""
    new_note = self._captured_sequence.notes.add()
    new_note.start_time = msg.time
    new_note.pitch = msg.note
    new_note.velocity = msg.velocity
    new_note.is_drum = (msg.channel == _DRUM_CHANNEL)
    return new_note

  def run(self):
    """Captures incoming messages until stop time or signal received."""
    while True:
      timeout = None
      stop_time = self._stop_time
      if stop_time is not None:
        timeout = stop_time - time.time()
        if timeout <= 0:
          break
      try:
        msg = self._receive_queue.get(block=True, timeout=timeout)
      except Queue.Empty:
        continue
      if msg is MidiCaptor._WAKE_MESSAGE:
        continue
      if msg.time <= self._start_time:
        continue
      if self._stop_regex.match(str(msg)) is not None:
        break
      with self._lock:
        msg_str = str(msg)
        # Forward a copy to every active `iterate` generator whose signal
        # matches this message.
        for regex, queue in self._iter_signals:
          if regex.match(msg_str) is not None:
            queue.put(msg.copy())
        self._capture_message(msg)
    stop_time = self._stop_time
    end_time = stop_time if stop_time is not None else msg.time
    # Acquire lock to avoid race condition with `iterate`.
    with self._lock:
      # Set final captured sequence.
      self._captured_sequence = self.captured_sequence(end_time)
      # Wake up all generators.
      for regex, queue in self._iter_signals:
        queue.put(MidiCaptor._WAKE_MESSAGE)

  def stop(self, stop_time=None, block=True):
    """Ends capture and truncates the captured sequence at `stop_time`.

    Args:
      stop_time: The float time in seconds to stop the capture, or None if it
          should be stopped now. May be in the past, in which case the captured
          sequence will be truncated appropriately.
      block: If True, blocks until the thread terminates.

    Raises:
      MidiHubException: When called multiple times with a `stop_time`.
    """
    with self._lock:
      if self._stop_signal.is_set():
        if stop_time is not None:
          raise MidiHubException(
              '`stop` must not be called multiple times with a `stop_time` on '
              'MidiCaptor.')
      else:
        self._stop_signal.set()
        self._stop_time = time.time() if stop_time is None else stop_time
        # Force the thread to wake since we've updated the stop time.
        self._receive_queue.put(MidiCaptor._WAKE_MESSAGE)
    if block:
      self.join()

  def captured_sequence(self, end_time=None):
    """Returns a copy of the current captured sequence.

    If called before the thread terminates, `end_time` is required and any open
    notes will have their end time set to it, any notes starting after it will
    be removed, and any notes ending after it will be truncated. `total_time`
    will also be set to `end_time`.

    Args:
      end_time: The float time in seconds to close any open notes and after
          which to close or truncate notes, if the thread is still alive.
          Otherwise, must be None.

    Returns:
      A copy of the current captured NoteSequence proto with open notes closed
      at and later notes removed or truncated to `end_time`.

    Raises:
      MidiHubException: When the thread is alive and `end_time` is None or the
          thread is terminated and `end_time` is not None.
    """
    # Make a copy of the sequence currently being captured.
    current_captured_sequence = music_pb2.NoteSequence()
    with self._lock:
      current_captured_sequence.CopyFrom(self._captured_sequence)
    if self.is_alive():
      if end_time is None:
        raise MidiHubException(
            '`end_time` must be provided when capture thread is still running.')
      for i, note in enumerate(current_captured_sequence.notes):
        if note.start_time >= end_time:
          # Notes are ordered by start time; everything from here on starts
          # after the cutoff and can be dropped in one slice.
          del current_captured_sequence.notes[i:]
          break
        if not note.end_time or note.end_time > end_time:
          note.end_time = end_time
      current_captured_sequence.total_time = end_time
    elif end_time is not None:
      raise MidiHubException(
          '`end_time` must not be provided when capture is complete.')
    return current_captured_sequence

  def iterate(self, signal=None, period=None):
    """Yields the captured sequence at every signal message or time period.

    Exactly one of `signal` or `period` must be specified. Continues until the
    captor terminates, at which point the final captured sequence is yielded
    before returning.

    If consecutive calls to iterate are longer than the period, immediately
    yields and logs a warning.

    Args:
      signal: A MidiSignal to use as a signal to yield, or None.
      period: A float period in seconds, or None.

    Yields:
      The captured NoteSequence at event time.

    Raises:
      MidiHubException: If neither `signal` nor `period` or both are specified.
    """
    if (signal, period).count(None) != 1:
      raise MidiHubException(
          'Exactly one of `signal` or `period` must be provided to `iterate` '
          'call.')
    if signal is None:
      sleeper = concurrency.Sleeper()
      next_yield_time = time.time() + period
    else:
      regex = re.compile(str(signal))
      queue = Queue.Queue()
      with self._lock:
        self._iter_signals.append((regex, queue))
    while self.is_alive():
      if signal is None:
        skipped_periods = (time.time() - next_yield_time) // period
        if skipped_periods > 0:
          tf.logging.warn(
              'Skipping %d %.3fs period(s) to catch up on iteration.',
              skipped_periods, period)
          next_yield_time += skipped_periods * period
        else:
          sleeper.sleep_until(next_yield_time)
        end_time = next_yield_time
        next_yield_time += period
      else:
        signal_msg = queue.get()
        if signal_msg is MidiCaptor._WAKE_MESSAGE:
          # This is only received when the thread is in the process of
          # terminating. Wait until it is done before yielding the final
          # sequence.
          self.join()
          break
        end_time = signal_msg.time
      # Acquire lock so that `captured_sequence` will be called before thread
      # terminates, if it has not already done so.
      with self._lock:
        if not self.is_alive():
          break
        captured_sequence = self.captured_sequence(end_time)
      yield captured_sequence
    yield self.captured_sequence()

  def register_callback(self, fn, signal=None, period=None):
    """Calls `fn` at every signal message or time period.

    The callback function must take exactly one argument, which will be the
    current captured NoteSequence.

    Exactly one of `signal` or `period` must be specified. Continues until the
    captor thread terminates, at which point the callback is called with the
    final sequence, or `cancel_callback` is called.

    If callback execution is longer than a period, immediately calls upon
    completion and logs a warning.

    Args:
      fn: The callback function to call, passing in the captured sequence.
      signal: A MidiSignal to use as a signal to call `fn` on the current
          captured sequence, or None.
      period: A float period in seconds to specify how often to call `fn`, or
          None.

    Returns:
      The unique name of the callback thread to enable cancellation.

    Raises:
      MidiHubException: If neither `signal` nor `period` or both are specified.
    """

    class IteratorCallback(threading.Thread):
      """A thread for executing a callback on each iteration."""

      def __init__(self, iterator, fn):
        self._iterator = iterator
        self._fn = fn
        self._stop_signal = threading.Event()
        super(IteratorCallback, self).__init__()

      def run(self):
        """Calls the callback function for each iterator value."""
        for captured_sequence in self._iterator:
          if self._stop_signal.is_set():
            break
          self._fn(captured_sequence)

      def stop(self):
        """Stops the thread on next iteration, without blocking."""
        self._stop_signal.set()

    t = IteratorCallback(self.iterate(signal, period), fn)
    t.start()

    with self._lock:
      assert t.name not in self._callbacks
      self._callbacks[t.name] = t
    return t.name

  @concurrency.serialized
  def cancel_callback(self, name):
    """Cancels the callback with the given name.

    While the thread may continue to run until the next iteration, the callback
    function will not be executed.

    Args:
      name: The unique name of the callback thread to cancel.
    """
    self._callbacks[name].stop()
    del self._callbacks[name]
class MonophonicMidiCaptor(MidiCaptor):
  """A MidiCaptor for monophonic melodies."""

  def __init__(self, *args, **kwargs):
    # The single NoteSequence.Note currently sounding, or None.
    self._open_note = None
    super(MonophonicMidiCaptor, self).__init__(*args, **kwargs)

  @concurrency.serialized
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during monophonic capture.

    A note_on (with nonzero velocity) opens a new note, first ending any
    currently open note; a repeated note_on for the already-open pitch is
    ignored. A note_off (or zero-velocity note_on) ends the open note only if
    the pitch matches, and is otherwise dropped.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    is_note_end = (msg.type == 'note_off' or
                   (msg.type == 'note_on' and msg.velocity == 0))
    if is_note_end:
      current = self._open_note
      if current is not None and current.pitch == msg.note:
        current.end_time = msg.time
        self._open_note = None
      # Otherwise this note_off does not match the open note; drop it.
    elif msg.type == 'note_on':
      current = self._open_note
      if current is not None:
        if current.pitch == msg.note:
          # Duplicate note_on for the pitch already sounding; ignore it.
          return
        # Monophonic: end the previous note before starting the new one.
        current.end_time = msg.time
      self._open_note = self._add_note(msg)
class PolyphonicMidiCaptor(MidiCaptor):
  """A MidiCaptor for polyphonic melodies."""

  def __init__(self, *args, **kwargs):
    # Maps pitch to the currently open NoteSequence.Note at that pitch.
    self._open_notes = dict()
    super(PolyphonicMidiCaptor, self).__init__(*args, **kwargs)

  @concurrency.serialized
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during polyphonic capture.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    is_note_end = (msg.type == 'note_off' or
                   (msg.type == 'note_on' and msg.velocity == 0))
    if is_note_end:
      # Close the matching open note, if any; unmatched note_offs are dropped.
      ended_note = self._open_notes.pop(msg.note, None)
      if ended_note is not None:
        ended_note.end_time = msg.time
    elif msg.type == 'note_on':
      if msg.note in self._open_notes:
        # Likely just a repeat of the previous message; ignore it.
        return
      self._open_notes[msg.note] = self._add_note(msg)
class TextureType(object):
  """An Enum specifying the type of musical texture."""
  # At most one note sounding at a time (a single melody line).
  MONOPHONIC = 1
  # Multiple notes may sound simultaneously (chords/harmony).
  POLYPHONIC = 2
class MidiHub(object):
  """A MIDI interface for capturing and playing NoteSequences.

  Ignores/filters `program_change` messages. Assumes all messages are on the
  same channel.

  Args:
    input_midi_port: The string MIDI port name or mido.ports.BaseInput object to
        use for input. If a name is given that is not an available port, a
        virtual port will be opened with that name.
    output_midi_port: The string MIDI port name mido.ports.BaseOutput object to
        use for output. If a name is given that is not an available port, a
        virtual port will be opened with that name.
    texture_type: A TextureType Enum specifying the musical texture to assume
        during capture, passthrough, and playback.
    passthrough: A boolean specifying whether or not to pass incoming messages
        through to the output, applying the appropriate texture rules.
    playback_channel: The MIDI channel to send playback events.
    playback_offset: The float time in seconds to adjust the playback event
        times by.
  """

  def __init__(self, input_midi_port, output_midi_port, texture_type,
               passthrough=True, playback_channel=0, playback_offset=0.0):
    self._texture_type = texture_type
    self._passthrough = passthrough
    self._playback_channel = playback_channel
    self._playback_offset = playback_offset
    # When `passthrough` is True, this is the set of open MIDI note pitches.
    self._open_notes = set()
    # This lock is used by the serialized decorator.
    self._lock = threading.RLock()
    # A dictionary mapping a compiled MidiSignal regex to a condition variable
    # that will be notified when a matching message is received.
    self._signals = {}
    # A dictionary mapping a compiled MidiSignal regex to a list of functions
    # that will be called with the triggering message in individual threads when
    # a matching message is received.
    self._callbacks = defaultdict(list)
    # A dictionary mapping integer control numbers to most recently-received
    # integer value.
    self._control_values = {}
    # Threads actively being used to capture incoming messages.
    self._captors = []
    # Potentially active player threads.
    self._players = []
    self._metronome = None
    # Open MIDI ports.
    self._inport = (
        input_midi_port if isinstance(input_midi_port, mido.ports.BaseInput)
        else mido.open_input(
            input_midi_port,
            virtual=input_midi_port not in get_available_input_ports()))
    self._outport = (
        output_midi_port if isinstance(output_midi_port, mido.ports.BaseOutput)
        else mido.open_output(
            output_midi_port,
            virtual=output_midi_port not in get_available_output_ports()))
    # Start processing incoming messages.
    self._inport.callback = self._timestamp_and_handle_message

  def __del__(self):
    """Stops all running threads and waits for them to terminate."""
    for captor in self._captors:
      captor.stop(block=False)
    for player in self._players:
      player.stop(block=False)
    self.stop_metronome()
    for captor in self._captors:
      captor.join()
    for player in self._players:
      player.join()

  @property
  @concurrency.serialized
  def passthrough(self):
    return self._passthrough

  @passthrough.setter
  @concurrency.serialized
  def passthrough(self, value):
    """Sets passthrough value, closing all open notes if being disabled."""
    if self._passthrough == value:
      return
    # Close all open notes.
    while self._open_notes:
      self._outport.send(mido.Message('note_off', note=self._open_notes.pop()))
    self._passthrough = value

  def _timestamp_and_handle_message(self, msg):
    """Stamps message with current time and passes it to the handler."""
    if msg.type == 'program_change':
      return
    if not msg.time:
      msg.time = time.time()
    self._handle_message(msg)

  @concurrency.serialized
  def _handle_message(self, msg):
    """Handles a single incoming MIDI message.

    -If the message is being used as a signal, notifies threads waiting on the
     appropriate condition variable.
    -Adds the message to any capture queues.
    -Passes the message through to the output port, if appropriate.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    # Notify any threads waiting for this message.
    msg_str = str(msg)
    for regex in list(self._signals):
      if regex.match(msg_str) is not None:
        self._signals[regex].notify_all()
        del self._signals[regex]
    # Call any callbacks waiting for this message.
    for regex in list(self._callbacks):
      if regex.match(msg_str) is not None:
        for fn in self._callbacks[regex]:
          threading.Thread(target=fn, args=(msg,)).start()
        del self._callbacks[regex]
    # Remove any captors that are no longer alive.
    self._captors[:] = [t for t in self._captors if t.is_alive()]
    # Add a different copy of the message to the receive queue of each live
    # capture thread.
    for t in self._captors:
      t.receive(msg.copy())
    # Update control values if this is a control change message.
    if msg.type == 'control_change':
      if self._control_values.get(msg.control, None) != msg.value:
        tf.logging.debug('Control change %d: %d', msg.control, msg.value)
      self._control_values[msg.control] = msg.value
    # Pass the message through to the output port, if appropriate.
    if not self._passthrough:
      pass
    elif self._texture_type == TextureType.POLYPHONIC:
      if msg.type == 'note_on' and msg.velocity > 0:
        self._open_notes.add(msg.note)
      elif (msg.type == 'note_off' or
            (msg.type == 'note_on' and msg.velocity == 0)):
        self._open_notes.discard(msg.note)
      self._outport.send(msg)
    elif self._texture_type == TextureType.MONOPHONIC:
      assert len(self._open_notes) <= 1
      if msg.type not in ['note_on', 'note_off']:
        self._outport.send(msg)
      elif ((msg.type == 'note_off' or
             msg.type == 'note_on' and msg.velocity == 0) and
            msg.note in self._open_notes):
        self._outport.send(msg)
        self._open_notes.remove(msg.note)
      elif msg.type == 'note_on' and msg.velocity > 0:
        if self._open_notes:
          # Monophonic: close the previous note before opening the new one.
          self._outport.send(
              mido.Message('note_off', note=self._open_notes.pop()))
        self._outport.send(msg)
        self._open_notes.add(msg.note)

  def start_capture(self, qpm, start_time, stop_time=None, stop_signal=None):
    """Starts a MidiCaptor to compile incoming messages into a NoteSequence.

    If neither `stop_time` nor `stop_signal`, are provided, the caller must
    explicitly stop the returned capture thread. If both are specified, the one
    that occurs first will stop the capture.

    Args:
      qpm: The integer quarters per minute to use for the captured sequence.
      start_time: The float wall time in seconds to start the capture. May be in
          the past. Used for beat alignment.
      stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to use to stop
          the capture.

    Returns:
      The MidiCaptor thread.
    """
    captor_class = (MonophonicMidiCaptor if
                    self._texture_type == TextureType.MONOPHONIC else
                    PolyphonicMidiCaptor)
    captor = captor_class(qpm, start_time, stop_time, stop_signal)
    with self._lock:
      self._captors.append(captor)
    captor.start()
    return captor

  def capture_sequence(self, qpm, start_time, stop_time=None, stop_signal=None):
    """Compiles and returns incoming messages into a NoteSequence.

    Blocks until capture stops. At least one of `stop_time` or `stop_signal`
    must be specified. If both are specified, the one that occurs first will
    stop the capture.

    Args:
      qpm: The integer quarters per minute to use for the captured sequence.
      start_time: The float wall time in seconds to start the capture. May be in
          the past. Used for beat alignment.
      stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to use to stop
          the capture.

    Returns:
      The captured NoteSequence proto.

    Raises:
      MidiHubException: When neither `stop_time` nor `stop_signal` are provided.
    """
    if stop_time is None and stop_signal is None:
      raise MidiHubException(
          'At least one of `stop_time` and `stop_signal` must be provided to '
          '`capture_sequence` call.')
    captor = self.start_capture(qpm, start_time, stop_time, stop_signal)
    captor.join()
    return captor.captured_sequence()

  @concurrency.serialized
  def wait_for_event(self, signal=None, timeout=None):
    """Blocks until a matching mido.Message arrives or the timeout occurs.

    Exactly one of `signal` or `timeout` must be specified. Using a timeout
    with a threading.Condition object causes additional delays when notified.

    Args:
      signal: A MidiSignal to use as a signal to stop waiting, or None.
      timeout: A float timeout in seconds, or None.

    Raises:
      MidiHubException: If neither `signal` nor `timeout` or both are specified.
    """
    if (signal, timeout).count(None) != 1:
      raise MidiHubException(
          'Exactly one of `signal` or `timeout` must be provided to '
          '`wait_for_event` call.')

    if signal is None:
      concurrency.Sleeper().sleep(timeout)
      return

    signal_pattern = str(signal)
    cond_var = None
    # Bug fix: the previous code iterated the dict directly (which yields only
    # keys and raised on tuple unpacking) and could leave `cond_var` bound to
    # an unrelated condition variable when no pattern matched. Iterate items
    # and only keep the condition variable whose pattern actually matches.
    for regex, existing_cond_var in self._signals.items():
      if regex.pattern == signal_pattern:
        cond_var = existing_cond_var
        break
    if cond_var is None:
      cond_var = threading.Condition(self._lock)
      self._signals[re.compile(signal_pattern)] = cond_var
    cond_var.wait()

  @concurrency.serialized
  def wake_signal_waiters(self, signal=None):
    """Wakes all threads waiting on a signal event.

    Args:
      signal: The MidiSignal to wake threads waiting on, or None to wake all.
    """
    for regex in list(self._signals):
      if signal is None or regex.pattern == str(signal):
        self._signals[regex].notify_all()
        del self._signals[regex]
    for captor in self._captors:
      captor.wake_signal_waiters(signal)

  @concurrency.serialized
  def start_metronome(self, qpm, start_time):
    """Starts or updates the metronome with the given arguments.

    Args:
      qpm: The quarter notes per minute to use.
      start_time: The wall time in seconds that the metronome is started on for
          synchronization and beat alignment. May be in the past.
    """
    if self._metronome is not None and self._metronome.is_alive():
      self._metronome.update(qpm, start_time)
    else:
      self._metronome = Metronome(self._outport, qpm, start_time)
      self._metronome.start()

  @concurrency.serialized
  def stop_metronome(self, stop_time=0, block=True):
    """Stops the metronome at the given time if it is currently running.

    Args:
      stop_time: The float wall time in seconds after which the metronome should
          stop. By default, stops at next tick.
      block: If true, blocks until metronome is stopped.
    """
    if self._metronome is None:
      return
    self._metronome.stop(stop_time, block)
    self._metronome = None

  def start_playback(self, sequence, start_time=None,
                     allow_updates=False):
    """Plays the notes in a NoteSequence via the MIDI output port.

    Args:
      sequence: The NoteSequence to play, with times based on the wall clock.
      start_time: The float time before which to strip events. Defaults to call
          time. Events before this time will be sent immediately on start.
      allow_updates: A boolean specifying whether or not the player should
          allow the sequence to be updated and stay alive until `stop` is
          called.

    Returns:
      The MidiPlayer thread handling playback to enable updating.
    """
    # Bug fix: the default was previously `start_time=time.time()`, which is
    # evaluated only once, at function definition time -- not at call time as
    # documented. Use None as a sentinel and resolve the default per call.
    if start_time is None:
      start_time = time.time()
    player = MidiPlayer(self._outport, sequence, start_time, allow_updates,
                        self._playback_channel, self._playback_offset)
    with self._lock:
      self._players.append(player)
    player.start()
    return player

  @concurrency.serialized
  def control_value(self, control_number):
    """Returns the most recently received value for the given control number.

    Args:
      control_number: The integer control number to return the value for, or
          None.

    Returns:
      The most recently received integer value for the given control number, or
      None if no values have been received for that control.
    """
    if control_number is None:
      return None
    return self._control_values.get(control_number)

  def send_control_change(self, control_number, value):
    """Sends the specified control change message on the output port."""
    self._outport.send(
        mido.Message(
            type='control_change',
            control=control_number,
            value=value))

  @concurrency.serialized
  def register_callback(self, fn, signal):
    """Calls `fn` at the next signal message.

    The callback function must take exactly one argument, which will be the
    message triggering the signal.

    Survives until signal is called or the MidiHub is destroyed.

    Args:
      fn: The callback function to call, passing in the triggering message.
      signal: A MidiSignal to use as a signal to call `fn` on the triggering
          message.
    """
    self._callbacks[re.compile(str(signal))].append(fn)
| {
"repo_name": "YoshikawaMasashi/magenta",
"path": "magenta/interfaces/midi/midi_hub.py",
"copies": "2",
"size": "42110",
"license": "apache-2.0",
"hash": -8017975807528309000,
"line_mean": 34.8688245315,
"line_max": 80,
"alpha_frac": 0.6564948943,
"autogenerated": false,
"ratio": 3.913568773234201,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.007314036097165351,
"num_lines": 1174
} |
"""A module for interfacing with the MIDI environment."""
import abc
from collections import deque
import logging
import Queue
import re
import threading
import time
# internal imports
import mido
# TODO(adarob): Use flattened imports.
from magenta.common import concurrency
from magenta.protobuf import music_pb2
# Length in seconds of each metronome tick note.
_DEFAULT_METRONOME_TICK_DURATION = 0.05
# MIDI pitch used for the metronome tick note.
_DEFAULT_METRONOME_PITCH = 95
# MIDI velocity of the metronome tick note.
_DEFAULT_METRONOME_VELOCITY = 64
# MIDI channel on which metronome ticks are sent.
_METRONOME_CHANNEL = 0
# The RtMidi backend is easier to install and has support for virtual ports.
mido.set_backend('mido.backends.rtmidi')
class MidiHubException(Exception):
  """Base class for exceptions in this module."""
def get_available_input_ports():
  """Returns a list of available input MIDI port names.

  Delegates to the configured mido backend.
  """
  return mido.get_input_names()
def get_available_output_ports():
  """Returns a list of available output MIDI port names.

  Delegates to the configured mido backend.
  """
  return mido.get_output_names()
class MidiSignal(object):
  """A class for representing a MIDI-based event signal.

  Provides a `__str__` method to return a regular expression pattern for
  matching against the string representation of a mido.Message with wildcards
  for unspecified values.

  Supports matching for message types 'note_on', 'note_off', and
  'control_change'. If a mido.Message is given as the `msg` argument, matches
  against the exact message, ignoring the time attribute. If a `msg` is
  not given, keyword arguments must be provided matching some non-empty subset
  of those listed as a value for at least one key in `_VALID_ARGS`.

  Examples:
    # A signal that matches any 'note_on' message.
    note_on_signal = MidiSignal(type='note_on')

    # A signal that matches any 'note_on' or 'note_off' message with a pitch
    # value of 4 and a velocity of 127.
    note_signal = MidiSignal(note=4, velocity=127)

    # A signal that matches a specific mido.Message exactly (ignoring time).
    msg = mido.Message(type='control_change', control=1, value=127)
    control_1_127_signal = MidiSignal(msg=msg)

  Args:
    msg: A mido.Message that should be matched exactly (excluding the time
        attribute) or None if wildcards are to be used.
    **kwargs: Valid mido.Message arguments. Those that are not provided will be
        treated as wildcards.

  Raises:
    MidiHubException: If the message type is unsupported or the arguments are
        not in the valid set for the given or inferred type.
  """
  # NOTE(review): 'program_number' is listed as a valid note-message argument,
  # which does not match the standard mido note attribute set
  # (channel/note/velocity) -- confirm against the mido version in use.
  _NOTE_ARGS = set(['type', 'note', 'program_number', 'velocity'])
  _CONTROL_ARGS = set(['type', 'control', 'value'])
  # Maps each supported message type to its set of matchable arguments.
  _VALID_ARGS = {
      'note_on': _NOTE_ARGS,
      'note_off': _NOTE_ARGS,
      'control_change': _CONTROL_ARGS,
  }

  def __init__(self, msg=None, **kwargs):
    if msg is not None and kwargs:
      raise MidiHubException(
          'Either a mido.Message should be provided or arguments. Not both.')
    type_ = msg.type if msg is not None else kwargs.get('type')
    if type_ is not None and type_ not in self._VALID_ARGS:
      raise MidiHubException(
          "The type of a MidiSignal must be either 'note_on', 'note_off', "
          "'control_change' or None for wildcard matching. Got '%s'." % type_)
    # The compatible mido.Message types.
    inferred_types = [type_] if type_ is not None else []
    # If msg is not provided, check that the given arguments are valid for some
    # message type.
    if msg is None:
      if type_ is not None:
        for arg_name in kwargs:
          if arg_name not in self._VALID_ARGS[type_]:
            raise MidiHubException(
                "Invalid argument for type '%s': %s" % (type_, arg_name))
      else:
        if kwargs:
          # Infer the compatible types: those whose argument set covers all
          # of the provided keyword arguments.
          for name, args in self._VALID_ARGS.iteritems():
            if set(kwargs) <= args:
              inferred_types.append(name)
        if not inferred_types:
          raise MidiHubException(
              'Could not infer a message type for set of given arguments: %s'
              % ', '.join(kwargs))
        # If there is only a single valid inferred type, use it.
        if len(inferred_types) == 1:
          type_ = inferred_types[0]
    if msg is not None:
      # Exact match (minus time): serialize the message and append a wildcard
      # for the trailing time attribute.
      self._regex_pattern = '^' + mido.messages.format_as_string(
          msg, include_time=False) + r' time=\d+.\d+$'
    else:
      # Generate regex pattern.
      parts = ['.*' if type_ is None else type_]
      for name in mido.messages.get_spec(inferred_types[0]).arguments:
        if name in kwargs:
          parts.append('%s=%d' % (name, kwargs[name]))
        else:
          parts.append(r'%s=\d+' % name)
      self._regex_pattern = '^' + ' '.join(parts) + r' time=\d+.\d+$'

  def __str__(self):
    """Returns a regex pattern for matching against a mido.Message string."""
    return self._regex_pattern
class Metronome(threading.Thread):
  """A thread implementing a MIDI metronome.

  Args:
    outport: The Mido port for sending messages.
    qpm: The integer quarters per minute to signal on.
    start_time: The float wall time in seconds to treat as the first beat
        for alignment. If in the future, the first tick will not start until
        after this time.
    stop_time: The float wall time in seconds after which the metronome should
        stop, or None if it should continue until `stop` is called.
    velocity: The velocity of the metronome's tick `note_on` message.
    pitch: The pitch of the metronome's tick `note_on` message.
    duration: The duration of the metronome's tick.
  """

  def __init__(self,
               outport,
               qpm,
               start_time,
               stop_time=None,
               velocity=_DEFAULT_METRONOME_VELOCITY,
               pitch=_DEFAULT_METRONOME_PITCH,
               duration=_DEFAULT_METRONOME_TICK_DURATION):
    self._outport = outport
    self._qpm = qpm
    self._start_time = start_time
    self._velocity = velocity
    self._pitch = pitch
    self._duration = duration
    # A signal for when to stop the metronome.
    self._stop_time = stop_time
    super(Metronome, self).__init__()

  def run(self):
    """Outputs metronome tone on the qpm interval until stop signal received."""
    # Seconds per quarter-note beat.
    period = 60. / self._qpm
    sleeper = concurrency.Sleeper()
    # Align the first tick to the beat grid anchored at `_start_time`.
    now = time.time()
    next_tick_time = max(
        self._start_time,
        now + period - ((now - self._start_time) % period))
    while self._stop_time is None or self._stop_time > next_tick_time:
      sleeper.sleep_until(next_tick_time)
      self._outport.send(
          mido.Message(
              type='note_on',
              note=self._pitch,
              channel=_METRONOME_CHANNEL,
              velocity=self._velocity))
      # Hold the tick note for its duration, then silence it.
      sleeper.sleep(self._duration)
      self._outport.send(
          mido.Message(
              type='note_off',
              note=self._pitch,
              channel=_METRONOME_CHANNEL))
      # Re-anchor to the beat grid each iteration instead of accumulating
      # drift from sleep inaccuracy.
      now = time.time()
      next_tick_time = now + period - ((now - self._start_time) % period)

  def stop(self, stop_time=0, block=True):
    """Signals for the metronome to stop.

    Args:
      stop_time: The float wall time in seconds after which the metronome should
          stop. By default, stops at next tick.
      block: If true, blocks until thread terminates.
    """
    # NOTE(review): `_stop_time` is written here from another thread without a
    # lock while the run loop reads it each iteration -- presumably acceptable
    # for metronome precision; confirm.
    self._stop_time = stop_time
    if block:
      self.join()
class MidiPlayer(threading.Thread):
  """A thread for playing back a NoteSequence proto via MIDI.

  The NoteSequence times must be based on the wall time. The playhead matches
  the wall clock. The playback sequence may be updated at any time if
  `allow_updates` is set to True.

  Args:
    outport: The Mido port for sending messages.
    sequence: The NoteSequence to play.
    allow_updates: If False, the thread will terminate after playback of
        `sequence` completes and calling `update_sequence` will result in an
        exception. Otherwise, the thread will stay alive until `stop` is
        called, allowing for additional updates via `update_sequence`.
  """

  def __init__(self, outport, sequence, allow_updates=False):
    self._outport = outport
    # Set of notes (pitches) that are currently on.
    self._open_notes = set()
    # Lock for serialization.
    self._lock = threading.RLock()
    # A control variable to signal when the sequence has been updated.
    self._update_cv = threading.Condition(self._lock)
    # The queue of mido.Message objects to send, sorted by ascending time.
    self._message_queue = deque()
    # An event that is set when `stop` has been called.
    self._stop_signal = threading.Event()
    # Initialize message queue.
    # We first have to allow "updates" to set the initial sequence.
    self._allow_updates = True
    self.update_sequence(sequence)
    # We now make whether we allow updates dependent on the argument.
    self._allow_updates = allow_updates
    super(MidiPlayer, self).__init__()

  @concurrency.serialized
  def update_sequence(self, sequence):
    """Updates sequence being played by the MidiPlayer.

    Adds events to close any notes that are no longer being closed by the
    new sequence using the times when they would have been closed by the
    previous sequence.

    Args:
      sequence: The NoteSequence to play back.

    Raises:
      MidiHubException: If called when _allow_updates is False.
    """
    if not self._allow_updates:
      raise MidiHubException(
          'Attempted to update a MidiPlayer sequence with updates disabled.')
    start_time = time.time()
    new_message_list = []
    # The set of pitches that are already playing but are not closed without
    # being reopened in the future in the new sequence.
    notes_to_close = set()
    for note in sequence.notes:
      # Notes entirely in the past are skipped; a note already sounding still
      # contributes its note_off if its end is in the future.
      if note.start_time >= start_time:
        new_message_list.append(
            mido.Message(type='note_on', note=note.pitch,
                         velocity=note.velocity, time=note.start_time))
      if note.end_time >= start_time:
        new_message_list.append(
            mido.Message(type='note_off', note=note.pitch, time=note.end_time))
      if note.start_time < start_time and note.pitch not in self._open_notes:
        notes_to_close.add(note.pitch)
    # Carry over the scheduled note_off for each pitch in notes_to_close from
    # the previous queue, so those notes still end at their old times.
    for msg in self._message_queue:
      if not notes_to_close:
        break
      if msg.note in notes_to_close:
        assert msg.type == 'note_off'
        new_message_list.append(msg)
        notes_to_close.remove(msg.note)
    self._message_queue = deque(sorted(new_message_list, key=lambda x: x.time))
    # Wake the playback loop so it picks up the new queue immediately.
    self._update_cv.notify()

  @concurrency.serialized
  def run(self):
    """Plays messages in the queue until empty and _allow_updates is False."""
    # Assumes model where NoteSequence is time-stamped with wall time.
    # TODO(hanzorama): Argument to allow initial start not at sequence start?
    # Drop messages whose time has already passed before playback begins.
    while self._message_queue and self._message_queue[0].time < time.time():
      self._message_queue.popleft()
    while True:
      while self._message_queue:
        delta = self._message_queue[0].time - time.time()
        if delta > 0:
          # Waiting on the condition variable releases the serialization
          # lock, letting `update_sequence`/`stop` run while we wait.
          self._update_cv.wait(timeout=delta)
        else:
          msg = self._message_queue.popleft()
          if msg.type == 'note_on':
            self._open_notes.add(msg.note)
          elif msg.type == 'note_off':
            self._open_notes.discard(msg.note)
          self._outport.send(msg)
      # Either keep player alive and wait for sequence update, or return.
      if self._allow_updates:
        self._update_cv.wait()
      else:
        break

  def stop(self, block=True):
    """Signals for the playback to stop and ends all open notes.

    Args:
      block: If true, blocks until thread terminates.
    """
    with self._lock:
      if not self._stop_signal.is_set():
        self._stop_signal.set()
        self._allow_updates = False
        # Replace message queue with immediate end of open notes.
        self._message_queue.clear()
        for note in self._open_notes:
          self._message_queue.append(
              mido.Message(type='note_off', note=note, time=time.time()))
        self._update_cv.notify()
    if block:
      self.join()
class MidiCaptor(threading.Thread):
  """Base class for thread that captures MIDI into a NoteSequence proto.

  If neither `stop_time` nor `stop_signal` are provided as arguments, the
  capture will continue until the `stop` method is called.

  Args:
    qpm: The quarters per minute to use for the captured sequence.
    start_time: The float wall time in seconds when the capture begins. Events
        occurring before this time are ignored.
    stop_time: The float wall time in seconds when the capture is to be stopped
        or None.
    stop_signal: A MidiSignal to use as a signal to stop capture.
  """
  # Fixed: this was misspelled `_metaclass__` (single leading underscore), so
  # the ABCMeta metaclass was never applied and @abc.abstractmethod was not
  # enforced on subclasses. (Python 2 metaclass declaration syntax.)
  __metaclass__ = abc.ABCMeta

  # A message that is used to wake the consumer thread.
  _WAKE_MESSAGE = None

  def __init__(self, qpm, start_time=0, stop_time=None, stop_signal=None):
    # A lock for synchronization.
    self._lock = threading.RLock()
    self._receive_queue = Queue.Queue()
    self._captured_sequence = music_pb2.NoteSequence()
    self._captured_sequence.tempos.add(qpm=qpm)
    self._start_time = start_time
    self._stop_time = stop_time
    # NOTE(review): when stop_signal is None this compiles the pattern 'None',
    # assumed never to match a mido.Message string -- confirm.
    self._stop_regex = re.compile(str(stop_signal))
    # A list of (compiled regex, Queue) pairs registered by `iterate`.
    self._iter_signals = []
    # An event that is set when `stop` has been called.
    self._stop_signal = threading.Event()
    # Active callback threads keyed by unique thread name.
    self._callbacks = {}
    super(MidiCaptor, self).__init__()

  @property
  @concurrency.serialized
  def _stop_time(self):
    return self._stop_time_unsafe

  @_stop_time.setter
  @concurrency.serialized
  def _stop_time(self, value):
    self._stop_time_unsafe = value

  def receive(self, msg):
    """Adds received mido.Message to the queue for capture.

    Args:
      msg: The incoming mido.Message object to add to the queue for capture.
          The time attribute is assumed to be pre-set with the wall time when
          the message was received.

    Raises:
      MidiHubException: When the received message has an empty time attribute.
    """
    if not msg.time:
      raise MidiHubException(
          'MidiCaptor received message with empty time attribute: %s' % msg)
    self._receive_queue.put(msg)

  @abc.abstractmethod
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during capture.

    Must be serialized in children.

    Args:
      msg: The incoming mido.Message object to capture. The time field is
          assumed to be pre-filled with the wall time when the message was
          received.
    """
    pass

  def run(self):
    """Captures incoming messages until stop time or signal received."""
    while True:
      timeout = None
      stop_time = self._stop_time
      if stop_time is not None:
        timeout = stop_time - time.time()
        if timeout <= 0:
          break
      try:
        msg = self._receive_queue.get(block=True, timeout=timeout)
      except Queue.Empty:
        continue
      if msg is MidiCaptor._WAKE_MESSAGE:
        # `stop` woke us to re-check the (possibly updated) stop time.
        continue
      if msg.time <= self._start_time:
        # Ignore messages from before the capture window.
        continue
      if self._stop_regex.match(str(msg)) is not None:
        break
      with self._lock:
        msg_str = str(msg)
        # Feed a copy of the message to every matching `iterate` generator.
        for regex, queue in self._iter_signals:
          if regex.match(msg_str) is not None:
            queue.put(msg.copy())
      self._capture_message(msg)
    stop_time = self._stop_time
    # If there is no explicit stop time we exited on a stop signal, so `msg`
    # is the matched message and its time ends the capture.
    end_time = stop_time if stop_time is not None else msg.time
    # Acquire lock to avoid race condition with `iterate`.
    with self._lock:
      # Set final captured sequence.
      self._captured_sequence = self.captured_sequence(end_time)
      # Wake up all generators.
      for regex, queue in self._iter_signals:
        queue.put(MidiCaptor._WAKE_MESSAGE)

  def stop(self, stop_time=None, block=True):
    """Ends capture and truncates the captured sequence at `stop_time`.

    Args:
      stop_time: The float time in seconds to stop the capture, or None if it
          should be stopped now. May be in the past, in which case the captured
          sequence will be truncated appropriately.
      block: If True, blocks until the thread terminates.

    Raises:
      MidiHubException: When called multiple times with a `stop_time`.
    """
    with self._lock:
      if self._stop_signal.is_set():
        if stop_time is not None:
          raise MidiHubException(
              '`stop` must not be called multiple times with a `stop_time` on '
              'MidiCaptor.')
      else:
        self._stop_signal.set()
        self._stop_time = time.time() if stop_time is None else stop_time
        # Force the thread to wake since we've updated the stop time.
        self._receive_queue.put(MidiCaptor._WAKE_MESSAGE)
    if block:
      self.join()

  def captured_sequence(self, end_time=None):
    """Returns a copy of the current captured sequence.

    If called before the thread terminates, `end_time` is required and any open
    notes will have their end time set to it, any notes starting after it will
    be removed, and any notes ending after it will be truncated. `total_time`
    will also be set to `end_time`.

    Args:
      end_time: The float time in seconds to close any open notes and after
          which to close or truncate notes, if the thread is still alive.
          Otherwise, must be None.

    Returns:
      A copy of the current captured NoteSequence proto with open notes closed
      at and later notes removed or truncated to `end_time`.

    Raises:
      MidiHubException: When the thread is alive and `end_time` is None or the
          thread is terminated and `end_time` is not None.
    """
    # Make a copy of the sequence currently being captured.
    current_captured_sequence = music_pb2.NoteSequence()
    with self._lock:
      current_captured_sequence.CopyFrom(self._captured_sequence)
    if self.is_alive():
      if end_time is None:
        raise MidiHubException(
            '`end_time` must be provided when capture thread is still running.')
      for i, note in enumerate(current_captured_sequence.notes):
        if note.start_time >= end_time:
          # Assumes notes are ordered by start time (appended in arrival
          # order), so everything from here on can be dropped.
          del current_captured_sequence.notes[i:]
          break
        if not note.end_time or note.end_time > end_time:
          note.end_time = end_time
      current_captured_sequence.total_time = end_time
    elif end_time is not None:
      raise MidiHubException(
          '`end_time` must not be provided when capture is complete.')
    return current_captured_sequence

  def iterate(self, signal=None, period=None):
    """Yields the captured sequence at every signal message or time period.

    Exactly one of `signal` or `period` must be specified. Continues until the
    captor terminates, at which point the final captured sequence is yielded
    before returning.

    If consecutive calls to iterate are longer than the period, immediately
    yields and logs a warning.

    Args:
      signal: A MidiSignal to use as a signal to yield, or None.
      period: A float period in seconds, or None.

    Yields:
      The captured NoteSequence at event time.

    Raises:
      MidiHubException: If neither `signal` nor `period` or both are specified.
    """
    if (signal, period).count(None) != 1:
      raise MidiHubException(
          'Exactly one of `signal` or `period` must be provided to `iterate` '
          'call.')
    if signal is None:
      sleeper = concurrency.Sleeper()
      next_yield_time = time.time() + period
    else:
      regex = re.compile(str(signal))
      queue = Queue.Queue()
      with self._lock:
        self._iter_signals.append((regex, queue))
    while self.is_alive():
      if signal is None:
        skipped_periods = (time.time() - next_yield_time) // period
        if skipped_periods > 0:
          logging.warning(
              'Skipping %d %.3fs period(s) to catch up on iteration.',
              skipped_periods, period)
          next_yield_time += skipped_periods * period
        else:
          sleeper.sleep_until(next_yield_time)
        end_time = next_yield_time
        next_yield_time += period
      else:
        signal_msg = queue.get()
        if signal_msg is MidiCaptor._WAKE_MESSAGE:
          # This is only received when the thread is in the process of
          # terminating. Wait until it is done before yielding the final
          # sequence.
          self.join()
          break
        end_time = signal_msg.time
      # Acquire lock so that `captured_sequence` will be called before thread
      # terminates, if it has not already done so.
      with self._lock:
        if not self.is_alive():
          break
        captured_sequence = self.captured_sequence(end_time)
      yield captured_sequence
    yield self.captured_sequence()

  def register_callback(self, fn, signal=None, period=None):
    """Calls `fn` at every signal message or time period.

    The callback function must take exactly a single argument, which will be
    the current captured NoteSequence.

    Exactly one of `signal` or `period` must be specified. Continues until the
    captor thread terminates, at which point the callback is called with the
    final sequence, or `cancel_callback` is called.

    If callback execution is longer than a period, immediately calls upon
    completion and logs a warning.

    Args:
      fn: The callback function to call, passing in the captured sequence.
      signal: A MidiSignal to use as a signal to call `fn` on the current
          captured sequence, or None.
      period: A float period in seconds to specify how often to call `fn`, or
          None.

    Returns:
      The unique name of the callback thread to enable cancellation.

    Raises:
      MidiHubException: If neither `signal` nor `period` or both are specified.
    """

    class IteratorCallback(threading.Thread):
      """A thread for executing a callback on each iteration."""

      def __init__(self, iterator, fn):
        self._iterator = iterator
        self._fn = fn
        self._stop_signal = threading.Event()
        super(IteratorCallback, self).__init__()

      def run(self):
        """Calls the callback function for each iterator value."""
        for captured_sequence in self._iterator:
          if self._stop_signal.is_set():
            break
          self._fn(captured_sequence)

      def stop(self):
        """Stops the thread on next iteration, without blocking."""
        self._stop_signal.set()

    # Argument validation is delegated to `iterate`.
    t = IteratorCallback(self.iterate(signal, period), fn)
    t.start()
    with self._lock:
      assert t.name not in self._callbacks
      self._callbacks[t.name] = t
    return t.name

  @concurrency.serialized
  def cancel_callback(self, name):
    """Cancels the callback with the given name.

    While the thread may continue to run until the next iteration, the
    callback function will not be executed.

    Args:
      name: The unique name of the callback thread to cancel.
    """
    self._callbacks[name].stop()
    del self._callbacks[name]
class MonophonicMidiCaptor(MidiCaptor):
  """A MidiCaptor that keeps at most one note open at any time."""

  def __init__(self, *args, **kwargs):
    # The single currently-sounding NoteSequence.Note, or None.
    self._open_note = None
    super(MonophonicMidiCaptor, self).__init__(*args, **kwargs)

  @concurrency.serialized
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during capture.

    A note_on (with nonzero velocity) opens a new note, first ending any note
    already open. A note_off (or zero-velocity note_on) ends the open note
    only when the pitch matches; otherwise it is dropped. A repeated note_on
    for the already-open pitch is ignored.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    is_note_end = (
        msg.type == 'note_off' or
        (msg.type == 'note_on' and msg.velocity == 0))
    if is_note_end:
      if self._open_note is None or self._open_note.pitch != msg.note:
        # This is not the note we're looking for. Drop it.
        return
      self._open_note.end_time = msg.time
      self._open_note = None
      return
    if msg.type != 'note_on':
      return
    if self._open_note is not None:
      if self._open_note.pitch == msg.note:
        # Just a repeat of the currently open note; nothing to do.
        return
      # End the previous note before starting the new one.
      self._open_note.end_time = msg.time
    note = self._captured_sequence.notes.add()
    note.start_time = msg.time
    note.pitch = msg.note
    note.velocity = msg.velocity
    self._open_note = note
class PolyphonicMidiCaptor(MidiCaptor):
  """A MidiCaptor that tracks any number of simultaneously open notes."""

  def __init__(self, *args, **kwargs):
    # Maps pitch to the currently open NoteSequence.Note at that pitch.
    self._open_notes = dict()
    super(PolyphonicMidiCaptor, self).__init__(*args, **kwargs)

  @concurrency.serialized
  def _capture_message(self, msg):
    """Handles a single incoming MIDI message during capture.

    A note_on (with nonzero velocity) opens a note at its pitch unless one is
    already open there. A note_off (or zero-velocity note_on) closes the open
    note at that pitch, if any; otherwise it is dropped.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    is_note_end = (
        msg.type == 'note_off' or
        (msg.type == 'note_on' and msg.velocity == 0))
    if is_note_end:
      # pop() both fetches and removes the open note at this pitch, if any.
      ended_note = self._open_notes.pop(msg.note, None)
      if ended_note is not None:
        ended_note.end_time = msg.time
      return
    if msg.type != 'note_on' or msg.note in self._open_notes:
      # Not a note start, or likely a repeat of the previous message.
      return
    note = self._captured_sequence.notes.add()
    note.start_time = msg.time
    note.pitch = msg.note
    note.velocity = msg.velocity
    self._open_notes[note.pitch] = note
class TextureType(object):
  """Enumeration of musical texture kinds used during capture/playback.

  MONOPHONIC allows a single sounding note at a time; POLYPHONIC allows any
  number of simultaneous notes.
  """

  MONOPHONIC, POLYPHONIC = 1, 2
class MidiHub(object):
  """A MIDI interface for capturing and playing NoteSequences.

  Ignores/filters `program_change` messages. Assumes all messages are on the
  same channel.

  Args:
    input_midi_port: The string MIDI port name or mido.ports.BaseInput object
        to use for input. If a name is given that is not an available port, a
        virtual port will be opened with that name.
    output_midi_port: The string MIDI port name or mido.ports.BaseOutput
        object to use for output. If a name is given that is not an available
        port, a virtual port will be opened with that name.
    texture_type: A TextureType Enum specifying the musical texture to assume
        during capture, passthrough, and playback.
    passthrough: A boolean specifying whether or not to pass incoming messages
        through to the output, applying the appropriate texture rules.
  """

  def __init__(self, input_midi_port, output_midi_port, texture_type,
               passthrough=True):
    self._texture_type = texture_type
    self._passthrough = passthrough
    # When `passthrough` is True, this is the set of open MIDI note pitches.
    self._open_notes = set()
    # This lock is used by the serialized decorator.
    self._lock = threading.RLock()
    # A dictionary mapping a compiled message regex to a condition variable
    # that will be notified when a matching message is received, ignoring the
    # time field.
    self._signals = {}
    # Threads actively being used to capture incoming messages.
    self._captors = []
    # Potentially active player threads.
    self._players = []
    self._metronome = None
    # Open MIDI ports.
    self._inport = (
        input_midi_port if isinstance(input_midi_port, mido.ports.BaseInput)
        else mido.open_input(
            input_midi_port,
            virtual=input_midi_port not in get_available_input_ports()))
    self._outport = (
        output_midi_port if isinstance(output_midi_port, mido.ports.BaseOutput)
        else mido.open_output(
            output_midi_port,
            virtual=output_midi_port not in get_available_output_ports()))
    # Start processing incoming messages.
    self._inport.callback = self._timestamp_and_handle_message

  def __del__(self):
    """Stops all running threads and waits for them to terminate."""
    # Signal all threads first (non-blocking), then join, so the shutdowns
    # overlap instead of running serially.
    for captor in self._captors:
      captor.stop(block=False)
    for player in self._players:
      player.stop(block=False)
    self.stop_metronome()
    for captor in self._captors:
      captor.join()
    for player in self._players:
      player.join()

  @property
  @concurrency.serialized
  def passthrough(self):
    return self._passthrough

  @passthrough.setter
  @concurrency.serialized
  def passthrough(self, value):
    """Sets passthrough value, closing all open notes if being disabled."""
    if self._passthrough == value:
      return
    # Close all open notes.
    while self._open_notes:
      self._outport.send(mido.Message('note_off', note=self._open_notes.pop()))
    self._passthrough = value

  def _timestamp_and_handle_message(self, msg):
    """Stamps message with current time and passes it to the handler."""
    if msg.type == 'program_change':
      # Program changes are filtered out entirely.
      return
    if not msg.time:
      msg.time = time.time()
    self._handle_message(msg)

  @concurrency.serialized
  def _handle_message(self, msg):
    """Handles a single incoming MIDI message.

    -If the message is being used as a signal, notifies threads waiting on the
     appropriate condition variable.
    -Adds the message to any capture queues.
    -Passes the message through to the output port, if appropriate.

    Args:
      msg: The mido.Message MIDI message to handle.
    """
    # Notify any threads waiting for this message.
    msg_str = str(msg)
    for regex in list(self._signals):
      if regex.match(msg_str) is not None:
        self._signals[regex].notify_all()
        del self._signals[regex]
    # Remove any captors that are no longer alive.
    self._captors[:] = [t for t in self._captors if t.is_alive()]
    # Add a different copy of the message to the receive queue of each live
    # capture thread.
    for t in self._captors:
      t.receive(msg.copy())
    # Pass the message through to the output port, if appropriate.
    if not self._passthrough:
      pass
    elif self._texture_type == TextureType.POLYPHONIC:
      if msg.type == 'note_on' and msg.velocity > 0:
        self._open_notes.add(msg.note)
      elif (msg.type == 'note_off' or
            (msg.type == 'note_on' and msg.velocity == 0)):
        self._open_notes.discard(msg.note)
      self._outport.send(msg)
    elif self._texture_type == TextureType.MONOPHONIC:
      assert len(self._open_notes) <= 1
      if msg.type not in ['note_on', 'note_off']:
        self._outport.send(msg)
      elif ((msg.type == 'note_off' or
             msg.type == 'note_on' and msg.velocity == 0) and
            msg.note in self._open_notes):
        self._outport.send(msg)
        self._open_notes.remove(msg.note)
      elif msg.type == 'note_on' and msg.velocity > 0:
        if self._open_notes:
          # Monophonic: end the currently sounding note before the new one.
          self._outport.send(
              mido.Message('note_off', note=self._open_notes.pop()))
        self._outport.send(msg)
        self._open_notes.add(msg.note)

  def start_capture(self, qpm, start_time, stop_time=None, stop_signal=None):
    """Starts a MidiCaptor to compile incoming messages into a NoteSequence.

    If neither `stop_time` nor `stop_signal`, are provided, the caller must
    explicitly stop the returned capture thread. If both are specified, the
    one that occurs first will stop the capture.

    Args:
      qpm: The integer quarters per minute to use for the captured sequence.
      start_time: The float wall time in seconds to start the capture. May be
          in the past. Used for beat alignment.
      stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to use to stop
          the capture.

    Returns:
      The MidiCaptor thread.
    """
    captor_class = (MonophonicMidiCaptor if
                    self._texture_type == TextureType.MONOPHONIC else
                    PolyphonicMidiCaptor)
    captor = captor_class(qpm, start_time, stop_time, stop_signal)
    with self._lock:
      self._captors.append(captor)
    captor.start()
    return captor

  def capture_sequence(self, qpm, start_time, stop_time=None, stop_signal=None):
    """Compiles and returns incoming messages into a NoteSequence.

    Blocks until capture stops. At least one of `stop_time` or `stop_signal`
    must be specified. If both are specified, the one that occurs first will
    stop the capture.

    Args:
      qpm: The integer quarters per minute to use for the captured sequence.
      start_time: The float wall time in seconds to start the capture. May be
          in the past. Used for beat alignment.
      stop_time: The optional float wall time in seconds to stop the capture.
      stop_signal: The optional mido.Message to use as a signal to use to stop
          the capture.

    Returns:
      The captured NoteSequence proto.

    Raises:
      MidiHubException: When neither `stop_time` nor `stop_signal` are
          provided.
    """
    if stop_time is None and stop_signal is None:
      raise MidiHubException(
          'At least one of `stop_time` and `stop_signal` must be provided to '
          '`capture_sequence` call.')
    captor = self.start_capture(qpm, start_time, stop_time, stop_signal)
    captor.join()
    return captor.captured_sequence()

  @concurrency.serialized
  def wait_for_event(self, signal=None, timeout=None):
    """Blocks until a matching mido.Message arrives or the timeout occurs.

    Exactly one of `signal` or `timeout` must be specified. Using a timeout
    with a threading.Condition object causes additional delays when notified.

    Args:
      signal: A MidiSignal to use as a signal to stop waiting, or None.
      timeout: A float timeout in seconds, or None.

    Raises:
      MidiHubException: If neither `signal` nor `timeout` or both are
          specified.
    """
    if (signal, timeout).count(None) != 1:
      raise MidiHubException(
          'Exactly one of `signal` or `timeout` must be provided to '
          '`wait_for_event` call.')
    if signal is None:
      concurrency.Sleeper().sleep(timeout)
      return
    signal_pattern = str(signal)
    cond_var = None
    # Reuse an existing condition variable registered for this pattern.
    # Fixed: the previous code iterated the dict directly (keys only), which
    # raised on unpacking when non-empty, and the leaked loop variable also
    # defeated the `cond_var is None` check below.
    for regex, cvar in self._signals.items():
      if regex.pattern == signal_pattern:
        cond_var = cvar
        break
    if cond_var is None:
      cond_var = threading.Condition(self._lock)
      self._signals[re.compile(signal_pattern)] = cond_var
    # Waiting releases the serialization lock until notified by
    # `_handle_message` or `wake_signal_waiters`.
    cond_var.wait()

  @concurrency.serialized
  def wake_signal_waiters(self, signal=None):
    """Wakes all threads waiting on a signal event.

    Args:
      signal: The MidiSignal to wake threads waiting on, or None to wake all.
    """
    for regex in list(self._signals):
      if signal is None or regex.pattern == str(signal):
        self._signals[regex].notify_all()
        del self._signals[regex]

  @concurrency.serialized
  def start_metronome(self, qpm, start_time):
    """Starts or re-starts the metronome with the given arguments.

    Args:
      qpm: The quarter notes per minute to use.
      start_time: The wall time in seconds that the metronome is started on
          for synchronization and beat alignment. May be in the past.
    """
    if self._metronome is not None:
      self.stop_metronome()
    self._metronome = Metronome(self._outport, qpm, start_time)
    self._metronome.start()

  @concurrency.serialized
  def stop_metronome(self, stop_time=0, block=True):
    """Stops the metronome at the given time if it is currently running.

    Args:
      stop_time: The float wall time in seconds after which the metronome
          should stop. By default, stops at next tick.
      block: If true, blocks until metronome is stopped.
    """
    if self._metronome is None:
      return
    self._metronome.stop(stop_time, block)
    self._metronome = None

  def start_playback(self, sequence, allow_updates=False):
    """Plays the notes in a NoteSequence via the MIDI output port.

    Args:
      sequence: The NoteSequence to play, with times based on the wall clock.
      allow_updates: A boolean specifying whether or not the player should
          allow the sequence to be updated and stay alive until `stop` is
          called.

    Returns:
      The MidiPlayer thread handling playback to enable updating.
    """
    player = MidiPlayer(self._outport, sequence, allow_updates)
    with self._lock:
      self._players.append(player)
    player.start()
    return player
| {
"repo_name": "hanzorama/magenta",
"path": "magenta/interfaces/midi/midi_hub.py",
"copies": "1",
"size": "36483",
"license": "apache-2.0",
"hash": 8382323773509153000,
"line_mean": 34.3860329777,
"line_max": 80,
"alpha_frac": 0.6537017241,
"autogenerated": false,
"ratio": 3.8944278394534586,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5048129563553458,
"avg_score": null,
"num_lines": null
} |
"""A module for I/O helper functions, classes, etc."""
from collections import defaultdict
import cyclopts.tools as tools
class PathMap(object):
    """Associates a column name with an HDF5 path.

    This base class only stores the column name; subclasses are responsible
    for providing the ``path`` property.
    """

    def __init__(self, col=None):
        """Parameters
        ----------
        col : str
            the column name
        """
        self.col = col

    @property
    def path(self):
        """The HDF5 path for this column; subclasses must override this."""
        raise NotImplementedError
def value_mapping(tbl, x, y, uuids=True):
    """Return a mapping from each x value to the list of y values sharing it.

    Iterates the table's rows and groups the y-column values under their
    x-column value. If `uuids` is True, both values are converted with
    cyclopts.tools.str_to_uuid before being stored.
    """
    # The conversion function is only resolved when uuids is True, so `tools`
    # is not touched on the plain-value path.
    convert = tools.str_to_uuid if uuids else (lambda v: v)
    mapping = defaultdict(list)
    for row in tbl.iterrows():
        mapping[convert(row[x])].append(convert(row[y]))
    return mapping
def grab_data(h5file, path, col, matching=None):
    """Grabs data in a path matching parameters

    Parameters
    ----------
    h5file : PyTables HDF5 File handle
    path : str
        the path to the appropriate table
    col : str
        the target column name
    matching : tuple, optional
        a tuple of col name and data to match; if no match is given, all
        column values will be returned

    Returns
    -------
    data : list or dict
        if a matching is provided, a dictionary from the instance id to the
        data value is returned, otherwise a list of all column values is given
    """
    node = h5file.get_node(path)
    if matching is None:
        return [row[col] for row in node.iterrows()]
    scol, search = matching
    return {row['instid']: row[col]
            for row in node.iterrows() if row[scol] in search}
def param_mapping(h5file, path, kcol, vcol):
    """return a mapping of params to all values found

    Parameters
    ----------
    h5file : PyTables HDF5 File handle
    path : str
        the path to the appropriate table
    kcol : str
        the key column name
    vcol : str
        the value column name

    Return
    ------
    mapping : dict
        a mapping from key columns to a set of all found value columns
    """
    node = h5file.get_node(path)
    mapping = defaultdict(set)
    for row in node.iterrows():
        mapping[row[kcol]].add(row[vcol])
    return mapping
def param_to_iids(h5file, fam_path, sp_path, col):
    """Return a mapping of parameter values to instids

    Parameters
    ----------
    h5file : PyTables HDF5 File handle
    fam_path : str
        the path to the appropriate family table (for param ids to inst ids)
    sp_path : str
        the path to the appropriate species table (for param to param ids)
    col : str
        the parameter column name

    Return
    ------
    mapping : dict
        a mapping from parameter values to the set of all matching instids
    """
    # First resolve paramid -> instids from the family table, then join it
    # against the species table's param -> paramids mapping.
    iids_by_pid = param_mapping(h5file, fam_path, 'paramid', 'instid')
    mapping = defaultdict(set)
    for param, pids in param_mapping(h5file, sp_path, col, 'paramid').items():
        for pid in pids:
            mapping[param].update(iids_by_pid[pid])
    return mapping
def tbl_to_dict(tbl, key):
    """Convert a table into a dict keyed by one of its columns.

    Parameters
    ----------
    tbl : PyTables Table handle
        must provide read() and a coltypes mapping of column names
    key : str
        the column to key the result on

    Return
    ------
    mapping : dict
        maps each row's `key` value to a dict of its remaining columns
    """
    rows = tbl.read()
    # Materialize as a list before removing: dict.keys() returns a view with
    # no remove() on Python 3 (the original relied on Python 2 list-keys).
    keys = list(tbl.coltypes.keys())
    keys.remove(key)
    return {x[key]: {k: x[k] for k in keys} for x in rows}
| {
"repo_name": "gidden/cyclopts",
"path": "cyclopts/io_tools.py",
"copies": "1",
"size": "3536",
"license": "bsd-3-clause",
"hash": 3760035189876235300,
"line_mean": 28.4666666667,
"line_max": 84,
"alpha_frac": 0.6043552036,
"autogenerated": false,
"ratio": 3.8351409978308024,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49394962014308025,
"avg_score": null,
"num_lines": null
} |
"""A module for Jython emulating (a small part of) CPython's multiprocessing.
With this, pygrametl can be made to use multiprocessing, but actually use
threads when used from Jython (where there is no GIL).
"""
# Copyright (c) 2011-2014, Aalborg University (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from threading import Thread
from pygrametl.jythonsupport import Value
# Needed for both pip2 and pip3 to be supported
try:
from Queue import Queue
except ImportError:
from queue import Queue
# NOTE: This module is made for Jython.
# Module metadata.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '2.3'
# Public API: thread-backed stand-ins for the multiprocessing names pygrametl
# uses; Queue and Value are re-exported from the imports above.
__all__ = ['JoinableQueue', 'Process', 'Queue', 'Value']
class Process(Thread):
    """Thread-backed stand-in for multiprocessing.Process (Jython has no GIL).

    No separate OS process is created, so `pid` is a fixed placeholder. The
    `daemon` and `name` properties delegate to the camelCase Thread accessor
    methods (isDaemon/setDaemon, getName/setName).
    """
    # No real child process exists under this emulation.
    pid = '<n/a>'
    daemon = property(Thread.isDaemon, Thread.setDaemon)
    name = property(Thread.getName, Thread.setName)
class JoinableQueue(Queue):
    """Queue emulating multiprocessing.JoinableQueue.

    The underlying Queue already provides task_done()/join(); only the
    multiprocessing-style close() is added, and there is nothing to release.
    """

    def close(self):
        """No-op; present solely for multiprocessing API compatibility."""
        pass
| {
"repo_name": "JimHagan/pygrametl",
"path": "pygrametl/jythonmultiprocessing.py",
"copies": "2",
"size": "2189",
"license": "bsd-2-clause",
"hash": -8805146009805427000,
"line_mean": 36.7413793103,
"line_max": 80,
"alpha_frac": 0.7565098218,
"autogenerated": false,
"ratio": 4.18546845124283,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00021285653469561513,
"num_lines": 58
} |
""" A module for Jython emulating (a small part of) CPython's multiprocessing.
With this, pygrametl can be made to use multiprocessing, but actually use threads when used from Jython (where there is no GIL).
"""
# Copyright (c) 2011, Christian Thomsen (chr@cs.aau.dk)
# All rights reserved.
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
# - Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
# - Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
__author__ = "Christian Thomsen"
__maintainer__ = "Christian Thomsen"
__version__ = '0.2.0'
__all__ = ['JoinableQueue', 'Process', 'Queue', 'Value']
import sys
# Guard: this module is Jython-only (and uses Python 2 raise syntax below);
# fail fast with ImportError on any other interpreter.
if not sys.platform.startswith('java'):
    raise ImportError, 'jythonmultiprocessing is made for Jython'
from threading import Thread
from Queue import Queue
from pygrametl.jythonsupport import Value
class Process(Thread):
    """Thread-backed stand-in for multiprocessing.Process (for Jython)."""
    # There is no real OS process behind this "process", so no pid to report.
    pid = '<n/a>'
    # multiprocessing exposes 'daemon' and 'name' as plain attributes, while
    # the threading API here uses getter/setter methods; bridge the two.
    daemon = property(Thread.isDaemon, Thread.setDaemon)
    name = property(Thread.getName, Thread.setName)
class JoinableQueue(Queue):
    """Thread-backed stand-in for multiprocessing.JoinableQueue."""
    def close(self):
        # multiprocessing's queue has close(); an in-process Queue holds no
        # OS resources, so this is intentionally a no-op.
        pass
| {
"repo_name": "lecs/pygrametl",
"path": "jythonmultiprocessing.py",
"copies": "2",
"size": "2161",
"license": "bsd-2-clause",
"hash": 2305804740471828000,
"line_mean": 42.22,
"line_max": 138,
"alpha_frac": 0.7575196668,
"autogenerated": false,
"ratio": 4.220703125,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5978222791800001,
"avg_score": null,
"num_lines": null
} |
""" A module for managing the AXDebug I*Contexts
"""
from . import gateways, axdebug
import pythoncom, win32com.server.util
# Utility function for wrapping object created by this module.
from .util import _wrap, _wrap_remove, trace
from . import adb
class DebugCodeContext(gateways.DebugCodeContext, gateways.DebugDocumentContext):
    """A span of source (character offset + length) inside a code container,
    exposed to the AX debugging engine as IDebugCodeContext and, for simple
    hosts, also as IDebugDocumentContext."""
    # NOTE: We also implement the IDebugDocumentContext interface for Simple Hosts.
    # Thus, debugDocument may be NULL when we have smart hosts - but in that case, we
    # wont be called upon to provide it.
    _public_methods_ = gateways.DebugCodeContext._public_methods_ + \
            gateways.DebugDocumentContext._public_methods_
    _com_interfaces_ = gateways.DebugCodeContext._com_interfaces_ + \
            gateways.DebugDocumentContext._com_interfaces_

    def __init__(self, lineNo, charPos, len, codeContainer, debugSite):
        # NOTE(review): 'len' shadows the builtin; kept as-is since the
        # parameter name is part of the existing call convention.
        self.debugSite = debugSite
        self.offset = charPos
        self.length = len
        self.breakPointState = 0
        self.lineno = lineNo
        gateways.DebugCodeContext.__init__(self)
        self.codeContainer = codeContainer

    def _Close(self):
        # Drop the reference to the debug site so COM reference cycles break.
        self.debugSite = None

    def GetDocumentContext(self):
        if self.debugSite is not None:
            # We have a smart host - let him give it to us.
            return self.debugSite.GetDocumentContextFromPosition(
                self.codeContainer.sourceContext,
                self.offset,
                self.length)
        else:
            # Simple host - Fine - Ill do it myself!
            return _wrap(self, axdebug.IID_IDebugDocumentContext)

    def SetBreakPoint(self, bps):
        # Record the new breakpoint state and notify the debugger core.
        self.breakPointState = bps
        adb.OnSetBreakPoint(self, bps, self.lineno)

    # The DebugDocumentContext methods for simple hosts.
    def GetDocument(self):
        return self.codeContainer.debugDocument

    def EnumCodeContexts(self):
        # A document context maps to exactly this one code context.
        return _wrap(EnumDebugCodeContexts([self]), axdebug.IID_IEnumDebugCodeContexts)
class EnumDebugCodeContexts(gateways.EnumDebugCodeContexts):
    """Enumerator over code contexts; each item is wrapped as a COM object
    exposing IDebugCodeContext."""
    def _wrap(self, obj):
        return _wrap(obj, axdebug.IID_IDebugCodeContext)
| {
"repo_name": "sserrot/champion_relationships",
"path": "venv/Lib/site-packages/win32comext/axdebug/contexts.py",
"copies": "1",
"size": "2171",
"license": "mit",
"hash": -4585348097714519600,
"line_mean": 37.7678571429,
"line_max": 87,
"alpha_frac": 0.673422386,
"autogenerated": false,
"ratio": 4.012939001848429,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.0020644059231655757,
"num_lines": 56
} |
""" A module for managing the AXDebug I*Contexts
"""
import gateways, axdebug
import pythoncom, win32com.server.util
# Utility function for wrapping object created by this module.
from .util import _wrap, _wrap_remove, trace
from . import adb
class DebugCodeContext(gateways.DebugCodeContext, gateways.DebugDocumentContext):
    """A span of source (character offset + length) inside a code container,
    exposed to the AX debugging engine as IDebugCodeContext and, for simple
    hosts, also as IDebugDocumentContext."""
    # NOTE: We also implement the IDebugDocumentContext interface for Simple Hosts.
    # Thus, debugDocument may be NULL when we have smart hosts - but in that case, we
    # wont be called upon to provide it.
    _public_methods_ = gateways.DebugCodeContext._public_methods_ + \
            gateways.DebugDocumentContext._public_methods_
    _com_interfaces_ = gateways.DebugCodeContext._com_interfaces_ + \
            gateways.DebugDocumentContext._com_interfaces_

    def __init__(self, lineNo, charPos, len, codeContainer, debugSite):
        # NOTE(review): 'len' shadows the builtin; kept as-is since the
        # parameter name is part of the existing call convention.
        self.debugSite = debugSite
        self.offset = charPos
        self.length = len
        self.breakPointState = 0
        self.lineno = lineNo
        gateways.DebugCodeContext.__init__(self)
        self.codeContainer = codeContainer

    def _Close(self):
        # Drop the reference to the debug site so COM reference cycles break.
        self.debugSite = None

    def GetDocumentContext(self):
        if self.debugSite is not None:
            # We have a smart host - let him give it to us.
            return self.debugSite.GetDocumentContextFromPosition(
                self.codeContainer.sourceContext,
                self.offset,
                self.length)
        else:
            # Simple host - Fine - Ill do it myself!
            return _wrap(self, axdebug.IID_IDebugDocumentContext)

    def SetBreakPoint(self, bps):
        # Record the new breakpoint state and notify the debugger core.
        self.breakPointState = bps
        adb.OnSetBreakPoint(self, bps, self.lineno)

    # The DebugDocumentContext methods for simple hosts.
    def GetDocument(self):
        return self.codeContainer.debugDocument

    def EnumCodeContexts(self):
        # A document context maps to exactly this one code context.
        return _wrap(EnumDebugCodeContexts([self]), axdebug.IID_IEnumDebugCodeContexts)
class EnumDebugCodeContexts(gateways.EnumDebugCodeContexts):
    """Enumerator over code contexts; each item is wrapped as a COM object
    exposing IDebugCodeContext."""
    def _wrap(self, obj):
        return _wrap(obj, axdebug.IID_IDebugCodeContext)
| {
"repo_name": "ArcherSys/ArcherSys",
"path": "Lib/site-packages/win32comext/axdebug/contexts.py",
"copies": "10",
"size": "2164",
"license": "mit",
"hash": 375585777800011500,
"line_mean": 37.6428571429,
"line_max": 87,
"alpha_frac": 0.6737523105,
"autogenerated": false,
"ratio": 4.0148423005565865,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00277869163745129,
"num_lines": 56
} |
"""A module for manipulating Images, which are specially wrapped Pygame
surfaces.
"""
import pygame
import spyral
import copy
def _new_spyral_surface(size):
    """
    Internal method for creating a new Spyral-compliant Pygame surface.
    """
    # SRCALPHA with 32-bit depth gives a per-pixel-alpha surface; the size
    # components are coerced to int because callers may pass floats
    # (e.g. results of Vec2D arithmetic).
    return pygame.Surface((int(size[0]),
                           int(size[1])),
                          pygame.SRCALPHA, 32).convert_alpha()
def from_sequence(images, orientation="right", padding=0):
    """
    A function that returns a new Image from a list of images by
    placing them next to each other.

    :param images: A list of images to lay out.
    :type images: List of :class:`Image <spyral.Image>`
    :param str orientation: Either 'left', 'right', 'above', 'below', or
                            'square' (square images will be placed in a grid
                            shape, like a chess board).
    :param padding: The padding between each image. Can be specified as a
                    scalar number (for constant padding between all images)
                    or a list (for different paddings between each image).
    :type padding: int or a list of ints.
    :returns: A new :class:`Image <spyral.Image>`
    """
    # Local import: math is only needed for the 'square' layout and the
    # module does not import it at the top level.
    import math
    if orientation == 'square':
        # Lay images out row by row in a roughly square grid.
        length = int(math.ceil(math.sqrt(len(images))))
        # x/y track the current grid position; max_height is the tallest
        # image seen in the current row, used to offset the next row.
        # (These, and `sequence`, were previously used before assignment.)
        x = y = max_height = 0
        sequence = []
        for index, image in enumerate(images):
            if index % length == 0:
                # Start a new row below the tallest image of the previous row.
                x = 0
                y += max_height
                max_height = 0
            else:
                x += image.width
            max_height = max(max_height, image.height)
            sequence.append((image, (x, y)))
    else:
        if orientation in ('left', 'right'):
            selector = spyral.Vec2D(1, 0)
        else:
            selector = spyral.Vec2D(0, 1)
        if orientation in ('left', 'above'):
            # reversed() returns an iterator; the result must be kept
            # (the old bare `reversed(images)` call was a no-op).
            images = list(reversed(images))
        # Normalize padding into one padding value per image. Using duck
        # typing instead of `type(...) in (float, int, long)` also covers
        # other numeric types and avoids the Python-2-only `long`.
        if hasattr(padding, '__iter__'):
            padding = list(padding)
            padding.append(0)
        else:
            padding = [padding] * len(images)
        base = spyral.Vec2D(0, 0)
        sequence = []
        for image, space in zip(images, padding):
            sequence.append((image, base))
            base = base + selector * (image.size + (space, space))
    return from_conglomerate(sequence)
def from_conglomerate(sequence):
    """
    Build a single image out of a sequence of (image, position) pairs.

    The resulting image is just large enough to contain every image at its
    given offset. More explicit and less convenient than
    :func:`from_seqeuence <spyral.image.from_sequence>`.

    :param sequence: A list of (image, position) pairs, where the positions
                     are :class:`Vec2D <spyral.Vec2D>` s.
    :type sequence: List of image, position pairs.
    :returns: A new :class:`Image <spyral.Image>`
    """
    # The canvas must reach the furthest right and bottom edge of any image;
    # an empty sequence yields a 0x0 canvas.
    right_edges = [pos[0] + img.width for img, pos in sequence]
    bottom_edges = [pos[1] + img.height for img, pos in sequence]
    canvas = Image(size=(max(right_edges or [0]), max(bottom_edges or [0])))
    for img, pos in sequence:
        canvas.draw_image(img, pos)
    return canvas
def render_nine_slice(image, size):
    """
    Creates a new image by dividing the given image into a 3x3 grid, and stretching
    the sides and center while leaving the corners the same size. This is ideal
    for buttons and other rectangular shapes.

    :param image: The image to stretch.
    :type image: :class:`Image <spyral.Image>`
    :param size: The new (width, height) of this image.
    :type size: :class:`Vec2D <spyral.Vec2D>`
    :returns: A new :class:`Image <spyral.Image>` similar to the old one.
    """
    bs = spyral.Vec2D(size)
    bw = size[0]
    bh = size[1]
    ps = image.size / 3
    pw = int(ps[0])
    ph = int(ps[1])
    surf = image._surf
    # Hack: If we don't make it one px large things get cut
    image = spyral.Image(size=bs + (1, 1))
    s = image._surf
    # Carve the source into a 3x3 grid of patches. x offsets always use pw
    # and y offsets always use ph; the previous code transposed pw/ph for
    # bottomleft, right and bottomright, which only worked when the source
    # image was square (pw == ph).
    topleft = surf.subsurface(pygame.Rect((0, 0), ps))
    left = surf.subsurface(pygame.Rect((0, ph), ps))
    bottomleft = surf.subsurface(pygame.Rect((0, 2*ph), ps))
    top = surf.subsurface(pygame.Rect((pw, 0), ps))
    mid = surf.subsurface(pygame.Rect((pw, ph), ps))
    bottom = surf.subsurface(pygame.Rect((pw, 2*ph), ps))
    topright = surf.subsurface(pygame.Rect((2*pw, 0), ps))
    right = surf.subsurface(pygame.Rect((2*pw, ph), ps))
    bottomright = surf.subsurface(pygame.Rect((2*pw, 2*ph), ps))
    # corners
    s.blit(topleft, (0, 0))
    s.blit(topright, (bw - pw, 0))
    s.blit(bottomleft, (0, bh - ph))
    s.blit(bottomright, bs - ps)
    # left and right border
    for y in range(ph, bh - ph - ph, ph):
        s.blit(left, (0, y))
        s.blit(right, (bw - pw, y))
    s.blit(left, (0, bh - ph - ph))
    s.blit(right, (bw - pw, bh - ph - ph))
    # top and bottom border
    for x in range(pw, bw - pw - pw, pw):
        s.blit(top, (x, 0))
        s.blit(bottom, (x, bh - ph))
    s.blit(top, (bw - pw - pw, 0))
    s.blit(bottom, (bw - pw - pw, bh - ph))
    # center: tile the middle patch, then fill the remaining partial strips.
    for x in range(pw, bw - pw - pw, pw):
        for y in range(ph, bh - ph - ph, ph):
            s.blit(mid, (x, y))
    for x in range(pw, bw - pw - pw, pw):
        s.blit(mid, (x, bh - ph - ph))
    for y in range(ph, bh - ph - ph, ph):
        s.blit(mid, (bw - pw - pw, y))
    s.blit(mid, (bw - pw - pw, bh - ph - ph))
    return image
class Image(object):
    """
    The image is the basic drawable item in spyral. They can be created
    either by loading from common file formats, or by creating a new
    image and using some of the draw methods. Images are not drawn on
    their own, they are placed as the *image* attribute on Sprites to
    be drawn.

    Almost all of the methods of an Image instance return the Image itself,
    enabling commands to be chained in a
    `fluent interface <http://en.wikipedia.org/wiki/Fluent_interface>`_.

    :param size: If size is passed, creates a new blank image of that size to
                 draw on. If you do not specify a size, you *must* pass in a
                 filename.
    :type size: :class:`Vec2D <spyral.Vec2D>`
    :param str filename: If filename is set, the file with that name is loaded.
                         The appendix has a list of the
                         :ref:`valid image formats<ref.image_formats>`. If you do
                         not specify a filename, you *must* pass in a size.
    """
    def __init__(self, filename=None, size=None):
        if size is not None and filename is not None:
            raise ValueError("Must specify exactly one of size and filename. See http://platipy.org/en/latest/spyral_docs.html#spyral.image.Image")
        if size is None and filename is None:
            raise ValueError("Must specify exactly one of size and filename. See http://platipy.org/en/latest/spyral_docs.html#spyral.image.Image")
        if size is not None:
            self._surf = _new_spyral_surface(size)
            self._name = None
        else:
            self._surf = pygame.image.load(filename).convert_alpha()
            self._name = filename
        # _version is bumped on every mutation so cached derived surfaces
        # (e.g. scaled copies) can be invalidated elsewhere in spyral.
        self._version = 1

    def _get_width(self):
        return self._surf.get_width()
    #: The width of this image in pixels (int). Read-only.
    width = property(_get_width)

    def _get_height(self):
        return self._surf.get_height()
    #: The height of this image in pixels (int). Read-only.
    height = property(_get_height)

    def _get_size(self):
        return spyral.Vec2D(self._surf.get_size())
    #: The (width, height) of the image (:class:`Vec2D <spyral.Vec2D>`).
    #: Read-only.
    size = property(_get_size)

    def fill(self, color):
        """
        Fills the entire image with the specified color.

        :param color: a three-tuple of RGB values ranging from 0-255. Example:
                      (255, 128, 0) is orange.
        :type color: a three-tuple of ints.
        :returns: This image.
        """
        self._surf.fill(color)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def draw_rect(self, color, position, size=None,
                  border_width=0, anchor='topleft'):
        """
        Draws a rectangle on this image.

        :param color: a three-tuple of RGB values ranging from 0-255. Example:
                      (255, 128, 0) is orange.
        :type color: a three-tuple of ints.
        :param position: The starting position of the rect (top-left corner). If
                         position is a Rect, then size should be `None`.
        :type position: :class:`Vec2D <spyral.Vec2D>` or
                        :class:`Rect <spyral.Rect>`
        :param size: The size of the rectangle; should not be given if position
                     is a rect.
        :type size: :class:`Vec2D <spyral.Vec2D>`
        :param int border_width: The width of the border to draw. If it is 0,
                                 the rectangle is filled with the color
                                 specified.
        :param str anchor: The anchor parameter is an
                           :ref:`anchor position <ref.anchors>`.
        :returns: This image.
        """
        if size is None:
            rect = spyral.Rect(position)
        else:
            rect = spyral.Rect(position, size)
        offset = self._calculate_offset(anchor, rect.size)
        pygame.draw.rect(self._surf, color,
                         (rect.pos + offset, rect.size), border_width)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def draw_lines(self, color, points, width=1, closed=False):
        """
        Draws a series of connected lines on a image, with the
        vertices specified by points. This does not draw any sort of
        end caps on lines.

        :param color: a three-tuple of RGB values ranging from 0-255. Example:
                      (255, 128, 0) is orange.
        :type color: a three-tuple of ints.
        :param points: A list of points that will be connected, one to another.
        :type points: A list of :class:`Vec2D <spyral.Vec2D>` s.
        :param int width: The width of the lines.
        :param bool closed: If closed is True, the first and last point will be
                            connected. If closed is True and width is 0, the
                            shape will be filled.
        :returns: This image.
        """
        # Anti-aliased lines only support width 1 in pygame.
        if width == 1:
            pygame.draw.aalines(self._surf, color, closed, points)
        else:
            pygame.draw.lines(self._surf, color, closed, points, width)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def draw_circle(self, color, position, radius, width=0, anchor='topleft'):
        """
        Draws a circle on this image.

        :param color: a three-tuple of RGB values ranging from 0-255. Example:
                      (255, 128, 0) is orange.
        :type color: a three-tuple of ints.
        :param position: The center of this circle
        :type position: :class:`Vec2D <spyral.Vec2D>`
        :param int radius: The radius of this circle
        :param int width: The width of the circle. If it is 0, the circle is
                          filled with the color specified.
        :param str anchor: The anchor parameter is an
                           :ref:`anchor position <ref.anchors>`.
        :returns: This image.
        """
        offset = self._calculate_offset(anchor)
        pygame.draw.circle(self._surf, color, (position + offset).floor(),
                           radius, width)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def draw_ellipse(self, color, position, size=None,
                     border_width=0, anchor='topleft'):
        """
        Draws an ellipse on this image.

        :param color: a three-tuple of RGB values ranging from 0-255. Example:
                      (255, 128, 0) is orange.
        :type color: a three-tuple of ints.
        :param position: The starting position of the ellipse (top-left corner).
                         If position is a Rect, then size should be `None`.
        :type position: :class:`Vec2D <spyral.Vec2D>` or
                        :class:`Rect <spyral.Rect>`
        :param size: The size of the ellipse; should not be given if position is
                     a rect.
        :type size: :class:`Vec2D <spyral.Vec2D>`
        :param int border_width: The width of the ellipse. If it is 0, the
                                 ellipse is filled with the color specified.
        :param str anchor: The anchor parameter is an
                           :ref:`anchor position <ref.anchors>`.
        :returns: This image.
        """
        if size is None:
            rect = spyral.Rect(position)
        else:
            rect = spyral.Rect(position, size)
        offset = self._calculate_offset(anchor, rect.size)
        pygame.draw.ellipse(self._surf, color,
                            (rect.pos + offset, rect.size), border_width)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def draw_point(self, color, position, anchor='topleft'):
        """
        Draws a point on this image.

        :param color: a three-tuple of RGB values ranging from 0-255. Example:
                      (255, 128, 0) is orange.
        :type color: a three-tuple of ints.
        :param position: The position of this point.
        :type position: :class:`Vec2D <spyral.Vec2D>`
        :param str anchor: The anchor parameter is an
                           :ref:`anchor position <ref.anchors>`.
        :returns: This image.
        """
        offset = self._calculate_offset(anchor)
        self._surf.set_at(position + offset, color)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def draw_arc(self, color, start_angle, end_angle,
                 position, size=None, border_width=0, anchor='topleft'):
        """
        Draws an elliptical arc on this image.

        :param color: a three-tuple of RGB values ranging from 0-255. Example:
                      (255, 128, 0) is orange.
        :type color: a three-tuple of ints.
        :param float start_angle: The starting angle, in radians, of the arc.
        :param float end_angle: The ending angle, in radians, of the arc.
        :param position: The starting position of the ellipse (top-left corner).
                         If position is a Rect, then size should be `None`.
        :type position: :class:`Vec2D <spyral.Vec2D>` or
                        :class:`Rect <spyral.Rect>`
        :param size: The size of the ellipse; should not be given if position is
                     a rect.
        :type size: :class:`Vec2D <spyral.Vec2D>`
        :param int border_width: The width of the ellipse. If it is 0, the
                                 ellipse is filled with the color specified.
        :param str anchor: The anchor parameter is an
                           :ref:`anchor position <ref.anchors>`.
        :returns: This image.
        """
        if size is None:
            rect = spyral.Rect(position)
        else:
            rect = spyral.Rect(position, size)
        offset = self._calculate_offset(anchor, rect.size)
        pygame.draw.arc(self._surf, color, (rect.pos + offset, rect.size),
                        start_angle, end_angle, border_width)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def draw_image(self, image, position=(0, 0), anchor='topleft'):
        """
        Draws another image over this one.

        :param image: The image to overlay on top of this one.
        :type image: :class:`Image <spyral.Image>`
        :param position: The position of this image.
        :type position: :class:`Vec2D <spyral.Vec2D>`
        :param str anchor: The anchor parameter is an
                           :ref:`anchor position <ref.anchors>`.
        :returns: This image.
        """
        offset = self._calculate_offset(anchor, image._surf.get_size())
        self._surf.blit(image._surf, position + offset)
        self._version += 1
        spyral.util.scale_surface.clear(self._surf)
        return self

    def rotate(self, angle):
        """
        Rotates the image by angle degrees clockwise. This may change the image
        dimensions if the angle is not a multiple of 90.

        Successive rotations degrate image quality. Save a copy of the
        original if you plan to do many rotations.

        :param float angle: The number of degrees to rotate.
        :returns: This image.
        """
        self._surf = pygame.transform.rotate(self._surf, angle).convert_alpha()
        self._version += 1
        return self

    def scale(self, size):
        """
        Scales the image to the destination size.

        :param size: The new size of the image.
        :type size: :class:`Vec2D <spyral.Vec2D>`
        :returns: This image.
        """
        self._surf = pygame.transform.smoothscale(self._surf,
                                                  size).convert_alpha()
        self._version += 1
        return self

    def flip(self, flip_x=True, flip_y=True):
        """
        Flips the image horizontally, vertically, or both.

        :param bool flip_x: whether to flip horizontally.
        :param bool flip_y: whether to flip vertically.
        :returns: This image.
        """
        self._version += 1
        self._surf = pygame.transform.flip(self._surf,
                                           flip_x, flip_y).convert_alpha()
        return self

    def copy(self):
        """
        Returns a copy of this image that can be changed while preserving the
        original.

        :returns: A new image.
        """
        new = copy.copy(self)
        new._surf = self._surf.copy()
        return new

    def crop(self, position, size=None):
        """
        Removes the edges of an image, keeping the internal rectangle specified
        by position and size.

        :param position: The upperleft corner of the internal rectangle that
                         will be preserved.
        :type position: a :class:`Vec2D <spyral.Vec2D>` or a
                        :class:`Rect <spyral.Rect>`.
        :param size: The size of the internal rectangle to preserve. If a Rect
                     was passed in for position, this should be None.
        :type size: :class:`Vec2D <spyral.Vec2D>` or None.
        :returns: This image.
        """
        if size is None:
            rect = spyral.Rect(position)
        else:
            rect = spyral.Rect(position, size)
        # Bug fix: use rect.size rather than the raw `size` argument. When a
        # Rect is passed as `position`, `size` is None and the old code
        # crashed trying to build a None-sized surface.
        new = _new_spyral_surface(rect.size)
        new.blit(self._surf, (0, 0), (rect.pos, rect.size))
        self._surf = new
        self._version += 1
        return self

    def _calculate_offset(self, anchor_type, size=(0, 0)):
        """
        Internal method for calculating the offset associated with an
        anchor type.

        :param anchor_type: A string indicating the position of the anchor,
                            taken from :ref:`anchor position <ref.anchors>`. A
                            numerical offset can also be specified.
        :type anchor_type: str or a :class:`Vec2D <spyral.Vec2D>`.
        :param size: The size of the region to offset in.
        :type size: :class:`Vec2D <spyral.Vec2D>`.
        """
        w, h = self._surf.get_size()
        w2, h2 = size
        if anchor_type == 'topleft':
            return spyral.Vec2D(0, 0)
        elif anchor_type == 'topright':
            return spyral.Vec2D(w - w2, 0)
        elif anchor_type == 'midtop':
            return spyral.Vec2D((w - w2) / 2., 0)
        elif anchor_type == 'bottomleft':
            return spyral.Vec2D(0, h - h2)
        elif anchor_type == 'bottomright':
            return spyral.Vec2D(w - w2, h - h2)
        elif anchor_type == 'midbottom':
            return spyral.Vec2D((w - w2) / 2., h - h2)
        elif anchor_type == 'midleft':
            return spyral.Vec2D(0, (h - h2) / 2.)
        elif anchor_type == 'midright':
            return spyral.Vec2D(w - w2, (h - h2) / 2.)
        elif anchor_type == 'center':
            return spyral.Vec2D((w - w2) / 2., (h - h2) / 2.)
        else:
            # A numeric offset was given directly.
            return spyral.Vec2D(anchor_type) - spyral.Vec2D(w2, h2)
| {
"repo_name": "danShumway/python_math",
"path": "source/PythonMath.activity/libraries/spyral/spyral/image.py",
"copies": "1",
"size": "20561",
"license": "mit",
"hash": -6710973614415197000,
"line_mean": 38.8468992248,
"line_max": 147,
"alpha_frac": 0.5684062059,
"autogenerated": false,
"ratio": 3.851095710807267,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.49195019167072673,
"avg_score": null,
"num_lines": null
} |
""" A module for mapping operators to their corresponding eigenstates
and vice versa
It contains a global dictionary with eigenstate-operator pairings.
If a new state-operator pair is created, this dictionary should be
updated as well.
It also contains functions operators_to_state and state_to_operators
for mapping between the two. These can handle both classes and
instances of operators and states. See the individual function
descriptions for details.
TODO List:
- Update the dictionary with a complete list of state-operator pairs
"""
from __future__ import print_function, division
from sympy.physics.quantum.cartesian import (XOp, YOp, ZOp, XKet, PxOp, PxKet,
PositionKet3D)
from sympsi.operator import Operator
from sympsi.state import StateBase, BraBase, Ket
from sympsi.spin import (JxOp, JyOp, JzOp, J2Op, JxKet, JyKet,
JzKet)
__all__ = [
'operators_to_state',
'state_to_operators'
]
#state_mapping stores the mappings between states and their associated
#operators or tuples of operators. This should be updated when new
#classes are written! Entries are of the form PxKet : PxOp or
#something like 3DKet : (ROp, ThetaOp, PhiOp)
#frozenset is used so that the reverse mapping can be made
#(regular sets are not hashable because they are mutable
state_mapping = { JxKet: frozenset((J2Op, JxOp)),
                  JyKet: frozenset((J2Op, JyOp)),
                  JzKet: frozenset((J2Op, JzOp)),
                  Ket: Operator,
                  PositionKet3D: frozenset((XOp, YOp, ZOp)),
                  PxKet: PxOp,
                  XKet: XOp }
# Reverse lookup: operator class (or frozenset of classes) -> state class.
op_mapping = dict((v, k) for k, v in state_mapping.items())
def operators_to_state(operators, **options):
    """ Returns the eigenstate of the given operator or set of operators
    A global function for mapping operator classes to their associated
    states. It takes either an Operator or a set of operators and
    returns the state associated with these.
    This function can handle both instances of a given operator or
    just the class itself (i.e. both XOp() and XOp)
    There are multiple use cases to consider:
    1) A class or set of classes is passed: First, we try to
    instantiate default instances for these operators. If this fails,
    then the class is simply returned. If we succeed in instantiating
    default instances, then we try to call state._operators_to_state
    on the operator instances. If this fails, the class is returned.
    Otherwise, the instance returned by _operators_to_state is returned.
    2) An instance or set of instances is passed: In this case,
    state._operators_to_state is called on the instances passed. If
    this fails, a state class is returned. If the method returns an
    instance, that instance is returned.
    In both cases, if the operator class or set does not exist in the
    state_mapping dictionary, None is returned.
    Parameters
    ==========
    arg: Operator or set
        The class or instance of the operator or set of operators
        to be mapped to a state
    Examples
    ========
    >>> from sympsi.cartesian import XOp, PxOp
    >>> from sympsi.operatorset import operators_to_state
    >>> from sympsi.operator import Operator
    >>> operators_to_state(XOp)
    |x>
    >>> operators_to_state(XOp())
    |x>
    >>> operators_to_state(PxOp)
    |px>
    >>> operators_to_state(PxOp())
    |px>
    >>> operators_to_state(Operator)
    |psi>
    >>> operators_to_state(Operator())
    |psi>
    """
    if not (isinstance(operators, Operator)
            or isinstance(operators, set) or issubclass(operators, Operator)):
        raise NotImplementedError("Argument is not an Operator or a set!")
    if isinstance(operators, set):
        # Validate that every member of the set is an Operator (class or
        # instance) before attempting any lookup.
        for s in operators:
            if not (isinstance(s, Operator)
                    or issubclass(s, Operator)):
                raise NotImplementedError("Set is not all Operators!")
        #ops = tuple(operators)
        ops = frozenset(operators)
        if ops in op_mapping: # ops is a list of classes in this case
            #Try to get an object from default instances of the
            #operators...if this fails, return the class
            try:
                op_instances = [op() for op in ops]
                ret = _get_state(op_mapping[ops], set(op_instances), **options)
            except NotImplementedError:
                ret = op_mapping[ops]
            return ret
        else:
            # A set of instances was passed: look up by their classes and
            # hand the instances themselves to the state constructor hook.
            tmp = [type(o) for o in ops]
            classes = frozenset(tmp)
            if classes in op_mapping:
                ret = _get_state(op_mapping[classes], ops, **options)
            else:
                ret = None
            return ret
    else:
        # A single operator was passed, either as a class or an instance.
        if operators in op_mapping:
            try:
                op_instance = operators()
                ret = _get_state(op_mapping[operators], op_instance, **options)
            except NotImplementedError:
                ret = op_mapping[operators]
            return ret
        elif type(operators) in op_mapping:
            return _get_state(op_mapping[type(operators)], operators, **options)
        else:
            # No mapping registered for this operator (class).
            return None
def state_to_operators(state, **options):
    """ Returns the operator or set of operators corresponding to the
    given eigenstate
    A global function for mapping state classes to their associated
    operators or sets of operators. It takes either a state class
    or instance.
    This function can handle both instances of a given state or just
    the class itself (i.e. both XKet() and XKet)
    There are multiple use cases to consider:
    1) A state class is passed: In this case, we first try
    instantiating a default instance of the class. If this succeeds,
    then we try to call state._state_to_operators on that instance.
    If the creation of the default instance or if the calling of
    _state_to_operators fails, then either an operator class or set of
    operator classes is returned. Otherwise, the appropriate
    operator instances are returned.
    2) A state instance is returned: Here, state._state_to_operators
    is called for the instance. If this fails, then a class or set of
    operator classes is returned. Otherwise, the instances are returned.
    In either case, if the state's class does not exist in
    state_mapping, None is returned.
    Parameters
    ==========
    arg: StateBase class or instance (or subclasses)
        The class or instance of the state to be mapped to an
        operator or set of operators
    Examples
    ========
    >>> from sympsi.cartesian import XKet, PxKet, XBra, PxBra
    >>> from sympsi.operatorset import state_to_operators
    >>> from sympsi.state import Ket, Bra
    >>> state_to_operators(XKet)
    X
    >>> state_to_operators(XKet())
    X
    >>> state_to_operators(PxKet)
    Px
    >>> state_to_operators(PxKet())
    Px
    >>> state_to_operators(PxBra)
    Px
    >>> state_to_operators(XBra)
    X
    >>> state_to_operators(Ket)
    O
    >>> state_to_operators(Bra)
    O
    """
    if not (isinstance(state, StateBase) or issubclass(state, StateBase)):
        raise NotImplementedError("Argument is not a state!")
    if state in state_mapping: # state is a class
        state_inst = _make_default(state)
        try:
            ret = _get_ops(state_inst,
                           _make_set(state_mapping[state]), **options)
        except (NotImplementedError, TypeError):
            ret = state_mapping[state]
    elif type(state) in state_mapping:
        # state is an instance of a mapped ket class.
        ret = _get_ops(state,
                       _make_set(state_mapping[type(state)]), **options)
    elif isinstance(state, BraBase) and state.dual_class() in state_mapping:
        # Bra instance: map through its dual ket class.
        ret = _get_ops(state,
                       _make_set(state_mapping[state.dual_class()]))
    elif issubclass(state, BraBase) and state.dual_class() in state_mapping:
        # Bra class: instantiate a default and map through the dual ket.
        state_inst = _make_default(state)
        try:
            ret = _get_ops(state_inst,
                           _make_set(state_mapping[state.dual_class()]))
        except (NotImplementedError, TypeError):
            ret = state_mapping[state.dual_class()]
    else:
        ret = None
    return _make_set(ret)
def _make_default(expr):
try:
ret = expr()
except Exception:
ret = expr
return ret
def _get_state(state_class, ops, **options):
# Try to get a state instance from the operator INSTANCES.
# If this fails, get the class
try:
ret = state_class._operators_to_state(ops, **options)
except NotImplementedError:
ret = _make_default(state_class)
return ret
def _get_ops(state_inst, op_classes, **options):
# Try to get operator instances from the state INSTANCE.
# If this fails, just return the classes
try:
ret = state_inst._state_to_operators(op_classes, **options)
except NotImplementedError:
if isinstance(op_classes, (set, tuple, frozenset)):
ret = tuple(map(lambda x: _make_default(x), op_classes))
else:
ret = _make_default(op_classes)
if isinstance(ret, set) and len(ret) == 1:
return ret[0]
return ret
def _make_set(ops):
if isinstance(ops, (tuple, list, frozenset)):
return set(ops)
else:
return ops
| {
"repo_name": "sympsi/sympsi",
"path": "sympsi/operatorset.py",
"copies": "1",
"size": "9384",
"license": "bsd-3-clause",
"hash": 2453246635343243300,
"line_mean": 32.5142857143,
"line_max": 80,
"alpha_frac": 0.6343776641,
"autogenerated": false,
"ratio": 4.108581436077058,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5242959100177058,
"avg_score": null,
"num_lines": null
} |
"""A module for networking related utilities."""
import random
import socket
def get_local_ip():
    """
    Get the local IP address.
    Opens a UDP socket "towards" a public address (no packet is sent)
    and reads the local address the OS chose for it; falls back to the
    loopback address if that fails.
    Equivalent to https://github.com/stencila/executa/blob/753207cb31298578497d978265c718e20b583a05/src/tcp/util.ts#L15
    Thanks to https://stackoverflow.com/questions/166506/finding-local-ip-addresses-using-pythons-stdlib
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    try:
        sock.connect(("8.8.8.8", 80))
        return sock.getsockname()[0]
    except Exception:
        return "127.0.0.1"
    finally:
        sock.close()
def get_random_port():
    """
    Get a random port from the local port range.
    Binds to port 0 so the OS picks a free port; if that fails for any
    reason, fall back to a random value from the unprivileged range.
    Thanks to https://unix.stackexchange.com/questions/55913/whats-the-easiest-way-to-find-an-unused-local-port
    """
    sock = socket.socket(socket.AF_INET, socket.SOCK_STREAM)
    try:
        sock.bind(("", 0))
        return sock.getsockname()[1]
    except Exception:
        return random.randint(1024, 65535)
    finally:
        sock.close()
| {
"repo_name": "stencila/hub",
"path": "worker/util/network.py",
"copies": "1",
"size": "1130",
"license": "apache-2.0",
"hash": 957746954897078000,
"line_mean": 26.5609756098,
"line_max": 119,
"alpha_frac": 0.6451327434,
"autogenerated": false,
"ratio": 3.313782991202346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9455649184079699,
"avg_score": 0.0006533101045296167,
"num_lines": 41
} |
"""A module for parsing a query and searching for TF2 items
It supports class based search, eg: 'engineer hats'
It also supports alias based search, eg: 'engi hats'
Regular search, eg: 'Meet the Medic'
Slot search, eg: 'primary weps'
Set search, eg: 'the saharan spy set'
Price search, eg: 'unique > 1 ref hat'
Price visualization, eg: '2.66 ref'
Price conversion, eg: '1.5 keys to ref'
Requires TF2 API module to get items and prices.
Note: You must provide your own image URLs for paint cans and blueprints.
Replace the relative URLs in createitemdict and _parseblueprints.
"""
import re
import json
import asyncio
from fractions import Fraction
from collections import namedtuple, defaultdict, OrderedDict
import tf2api
# Matches a currency denomination word, singular or plural, including
# abbreviations: buds/earbuds, key(s), ref(ined), rec(laimed), scrap(s),
# wep/weap(on)(s).
DENOMREGEX = (r'((?:earb|b)uds?|'
              'keys?|'
              'ref(?:ined|s)?|'
              'rec(?:laimed|s)?|'
              'scraps?|'
              'wea?p(?:on)?s?)')
# Matches an amount followed by a denomination, e.g. '2.5 keys', '1 ref'.
PRICEREGEX = (r'(?:(\d+(?:\.\d+)?) ?{})'.format(DENOMREGEX))
# Matches an item quality name from the schema (lowercased) or one of the
# extra aliases: collector(s), dirty, uncraft(able).
QUALITYREGEX = r'({}|collector|collectors|dirty|uncraft(?:able)?)'.format(
    '|'.join(i.lower() for i in tf2api.getallqualities().values()))
async def gettf2info(apikey, backpackkey, tradekey, blueprintsfilename):
    """Return a named tuple which contains information from multiple sources
    about TF2 items.

    Params:
        apikey: Steam Web API key (schema, store prices, bundles).
        backpackkey: backpack.tf API key.
        tradekey: trade.tf API key.
        blueprintsfilename: path to a JSON file of blueprint data.
    """
    # Schema and store prices are independent of each other; fetch both
    # concurrently.
    schema, storeprices = await asyncio.gather(
        tf2api.getschema(apikey),
        tf2api.getstoreprices(apikey)
    )
    items = tf2api.getitems(schema)
    itemsbyname = tf2api.getitemsbyname(schema)
    itemsets = tf2api.getitemsets(schema)
    attributes = tf2api.getattributes(schema)
    effects = tf2api.getparticleeffects(schema)
    newstoreprices = tf2api.getnewstoreprices(storeprices)
    # These three depend on the data fetched above but not on each other,
    # so they can also run concurrently.
    bundles, backpackprices, tradeprices = await asyncio.gather(
        tf2api.getbundles(apikey, storeprices),
        tf2api.getbackpackprices(backpackkey, items, itemsbyname),
        tf2api.gettradeprices(tradekey, items, itemsbyname)
    )
    with open(blueprintsfilename, encoding='utf-8') as f:
        data = json.loads(f.read())
    blueprints = _parseblueprints(data, itemsbyname)
    fields = ('items itemsbyname itemsets attributes effects '
              'blueprints storeprices newstoreprices bundles '
              'backpackprices tradeprices')
    TF2Info = namedtuple('TF2Info', fields)
    return TF2Info(items, itemsbyname, itemsets, attributes, effects,
                   blueprints, storeprices, newstoreprices, bundles,
                   backpackprices, tradeprices)
def getitemsdict(tf2info):
    """Return an ordered dictionary mapping each item index to the
    itemdict built by createitemdict, preserving schema order."""
    return OrderedDict(
        (index, createitemdict(index, tf2info)) for index in tf2info.items)
def search(query, itemsdict, nametoindexmap, itemsets, bundles, pricesource):
    """This function parses the query using parseinput and gets all the
    items that match it. It returns a list of dicts obtained from
    getsearchresult.

    Supported query forms (checked roughly in this order): class/tag
    search, 'sets', '<name> set', price search/filter, space-separated
    item indexes, and plain word search.

    Params:
        query: raw user query string.
        itemsdict: index -> itemdict mapping (see getitemsdict).
        nametoindexmap: item name -> defindex mapping.
        itemsets: item set name -> set data mapping.
        bundles: store bundle data.
        pricesource: key into itemdict['marketprice'] ('backpack.tf'
                     or 'trade.tf').
    """
    input_ = parseinput(query)
    query = input_['query']
    querylist = input_['querylist']
    classes = input_['classes']
    tags = input_['tags']
    if not querylist:
        return []
    # Check if searching for an item set
    itemsetmatch = re.match(r'(.+) [sS]et$', query)
    # Check if searching by price
    # Matches this - {quality}{{criteria}{amount}{denom}} {classes|tags}
    pricematch = re.match(r'{}?(?: ?(<|>|=)? ?{})?((?: [0-9a-z]+)*)$'.format(
        QUALITYREGEX, PRICEREGEX), query.lower()) if query else None
    # Get classes and tags in price search, if any
    if pricematch:
        words = pricematch.group(5)
        priceinput = parseinput(words or '')
        priceclasses, pricetags = priceinput['classes'], priceinput['tags']
        if priceclasses or pricetags:
            results = _classtagsearch(priceclasses, pricetags, itemsdict)
        elif words:
            # Trailing words that are neither classes nor tags mean this
            # was not a price query after all.
            pricematch = None
    # Check if searching for specific indexes
    indexmatch = re.match(r'\d+( \d+)*$', query)
    indexes = query.split() if indexmatch else []
    if classes or tags:
        results = _classtagsearch(classes, tags, itemsdict)
    elif query == 'sets':
        # Get all the item sets and their items
        results = _itemsetsearch(None, itemsets, nametoindexmap, itemsdict)
    elif itemsetmatch:
        # Search for a particular item set or bundle and list its items.
        # Try bundles, then named updates, then item sets.
        query = itemsetmatch.group(1).lower()
        result = _bundlesearch(query, bundles, nametoindexmap, itemsdict)
        if not result:
            result = _updatesearch(query, itemsdict)
        if not result:
            result = _itemsetsearch(query, itemsets, nametoindexmap, itemsdict)
        results = [result] if result else []
    elif pricematch:
        quality = (pricematch.group(1) or 'unique').capitalize()
        criteria = pricematch.group(2)
        amount = pricematch.group(3)
        if amount:
            amount = float(amount)
        denom = _getdenom(pricematch.group(4) or '')
        if priceclasses or pricetags:
            # Remember the class/tag title so it can be appended to the
            # price filter's title below.
            title = results[0]['title'] if results else None
        else:
            title = None
            results = [getsearchresult(items=itemsdict.values())]
        _pricefilter(quality, criteria, amount, denom, results, pricesource)
        if results and title:
            results[0]['title'] = '{} — {}'.format(results[0]['title'], title)
    elif len(indexes) > 1:
        # Searching for specific indexes
        items = []
        for index in indexes:
            index = int(index)
            if index in itemsdict:
                items.append(itemsdict[index])
        results = [getsearchresult(items=items)] if items else []
    else:
        # Regular word search
        result = _wordsearch(query, querylist, itemsdict)
        results = [result] if result else []
        # Check if there's a match between an item set name and query
        results.extend(_itemsetsearch(querylist, itemsets,
                                      nametoindexmap, itemsdict))
    return results
def visualizeprice(query, itemsdict, pricesource):
    """Return a list of items representing a price if parsed from the query.

    Handles queries like '2.66 ref' (visualize) and '1.5 keys to ref'
    (convert). Returns [] when the query is not a price expression.
    """
    query = parseinput(query)['query']
    # Matches '<amount> <denom>' with an optional 'in <denom>'/'to <denom>'
    pricevizmatch = re.match(
        r'{}(?: (?:in|to) {})?$'.format(PRICEREGEX, DENOMREGEX),
        query.lower())
    if pricevizmatch:
        amount = pricevizmatch.group(1)
        denom = _getdenom(pricevizmatch.group(2))
        todenom = _getdenom(pricevizmatch.group(3) or '')
        items = _getpriceasitems(amount, denom, todenom,
                                 itemsdict, pricesource)
        titlelist = [_getpricestring(item['count'], item['denom'])
                     for item in items]
        title = ' + '.join(titlelist)
        # Prepend the original amount unless the result is already a
        # single entry in the same denomination (e.g. '2 keys').
        if not (len(items) == 1 and denom == items[0]['denom']):
            title = '{} = {}'.format(_getpricestring(float(amount), denom),
                                     title)
        return [getsearchresult(title, 'price', items)] if items else []
def createitemdict(index, tf2info):
    """Take a TF2 item and return a custom dict with a limited number of
    keys that are used for search.

    Params:
        index: the item's defindex in the schema.
        tf2info: the TF2Info named tuple from gettf2info.
    """
    item = tf2info.items[index]
    name = item['item_name']
    classes = tf2api.getitemclasses(item)
    attributes = tf2api.getitemattributes(item,
                                          tf2info.attributes, tf2info.effects)
    storeprice = tf2api.getstoreprice(item, tf2info.storeprices)
    backpackprice = tf2api.getmarketprice(item, tf2info.backpackprices)
    tradeprice = tf2api.getmarketprice(item, tf2info.tradeprices)
    tags = tf2api.getitemtags(item)
    # Sort blueprints by crafting chance
    blueprint = sorted(tf2info.blueprints[index],
                       key=lambda k: k['chance'], reverse=True)
    description = ''
    if 'bundle' in tags and storeprice:
        # Bundle descriptions mix free text and item names; split them so
        # the UI can render them separately ('---' is the separator).
        descriptions = tf2info.bundles[index]['descriptions']
        text = []
        items = []
        # descriptions is a dict keyed by stringified indexes '0', '1', ...
        for i in range(len(descriptions)):
            key = str(i)
            value = descriptions[key]['value']
            if value in tf2info.itemsbyname:
                items.append(value)
            else:
                text.append(value)
        description = '{}---{}'.format('\n'.join(text), '\n'.join(items))
    elif 'item_description' in item:
        description = item['item_description']
        if 'bundle' in tags and name in tf2info.itemsets:
            description += '---' + '\n'.join(tf2info.itemsets[name]['items'])
    # Collapse min/max item level into 'Level N Type' or 'Level N-M Type'
    # (OrderedDict.fromkeys removes the duplicate when min == max).
    levels = OrderedDict.fromkeys(
        str(item[i]) for i in ('min_ilevel', 'max_ilevel'))
    level = 'Level {} {}'.format('-'.join(levels), item['item_type_name'])
    # Rewrite image URLs to the HTTPS CDN host
    image, image_large = (url and url.replace(
        'http://media.steampowered.com', 'https://steamcdn-a.akamaihd.net'
    ) for url in (item['image_url'], item['image_url_large']))
    itemdict = {'index': index,
                'name': name,
                'image': image,
                'image_large': image_large,
                'description': description,
                'level': level,
                'attributes': attributes,
                'classes': classes,
                'tags': tags,
                'storeprice': storeprice,
                'marketprice': {'backpack.tf': backpackprice,
                                'trade.tf': tradeprice},
                'blueprints': blueprint}
    if 'paint' in tags:
        # Use a local image showing the actual paint colour
        paintvalue = item['attributes'][0]['value']
        # Ignore Paint Tool
        if paintvalue != 0:
            itemdict['image'] = itemdict['image_large'] = (
                '/images/paints/Paint_Can_{}.png'.format(paintvalue))
    return itemdict
def getsearchresult(title='', type='', items=None):
    """Return a dict containing a group of items used for search results."""
    if items is None:
        items = []
    return {'title': title, 'type': type, 'items': items}
def getclasstagtitle(classes, tags):
    """Return a title describing a class/tag search.

    Params:
        classes: set of TF2 class names, sorted in canonical class order.
        tags: set of item tags, shown title-cased.

    Returns '' when both sets are empty (previously this raised
    UnboundLocalError because no branch assigned the title).
    """
    all_classes = list(tf2api.getallclasses().keys())
    classes_text = ', '.join(sorted(classes, key=all_classes.index))
    tags_text = ', '.join(sorted(tags)).title()
    if len(classes) == 1 and len(tags) == 1:
        return f'{classes_text} {tags_text}'
    if classes and tags:
        return f'{classes_text} × {tags_text}'
    if classes:
        return classes_text
    # Tags only, or '' when both sets are empty.
    return tags_text
def isvalidresult(itemdict, strict=True):
    """Check if item has an image, is not a duplicate and is not bundle junk.
    If strict is True, competition medals also return False."""
    obsolete = tf2api.getobsoleteindexes()
    ok = (itemdict['image'] and
          itemdict['index'] not in obsolete and
          not itemdict['name'].startswith('TF_Bundle'))
    if strict and ok:
        ok = 'tournament' not in itemdict['tags']
    return ok
def parseinput(query):
    """Parse a search query and return a dict to be used in search function.

    Returns a dict with keys:
        'query': the stripped query ('' for a quoted exact search),
        'querylist': lowercase words with stopwords removed (the quoted
                     phrase as a single entry for exact search),
        'classes': set of TF2 class names the query consists of,
        'tags': set of item tags the query consists of.

    classes/tags are only populated when EVERY word is a class or tag,
    to avoid misreading queries like 'meet the medic taunt'.
    """
    classes = set()
    tags = set()
    query = query.strip()
    if query.startswith('"') and query.endswith('"'):
        # Quoted query: exact phrase search
        querylist = [query.strip('"')]
        query = ''
    else:
        # Split on non-word characters and drop common stopwords
        querylist = [i for i in _splitspecial(foldaccents(query)) if i not in
                     ('the', 'a', 'of', 's')]
    for word in querylist:
        class_ = _getclass(word)
        tag = _gettag(word)
        if class_:
            classes.add(class_)
        elif tag:
            tags.add(tag)
    # Simple check to differentiate between word and class/tag search
    # Avoids conflicts such as 'meet the medic taunt'
    if (len(tags) + len(classes)) != len(querylist):
        classes = tags = set()
    return {'query': query, 'querylist': querylist,
            'classes': classes, 'tags': tags}
def foldaccents(string):
    """Fold the accented characters that occur in TF2 item names
    (ä, é, ò, ü, Ü) into their plain ASCII equivalents."""
    return string.translate(str.maketrans('äéòüÜ', 'aeouU'))
def _classtagsearch(classes, tags, itemsdict):
    """Search for items that match classes and tags.

    Returns up to three search-result groups, in order: items matching
    the requested class(es) exactly, multi-class items, and all-class
    items.
    """
    results = defaultdict(list)
    names = set()
    title = getclasstagtitle(classes, tags)
    titles = [title, 'Multi-Class Items', 'All-Class Items']
    # Check if the user is searching for tournament medals
    hidemedals = 'tournament' not in tags
    # Check if the weapon tag is specified (eg. primary, melee)
    hasweapontag = not tags.isdisjoint(tf2api.getweapontags())
    for itemdict in itemsdict.values():
        itemclasses = itemdict['classes']
        itemtags = itemdict['tags']
        # Gives a match if there's an intersection between the item's
        # classes and the parsed classes in the query. Also gives a match
        # if the item doesn't have any classes specified (all-class item)
        isclassmatch = (not classes.isdisjoint(itemclasses) or
                        not itemclasses)
        if hasweapontag:
            # This avoids showing slot tokens when searching for
            # 'primary weapon', 'melee weapon', etc.
            istagmatch = tags.issubset(itemtags)
        else:
            istagmatch = not tags.isdisjoint(itemtags)
        if (isclassmatch or not classes) and (istagmatch or not tags):
            name = itemdict['name']
            # Don't show tournament medals unless explicitly searched
            if isvalidresult(itemdict, hidemedals) and name not in names:
                # Bucket by how class-specific the item is
                if len(itemclasses) == 1:
                    results[titles[0]].append(itemdict)
                elif len(itemclasses) > 1:
                    results[titles[1]].append(itemdict)
                else:
                    results[titles[2]].append(itemdict)
                names.add(name)
    results = [getsearchresult(title, items=items)
               for title, items in results.items()]
    # Keep the groups in the fixed order defined by `titles`
    results.sort(key=lambda k: titles.index(k['title']))
    return results
def _wordsearch(query, querylist, itemsdict):
    """Search for items whose names match query.

    Two modes: when `query` is non-empty, match on word overlap (plus a
    substring match for queries longer than 2 chars); when `query` is ''
    (quoted exact search), match the single phrase in querylist[0] on a
    word boundary.
    """
    items = []
    names = set()
    if query:
        # Match singular and plural forms of every query word
        querylist = set(querylist + _pluralize(querylist))
    else:
        pattern = r'\b{}\b'.format(querylist[0])
    for itemdict in itemsdict.values():
        name = foldaccents(itemdict['name'])
        if query:
            wordmatch = not querylist.isdisjoint(_splitspecial(name))
        else:
            # Check the exact-cased and lowercased name
            wordmatch = (re.search(pattern, name) or
                         re.search(pattern, name.lower()))
        stringmatch = (len(query) > 2 and
                       (query in name or query in name.lower()))
        match = wordmatch or stringmatch
        if match and isvalidresult(itemdict, False):
            if name not in names:
                items.append(itemdict)
                names.add(name)
    if items:
        return getsearchresult(
            items=_getsorteditemlist(items, querylist, query))
def _bundlesearch(query, bundles, nametoindexmap, itemsdict):
    """Find the store bundle whose name equals query (case-insensitive)
    and return a search result listing its items, or None."""
    for bundle in bundles.values():
        if bundle['name'].lower() != query:
            continue
        bundleitems = _getbundleitems(bundle, nametoindexmap, itemsdict)
        return getsearchresult(bundle['name'], 'bundle', bundleitems)
def _itemsetsearch(query, itemsets, nametoindexmap, itemsdict):
    """Search for item sets whose names match query.

    query may be None (return every set), a list of words (return all
    sets sharing a word with the list), or a lowercase string (return
    the single set with that exact name, or None).
    """
    results = []
    getall = True
    if query is None:
        isresult = lambda name: True
    elif type(query) == list:
        isresult = lambda name: not set(_splitspecial(name)).isdisjoint(query)
    else:
        isresult = lambda name: name.lower() == query
        getall = False
    for setname, itemset in itemsets.items():
        if isresult(setname):
            items = _getsetitems(itemset, nametoindexmap, itemsdict)
            result = getsearchresult(setname, 'set', items)
            if getall:
                results.append(result)
            else:
                # Exact-name search: return the single match immediately
                return result
    if getall:
        return results
def _updatesearch(query, itemsdict):
    """Return a search result for a named game update, or None.

    Currently only the 'Jungle Inferno' update is supported; its item
    indexes are hard-coded below.
    """
    if query == 'jungle inferno':
        indexes = [
            30876, 30843, 30844, 30842, 30845, 1182, 1183, 30890, 30888, 30889,
            30896, 30899, 30898, 30897, 30902, 30901, 30903, 30900, 30905,
            30904, 1188, 30914, 30912, 30911, 30910, 1189, 1187, 30913, 30908,
            30909, 30916, 30891, 30893, 30892, 30894, 30895, 30884, 30886,
            30885, 30887, 30879, 30881, 30877, 30882, 1186, 30915, 30880,
            30878, 1185, 30883, 5871, 5873, 5868, 5885, 5884, 5883, 5882, 5875,
            5877, 5869, 1178, 1180, 1181, 1179, 1190
        ]
        items = [itemsdict[i] for i in indexes]
        return getsearchresult('Jungle Inferno', 'update', items)
def _pricefilter(quality, criteria, amount, denom, results, pricesource):
    """Search for items by price based on criteria.

    Filters each result group in `results` IN PLACE, keeping items of
    the given quality whose price satisfies `criteria` ('<', '>' or
    equality) against `amount` in `denom`. When amount is None, every
    item priced in that quality is kept. Also rewrites results[0]'s
    title to describe the filter.
    """
    if not results:
        return
    getall = amount is None
    # Normalize quality aliases to the names used in the price data
    if quality in ('Collector', 'Collectors'):
        quality = "Collector's"
    if quality in ('Uncraft', 'Dirty'):
        quality = 'Uncraftable'
    results[0]['title'] = '{}: {} {}'.format(
        quality, criteria or '',
        _getpricestring(amount, denom) if not getall else 'Any')
    for idx, result in enumerate(results):
        items = []
        for itemdict in result['items']:
            price = itemdict['marketprice'][pricesource]
            if quality not in price:
                continue
            elif getall:
                items.append(itemdict)
                continue
            price = price[quality]
            # Price strings are either 'X Denom' or 'X - Y Denom'
            p = price.split()
            valuelow = float(p[0])
            valuehigh = float(p[2]) if len(p) == 4 else valuelow
            pricedenom = p[-1].rstrip('s').replace('Bud', 'Earbuds')
            if denom != pricedenom:
                continue
            # A match on either end of the price range counts
            if criteria == '<':
                match = valuelow < amount or valuehigh < amount
            elif criteria == '>':
                match = valuelow > amount or valuehigh > amount
            else:
                match = valuelow == amount or valuehigh == amount
            if match:
                items.append(itemdict)
        if items:
            results[idx]['items'] = items
        else:
            # Mark empty groups for removal below
            results[idx] = None
    results[:] = [result for result in results if result]
def _getsetitems(itemset, nametoindexmap, itemsdict):
    """Get a list of the items in an item set.

    The replace chain maps the names used in the schema's set data to
    the actual item names (the schema uses outdated/internal names for
    these items — presumably kept in sync with game updates by hand).
    """
    setitems = []
    for name in itemset['items']:
        name = (name.replace('The ', '')
                .replace("Capone's Capper", "Capo's Capper")
                .replace('Conspiratorial Cut', 'Cranial Conspiracy')
                .replace('Hundekopf', 'Hundkopf')
                .replace('Skinless Slashers', 'Scaly Scrapers')
                .replace('Transylvanian Toupe', 'Transylvania Top')
                .replace('Yeti_Head', 'Kathman-Hairdo')
                .replace('Yeti_Arms', 'Himalayan Hair Shirt')
                .replace('Yeti_Legs', 'Abominable Snow Pants'))
        setitems.append(itemsdict[nametoindexmap[name]])
    return setitems
def _getbundleitems(bundle, nametoindexmap, itemsdict):
"""Get a list of the items in a bundle"""
bundleitems = []
descriptions = bundle['descriptions']
for i in range(len(descriptions)):
key = str(i)
value = descriptions[key]['value']
if value in nametoindexmap:
bundleitems.append(itemsdict[nametoindexmap[value]])
return bundleitems
def _getsorteditemlist(itemslist, querylist, query):
    """Return sorted itemlist based on the intersection between the
    search query words and each item's name. Items without a word
    intersection are sorted based on where the query is found in their
    names."""
    querywords = set(querylist)
    lowerquery = query.lower()

    def rank(itemdict):
        name = itemdict['name']
        overlap = len(querywords.intersection(_splitspecial(name)))
        # When no words overlap, fall back to (negated) substring position
        return overlap or -name.lower().find(lowerquery)

    return sorted(itemslist, key=rank, reverse=True)
def _getpriceasitems(amount, denom, todenom, itemsdict, pricesource):
    """Return a list of itemdicts that visualize a given price and a dict
    with the count of each item.

    Converts `amount` of `denom` into `todenom` (or, when todenom is
    falsy, into the highest denomination with a value >= 1), then greedily
    breaks the remainder down through the lower denominations.
    """
    items = []
    amount = _correctprice(amount, denom)
    denomtoidx = tf2api.getalldenoms()
    denoms = tuple(denomtoidx.keys())
    denomtable = _getdenomvalues(itemsdict, pricesource)
    if todenom:
        amount *= denomtable[denom][todenom]
    else:
        todenom = denom
        # Move to the highest possible denomination
        for d in denoms:
            value = amount * denomtable[denom][d]
            if value >= 1:
                amount = value
                todenom = d
                break
    # Cap to a sane number of displayed items
    if amount <= 4000:
        denomidx = denoms.index(todenom)
        # Get count of each denomination and add items to results
        for i, d in enumerate(denoms[denomidx:], denomidx):
            # round() at high precision absorbs float noise before int()
            count = int(round(amount, 10))
            if count:
                items.append({'item': itemsdict[denomtoidx[d]],
                              'denom': d,
                              'count': count})
            if i + 1 < len(denoms):
                # Carry the fractional remainder into the next denomination
                amount = (amount - count) * denomtable[d][denoms[i + 1]]
    return items
def _getpricestring(amount, denom):
"""Return a human-readable price string"""
return '{:g} {}'.format(
amount,
denom + 's' if denom in ('Key', 'Weapon') and amount != 1 else denom)
def _getdenomvalues(itemsdict, pricesource):
    """Return a mapping to convert between denominations.

    Produces a full 2-D table: table[a][b] is the multiplier that
    converts an amount in denomination `a` into denomination `b`.
    Earbuds->Key and Key->Refined rates come from live market prices;
    the metal rates (Refined/Reclaimed/Scrap/Weapon) are fixed.
    """
    denomtoidx = tf2api.getalldenoms()
    denoms = tuple(denomtoidx.keys())
    # Market price of one unit of `denom`, expressed in the next-lower
    # denomination (snapped to an exact fraction by _correctprice).
    getprice = lambda denom: _correctprice(
        itemsdict[denomtoidx[denom]]['marketprice'][pricesource]['Unique']
        .split()[0], denoms[denoms.index(denom) + 1])
    # Seed the table with adjacent-denomination rates only
    table = {'Earbuds': {'Key': getprice('Earbuds')},
             'Key': {'Refined': getprice('Key')},
             'Refined': {'Reclaimed': 3.0},
             'Reclaimed': {'Scrap': 3.0},
             'Scrap': {'Weapon': 2.0},
             'Weapon': {}}
    def fill(from_, to=None, value=1):
        # Recursively propagate the rate from `from_` down the chain of
        # denominations, filling both directions (a->b and b->a).
        if to is None:
            to = from_
        table[from_][to] = value
        table[to][from_] = 1 / value
        if denoms.index(to) + 1 < len(denoms):
            next_ = denoms[denoms.index(to) + 1]
            fill(from_, next_, value * table[to][next_])
    for denom in denoms:
        fill(denom)
    return table
def _correctprice(amount, denom):
limits = {'Refined': 18, 'Reclaimed': 6, 'Scrap': 2, 'Weapon': 1}
if denom in limits:
if '.' in amount:
count, fraction = amount.split('.')
# Check if it's a repeating decimal
if len(fraction) > 1 and len(set(fraction)) == 1:
# Increase precision
amount = '.'.join([count, fraction[0] * 4])
amount = Fraction(amount).limit_denominator(limits[denom])
else:
amount = float(amount)
return amount
def _pluralize(wordlist):
"""Take a list of words and return a list of their plurals"""
return [i + 's' for i in wordlist]
def _splitspecial(string):
"""Convert a string to lowercase and split it at special characters"""
return [i for i in re.split(r'\W+', string.lower()) if i]
def _getclass(word):
    """Return the TF2 class whose name or alias matches word, else None."""
    capitalized = word.capitalize()
    for name, aliases in tf2api.getallclasses().items():
        if capitalized == name or capitalized in aliases:
            return name
def _gettag(word):
    """Parse a word and return an item tag if it matches one.

    Handles aliases first ('watch(es)' -> 'pda2', 'wep'/'weap' and
    their plurals -> 'weapon'), then checks the canonical tag list,
    accepting singular and plural forms.
    """
    weapon = ('wep', 'weap')
    if word in ('watch', 'watches'):
        return 'pda2'
    elif word in weapon or word in _pluralize(weapon):
        return 'weapon'
    for tag in tf2api.getalltags():
        if word in (tag, tag + 's'):
            return tag
def _getdenom(word):
    """Parse a word and return a price denomination if it matches one.

    Matches by abbreviated prefix ('bud', 'key', 'ref', 'rec', 'scrap',
    'we') anywhere in the lowercased word; returns the full denomination
    name from tf2api.getalldenoms(), or None.
    """
    denomslist = ('bud', 'key', 'ref', 'rec', 'scrap', 'we')
    # Map each abbreviation to the full name (relies on getalldenoms()
    # returning denominations in this same order)
    denoms = dict(zip(denomslist, tf2api.getalldenoms().keys()))
    hasdenom = re.search('|'.join(denomslist), word.lower())
    if hasdenom:
        return denoms[hasdenom.group(0)]
def _parseblueprints(blueprints, itemsbyname):
    """Parse a dictionary of blueprint descriptions.

    Returns a defaultdict mapping a craftable item's defindex to a list
    of blueprint dicts ({'chance', 'required'}), where each required
    ingredient carries a display name, image URL, count, and (when it is
    a real item) its defindex. Placeholder ingredient names ('Any X')
    are mapped to representative items or local token images.
    """
    url = '/images/items/'
    # Placeholders rendered with local token images
    localrepl = {'Any Class Token': 'class_token.png',
                 'Any Slot Token': 'slot_token.png',
                 'Any Token': 'token.png'}
    # Placeholders rendered with a representative item's image
    repl = {"Any Santa's Little Accomplice Weapon":
            "Santa's Little Accomplice Bundle",
            'Any Primary Weapon': 'Rocket Launcher',
            'Any Secondary Weapon': 'Pistol',
            'Any Melee Weapon': 'Fire Axe',
            'Any Spy Watch': 'Invis Watch',
            'Any Hat': 'Modest Pile of Hat',
            'Any Burned Item': 'Burned Banana Peel',
            'Any Cursed Object': 'Voodoo-Cursed Object'}
    polyweps = ("The Gas Jockey's Gear", "The Saharan Spy", "The Tank Buster",
                "The Croc-o-Style Kit", "The Special Delivery")
    for class_ in tf2api.getallclasses():
        repl['Any {} Weapon'.format(class_)] = '{} Starter Pack'.format(class_)
    for name in polyweps:
        repl['Any {} Weapon'.format(name)] = name
    for i in ('Victory', 'Moonman', 'Brainiac'):
        pack = "Dr. Grordbort's {} Pack".format(i)
        repl["Any {} Weapon".format(pack)] = pack
    blueprintsdict = defaultdict(list)
    for b in blueprints:
        # Each entry is a (required ingredients, possible results) pair
        required = blueprints[b][0]
        results = blueprints[b][1]
        for name in results:
            if name in itemsbyname:
                index = itemsbyname[name]['defindex']
                # All results of one blueprint are equally likely
                chance = int(round(100.0 / len(results)))
                blueprintlist = []
                # fromkeys deduplicates while preserving order; counts
                # are recovered via required.count(i) below
                for i in OrderedDict.fromkeys(required):
                    blueprintdict = {}
                    if i in localrepl:
                        image = url + localrepl[i]
                    elif i in repl:
                        image = itemsbyname[repl[i]]['image_url']
                    elif i in itemsbyname:
                        item = itemsbyname[i]
                        image = item['image_url']
                        blueprintdict['index'] = item['defindex']
                    else:
                        # Unknown ingredient: placeholder image
                        image = '/images/items/whatsthis.png'
                    blueprintdict['name'] = i
                    blueprintdict['image'] = image.replace(
                        'http://media.steampowered.com',
                        'https://steamcdn-a.akamaihd.net'
                    )
                    blueprintdict['count'] = required.count(i)
                    blueprintlist.append(blueprintdict)
                blueprintsdict[index].append({'chance': chance,
                                              'required': blueprintlist})
    return blueprintsdict
| {
"repo_name": "mohd-akram/item.tf",
"path": "tf2search.py",
"copies": "1",
"size": "27106",
"license": "mit",
"hash": -257663867956901600,
"line_mean": 32.9573934837,
"line_max": 79,
"alpha_frac": 0.5826629272,
"autogenerated": false,
"ratio": 3.8989928057553955,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9981070325398247,
"avg_score": 0.00011708151142972374,
"num_lines": 798
} |
'''A module for parsing context free grammar specifications using an extended
syntax. This extended syntax allows for symbols with labels longer than one
character. Nonterminal names are written between angle brackets (<...>), and
terminal names are written between double quotes ("...").
Example:
<Sentence> -> <Noun-phrase> <Verb-phrase> | <Sentence> <Prep-phrase>
<Noun-phrase> -> "noun"
<Noun-phrase> -> "det" "noun"
etc.
'''
import re
from cfg.core import Terminal, Nonterminal, ContextFreeGrammar, ProductionRule
class CFGReaderError(Exception):
    '''Raised when a grammar specification cannot be parsed.'''
    pass
class CFGReader(object):
    '''A parser for the "extended" grammar syntax.

    A small recursive-descent parser over a regex tokenizer. Token
    types are identified by their index in the tokenizer's capture
    groups (see `tokens`). `self.token`/`self.value` always hold the
    current lookahead token.
    '''
    # Token type codes; the order must match the capture-group order in
    # the regex built in `tokens`.
    NONTERMINAL, TERMINAL, ARROW, PIPE, NEWLINE, WHITESPACE, ERROR, EOF = range(8)
    def parse(self, s):
        '''Read a grammar from a string in the extended syntax and return the
        grammar.

        Raises CFGReaderError if the input cannot be fully consumed.'''
        self.productions = []
        self.tokenizer = iter(self.tokens(s))
        self.next_token()
        self.read_gram()
        if self.token != CFGReader.EOF:
            raise CFGReaderError('could not reach EOF')
        return ContextFreeGrammar([ProductionRule(left, right) for left, right_sides in self.productions for right in right_sides])
    def read_gram(self):
        # Grammar = optional blank lines, then newline-separated rules
        while self.try_read(CFGReader.NEWLINE): pass
        if self.try_rule():
            while self.try_read(CFGReader.NEWLINE):
                while self.try_read(CFGReader.NEWLINE): pass
                if not self.try_rule(): break
    def read_rule(self):
        # Rule = <Nonterminal> -> sentence (| sentence)*
        v = self.value
        self.read(CFGReader.NONTERMINAL)
        left_side = Nonterminal(v[1:-1])
        self.read(CFGReader.ARROW)
        self.right_sides = []
        self.read_sentence()
        while self.try_read(CFGReader.PIPE):
            self.read_sentence()
        self.productions.append((left_side, self.right_sides))
    def read_sentence(self):
        # Sentence = zero or more symbols (zero means an empty right side)
        self.symbols = []
        while self.try_symbol(): pass
        self.right_sides.append(self.symbols)
    def try_symbol(self):
        # Consume one <Nonterminal> or "terminal" if present
        v = self.value
        if self.try_read(CFGReader.NONTERMINAL):
            # v[1:-1] strips the surrounding angle brackets
            self.symbols.append(Nonterminal(v[1:-1]))
        elif self.try_read(CFGReader.TERMINAL):
            # v[1:-1] strips the surrounding double quotes
            self.symbols.append(Terminal(v[1:-1]))
        else:
            return False
        return True
    def try_read(self, token):
        # Consume the lookahead if it has the given type
        if self.token == token:
            self.next_token()
            return True
        return False
    def read(self, token):
        # Consume a required token or fail
        if not self.try_read(token):
            raise CFGReaderError('unexpected token %r' % self.value)
    def try_rule(self):
        if self.token == CFGReader.NONTERMINAL:
            self.read_rule()
            return True
        return False
    def tokens(self, s):
        # Yield (type, value) pairs; the group index that matched is the
        # token type. Non-newline whitespace is skipped.
        tokenizer = re.compile(r'(\<[^>]*\>)|(\"[^"]*\")|(\-\>)|(\|)|(\n)|(\s+)|(.)', re.M)
        for token_tuple in tokenizer.findall(s):
            for i, v in enumerate(token_tuple):
                if v and i != self.WHITESPACE:
                    yield i, v
                    break
        yield CFGReader.EOF, None
    def next_token(self):
        # Advance the lookahead; StopIteration cannot occur before EOF
        # because `tokens` always ends with an EOF token.
        result = (self.token, self.value) = next(self.tokenizer)
        return result
def parse_cfg(s):
    '''Parse a string into a ContextFreeGrammar, accepting either the extended
    or the standard syntax.

    Tries the standard syntax first, then falls back to the extended
    reader; raises ValueError if both fail.
    '''
    try:
        return ContextFreeGrammar(s)
    except ValueError:
        try:
            return CFGReader().parse(s)
        except CFGReaderError:
            raise ValueError('unable to parse string into ContextFreeGrammar')
| {
"repo_name": "bdusell/pycfg",
"path": "src/cfg/cfg_reader.py",
"copies": "1",
"size": "3542",
"license": "mit",
"hash": -3582254568801285600,
"line_mean": 30.9099099099,
"line_max": 131,
"alpha_frac": 0.6055900621,
"autogenerated": false,
"ratio": 3.918141592920354,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.996789073662024,
"avg_score": 0.01116818368002287,
"num_lines": 111
} |
"""A module for parsing vim-otl into a tree."""
import sys
class LineError(Exception):
    """An error that happens during line parsing (e.g. a malformed
    body delimiter or an unsupported construct)."""
    pass
class Object:
    """Represents an OTL object.

    This object may be header, body, or perhaps some other kind of a
    body; preformatted or user-defined (or user-defined preformatted).

    Objects form a tree: each object holds its children and a list of
    its content lines. All attributes are private (name-mangled) and
    exposed read-only through properties.
    """
    def __init__(self, object_type, parent, level):
        """Initialise a object.

        Params:
            object_type (str): Contains the type of the object. The
                               first object is always a special object
                               with type None and level -1.
                               Valid types are None, "header", "body",
                               "body-pre" and any additional custom
                               types. -pre is appended to the name of
                               any custom fixed-width type.
            parent (Object):   Parent object of this object in the
                               outline. Always None for a root object.
            level (int):       Indentation level, which equals outline
                               level. Always -1 for a root object.
        """
        self.__object_type = object_type
        self.__parent = parent
        self.__level = level
        self.__children = []
        self.__lines = []
        # tell parent we're a child now
        if (self.__parent is not None):
            self.__parent.__add_child(self)
    def __str__(self):
        """Convert object to string.

        This string contains a representation of the tree under the
        current object (depth-first, indented by outline level).
        """
        ret = ""
        if self.__level >= 0:
            # Root doesn't get these
            prefix = " " * self.__level
            if self.__object_type != "header":
                # Object type
                ret += "{}{}:\n".format(prefix, self.__object_type)
                for l in self.__lines:
                    # Each line
                    ret += "{}{}\n".format(prefix, l)
            else:
                # Headers are oneline
                ret += "{}{} {}: {}\n".format(prefix, self.__object_type,
                                              self.__level, self.__lines[0])
        for c in self.__children:
            ret += c.__str__()
        return ret
    def add_line(self, line):
        """Adds a line of content to a list.

        Params:
            line (str): A line read from a file, preprocessed to be
                        left-stripped of indentation and possible
                        object type (say, body text) delimiters.
        """
        self.__lines.append(line)
    # Properties (read-only views of the private attributes)
    @property
    def parent(self):
        return self.__parent
    @property
    def level(self):
        return self.__level
    @property
    def object_type(self):
        return self.__object_type
    @property
    def first_line(self):
        return self.__lines[0]
    @property
    def lines(self):
        return self.__lines
    @property
    def children(self):
        return self.__children
    # Private
    def __add_child(self, child):
        """Add a child.

        Params:
            child (Object): The child to add.
        """
        self.__children.append(child)
def _count_level(line):
"""Counts the indentation level of a line.
Params:
line (str): A line read from a file.
"""
count = 0
for c in line:
if c == "\t":
count += 1
else:
break
return count
def _handle_line(current, line, linenum):
    """Handles a line, returning an Object.

    Dispatches on the line's first non-tab character: ':' body,
    ';' preformatted body, '>' custom body, '<' custom preformatted
    body, '|' table (unsupported), anything else a header. Depending on
    level and type, the content is either appended to `current` or a
    new Object is created as its child/sibling.

    NOTE(review): custom_type is only assigned in some branches; its
    existence is later probed via NameError (see below). This is
    deliberate but fragile.

    Params:
        current (Object): The previous object returned by this function,
                          or root.
        line (str): The next line read from a file.
        linenum (int): The current line's number

    Returns: Object representing the line handled. This object might be
        the same object as current!

    Raises:
        LineError: on a malformed delimiter or an unsupported table row.
    """
    # Separate level and line content
    level = _count_level(line)
    # Also remove end-of-line here
    lstripped = line.lstrip()[:-1]
    if len(lstripped) == 0:
        # Nothing at all on this line o_o
        # Just return current
        return current
    if lstripped[0] == ":":
        object_type = "body"
        if len(lstripped) == 1:
            # end-of-line after :
            content = ""
        elif lstripped[1] != " ":
            # the beginning must be ": "
            raise LineError("No space after body delimiter on line {}"
                            .format(linenum))
        else:
            # Remove extra spaces after this, too
            content = lstripped[2:].strip()
    elif lstripped[0] == ";":
        object_type = "body-pre"
        if len(lstripped) < 2 or lstripped[1] != " ":
            raise LineError("No space after preformatted body"
                            " delimiter on line {}".format(linenum))
        # Only remove first two chars, and endl
        # NOTE(review): this len == 1 case is unreachable — len < 2
        # already raised above.
        if len(lstripped) == 1:
            content = ""
        else:
            content = lstripped[2:]
    elif lstripped[0] == ">":
        # Strip delimiter
        temp = lstripped[1:]
        object_type = "custom"
        if len(temp) == 0:
            content = ""
        elif temp[0] != " ":
            # '>typename content': first word names the custom type
            # Only split once tho
            split = temp.split(" ", maxsplit=1)
            custom_type = split[0]
            if len(split) < 2:
                content = ""
            else:
                content = split[1]
        else:
            # '> content' with no type name: plain custom body
            content = temp.strip()
    elif lstripped[0] == "<":
        # Strip delimiter
        temp = lstripped[1:]
        object_type = "custom-pre"
        if len(temp) == 0:
            content = ""
        elif temp[0] != " ":
            # Only split once tho
            split = temp.split(" ", maxsplit=1)
            custom_type = split[0]
            if len(split) < 2:
                content = ""
            else:
                content = split[1]
        else:
            # Don't even strip it
            content = temp
    elif lstripped[0] == "|":
        raise LineError("Tables not supported on line {}".format(linenum))
    else:
        # Headers are trivial
        object_type = "header"
        content = lstripped.strip()
    # We've handled the line itself, now to create objects
    try:
        # Handle custom type
        # The check here causes a NameError (custom_type is only bound
        # in the '>'/'<' branches that saw a type name)
        if custom_type:
            if object_type == "custom-pre":
                # always add pre to custom pre types
                custom_type = custom_type + "-pre"
    except NameError:
        # custom_type doesn't exist
        custom_type = None
    if level > current.level:
        # We're a child!
        if object_type != "header":
            if custom_type:
                # Handle custom type
                object_type = custom_type
            new = Object(object_type, current, level)
        else:
            # Headers are always separate
            new = Object(object_type, current, level)
        # Add content and return child
        new.add_line(content)
        return new
    elif level == current.level:
        # Adjacent, or same!
        if object_type != "header":
            if custom_type:
                object_type = custom_type
                # custom_type indicates we ALWAYS start a new object
                # It's a sibling to the current one, too
                new = Object(object_type, current.parent, level)
            elif object_type == current.object_type:
                # Same type, non-header, no custom -> combine!
                current.add_line(content)
                return current
            elif object_type in ("custom", "custom-pre")\
                    and current.object_type not in ("body", "body-pre"):
                # Must be custom
                if object_type == "custom-pre"\
                        and current.object_type[-4:] == "-pre":
                    # custom-pre extends a previous -pre
                    # Custom reformatted continues!
                    current.add_line(content)
                    return current
                elif object_type == "custom"\
                        and current.object_type[-4:] != "pre":
                    # Custom body extends a previous custom
                    current.add_line(content)
                    return current
                else:
                    # Actually was new after all
                    new = Object(object_type, current.parent, level)
            else:
                # New adjacent
                new = Object(object_type, current.parent, level)
        else:
            # Header
            new = Object(object_type, current.parent, level)
        # Add content and return new
        new.add_line(content)
        return new
    elif level < current.level:
        # Go up till we find the parent of this new object
        parent = current.parent
        while level < parent.level:
            # If level is beneath level of parent, it's not really our
            # parent
            # I love this line and I'm so so sorry
            parent = parent.parent
        # Ok we found our parent. It's root if nothing else.
        # Continue as usual. All blocks are now new.
        if custom_type:
            object_type = custom_type
        new = Object(object_type, parent, level)
        new.add_line(content)
        return new
    # That's that I think!
def tree_from_file(f):
    """Parse the outline read from file object *f* into an Object tree.

    Returns the root Object on success.  On a decoding, I/O, or parse
    error a message is printed to stderr and None is returned implicitly
    (the error is deliberately swallowed, not re-raised).
    """
    # The root is a sentinel above every real object; level -1 makes any
    # actual line (level >= 0) a descendant of it.
    root = Object(object_type=None,
                  parent=None,
                  level=-1)
    current = root
    try:
        # Iterate lazily (1-based line numbers) so input that cannot be
        # read to the end immediately is still accepted.
        for linenum, line in enumerate(f, 1):
            current = _handle_line(current, line, linenum)
        return root
    except UnicodeError:
        print("Invalid unicode in input", file=sys.stderr)
    except IOError:
        print("Could not read file.", file=sys.stderr)
    except LineError as e:
        print("Error parsing file: ", str(e), file=sys.stderr)
| {
"repo_name": "fennekki/unikko",
"path": "unikko/votl.py",
"copies": "1",
"size": "10146",
"license": "bsd-2-clause",
"hash": 1572526072772238300,
"line_mean": 31.4153354633,
"line_max": 76,
"alpha_frac": 0.5063079046,
"autogenerated": false,
"ratio": 4.593028519692169,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00008192020971573687,
"num_lines": 313
} |
"""A module for periodically displaying progress on a hierarchy of tasks
and estimating time to completion.
>>> import progress, datetime
>>> progress.set_resolution(datetime.datetime.resolution) # show all messages, don't sample
>>> progress.start_task('Repetition', 2)
>>> for rep in range(2): # doctest: +ELLIPSIS
... progress.progress(rep)
... progress.start_task('Example', 3)
... for ex in range(3):
... progress.progress(ex)
... progress.end_task()
...
Repetition 0 of 2 (~0% done, ETA unknown on ...)
Repetition 0 of 2, Example 0 of 3 (~0% done, ETA unknown on ...)
Repetition 0 of 2, Example 1 of 3 (~17% done, ETA ...)
Repetition 0 of 2, Example 2 of 3 (~33% done, ETA ...)
Repetition 0 of 2, Example 3 of 3 (~50% done, ETA ...)
Repetition 1 of 2 (~50% done, ETA ...)
Repetition 1 of 2, Example 0 of 3 (~50% done, ETA ...)
Repetition 1 of 2, Example 1 of 3 (~67% done, ETA ...)
Repetition 1 of 2, Example 2 of 3 (~83% done, ETA ...)
Repetition 1 of 2, Example 3 of 3 (~100% done, ETA ...)
>>> progress.end_task() # doctest: +ELLIPSIS
Repetition 2 of 2 (~100% done, ETA ...)
"""
__author__ = 'wmonroe4'
import datetime
import doctest
from collections import namedtuple
class ProgressMonitor(object):
    '''
    Keeps track of a hierarchy of tasks and displays percent completion
    and estimated completion time.
    '''
    def __init__(self, resolution=datetime.datetime.resolution):
        '''
        Create a `ProgressMonitor` object.

        :param datetime.timedelta resolution: The minimum interval at which
            progress updates are shown. The default is to show all updates.
            This setting can be modified after creation by assigning to
            the `resolution` field of a `ProgressMonitor` object.
            (Note that the global `progress.*` functions override this to
            show updates every minute by default. This can be reset by
            calling `progress.set_resolution(datetime.datetime.resolution)`.)
        '''
        self.task_stack = []
        # datetime.min guarantees the very first report is never throttled.
        self.last_report = datetime.datetime.min
        self.resolution = resolution
        self.start_time = datetime.datetime.now()

    def start_task(self, name, size):
        '''
        Add a task to the stack. If, for example, `name` is `'Iteration'` and
        `size` is 10, progress on that task will be shown as

            ..., Iteration <p> of 10, ...

        :param str name: A descriptive name for the type of subtask that is
            being completed.
        :param int size: The total number of subtasks to complete.
        '''
        if len(self.task_stack) == 0:
            # A fresh top-level task restarts the clock used for the ETA.
            self.start_time = datetime.datetime.now()
        self.task_stack.append(Task(name, size, 0))

    def progress(self, p):
        '''
        Update the current progress on the task at the top of the stack.

        :param int p: The current subtask number, between 0 and `size`
            (passed to `start_task`), inclusive.
        '''
        self.task_stack[-1] = self.task_stack[-1]._replace(progress=p)
        self.progress_report()

    def end_task(self):
        '''
        Remove the current task from the stack, reporting it as complete.
        '''
        self.progress(self.task_stack[-1].size)
        self.task_stack.pop()

    def progress_report(self, force=False):
        '''
        Print the current progress.

        :param bool force: If `True`, print the report regardless of the
            elapsed time since the last progress report.
        '''
        now = datetime.datetime.now()
        # Throttle all reports except the very first one (a single task
        # still at progress 0).  BUG FIX: the original compared the Task
        # tuple itself to 0 (`self.task_stack[0] > 0`), which is always
        # True on Python 2 and a TypeError on Python 3; the intent is to
        # inspect the task's progress field.
        if (len(self.task_stack) > 1 or self.task_stack[0].progress > 0) and \
                now - self.last_report < self.resolution and not force:
            return
        stack_printout = ', '.join('%s %s of %s' % (t.name, t.progress, t.size)
                                   for t in self.task_stack)
        frac_done = self.fraction_done()
        if frac_done == 0.0:
            # No progress yet: cannot extrapolate an ETA.
            now_str = now.strftime('%c')
            eta_str = 'unknown on %s' % now_str
        else:
            # Assume a constant rate of progress to extrapolate total length.
            elapsed = (now - self.start_time)
            estimated_length = elapsed.total_seconds() / frac_done
            eta = self.start_time + datetime.timedelta(seconds=estimated_length)
            eta_str = eta.strftime('%c')
        # Single-argument call form prints identically on Python 2 and 3.
        print('%s (~%d%% done, ETA %s)' % (stack_printout,
                                           round(frac_done * 100.0),
                                           eta_str))
        self.last_report = datetime.datetime.now()

    def fraction_done(self, start=0.0, finish=1.0, stack=None):
        '''
        :return float: The estimated fraction of the overall task hierarchy
            that has been finished. A number in the range [0.0, 1.0].
        '''
        if stack is None:
            stack = self.task_stack
        if len(stack) == 0:
            return start
        else:
            # Each level linearly interpolates within the sub-interval its
            # parent assigned to the currently running subtask.
            top_fraction = stack[0].progress * 1.0 / stack[0].size
            next_top_fraction = (stack[0].progress + 1.0) / stack[0].size
            inner_start = start + top_fraction * (finish - start)
            inner_finish = start + next_top_fraction * (finish - start)
            return self.fraction_done(inner_start, inner_finish, stack[1:])


# A task has a descriptive name, a total size, and its current progress.
Task = namedtuple('Task', ('name', 'size', 'progress'))
_global_t = ProgressMonitor(resolution=datetime.timedelta(minutes=1))
def start_task(name, size):
    '''
    Call `start_task` on a global `ProgressMonitor`.

    :param str name: A descriptive name for the type of subtask.
    :param int size: The total number of subtasks to complete.
    '''
    _global_t.start_task(name, size)
def progress(p):
    '''
    Call `progress` on a global `ProgressMonitor`.

    :param int p: The current subtask number, between 0 and the `size`
        passed to `start_task`, inclusive.
    '''
    _global_t.progress(p)
def end_task():
    '''
    Call `end_task` on a global `ProgressMonitor`.

    Marks the current task complete and removes it from the task stack.
    '''
    _global_t.end_task()
def set_resolution(res):
    '''
    Change the resolution on the global `ProgressMonitor`.
    See `ProgressMonitor.__init__`.

    :param datetime.timedelta res: The minimum interval at which progress
        updates are shown.
    '''
    _global_t.resolution = res
__all__ = [
'ProgressMonitor',
'start_task',
'progress',
'end_task',
'set_resolution',
]
if __name__ == '__main__':
doctest.testmod()
| {
"repo_name": "arunchaganty/aeschines",
"path": "third-party/stanza/stanza/monitoring/progress.py",
"copies": "3",
"size": "6016",
"license": "mit",
"hash": -2125625674097820000,
"line_mean": 32.0549450549,
"line_max": 92,
"alpha_frac": 0.5871010638,
"autogenerated": false,
"ratio": 3.831847133757962,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.00012691410540872905,
"num_lines": 182
} |
"""A module for querying datasources (e.g., the World Bank Indicators). They
can optionally be stored locally to reduce internet queries.
"""
import contextlib
import json
import logging
import os
import requests
import warnings
import pandas as pd
from salamanca.utils import backend
WB_INDICATORS = {
'SP.POP.TOTL': 'total_population',
'PA.NUS.PPPC.RF': 'ppp_to_mer', # conversion factor [PPP / MER]
'FP.CPI.TOTL': 'cpi',
'PA.NUS.FCRF': 'exchange_rate',
'NY.GDP.DEFL.ZS': 'gdp_deflator',
'SI.POV.DDAY': 'below_1_90_dollars_per_day_ppp',
'NE.CON.PETC.ZS': 'household_fraction_gdp',
}
INDICATORS_WB = {d: k for k, d in WB_INDICATORS.items()}
WB_URL = 'http://api.worldbank.org/v2/country/{iso}/indicator/{indicator}'
EU_COUNTRIES = [
'AUT', 'BEL', 'CYP',
'DEU', 'ESP', 'EST',
'FIN', 'FRA', 'GRC',
'IRL', 'ITA', 'LTU',
'LUX', 'LVA', 'MLT',
'NLD', 'PRT', 'SVK',
'SVN',
]
@contextlib.contextmanager
def query_rest_api(url, params=None, tries=5):
    """Query a REST API online

    Parameters
    ----------
    url : str
        url to query
    params : dict, optional
        extra query-string parameters; `format=json` and `per_page=1000`
        defaults are always added
    tries : int, optional
        number of times to try query before giving up

    Raises
    ------
    RuntimeError
        if the reply payload is malformed, carries an error `message`,
        or all `tries` attempts failed with an IOError
    """
    params = {
        'format': 'json',
        'per_page': 1000,
        **(params if params is not None else {})
    }
    logging.debug('Querying: {}, tries left: {}'.format(url, tries))
    n = 0
    while n < tries:
        try:
            q = requests.get(url, params=params)
            result = q.json()
            # The API wraps metadata either in a top-level dict or as the
            # first element of a list.
            if isinstance(result, dict):
                meta = result
            elif isinstance(result, list):
                meta = result[0]
            else:
                raise RuntimeError("Unexpected reply payload: {}".format(result))
            if 'message' in meta:
                raise RuntimeError(meta['message'])
            yield result
            break
        except IOError:
            # Only transport-level errors are retried; malformed replies
            # raise immediately above.
            n += 1
    else:
        # BUG FIX: do not reference `q` here -- it is unbound when every
        # attempt raised inside requests.get itself.
        raise RuntimeError(
            'Query failed after {} tries: {}'.format(tries, url))
class WorldBank(object):
    """A simple object for querying the World Bank's REST API"""
    def __init__(self):
        # URL query arguments understood by the API (see `query` kwargs).
        self.query_args = ['date', 'MRV', 'Gapfill', 'frequency']

    def _do_query(self, wb, params=None, tries=5):
        """Collect all result pages for the World Bank indicator `wb`.

        :param str wb: a World Bank indicator code (e.g., 'SP.POP.TOTL')
        :param dict params: extra query parameters; 'iso' selects countries
            (defaults to 'all')
        :param int tries: retry count forwarded to `query_rest_api`
        :return: list of raw result records
        """
        params = params.copy()
        url = WB_URL.format(indicator=wb, iso=params.pop('iso', 'all'))
        pages = 1
        params['page'] = 0
        result = []
        # Every reply's metadata reports the total page count; keep
        # requesting until all pages have been collected.
        while params['page'] < pages:
            params['page'] += 1
            with query_rest_api(url, params=params) as _result:
                pages = _result[0]['pages']
                result += _result[1]
            logging.debug('Page {} of {} Complete'.format(params['page'], pages))
        return result

    def query(self, indicator, tries=5, use_cache=True, overwrite=False, **kwargs):
        """Query the World Bank for an indicator, optionally using a local cache.

        :param str indicator: a World Bank indicator code or one of the
            friendly names in `INDICATORS_WB` (e.g., 'cpi')
        :param int tries: retry count for each HTTP request
        :param bool use_cache: read/write a local copy of the data; only
            possible when no extra API kwargs are given
        :param bool overwrite: rewrite the cached copy with fresh data
        :param kwargs: extra API arguments, e.g. 'iso', 'date', 'MRV',
            'Gapfill', 'frequency'
        :return: a long-format `pd.DataFrame` (country/date/value)
        """
        i = indicator
        if i in WB_INDICATORS:
            # supported wb indicators
            wb = i
            ind = WB_INDICATORS[i]
        elif i in INDICATORS_WB:
            # supported indicator
            ind = i
            wb = INDICATORS_WB[i]
        else:
            # not supported indicator; pass it through verbatim
            ind = i
            wb = i
        # use cache if no other API kwargs present
        if use_cache and kwargs:
            warnings.warn('Can not cache queries with additional arguments')
            use_cache = False
        # read from disc if it already exists
        if use_cache:
            db = backend()
            source = 'wb'
            exists = db.exists(source, ind)
            if exists:
                return db.read(source, ind)
        # otherwise get raw data
        result = self._do_query(wb, params=kwargs, tries=tries)
        # construct as data frame
        df = pd.DataFrame(result)
        df['country'] = df['country'].apply(lambda x: x['id'])
        df.drop(['decimal', 'indicator', 'countryiso3code',
                 'unit', 'obs_status'],
                axis=1, inplace=True)
        try:
            # convert years if possible
            df['date'] = df['date'].astype(int)
        except (ValueError, TypeError):
            # non-annual dates (e.g. quarters) stay as strings; narrowed
            # from a bare `except` so real errors are not swallowed
            pass
        # fix up country names to guarantee ISO3-standard
        # in a recent update, some tables were found to be id'd to iso2,
        # which is fixed here
        # TODO: why are there NaNs? why would any be empty?
        df = df.dropna(subset=['country'])
        df = df[df['country'] != '']
        if len(df['country'].iloc[0]) == 2:
            meta = self.iso_metadata()
            mapping = {r['iso2Code']: r['id'] for idx, r in meta.iterrows()}
            df['country'] = df['country'].map(mapping)
        # write to disc if we're caching
        if use_cache and (not exists or overwrite):
            db.write(source, ind, df)
        return df

    def iso_metadata(self, overwrite=False, map_cols=None):
        """Return country metadata (ISO codes, region/income/lending ids).

        :param bool overwrite: re-download even when a cached copy exists
        :param list map_cols: when given, a 2-item list [key_col, value_col];
            the return value is then a `pd.Series` mapping key to value
        """
        db = backend()
        source = 'wb'
        ind = 'iso_mapping'
        if overwrite or not db.exists(source, ind):
            url = 'http://api.worldbank.org/v2/country'
            with query_rest_api(url) as x:
                df = pd.DataFrame(x[1])
            idcols = ['adminregion', 'incomeLevel',
                      'lendingType', 'region']
            # These columns hold {'id': ..., 'value': ...} dicts; keep ids.
            for col in idcols:
                df[col] = df[col].apply(lambda x: x['id'])
            db.write(source, ind, df)
        df = db.read(source, ind)
        if map_cols:
            df = df[map_cols].set_index(map_cols[0])[map_cols[1]]
        return df

    def to_wide(self, df):
        """Pivot a long country/date/value frame to one row per country."""
        return df.pivot(index='country',
                        columns='date',
                        values='value').reset_index()

    def to_long(self, df):
        """Melt a wide frame (one row per country) back to long format,
        sorted by country ascending and date descending."""
        return (df
                .melt(id_vars='country', value_vars=df.columns[1:])
                .sort_values(['country', 'date'], ascending=[True, False])
                .reset_index(drop=True))

    def _merge_eu(self, df):
        """Backfill missing Euro-area member values from the EMU aggregate."""
        df = self.to_wide(df).set_index('country')
        df.loc[EU_COUNTRIES] = df.loc[EU_COUNTRIES].fillna(df.loc['EMU'])
        df = self.to_long(df.reset_index())
        return df

    def cpi(self, **kwargs):
        """Consumer price index (FP.CPI.TOTL)."""
        df = self.query('cpi', **kwargs)
        return df

    def exchange_rate(self, **kwargs):
        """Official exchange rate (PA.NUS.FCRF), with EMU backfill for EU members."""
        df = self.query('exchange_rate', **kwargs)
        # update newer currency unions
        df = self._merge_eu(df)
        return df

    def gdp_deflator(self, **kwargs):
        """GDP deflator (NY.GDP.DEFL.ZS)."""
        df = self.query('gdp_deflator', **kwargs)
        return df

    def ppp_to_mer(self, **kwargs):
        """PPP-to-MER conversion factor (PA.NUS.PPPC.RF)."""
        df = self.query('ppp_to_mer', **kwargs)
        return df

    def household_fraction_gdp(self, **kwargs):
        """Household consumption as a fraction of GDP (NE.CON.PETC.ZS)."""
        df = self.query('household_fraction_gdp', **kwargs)
        return df
| {
"repo_name": "gidden/salamanca",
"path": "salamanca/data.py",
"copies": "1",
"size": "6823",
"license": "apache-2.0",
"hash": -863604975722032800,
"line_mean": 28.9254385965,
"line_max": 83,
"alpha_frac": 0.529972153,
"autogenerated": false,
"ratio": 3.682137075013492,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.4712109228013492,
"avg_score": null,
"num_lines": null
} |
"""A module for querying for articles from the NYTimes."""
import sys
import os
wd = os.path.abspath('.')
sys.path.append(wd + '/../')
import pandas as pd
import numpy as np
from datetime import timedelta
from requests import get
from time import sleep
from os.path import exists
from pymongo import MongoClient
from general_utilities.query_utilities import check_response_code, get_html
from general_utilities.storage_utilities import store_in_mongo
class NYTPageScraper(object):
    """Scraper for pages of results returned by the Article Search API from NYTimes.

    Provides an interface to search for results by inputted parameters over a
    single date or multiple dates, as well as returning multiple pages from the
    search or one page in particular. Searching is limited by the rate limits
    applied by the API (developer.nytimes.com/article_search_v2.json).

    Limits in terms of the returned results are determined by the search
    parameters. Results are returned in batches of 10, and additional results
    are accessible via a `page` parameter. The `page` parameter is capped at
    100, which caps the total number of results at 1000 for a single query.
    Scraping one day at a time helps keep each query under that cap.

    Args:
    ----
    queries_path (optional): str
        Holds a filepath location to keep track of successfully issued queries.
        Expected to be pointed at a `.csv` file.
    """
    def __init__(self, queries_path='work/queries.csv'):
        self.articles = []
        self.queries_path = queries_path
        self.base_url = 'http://api.nytimes.com/svc/search/v2/articlesearch.json'
        self.scrape = True

    def __enter__(self):
        """Set up to make sure there is no duplicate scraping/storing."""
        if exists(self.queries_path):
            # Use to ensure that there is no duplicate scraping in terms of dates.
            self.queries_df = pd.read_csv(self.queries_path, index_col=0,
                                          parse_dates=True)
        else:
            self.queries_df = pd.DataFrame()
        # Use to ensure that there are no duplicate web_urls grabbed.
        client = MongoClient()
        db = client['nytimes']
        collection = db['gen_articles']
        res = collection.find({'web_url': {'$exists': 'true'}}, {'web_url': True,
                                                                 '_id': False})
        res_lst = list(res)
        self.web_urls = set(article['web_url'] for article in res_lst)
        client.close()
        return self

    def __exit__(self, *args):
        """Save the `queries_df` for later use."""
        self.queries_df.sort_index(inplace=True)
        self.queries_df.to_csv(self.queries_path)

    def scrape_dts(self, start_dt, end_dt, extra_params=None):
        """Scrape the NYTimes for multiple dates, using the inputted parameters.

        Loop over each date from the `start_dt` to `end_dt`, calling
        `self.scrape_dt`. Scraping over a single day at a time helps to avoid
        missing possible search results (see the class docstring).

        Args:
        -----
        start_dt: str
        end_dt: str
        extra_params (optional): dct
            Potential extra parameters to pass in the URL when querying the
            API (see params at developer.nytimes.com/article_search_v2.json).
        """
        dt_range = pd.date_range(start_dt, end_dt)
        for begin_date in dt_range:
            begin_date = begin_date.strftime('%Y%m%d')
            end_date = begin_date
            self.scrape_dt(begin_date, end_date, extra_params)

    def scrape_dt(self, begin_date, end_date, extra_params=None):
        """Scrape the NYT for a single date, using the inputted parameters.

        Scrape over as many pages as are returned, subject to the page cap
        at 100.

        Args:
        -----
        begin_date: str
        end_date: str
        extra_params (optional): dct
        """
        # BUG FIX: the original set `params = None` when no extra params
        # were given, which crashed on the item assignments just below.
        params = {} if not extra_params else extra_params.copy()
        # Issue the initial query with page equal to 0.
        params['page'] = 0
        params['begin_date'] = begin_date
        params['end_date'] = end_date
        self.update_queries_df(begin_date, insert=True)
        if self.scrape:
            initial_response = self.scrape_single_page(params)
            if initial_response is None:
                # BUG FIX: a failed request returns None; previously this
                # crashed on the subscript below. Leave the date marked as
                # un-scraped so a later run retries it.
                return
            num_results = initial_response['response']['meta']['hits']
            if num_results > 10:
                max_pages_to_search = min(100, num_results // 10 + 1)
                for page in range(1, max_pages_to_search):
                    sleep(1/5)  # Use to avoid hitting the rate limit.
                    params['page'] = page
                    self.scrape_single_page(params)
            self.dump_articles()
            self.update_queries_df(begin_date, insert=False)

    def scrape_single_page(self, params):
        """Scrape the NYT for a single page, using the inputted params.

        Args:
        ----
        params: dct

        Return:
        ------
        response_json: dct or None
            `None` when the request failed with an unexpected status code.
        """
        if 'page' not in params:
            print('No `page` parameter passed in, using 0...')
            params['page'] = 0
        response = get(self.base_url, params=params)
        status_code = response.status_code
        if status_code != 200 and status_code != 429:
            print('Bad URL: {}'.format(response.url))
        elif status_code == 429:
            # Daily rate limit reached; no point continuing this run.
            print('Rate limits hit for the day.')
            sys.exit(0)
        else:
            response_json = response.json()
            self.parse_page_results(response_json)
            return response_json

    def parse_page_results(self, response_json):
        """Parse a single page of results, grabbing each article's desired attributes.

        Args:
        ----
        response_json: dct
        """
        # Attributes that require no post-processing/farther parsing.
        desired_attributes = ('source', 'subsection_name', 'section_name', 'web_url',
                              'news_desk', 'type_of_material', 'document_type',
                              'pub_date')
        for doc in response_json['response']['docs']:
            article_dct = {}
            for attr in desired_attributes:
                article_dct[attr] = doc.get(attr, None)
            keywords = doc.get('keywords', None)
            headline_dct = doc.get('headline', None)
            if keywords:
                keywords_lst = [keywords_dct['value'] for keywords_dct in keywords]
                article_dct['keywords'] = keywords_lst
            if headline_dct:
                headline = headline_dct['main']
                article_dct['headline'] = headline
            # Skip articles whose URL was already stored in Mongo.
            if article_dct['web_url'] not in self.web_urls:
                self.articles.append(article_dct)

    def dump_articles(self):
        """Dump articles list into Mongo."""
        if self.articles:
            client = MongoClient()
            db = client['nytimes']
            collection = db['gen_articles']
            collection.insert_many(self.articles)
            client.close()
        self.articles = []  # Start each day of scraping with an empty list.

    def update_queries_df(self, update_dt, insert=True):
        """Modify `self.queries_df` for the inputted dates.

        `self.queries_df` will be used to keep track of dates that have already
        been scraped. It is indexed by date, and has one column (`scraped`)
        that holds a 1 if the date has already been scraped for and a 0
        otherwise.

        If `insert` is True, check if the inputted date is already in
        `self.queries_df`. If it isn't, insert a new observation with a 0 value
        to denote that the date has not been scraped yet. If `insert` is False,
        update the value of the inputted date in `self.queries_df` with a 1.

        Args:
        ----
        update_dt: str
        insert (optional): bool
        """
        update_value = 0 if insert else 1
        update_dt = pd.to_datetime(update_dt)
        if update_dt in self.queries_df.index:
            # A previously-completed date (scraped == 1) must not be redone.
            self.scrape = not self.queries_df.loc[update_dt, 'scraped']
        else:
            self.scrape = 1
        if self.scrape:
            self.queries_df.loc[update_dt, 'scraped'] = update_value
class NYTArticleScraper(object):
    """Scraper for URLs pointing at New York Times articles.

    Designed as a context manager: `__enter__` collects from Mongo the stored
    articles that have a `web_url` but no `text` yet, and `__exit__` writes
    the scraped text back.

    Args:
    ----
    db_name: str
    coll_name: str
    """
    def __init__(self, db_name, coll_name):
        self.db_name = db_name
        self.coll_name = coll_name
    def __enter__(self):
        """Set up URL list for scraping."""
        client = MongoClient()
        db = client[self.db_name]
        collection = db[self.coll_name]
        # Only fetch articles whose text has not been scraped yet.
        res = collection.find({'web_url': {'$exists': True},
                               'text' : {'$exists': False}},
                              {'web_url': True, '_id': False})
        self.articles_to_scrape = list(res)
        client.close()
        return self
    def __exit__(self, *args):
        """Ensure that any URLs scraped for get their text attributes updated."""
        store_in_mongo(self.articles_to_scrape, self.db_name, self.coll_name,
                       key='web_url')
    def scrape_pages(self):
        """Scrape the text for every article in `self.articles_to_scrape`."""
        for article in self.articles_to_scrape:
            url = article['web_url']
            if url.startswith('/'):
                # Relative URL stored by the API; make it absolute.
                url = 'http://www.nytimes.com' + url
            sleep(1/20)  # Pause briefly between page fetches.
            soup = get_html(url)
            article_txt = self._parse_soup(soup)
            if article_txt:
                article['text'] = article_txt
    def _parse_soup(self, soup):
        """Parse the inputted `soup`, extracting the article body text.

        Args:
        ----
        soup: bs4.BeautifulSoup object

        Returns:
        -------
        article_txt: str or None
            `None` when no `story-body` div is present in the page.
        """
        content = soup.find('div', attrs={'class': 'story-body'})
        if content:
            lines = content.findAll('p')
            article_txt = ' '.join([line.text for line in lines])
        else:
            article_txt = None
        return article_txt
if __name__ == '__main__':
if len(sys.argv) >= 2:
try:
start_dt = sys.argv[1]
end_dt = sys.argv[2]
except:
error_msg = "Must pass in both a starting and ending date!"
raise Exception(error_msg)
else:
start_dt, end_dt = None, None
extra_params = {'fq' : """source:("The New York Times") AND type_of_material:("News")"""}
api_key = os.environ['NYTIMES_API_KEY']
extra_params['api-key'] = api_key
if start_dt and end_dt:
with NYTPageScraper(queries_path='work/general.csv') as page_scraper:
page_scraper.scrape_dts(start_dt, end_dt, extra_params)
with NYTArticleScraper('nytimes', 'gen_articles') as article_scraper:
article_scraper.scrape_pages()
| {
"repo_name": "sahararaju/dataasservices",
"path": "scraping-job-portals/nytimes/article_scraper.py",
"copies": "2",
"size": "11530",
"license": "apache-2.0",
"hash": 3329139022745923600,
"line_mean": 34.2599388379,
"line_max": 93,
"alpha_frac": 0.5679098005,
"autogenerated": false,
"ratio": 4.110516934046346,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0.01346883445129279,
"num_lines": 327
} |
"""A module for representing universal morphosyntactic feature bundles."""
from typing import Dict, List, Optional, Tuple, Type, Union
from cltk.core.exceptions import CLTKException
from cltk.morphology.universal_dependencies_features import *
__author__ = ["John Stewart <free-variation>"]
class MorphosyntacticFeatureBundle:
    """A representation of a set of features, usually associated with a word form."""

    def __init__(self, *features: List[MorphosyntacticFeature]) -> None:
        """
        >>> f1 = MorphosyntacticFeatureBundle(F.neg, N.pos, V.neg, Case.accusative)
        >>> f1.features
        {F: [neg], N: [pos], V: [neg], Case: [accusative]}
        """
        # Maps feature *types* to a list of values, or to `Underspecified`
        # when only the bare feature type was supplied.
        self.features = {}
        for feature in features:
            if isinstance(feature, type) and issubclass(
                feature, MorphosyntacticFeature
            ):
                self.features[feature] = Underspecified
            else:
                if type(feature) in self.features:
                    self.features[type(feature)].append(feature)
                else:
                    self.features[type(feature)] = [feature]

    def __getitem__(
        self, feature_name: Union[str, Type[MorphosyntacticFeature]]
    ) -> List[MorphosyntacticFeature]:
        """
        Use dict-type syntax for accessing the values of features.

        >>> f1 = f(F.pos, N.pos)
        >>> f1[F]
        [pos]
        >>> f1[V]
        Traceback (most recent call last):
        cltk.core.exceptions.CLTKException: {F: [pos], N: [pos]} unspecified for V
        >>> f1['F']
        [pos]
        """
        # `isinstance` instead of `type(...) == str`; also resolve a
        # feature given by name to its class via this module's globals.
        if isinstance(feature_name, str):
            if feature_name not in globals():
                raise TypeError(feature_name + " is not a morphosyntactic feature")
            feature_name = globals()[feature_name]
        if not issubclass(feature_name, MorphosyntacticFeature):
            raise TypeError(str(feature_name) + " is not a morphosyntactic feature")
        if feature_name in self.features:
            return self.features[feature_name]
        else:
            raise CLTKException(f"{self} unspecified for {feature_name}")

    def __setitem__(
        self,
        feature_name: Union[str, Type[MorphosyntacticFeature]],
        feature_values: Union[MorphosyntacticFeature, List[MorphosyntacticFeature]],
    ) -> "MorphosyntacticFeatureBundle":
        """
        Use dict-type syntax to set the value of features.

        >>> f1 = f(F.pos)
        >>> f1[N] = N.neg
        >>> f1
        {F: [pos], N: [neg]}
        >>> f1['V'] = V.pos
        >>> f1
        {F: [pos], N: [neg], V: [pos]}
        """
        if isinstance(feature_name, str):
            if feature_name not in globals():
                raise TypeError(feature_name + " is not a morphosyntactic feature")
            feature_name = globals()[feature_name]
        if not issubclass(feature_name, MorphosyntacticFeature):
            raise TypeError(str(feature_name) + " is not a morphosyntactic feature")
        if not isinstance(feature_values, list):
            feature_values = [feature_values]
        for value in feature_values:
            # Exact type check is deliberate: every stored value must be an
            # instance of precisely the feature type it is filed under.
            if value is not None and type(value) != feature_name:
                raise TypeError(str(value) + " is not a " + str(feature_name))
        self.features[feature_name] = feature_values
        return self

    def all(
        self,
    ) -> List[Tuple[Type[MorphosyntacticFeature], List[MorphosyntacticFeature]]]:
        """Return all (feature type, values) pairs in this bundle."""
        return self.features.items()

    def underspecify(self, feature_name: Type[MorphosyntacticFeature]) -> None:
        """
        Underspecify the given feature in the bundle.

        >>> f1 = f(F.pos, N.pos, V.neg)
        >>> f1.underspecify(F)
        >>> f1[F] is Underspecified
        True
        """
        if not issubclass(feature_name, MorphosyntacticFeature):
            raise TypeError(str(feature_name) + " is not a morphosyntactic feature")
        self.features[feature_name] = Underspecified

    def matches(self, other: "MorphosyntacticFeatureBundle") -> bool:
        """
        This feature bundle matches other if other contains all the features of this bundle,
        i.e. if this bundle is an improper subset of other.
        Underspecified features will match.

        >>> f1 = f(F, N.pos, V.neg)
        >>> f2 = f(F.neg, N.pos, V.neg)
        >>> f3 = f(F.pos, N.neg, V.pos)
        >>> f1.matches(f2)
        True
        >>> f1.matches(f3)
        False
        """
        if other is None:
            return False
        # Loop variable renamed from `f`, which shadowed the module-level
        # alias `f = MorphosyntacticFeatureBundle`.
        for feature_type in self.features.keys():
            if feature_type not in other.features:
                return False
            if (
                self[feature_type] is not Underspecified
                and other[feature_type] is not Underspecified
                and not (self[feature_type] == other[feature_type])
            ):
                return False
        return True

    def __str__(self) -> str:
        return str(self.features)

    __repr__ = __str__
f = MorphosyntacticFeatureBundle
def to_categorial(pos: int) -> "MorphosyntacticFeatureBundle":
    """Maps UD parts of speech to binary categorial feature bundles.
    In some cases these are underspecified, including empty bundles for interjections.

    >>> to_categorial(POS.adjective)
    {F: [neg], N: [pos], V: [pos]}
    >>> to_categorial(POS.particle)
    {F: [pos]}
    >>> to_categorial(POS.interjection)
    {}
    """
    # Guard-clause style: test each categorial class in turn and return
    # its [F, N, V] signature; anything unrecognized gets an empty bundle.
    if pos in (POS.adjective, POS.adverb):
        return f(F.neg, N.pos, V.pos)
    if pos == POS.adposition:
        return f(F.pos, N.neg, V.neg)
    if pos == POS.auxiliary:
        return f(F.pos, N.neg, V.pos)
    if pos in (
        POS.coordinating_conjunction,
        POS.subordinating_conjunction,
        POS.particle,
    ):
        return f(F.pos)
    if pos in (POS.determiner, POS.pronoun, POS.numeral):
        return f(F.pos, N.pos, V.neg)
    if pos in (POS.noun, POS.proper_noun):
        return f(F.neg, N.pos, V.neg)
    if pos == POS.verb:
        return f(F.neg, N.neg, V.pos)
    return f()
from_ud_map: Dict[str, Dict[str, MorphosyntacticFeature]] = {
# parts of speech
"POS": {
"ADJ": POS.adjective,
"ADP": POS.adposition,
"ADV": POS.adverb,
"AUX": POS.auxiliary,
"CCONJ": POS.coordinating_conjunction,
"DET": POS.determiner,
"INTJ": POS.interjection,
"NOUN": POS.noun,
"NUM": POS.numeral,
"PART": POS.particle,
"PRON": POS.pronoun,
"PROPN": POS.proper_noun,
"PUNCT": POS.punctuation,
"SCONJ": POS.subordinating_conjunction,
"SYM": POS.symbol,
"VERB": POS.verb,
"X": POS.other,
},
# verbal features
"VerbForm": {
"Conv": VerbForm.converb,
"Fin": VerbForm.finite,
"Gdv": VerbForm.gerundive,
"Ger": VerbForm.gerund,
"Inf": VerbForm.infinitive,
"Part": VerbForm.participle,
"Sup": VerbForm.supine,
"Vnoun": VerbForm.masdar,
},
"Mood": {
"Adm": Mood.admirative,
"Cnd": Mood.conditional,
"Des": Mood.desiderative,
"Imp": Mood.imperative,
"Ind": Mood.indicative,
"Jus": Mood.jussive,
"Nec": Mood.necessitative,
"Opt": Mood.optative,
"Pot": Mood.potential,
"Prp": Mood.purposive,
"Qot": Mood.quotative,
"Sub": Mood.subjunctive,
},
"Tense": {
"Fut": Tense.future,
"Imp": Tense.imperfect,
"Past": Tense.past,
"Pqp": Tense.pluperfect,
"Pres": Tense.present,
},
"Aspect": {
"Hab": Aspect.habitual,
"Imp": Aspect.imperfective,
"Iter": Aspect.iterative,
"Perf": Aspect.perfective,
"Prog": Aspect.progressive,
"Prosp": Aspect.prospective,
},
"Voice": {
"Act": Voice.active,
"Antip": Voice.antipassive,
"Bfoc": Voice.beneficiary_focus,
"Lfoc": Voice.location_focus,
"Caus": Voice.causative,
"Dir": Voice.direct,
"Inv": Voice.inverse,
"Mid": Voice.middle,
"Pass": Voice.passive,
"Rcp": Voice.reciprocal,
},
"Evident": {"Fh": Evidentiality.first_hand, "Nfh": Evidentiality.non_first_hand},
"Polarity": {"Pos": Polarity.pos, "Neg": Polarity.neg},
"Person": {
"0": Person.zeroth,
"1": Person.first,
"2": Person.second,
"3": Person.third,
"4": Person.fourth,
},
"Polite": {
"Elev": Politeness.elevated,
"Form": Politeness.formal,
"Humb": Politeness.humble,
"Infm": Politeness.informal,
},
"Clusivity": {"Ex": Clusivity.exclusive, "In": Clusivity.inclusive},
# nominal
"Gender": {
"Com": Gender.common,
"Fem": Gender.feminine,
"Masc": Gender.masculine,
"Neut": Gender.neuter,
},
"Animacy": {
"Anim": Animacy.animate,
"Hum": Animacy.human,
"Inan": Animacy.inanimate,
"Nhum": Animacy.non_human,
},
"Number": {
"Coll": Number.collective,
"Count": Number.count_plural,
"Dual": Number.dual,
"Grpa": Number.greater_paucal,
"Grpl": Number.greater_plural,
"Inv": Number.inverse_number,
"Pauc": Number.paucal,
"Plur": Number.plural,
"Ptan": Number.plurale_tantum,
"Sing": Number.singular,
"Tri": Number.trial,
},
"NumForm": {
"Word": NumForm.word,
"Digit": NumForm.digit,
"Roman": NumForm.roman,
"Reference": NumForm.reference,
},
"Case": {
# structural cases
"Nom": Case.nominative,
"Acc": Case.accusative,
"Erg": Case.ergative,
"Abs": Case.absolutive,
# oblique cases
"Abe": Case.abessive,
"Ben": Case.befefactive,
"Caus": Case.causative,
"Cmp": Case.comparative,
"Cns": Case.considerative,
"Com": Case.comitative,
"Dat": Case.dative,
"Dis": Case.distributive,
"Equ": Case.equative,
"Gen": Case.genitive,
"Ins": Case.instrumental,
"Par": Case.partitive,
"Voc": Case.vocative,
# spatiotemporal cases
"Abl": Case.ablative,
"Add": Case.additive,
"Ade": Case.adessive,
"All": Case.allative,
"Del": Case.delative,
"Ela": Case.elative,
"Ess": Case.essive,
"Ill": Case.illative,
"Ine": Case.inessive,
"Lat": Case.lative,
"Loc": Case.locative,
"Per": Case.perlative,
"Sub": Case.sublative,
"Sup": Case.superessive,
"Ter": Case.terminative,
"Tem": Case.temporal,
"Tra": Case.translative,
},
"Definite": {
"Com": Definiteness.complex,
"Cons": Definiteness.construct_state,
"Def": Definiteness.definite,
"Ind": Definiteness.indefinite,
"Spec": Definiteness.specific_indefinite,
},
"Degree": {
"Abs": Degree.absolute_superlative,
"Cmp": Degree.comparative,
"Equ": Degree.equative,
"Pos": Degree.positive,
"Sup": Degree.superlative,
},
# other lexical
"PronType": {
"Art": PrononimalType.article,
"Dem": PrononimalType.demonstrative,
"Emp": PrononimalType.emphatic,
"Exc": PrononimalType.exclamative,
"Ind": PrononimalType.indefinite,
"Int": PrononimalType.interrogative,
"Neg": PrononimalType.negative,
"Prs": PrononimalType.personal,
"Rcp": PrononimalType.reciprocal,
"Rel": PrononimalType.relative,
"Tot": PrononimalType.total,
},
"AdpType": {
"Prep": AdpositionalType.preposition,
"Post": AdpositionalType.postposition,
"Circ": AdpositionalType.circumposition,
"Voc": AdpositionalType.vocalized_adposition,
},
"AdvType": {
"Man": AdverbialType.manner,
"Loc": AdverbialType.location,
"Tim": AdverbialType.time,
"Deg": AdverbialType.degree,
"Cau": AdverbialType.cause,
"Mod": AdverbialType.modality,
},
"VerbType": {
"Aux": VerbType.auxiliary,
"Cop": VerbType.copula,
"Mod": VerbType.modal,
"Light": VerbType.light,
},
"NumType": {
"Card": Numeral.cardinal,
"Dist": Numeral.distributive,
"Frac": Numeral.fractional,
"Mult": Numeral.multiplicative,
"Ord": Numeral.ordinal,
"Range": Numeral.range,
"Sets": Numeral.sets,
},
"NameType": {
"Geo": NameType.place,
"Prs": NameType.person,
"Giv": NameType.person_given_name,
"Sur": NameType.person_surname,
"Nat": NameType.nationality,
"Com": NameType.company,
"Pro": NameType.product,
"Oth": NameType.other,
},
"Strength": {"Strong": Strength.strong, "Weak": Strength.weak},
"Poss": {"Yes": Possessive.pos},
"Reflex": {"Yes": Reflexive.pos},
"Foreign": {"Yes": Foreign.pos},
"Abbr": {"Yes": Abbreviation.pos},
"Typo": {"Yes": Typo.pos},
}
def from_ud(feature_name: str, feature_value: str) -> Optional[MorphosyntacticFeature]:
    """Map a Universal Dependencies feature name/value pair onto the
    corresponding morphosyntactic feature value.

    An unknown feature name is tolerated: a notice is printed and ``None``
    returned.  A known feature name with an unrecognized value raises
    ``CLTKException``.

    >>> from_ud('Case', 'Abl')
    ablative
    >>> from_ud('Abbr', 'Yes')
    pos
    >>> from_ud('PronType', 'Ind')
    indefinite
    """
    feature_map = from_ud_map.get(feature_name)
    if feature_map is None:
        # Soft failure for unknown feature names (exception kept disabled).
        msg = f"{feature_name}: Unrecognized UD feature name"
        print("From `from_ud():`", msg)
        # raise CLTKException(msg)
        return None
    for candidate in feature_value.split(","):
        if candidate not in feature_map:
            raise CLTKException(
                f"{candidate}: Unrecognized value for UD feature {feature_name}"
            )
        return feature_map[candidate]
| {
"repo_name": "cltk/cltk",
"path": "src/cltk/morphology/morphosyntax.py",
"copies": "4",
"size": "14467",
"license": "mit",
"hash": -7119769147641706000,
"line_mean": 30.8795454545,
"line_max": 92,
"alpha_frac": 0.540747909,
"autogenerated": false,
"ratio": 3.445344129554656,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5986092038554657,
"avg_score": null,
"num_lines": null
} |
"""A module for running Ansible by wrapping command line."""
from os.path import join
import subprocess
import logging
log = logging.getLogger(__name__)
class AnsibleRunner(object):
    """Responsible for running Ansible playbook."""

    def __init__(self, playbook_root, inventory_filename, playbook_path,
                 venv_path, verbosity=0):
        """
        Initialize the runner.

        :type playbook_root: ``str``
        :param playbook_root: Directory to ``cd`` into before running.

        :type inventory_filename: ``str``
        :param inventory_filename: Path to the Ansible inventory file.

        :type playbook_path: ``str``
        :param playbook_path: Path to the playbook to run.

        :type venv_path: ``str``
        :param venv_path: Optional virtualenv root to activate first;
                          skipped when falsy.

        :type verbosity: ``int``
        :param verbosity: Desired verbosity level. NOTE(review): currently
                          stored but not used when building the command.
        """
        self.inventory_filename = inventory_filename
        self.playbook_root = playbook_root
        self.playbook_path = playbook_path
        self.venv_path = venv_path
        self.verbosity = verbosity

    def run(self):
        """
        Run the initialized playbook.

        :rtype: tuple of ``str``
        :return: A tuple with the process exit code and stdout.
        """
        cmd = "cd {0} && ansible-playbook -i {1} {2}".format(
            self.playbook_root, self.inventory_filename, self.playbook_path)
        if self.venv_path:
            # Activate the virtualenv in the same shell invocation.
            cmd = "source {0};{1}".format(join(self.venv_path, 'bin/activate'),
                                          cmd)
        log.debug("Running Ansible with command: {0}".format(cmd))
        p = subprocess.Popen(cmd, stdout=subprocess.PIPE, shell=True)
        # communicate() already waits for process termination, so the extra
        # p.wait() the original code issued afterwards was redundant; the
        # exit status is available on p.returncode. stderr is not captured
        # (it flows through to the parent), so the second tuple item is
        # always None and is discarded.
        out, _ = p.communicate()
        p_status = p.returncode
        log.debug("Playbook stdout: %s\nstatus: %s" % (out, p_status))
        return (p_status, out)
| {
"repo_name": "afgane/slurmscale",
"path": "slurmscale/util/ansible/cmd.py",
"copies": "1",
"size": "1377",
"license": "mit",
"hash": -139732912861624590,
"line_mean": 35.2368421053,
"line_max": 79,
"alpha_frac": 0.5940450254,
"autogenerated": false,
"ratio": 4.014577259475218,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 1,
"avg_score": 0,
"num_lines": 38
} |
"""A module for running Ansible-related steps."""
from string import Template
# Static skeleton of the Ansible inventory for the jetstream-iu0 cloud.
# Only the worker-node list varies: ``${nodes}`` is substituted (one
# "<name> ansible_host=<ip>" line per node) by InventoryFile.create; every
# other host/group entry is fixed.  The template body is runtime text, so
# its contents (including the spelling inside it) are left untouched.
INVENTORY_TEMPLATE = Template("""
jetstream-iu0.galaxyproject.org ansible_connection=local
[baseenv]
jetstream-iu0.galaxyproject.org
[baseenv:children]
galaxynodes
# "contoller" node(s) for this cloud (not necessarily a slurm controller)
[controllers]
jetstream-iu0.galaxyproject.org
[slurmservers]
jetstream-iu0.galaxyproject.org
[slurmclients]
jetstream-iu0.galaxyproject.org
[slurmclients:children]
galaxynodes
[slurmelasticservers]
jetstream-iu0.galaxyproject.org
[cvmfsclients]
[cvmfsclients:children]
galaxynodes
controllers
[jetstreamnfsclients]
[jetstreamnfsclients:children]
galaxynodes
[slurmexechosts]
[slurmexechosts:children]
galaxynodes
[galaxynodes]
[galaxynodes:children]
jetstream-iu-large
[jetstream-iu-large]
#jetstream-iu-large0 ansible_host=10.0.0.72
${nodes}
""")
class InventoryFile(object):
    """Module for creating Ansible inventory file."""

    @staticmethod
    def create(file_path, nodes):
        """
        Create the inventory file.

        Currently, the inventory file is based on a pre-defined template
        where only the worker nodes are modified, according to the supplied
        argument.

        :type file_path: ``str``
        :param file_path: System path for the file where the inventory will
                          be stored. Note that an existing file will get
                          overwritten.

        :type nodes: ``list`` of ``dicts``
        :param nodes: A list of nodes to be added into the inventory file.
                      Each list item must be a dict with ``name`` and ``ip``
                      keys.
        """
        targets = ["{0} ansible_host={1}".format(node.get('name'),
                                                 node.get('ip'))
                   for node in nodes]
        with open(file_path, 'w') as f:
            # substitute() returns a single string, so write() is the right
            # call here: the original used writelines(), which iterates its
            # argument and hence wrote the string character by character
            # (same output, but misleading and slower).
            f.write(INVENTORY_TEMPLATE.substitute(
                {'nodes': '\n'.join(targets)}))
| {
"repo_name": "afgane/slurmscale",
"path": "slurmscale/util/ansible/__init__.py",
"copies": "1",
"size": "2016",
"license": "mit",
"hash": 4619496268268762000,
"line_mean": 24.5189873418,
"line_max": 79,
"alpha_frac": 0.6537698413,
"autogenerated": false,
"ratio": 3.587188612099644,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.9740958453399644,
"avg_score": 0,
"num_lines": 79
} |
"""A module for scraping CareerBuilder for jobs.
This module is the driver for a CareerBuilder scraper. It controls the process
of instantiating a Selenium browser to scrape, and controlling that browser
throughout the entire process. It also handles the Threading, parsing, and
storage that takes place.
Usage:
python job_scraper.py <job title> <job location>
"""
import sys
import os
wd = os.path.abspath('.')
sys.path.append(wd + '/../')
import time
import random
import datetime
import pytz
from selenium.webdriver.common.keys import Keys
from general_utilities.navigation_utilities import issue_driver_query
from general_utilities.parsing_utilities import parse_num
from general_utilities.storage_utilities import store_in_mongo
from general_utilities.threading_utilities import HrefQueryThread
def scrape_job_page(driver, job_title, job_location):
    """Scrape a page of jobs from CareerBuilder.

    Grab all relevant information possible for each of the jobs posted on a
    given page (job title, job location, posting company, date posted),
    fetch each posting's full text in a background thread, and store the
    assembled records in Mongo.

    Args:
    ----
        driver: Selenium webdriver
        job_title: str
        job_location: str
    """
    titles, locations, companies, dates, hrefs = query_for_data(driver)

    current_date = str(datetime.datetime.now(pytz.timezone('US/Mountain')))
    json_dct = {'search_title': job_title,
                'search_location': job_location,
                'search_date': current_date, 'job_site': 'careerbuilder'}

    # Kick off one thread per posting to fetch the posting's full text.
    thread_lst = []
    for href in hrefs:
        try:
            thread = HrefQueryThread(href.get_attribute('href'))
        except Exception:  # narrowed from a bare except
            print('Exception in href thread builder')
            thread = HrefQueryThread('')
        thread_lst.append(thread)
        thread.start()

    # Pair every posting's elements with its fetch thread (gen_output joins
    # the thread before reading its result).
    mongo_update_lst = []
    for title, location, company, date, thread, idx in \
            zip(titles, locations, companies, dates, thread_lst,
                range(len(hrefs))):
        try:
            mongo_dct = gen_output(json_dct.copy(), title, location,
                                   company, date, thread, idx)
            mongo_update_lst.append(mongo_dct)
        except Exception:  # narrowed from a bare except
            print('Missed element in careerbuilder!')

    store_in_mongo(mongo_update_lst, 'job_postings', 'careerbuilder')
def query_for_data(driver):
    """Collect the raw posting elements from a CareerBuilder results page.

    Args:
    ----
        driver: Selenium webdriver

    Return:
    ------
        job_titles: list
        job_locations: list
        posting_companies: list
        dates: list
        hrefs: list
    """
    titles = driver.find_elements_by_class_name('job-title')
    # The 'job-text' cells come in groups of three per posting: slot 0 is
    # the location and slot 2 the posting company (the middle cell is not
    # used here).
    text_cells = driver.find_elements_by_class_name('job-text')
    locations = text_cells[0::3]
    companies = text_cells[2::3]
    posted_dates = driver.find_elements_by_css_selector('div .time-posted')
    links = driver.find_elements_by_xpath("//h2//a")
    return titles, locations, companies, posted_dates, links
def gen_output(json_dct, title, location, company, date, thread, idx):
    """Assemble the output dictionary that will end up going into Mongo.

    Args:
        json_dct: dict
        title: Selenium WebElement
        location: Selenium WebElement
        company: Selenium WebElement
        date: Selenium WebElement
        thread: RequestThreadInfo object
        idx: int
            NOTE(review): unused in the body; kept for the caller's
            calling convention.

    Return:
    ------
        json_dct: dct
    """
    # Block until the posting-text fetch has finished before reading it.
    thread.join()
    json_dct.update({
        'job_title': title.text,
        'location': location.text,
        'company': company.text,
        'date': date.text,
        'posting_txt': thread.posting_txt,
    })
    return json_dct
def check_if_next(driver):
    """Advance to the next page of job results, if one exists.

    Locate the 'Next Page' link and press ENTER on it.  Any failure along
    the way -- most commonly the link simply not being present on the final
    page -- is treated as "no next page".

    Args:
    ----
        driver: Selenium webdriver

    Return: bool
    """
    # Bare except kept deliberately: the absence of the link raises, and
    # that is the expected signal that we are on the last page.
    try:
        forward = driver.find_element_by_xpath("//a[@aria-label='Next Page']")
        forward.send_keys(Keys.ENTER)
        return True
    except:
        return False
if __name__ == '__main__':
    # Entry point for the CareerBuilder scraper: expects a job title and a
    # job location as the two command-line arguments.
    try:
        job_title = sys.argv[1]
        job_location = sys.argv[2]
    except IndexError:
        raise Exception('Program needs a job title and job location inputted!')

    # Navigate to the base URL and issue the original search query.
    base_URL = 'http://www.careerbuilder.com/'
    query_params = (('keywords', job_title), ('location', job_location))
    driver = issue_driver_query(base_URL, query_params)

    # Grab num. jobs
    # NOTE(review): the bare except below swallows every error, not just a
    # missing count element -- consider narrowing to Exception.
    try:
        num_jobs_txt = driver.find_element_by_css_selector('div .count').text
        num_jobs = int(parse_num(num_jobs_txt, 0))
    except:
        print('No jobs for search {} in {}'.format(job_title, job_location))
        sys.exit(0)

    # Record the total job count for this search before paging through.
    current_date = str(datetime.datetime.now(pytz.timezone('US/Mountain')))
    storage_dct = {'job_site': 'careerbuilder', 'num_jobs': num_jobs,
            'date': current_date, 'title': job_title, 'location': job_location}
    store_in_mongo([storage_dct], 'job_numbers', 'careerbuilder')

    # Scrape page after page until check_if_next reports no further pages.
    is_next = True
    while is_next:
        jobs = scrape_job_page(driver, job_title, job_location)
        is_next = check_if_next(driver)
    driver.close()
| {
"repo_name": "sahararaju/dataasservices",
"path": "scraping-job-portals/careerbuilder/job_scraper.py",
"copies": "2",
"size": "5613",
"license": "apache-2.0",
"hash": 2246335516284163600,
"line_mean": 31.6337209302,
"line_max": 85,
"alpha_frac": 0.6406556209,
"autogenerated": false,
"ratio": 3.8183673469387753,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5459022967838776,
"avg_score": null,
"num_lines": null
} |
"""A module for scraping Glassdoor for jobs.
This module is the driver for a Glassdoor scraper. It controls the process of
instantiating a Selenium browser to scrape, and controls that browser throughout
the entire process. It also handles parsing and storing our results.
Usage:
python job_scraper.py <job title> <job location>
"""
import sys
import os
wd = os.path.abspath('.')
sys.path.append(wd + '/../')
import random
import time
import datetime
import pytz
from selenium import webdriver
from selenium.webdriver.common.keys import Keys
from general_utilities.navigation_utilities import issue_driver_query
from general_utilities.parsing_utilities import parse_num
from general_utilities.storage_utilities import store_in_mongo
def scrape_job_page(driver, job_title, job_location):
    """Scrape one Glassdoor results page and persist the postings.

    For every job listing element on the page (the trailing element is
    skipped via the ``[:-1]`` slice), gather the posting details with
    ``query_for_data``, then store the whole batch in Mongo.

    Args:
        driver: Selenium webdriver
        job_title: str
        job_location: str
    """
    search_stamp = str(datetime.datetime.now(pytz.timezone('US/Mountain')))
    base_record = {
        'search_title': job_title,
        'search_location': job_location,
        'search_date': search_stamp,
        'job_site': 'glassdoor',
    }

    listings = driver.find_elements_by_class_name('jobListing')
    records = []
    for idx, listing in enumerate(listings[:-1]):
        records.append(query_for_data(driver, base_record, listing, idx))

    store_in_mongo(records, 'job_postings', 'glassdoor')
def query_for_data(driver, json_dct, job, idx):
    """Grab all info. from one Glassdoor job posting element.

    Collect the job title, location, posting company, date posted, and the
    star rating (when present), then open the posting itself and capture
    its full text.

    Args:
        driver: Selenium webdriver
        json_dct: dict
            Dictionary holding the current information that is being stored
            for that job posting (copied, not mutated).
        job: Selenium WebElement
        idx: int
            Holds the # of the job posting the program is on (0 indexed).

    Return: dct
    """
    posting_title = job.find_element_by_class_name('title').text
    split_posting_company = job.find_element_by_class_name(
        'companyInfo').text.split()
    posting_location = job.find_element_by_xpath(
        "//div//span[@itemprop='jobLocation']").text
    try:
        posting_date = job.find_element_by_class_name('minor').text
    except Exception:  # narrowed from a bare except: the date is optional
        posting_date = ''

    # When the posting carries a star rating, it shows up as the leading
    # token of the company text; parse_num finding a number there is how we
    # tell the two layouts apart.
    if parse_num(' '.join(split_posting_company), 0):
        num_stars = split_posting_company[0]
        posting_company = ' '.join(split_posting_company[1:])
        out_json_dct = gen_output(json_dct.copy(), posting_title,
                                  posting_location, posting_date,
                                  posting_company, num_stars)
    else:
        posting_company = ' '.join(split_posting_company)
        out_json_dct = gen_output(json_dct.copy(), posting_title,
                                  posting_location, posting_date,
                                  posting_company)

    out_json_dct['posting_txt'] = grab_posting_txt(driver, job, idx)
    return out_json_dct
def gen_output(json_dct, *args):
    """Fold positional posting fields into ``json_dct`` for Mongo storage.

    The positional arguments are expected in a fixed order: job title,
    location, date, company, and (optionally) number of stars.  Falsy
    values -- e.g. an empty posting date -- are simply not stored, so
    'num_stars' only appears when a rating was actually passed in.

    Args:
        json_dct: dict
            Dictionary that currently stores a couple of things, to be
            extended with the fields above.
        *args: Tuple
            Holds what to add to the json_dct.

    Return: dct
    """
    field_names = ('job_title', 'location', 'date', 'company', 'num_stars')
    json_dct.update(
        {name: value for value, name in zip(args, field_names) if value})
    return json_dct
def grab_posting_txt(driver, job, idx):
    """Open a job posting and return its description text.

    Pressing ENTER on the job link loads the posting's description pane;
    ESCAPE is then sent to dismiss whatever modal the activation may have
    raised.

    Args:
        driver: Selenium webdriver
        job: Selenium WebElement
            Holds a reference to the current job the program is on.
        idx: int
            Index of this job on the page; used to pick the matching
            description element out of the page-wide list.

    Return: str (posting text)
    """
    job_link = job.find_element_by_class_name('jobLink')
    job_link.send_keys(Keys.ENTER)
    job_link.send_keys(Keys.ESCAPE)
    # NOTE(review): looks like leftover debugging -- prints the reviews-tab
    # text when present; the bare except silently ignores its absence.
    try:
        print(job.find_element_by_class_name('reviews-tab-link').text)
    except:
        pass
    # Randomized wait to let the description load; presumably also serves
    # to throttle requests -- confirm.
    time.sleep(random.randint(3, 7))
    texts = driver.find_elements_by_class_name('jobDescriptionContent')
    return texts[idx].text
def check_if_next(driver, num_pages):
    """Move on to the next page of Glassdoor job results, if any.

    Args:
        driver: Selenium webdriver
        num_pages: int
            Total number of pages that the original search reported.

    Return: bool
    """
    try:
        forward_link = driver.find_element_by_xpath("//li[@class='next']")
        disabled_spans = driver.find_elements_by_xpath(
            "//li//span[@class='disabled']")
        if check_if_last_page(disabled_spans, num_pages):
            return False
        # Brief randomized pause before clicking through.
        time.sleep(random.randint(3, 6))
        forward_link.click()
        return True
    except Exception as e:
        # Any failure (e.g. no 'next' element) means no further pages.
        print(e)
        return False
def check_if_last_page(page_links, num_pages):
    """Determine whether the current results page is the last page.

    One of the first two 'disabled' page-link spans holds the current page
    number; when that number equals the total page count we are on the
    final page.

    Args:
        page_links: list
            Holds Selenium WebElements that refer to page links.
        num_pages: int

    Return: bool
    """
    if len(page_links) == 1:
        return False
    elem1_text = page_links[0].text
    elem2_text = page_links[1].text
    if elem1_text:
        return int(elem1_text) == num_pages
    if elem2_text:
        return int(elem2_text) == num_pages
    # Fix: the original fell off the end here and implicitly returned None
    # when both spans were empty; make that case an explicit False.
    return False
if __name__ == '__main__':
    # Entry point for the Glassdoor scraper: expects a job title and a job
    # location as the two command-line arguments.
    try:
        job_title = sys.argv[1]
        job_location = sys.argv[2]
    except IndexError:
        raise Exception('Program needs a job title and job location inputted!')

    # Issue the job query.
    base_URL = 'https://www.glassdoor.com/index.htm'
    query_params = (('KeywordSearch', job_title), ('LocationSearch', job_location))
    driver = issue_driver_query(base_URL, query_params)

    # Find the text holding the number of jobs, and parse it.
    # NOTE(review): unlike the pages-count lookup below, this parse is not
    # wrapped in try/except and will raise if the header layout changes.
    time.sleep(random.randint(7, 15))
    num_jobs_txt = driver.find_elements_by_xpath('//header')[1].text
    num_jobs = int(parse_num(num_jobs_txt, 0))

    # Record the total job count for this search before paging through.
    current_date = str(datetime.datetime.now(pytz.timezone('US/Mountain')))
    storage_dct = {'job_site': 'glassdoor', 'num_jobs': num_jobs,
        'date': current_date, 'title': job_title, 'location': job_location}
    store_in_mongo([storage_dct], 'job_numbers', 'glassdoor')

    # Find the text holding the number of pages in the job search.
    # NOTE(review): bare except swallows every error, not just a missing
    # footer -- consider narrowing to Exception.
    time.sleep(random.randint(2, 6))
    try:
        num_pages_txt = driver.find_element_by_id('ResultsFooter').text
        num_pages = int(parse_num(num_pages_txt, 1))
    except:
        print('No jobs for search {} in {}'.format(job_title, job_location))
        sys.exit(0)

    # Give it a little time before starting to click and parse
    time.sleep(random.randint(6, 12))
    is_next = True
    while is_next:
        jobs = scrape_job_page(driver, job_title, job_location)
        time.sleep(random.randint(5, 8))
        is_next = check_if_next(driver, num_pages)
    driver.close()
| {
"repo_name": "mounicmadiraju/dataasservices",
"path": "scraping-job-portals/glassdoor/job_scraper.py",
"copies": "2",
"size": "8215",
"license": "apache-2.0",
"hash": -4860861829052787000,
"line_mean": 33.5168067227,
"line_max": 83,
"alpha_frac": 0.6366402921,
"autogenerated": false,
"ratio": 3.752855185015989,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.538949547711599,
"avg_score": null,
"num_lines": null
} |
"""A module for scraping Indeed for jobs.
This module is the driver for an Indeed scraper. It controls the process of issuing
requests, parsing the contents of those requests, and storing them. It also handles
the threading and multiprocessing that is used to speed up the scraping process.
Usage:
python job_scraper.py <job title> <job location> <radius>
"""
import sys
import os
wd = os.path.abspath('.')
sys.path.append(wd + '/../')
import multiprocessing
import datetime
import pytz
from functools import partial
from pymongo import MongoClient
from general_utilities.query_utilities import get_html, format_query
from general_utilities.storage_utilities import store_in_mongo
from general_utilities.parsing_utilities import parse_num
from request_threading import RequestInfoThread
def multiprocess_pages(base_URL, job_title, job_location, page_start):
    """Grab the URLs and other relevant info. from job postings on one page.

    The Indeed search URL takes a `start` parameter that offsets the result
    window (jobs 10-20, 20-30, etc.), which lets a multiprocessing pool hand
    each offset to a separate worker.  Each job row on the fetched page is
    handed to a thread that requests the posting's details, and the combined
    results are stored in Mongo.

    Args:
    ----
        base_URL: str
        job_title: str
        job_location: str
        page_start: int
    """
    url = base_URL + '&start=' + str(page_start)
    html = get_html(url)
    # Each row corresponds to a job.
    rows = html.select('.row')
    threads = []
    mongo_update_lst = []
    # Start every per-row request thread first so the fetches overlap...
    for row in rows:
        thread = RequestInfoThread(row, job_title, job_location)
        thread.start()
        threads.append(thread)
    # ...then join them in order, collecting one result dict per row.
    for thread in threads:
        thread.join()
        mongo_update_lst.append(thread.json_dct)
    store_in_mongo(mongo_update_lst, 'job_postings', 'indeed')
if __name__ == '__main__':
    # Entry point for the Indeed scraper: expects a job title, job location,
    # and search radius as command-line arguments.
    try:
        job_title = sys.argv[1]
        job_location = sys.argv[2]
        radius = sys.argv[3]
    except IndexError:
        raise Exception('Program needs a job title, job location, and radius inputted!')

    # Build the search URL: newest postings first, from the last 5 days.
    base_URL = 'https://www.indeed.com/jobs?'
    query_parameters = ['q={}'.format('+'.join(job_title.split())),
                        '&l={}'.format('+'.join(job_location.split())),
                        '&radius={}'.format(radius), '&sort=date', '&fromage=5']

    query_URL = format_query(base_URL, query_parameters)
    html = get_html(query_URL)
    # NOTE(review): bare except swallows every parsing error, not just a
    # missing count element -- consider narrowing to Exception.
    try:
        num_jobs_txt = str(html.select('#searchCount'))
        num_jobs = int(parse_num(num_jobs_txt, 2))
    except:
        print('No jobs for search {} in {}'.format(job_title, job_location))
        sys.exit(0)

    # Record the total job count for this search before paging through.
    current_date = str(datetime.datetime.now(pytz.timezone('US/Mountain')))
    storage_dct = {'job_site': 'indeed', 'num_jobs': num_jobs,
            'date': current_date, 'title': job_title, 'location': job_location}
    store_in_mongo([storage_dct], 'job_numbers', 'indeed')

    # Cycle through all of the job postings that we can and grab the url pointing to
    # it, to then query it. All of the jobs should be available via the
    # .turnstileLink class, and then the href attribute will point to the URL.
    # The offset is capped at 1000, ten results per page, fanned out across
    # one worker per CPU core.
    max_start_position = 1000 if num_jobs >= 1000 else num_jobs
    start_positions = range(0, max_start_position, 10)
    execute_queries = partial(multiprocess_pages, query_URL, \
            job_title, job_location)

    pool = multiprocessing.Pool(multiprocessing.cpu_count())
    pool.map(execute_queries, start_positions)
| {
"repo_name": "mounicmadiraju/dataasservices",
"path": "scraping-job-portals/indeed/job_scraper.py",
"copies": "2",
"size": "3534",
"license": "apache-2.0",
"hash": -1882585385203389200,
"line_mean": 36.5957446809,
"line_max": 88,
"alpha_frac": 0.66270515,
"autogenerated": false,
"ratio": 3.7044025157232703,
"config_test": false,
"has_no_keywords": false,
"few_assignments": false,
"quality_score": 0.5367107665723271,
"avg_score": null,
"num_lines": null
} |
Subsets and Splits
No community queries yet
The top public SQL queries from the community will appear here once available.