158670
|
from sanic.testing import SanicTestClient
async def test_sanic_default_api(sanic_tester: SanicTestClient):
response = await sanic_tester.get("/")
assert response.status == 200
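# A hedged conftest sketch for the `sanic_tester` fixture assumed above.
# The awaitable `.get()` and `response.status` match the pytest-sanic
# plugin's test client (its `sanic_client`/`loop` fixtures), not the
# synchronous `sanic.testing.SanicTestClient`; the app and route here
# are illustrative, not part of the original snippet.
import pytest
from sanic import Sanic
from sanic.response import json as sanic_json

@pytest.fixture
def sanic_tester(loop, sanic_client):
    app = Sanic("test_app")

    @app.route("/")
    async def index(request):
        return sanic_json({"ok": True})

    # sanic_client(app) is a coroutine; run it on the plugin-provided loop
    return loop.run_until_complete(sanic_client(app))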
|
158687
|
import bleach
from bleach_allowlist import markdown_tags, markdown_attrs
from markdown import markdown
def bleached_markdown(text, **kwargs):
"""Try to avoid XSS by bleaching markdown output"""
markdown_rendered = markdown(text, **kwargs)
bleached = bleach.clean(markdown_rendered, markdown_tags, markdown_attrs)
return bleached
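# Quick sanity check of the helper above: a script tag smuggled through
# markdown is escaped by bleach.clean, since it is not in markdown_tags.
if __name__ == '__main__':
    unsafe = "# Title\n\n<script>alert('xss')</script>"
    print(bleached_markdown(unsafe))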
|
158705
|
import unittest
import torch
from tqdm import tqdm
from data.utils import get_db_container, get_db_info
from utils import get_dataloader, get_train_val_test_datasets
dataset_names = (
'acquirevaluedshopperschallenge',
'homecreditdefaultrisk',
'kddcup2014',
)
scalar_encoders = (
'ScalarRobustScalerEnc',
'ScalarPowerTransformerEnc',
'ScalarQuantileTransformerEnc',
)
class TestDataEncoders(unittest.TestCase):
def get_loaders(self, db_name, encoders, batch_size, num_workers):
db_info = get_db_info(db_name)
max_nodes_per_graph = None
_ = get_db_container(db_name)
train_data, val_data, test_data = get_train_val_test_datasets(dataset_name=db_name,
train_test_split='use_full_train',
encoders=encoders)
train_loader = get_dataloader(dataset=train_data,
batch_size=batch_size,
sampler_class_name='SequentialSampler',
num_workers=num_workers,
max_nodes_per_graph=max_nodes_per_graph)
val_loader = get_dataloader(dataset=val_data,
batch_size=batch_size,
sampler_class_name='SequentialSampler',
num_workers=num_workers,
max_nodes_per_graph=max_nodes_per_graph)
test_loader = get_dataloader(dataset=test_data,
batch_size=batch_size,
sampler_class_name='SequentialSampler',
num_workers=num_workers,
max_nodes_per_graph=max_nodes_per_graph)
loaders = {'train': train_loader,
'val': val_loader,
'test': test_loader}
return db_info, loaders
def test_datapoints_for_appropriate_null_flags_for_scalar_encoders(self):
for db_name in dataset_names:
for scalar_encoder in scalar_encoders:
encoders = {'SCALAR': scalar_encoder}
db_info, loaders = self.get_loaders(db_name, encoders, batch_size=512, num_workers=0)
for split, loader in loaders.items():
for bdgl, features, label in tqdm(loader):
for node_type, node_features in features.items():
for feature_name, feature_data in node_features.items():
feature_type = db_info['node_types_and_features'][node_type][feature_name]['type']
if feature_type == 'SCALAR':
self.assertEqual((feature_data.sum(dim=1) == 0).sum().item(), 0,
"Something didn't get initialized correctly")
supposedly_null_values = feature_data[torch.where(feature_data[:, 1] == 1)][:, 0]
self.assertEqual((supposedly_null_values != 0).sum().item(), 0)
def test_datapoints_have_categorical_value_zero_only_when_they_are_None_in_the_raw_data(self):
for db_name in dataset_names:
for scalar_encoder in scalar_encoders:
encoders = {'SCALAR': scalar_encoder}
db_info, loaders = self.get_loaders(db_name, encoders, batch_size=1, num_workers=0)
for split, loader in loaders.items():
dataset = loader.dataset
for bdgl, features, label in tqdm(loader):
_, (_, _, _, raw_dp_feats, _) = dataset.get_dp_by_id(bdgl.dp_ids[0])
for node_type, node_features in features.items():
for feature_name, feature_data in node_features.items():
feature_type = db_info['node_types_and_features'][node_type][feature_name]['type']
if feature_type == 'CATEGORICAL':
for idx in torch.where(feature_data == 0)[0]:
dp_feat = raw_dp_feats[node_type][feature_name][idx]
self.assertIsNone(dp_feat)
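# Standard unittest entry point so the suite above can be run directly;
# running it via `python -m unittest <module>` works as well.
if __name__ == '__main__':
    unittest.main()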
|
158735
|
import torch
import math
import numpy as np
from matplotlib import path
import pdb
class BoxSampler(object):
def __init__(self,
RoI_number=1,
                 IoU_bin_bases=torch.tensor([0.5,0.6,0.7,0.8,0.9], dtype=torch.float),
                 IoU_weights=torch.tensor([0.73,0.12,0.15,0.05,0], dtype=torch.float),
IoU_limit_precision=1e-5):
super(BoxSampler,self).__init__()
        '''
        INPUTS:
        RoI_number : Number of RoIs/boxes to generate
        IoU_bin_bases : N dimensional tensor storing the lower bounds of the bins.
                        E.g. for [0.5, 0.6, 0.7, 0.8, 0.9] there are 5 bins, from [0.5, 0.6) up to [0.9, 1.0].
        IoU_weights : N dimensional tensor storing the sampling weights of the bins.
        IoU_limit_precision : Precision of the points drawn on the limit curves for an IoU
                              (e.g. the red curves in Fig.2). This is the part that makes the
                              algorithm a bit slower and needs improvement.
        '''
self.RoI_number=RoI_number
self.IoU_bin_bases=IoU_bin_bases
self.IoU_weights=IoU_weights
self.IoU_limit_precision=IoU_limit_precision
self.IoU_bin_tops=torch.cat([IoU_bin_bases[1:], torch.tensor([1.])])
self.bin_width=self.IoU_bin_tops-self.IoU_bin_bases
# We assume that self.reference_box is a square. Following coordinates are preferred
# since even the IoU=0.5, the limits will be always positive (see Fig.2 or Fig.6 in the paper).
self.reference_box=[0.3, 0.3, 0.6, 0.6]
def isnan(self,x):
return x != x
def sample_single(self, B, IoUs, imgSize):
'''
Samples a set of bounding boxes for a given input BB.
INPUTS:
        B : Input BB (i.e. B in Alg.1 in the paper), a 5 dimensional tensor.
            A BB is represented by [TL_x, TL_y, BR_x, BR_y, label]
IoUs : Set of IoU thresholds. T in Alg.1. A box is generated for each IoU.
imgSize : [width, height] of the image. Ensures that the generated box is in the image.
'''
        #Normalize the input box such that it is shifted/scaled onto the reference box
        #that resides at [0.3, 0.3, 0.6, 0.6]. Save the scale and shift for unnormalization
        #before returning. All operations are conducted within the [0, 1] range. Hence we do
        #not normalize the image; we normalize the boxes, owing to Theorems 1 and 2 in the paper.
inputBox, scale, shift=self.normalize(B.clone().detach().unsqueeze(0))
        #BoundingBoxGenerator does exactly what Alg.1 in the paper achieves.
        #Given a GT/input BB and an IoU, it generates boxes with the desired IoU.
        #To make it more efficient, it generates sample_count boxes for
        #a GT at once.
sample_count =IoUs.shape[0]
sampledBoxSet=self.BoundingBoxGenerator(inputBox.squeeze(), IoUs, sample_count)
#Given the generated boxes from a BB, now we map the generated boxes to the image by reshifting and rescaling.
sampledBoxSet=self.unnormalize(sampledBoxSet, scale[0], shift[0])
#Clamp the boxes from 0 and imgSize to ensure that they are in the image.
sampledBoxSet[:,[0,2]]=torch.clamp(sampledBoxSet[:,[0,2]], 0, imgSize[0])
sampledBoxSet[:,[1,3]]=torch.clamp(sampledBoxSet[:,[1,3]], 0, imgSize[1])
#Compute the bbox overlaps of the generated boxes.
generated_box_overlaps=self.computeBoxToBoxIoU(B.expand(sample_count,5)[:,:4], sampledBoxSet).squeeze()
return sampledBoxSet, generated_box_overlaps
def sample(self, inputBoxSet, imgSize):
'''
INPUTS:
inputBoxSet : Input BBs (i.e. ground truths-GTs in Alg.2 in the paper)
Mx5 dimensional tensor.
Each box is represented by [TL_x, TL_y, BR_x, BR_y, gt_label]
imgSize : [width, height] of an image
'''
        #Normalize the input boxes such that all are shifted/scaled onto the reference box
        #that resides at [0.3, 0.3, 0.6, 0.6]. Save the scales and shifts for unnormalization
        #before returning. All operations are conducted within the [0, 1] range. Hence we do
        #not normalize the image; we normalize the boxes, owing to Theorems 1 and 2.
inputBoxSet, scales, shifts=self.normalize(inputBoxSet)
boxNumber=inputBoxSet.size()[0]
#Annotations of the datasets may be incorrect especially for small objects.
#In some cases TL_x=BR_x (same for y). If there is such kind of very rare examples,
#then we catch the error here, and discard the corrupted annotation.
validIndices=torch.cuda.ByteTensor(boxNumber).fill_(1)
flag=0
for i in range(boxNumber):
if self.isnan(inputBoxSet[i,0]) or self.isnan(inputBoxSet[i,1]):
validIndices[i]=0
flag=1
if flag==1:
inputBoxSet = inputBoxSet[validIndices,:]
scales = scales[validIndices,:]
shifts = shifts[validIndices,:]
boxNumber=inputBoxSet.size()[0]
# InstanceAllocation determines:
# 1-perInputAllocation: Number of boxes to be generated for each gt. So, it is a boxNumber sized tensor.
# 2-positiveRoI_number: In some cases, number of boxes can be 1 or 2 more. So we keep the number of returned boxes.
# The sum of perInputAllocation should also provide this number.
# 3-inputBoxSetExtended: positiveRoI_numberx5 dimensional array for gts. Basically, each BB in inputBoxSet is
# duplicated for perInputAllocation[i] times. We use this info to validate/return the IoUs of
# generated boxes on computeBoxToBoxIoU function.
perInputAllocation, positiveRoI_number, inputBoxSetExtended =self.InstanceAllocation(inputBoxSet)
# Another question is the IoU distribution over the boxes. Having estimated the number of generated boxes
# for each GT, IoUAllocation assigns an IoU using the desired distribution (i.e. self.IoU_weights) for each box.
IoUSet=self.IoUAllocation(inputBoxSetExtended,positiveRoI_number)
#Initialize the necessary data structures to be returned
sampledBoxSet=torch.cuda.FloatTensor(positiveRoI_number,4).fill_(-1)
gt_inds=torch.cuda.LongTensor(positiveRoI_number).fill_(0)
indexPointer=0
for i in range(boxNumber):
            #BoundingBoxGenerator does exactly what Alg.1 in the paper achieves.
            #Given a GT and an IoU, it generates boxes with the desired IoU.
#To make it more efficient, it generates perInputAllocation[i] boxes for
#a GT at once.
sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:]=self.BoundingBoxGenerator(inputBoxSet[i,:],\
IoUSet[indexPointer:indexPointer+perInputAllocation[i]],\
perInputAllocation[i])
#Given the generated boxes from a GT (also GT), now we map the generated boxes to the image by reshifting and rescaling.
sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:]=self.unnormalize(sampledBoxSet[indexPointer:indexPointer+perInputAllocation[i],:], scales[i], shifts[i])
inputBoxSetExtended[indexPointer:indexPointer+perInputAllocation[i],:4] = self.unnormalize(inputBoxSetExtended[indexPointer:indexPointer+perInputAllocation[i],:4], scales[i], shifts[i])
#In mmdetection, the association between the boxes are tracked, hence we store the mapping.
gt_inds[indexPointer:indexPointer+perInputAllocation[i]]=i+1
#Update indexpointer to show next empty cell.
indexPointer+=perInputAllocation[i]
#Clamp the boxes from 0 and imgSize to ensure that they are in the image.
sampledBoxSet[:,[0,2]]=torch.clamp(sampledBoxSet[:,[0,2]], 0, imgSize[0])
sampledBoxSet[:,[1,3]]=torch.clamp(sampledBoxSet[:,[1,3]], 0, imgSize[1])
#Compute the bbox overlaps of the generated boxes.
generated_box_overlaps=self.computeBoxToBoxIoU(inputBoxSetExtended[:,:4],sampledBoxSet).squeeze()
return sampledBoxSet, inputBoxSetExtended[:,-1].type(torch.cuda.LongTensor),generated_box_overlaps,gt_inds
def normalize(self, boxes):
#Compute shifts
shifts = boxes[:,[0,1]]
#Compute scales
scales = (torch.cat(((boxes[:,2]-boxes[:,0]).unsqueeze(1), (boxes[:,3]-boxes[:,1]).unsqueeze(1)),1))/(self.reference_box[2]-self.reference_box[0])
        #All the boxes are normalized to the reference box.
        #One can safely replace the following two lines by assigning boxes[:,:4] to the reference box.
boxes[:,[0,2]]=(boxes[:,[0,2]]-shifts[:,0].unsqueeze(1))/scales[:,0].unsqueeze(1)+self.reference_box[0]
boxes[:,[1,3]]=(boxes[:,[1,3]]-shifts[:,1].unsqueeze(1))/scales[:,1].unsqueeze(1)+self.reference_box[1]
return boxes, scales, shifts
def unnormalize(self, boxes,scales,shifts):
#self.reference_box[1] will work also, for different reference boxes please correct here.
boxes[:,:4]-=self.reference_box[0]
#Map the normalized boxes to the image coordinates
boxes[:,[0,2]]=boxes[:,[0,2]]*scales[0]+shifts[0]
boxes[:,[1,3]]=boxes[:,[1,3]]*scales[1]+shifts[1]
return boxes
def InstanceAllocation(self,inputBoxSet):
#Determine the number of classes and ensure the sampling to be balanced over classes
#instead of the instances. Note that this idea originates from OFB sampling in the paper.
#Here BB generator generates class-balanced examples. Hence determine perClassAllocation
# in this manner.
classes=torch.unique(inputBoxSet[:,-1])
classNumber=classes.size()[0]
perClassAllocation=math.ceil(self.RoI_number/classNumber)
#Count the number of instances from each class
classIndices=torch.cuda.FloatTensor(classNumber,inputBoxSet.size()[0]).fill_(0)
for i in range(classNumber):
classIndices[i,:]=inputBoxSet[:,-1]==classes[i]
classCounts=torch.sum(classIndices,1)
#Distribute the perClassAllocation over instances of each class equally
perInstanceAllocation=torch.ceil(perClassAllocation/classCounts)
#count the total number of positive examples determined in this fashion
positiveRoI_number=torch.sum(classCounts*perInstanceAllocation).int()
extendedInputBoxSet=torch.cuda.FloatTensor(positiveRoI_number,5).fill_(0)
instanceNumber=inputBoxSet.size()[0]
indexTracker=0
perInputAllocation=torch.cuda.FloatTensor(inputBoxSet.size()[0]).fill_(0)
for i in range(instanceNumber):
index=classes==inputBoxSet[i,-1]
extendedInputBoxSet[indexTracker:indexTracker+perInstanceAllocation[index].int()]=inputBoxSet[i,:].expand(perInstanceAllocation[index].int(),5)
indexTracker+=perInstanceAllocation[index].int()
perInputAllocation[i]=perInstanceAllocation[index].int()
# if positiveRoI_number>self.RoI_number:
# delete_idx=torch.multinomial(perInstanceAllocation,positiveRoI_number-self.RoI_number,replacement=False)
# pdb.set_trace()
# delete_idx=torch.randint(positiveRoI_number, [positiveRoI_number-self.RoI_number])
return perInputAllocation.int(), positiveRoI_number.item(), extendedInputBoxSet
def IoUAllocation(self,inputBoxSet, positiveRoI_number):
#Determine the number of examples to be sampled from each bin
IoUIndices=torch.multinomial(self.IoU_weights,positiveRoI_number,replacement=True)
        #Sample the exact IoUs considering the bin width and base of each bin
IoUSet=(self.IoU_bin_bases[IoUIndices]+torch.rand(positiveRoI_number)*self.bin_width[IoUIndices]).cuda()
#If IoU is larger than 0.95, then it can be problematic during sampling, so set it to 0.95 for stability.
IoUSet[IoUSet>0.95]=0.95
return IoUSet
def findBottomRightMaxBorders(self,inputBox, IoU, boxArea,proposedx1,proposedy1):
xA = torch.max(proposedx1, inputBox[0])#alpha
yA = torch.max(proposedy1, inputBox[1])
xB = inputBox[2]
yB = inputBox[3]
I=torch.clamp(xB - xA,min=0) * torch.clamp(yB - yA,min=0)
limitLeftX=IoU*boxArea+xA*IoU*(inputBox[3]-yA)+xA*(inputBox[3]-yA)-IoU*proposedx1*(inputBox[3]-proposedy1)
limitLeftX/=((IoU+1)*(inputBox[3]-yA)-IoU*(inputBox[3]-proposedy1))
limitRightX=(I/IoU-boxArea+I)/(inputBox[3]-proposedy1)
limitRightX+=proposedx1
limitTopY=IoU*boxArea+IoU*(inputBox[2]-xA)*yA+yA*(inputBox[2]-xA)-IoU*proposedy1*(inputBox[2]-proposedx1)
limitTopY/=((IoU+1)*(inputBox[2]-xA)-IoU*(inputBox[2]-proposedx1))
limitBottomY=(I/IoU-boxArea+I)/(inputBox[2]-proposedx1)
limitBottomY+=proposedy1
return limitLeftX,limitRightX,limitTopY,limitBottomY
def findBottomRightBorders(self,inputBox, IoU, boxArea,proposedx1,proposedy1,limitLeftX,limitRightX,limitTopY,limitBottomY):
xA = torch.max(proposedx1, inputBox[0])#alpha
yA = torch.max(proposedy1, inputBox[1])
xB = inputBox[2]
yB = inputBox[3]
I=torch.clamp(xB - xA,min=0) * torch.clamp(yB - yA,min=0)
y2TR=torch.arange(limitTopY, inputBox[3]+self.IoU_limit_precision, step=self.IoU_limit_precision).cuda()
yBnew = torch.min(y2TR, inputBox[3])
Inew=torch.clamp(xB - xA,min=0) * torch.clamp(yBnew - yA,min=0)
x2TR=(Inew/IoU-boxArea+Inew)/(y2TR-proposedy1)
x2TR+=proposedx1
x2BR=torch.arange(limitRightX, inputBox[2]-self.IoU_limit_precision, step=-self.IoU_limit_precision).cuda()
y2BR=(I/IoU-boxArea+I)/(x2BR-proposedx1)
y2BR+=proposedy1
y2BL=torch.arange(limitBottomY, inputBox[3]-self.IoU_limit_precision, step=-self.IoU_limit_precision).cuda()
yBnew = torch.min(y2BL, inputBox[3])
x2BL=IoU*boxArea+xA*IoU*(yBnew-yA)+xA*(yBnew-yA)-IoU*proposedx1*(y2BL-proposedy1)
x2BL/=((IoU+1)*(yBnew-yA)-IoU*(y2BL-proposedy1))
x2TL=torch.arange(limitLeftX, inputBox[2]+self.IoU_limit_precision, step=self.IoU_limit_precision).cuda()
xBnew = torch.min(x2TL, inputBox[2])
y2TL=IoU*boxArea+IoU*(xBnew-xA)*yA+yA*(xBnew-xA)-IoU*proposedy1*(x2TL-proposedx1)
y2TL/=((IoU+1)*(xBnew-xA)-IoU*(x2TL-proposedx1))
x2=torch.cat((x2TR,x2BR,x2BL,x2TL))
y2=torch.cat((y2TR,y2BR,y2BL,y2TL))
bottomRightBorders=torch.cat((x2.unsqueeze(1),1-y2.unsqueeze(1)),1)
return bottomRightBorders
def findTopLeftPointBorders(self,inputBox, IoU,boxArea):
#Top Left
y1TR=torch.arange((((inputBox[3]*(IoU-1))+ inputBox[1])/IoU), inputBox[1], step=self.IoU_limit_precision).cuda()
x1TR=inputBox[2]-(boxArea/(IoU*(inputBox[3]-y1TR)))
inv_idx = torch.arange(y1TR.size(0)-1, -1, -1).long()
y1TR = y1TR[inv_idx]
x1TR = x1TR[inv_idx]
#Top Right
x1BR=torch.arange(inputBox[0], inputBox[2]-IoU*(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
I=(inputBox[2]-x1BR)*(inputBox[3]-inputBox[1])
y1BR=inputBox[3]-(I/IoU-boxArea+I)/(inputBox[2]-x1BR)
#Top Left
y1BL=torch.arange(inputBox[1], inputBox[3]-(boxArea*IoU)/(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
x1BL=inputBox[2]-((boxArea*IoU)/((inputBox[3]-y1BL)))
#Top Right
y1TL=torch.arange(inputBox[1], inputBox[3]-(boxArea*IoU)/(inputBox[2]-inputBox[0]), step=self.IoU_limit_precision).cuda()
I=(inputBox[2]-inputBox[0])*(inputBox[3]-y1TL)
x1TL=inputBox[2]-(I/IoU-boxArea+I)/(inputBox[3]-y1TL)
inv_idx = torch.arange(y1TL.size(0)-1, -1, -1).long()
y1TL = y1TL[inv_idx]
x1TL = x1TL[inv_idx]
x1=torch.cat((x1TR, x1BR,x1BL,x1TL))
y1=torch.cat((y1TR, y1BR,y1BL,y1TL))
P=torch.cat((x1.unsqueeze(1),1-y1.unsqueeze(1)),1)
return P
def BoundingBoxGenerator(self, inputBox, IoUSet, numBoxes):
sampledBox=torch.cuda.FloatTensor(numBoxes,4).fill_(-1)
boxArea=(inputBox[3]-inputBox[1])*(inputBox[2]-inputBox[0])
box=inputBox
for i in range(numBoxes):
#In order to prevent bias for a single corner, decide which corner to pick first
if np.random.uniform()<0.5:
flag=1
inputBox=torch.tensor([1-box[2],1-box[3],1-box[0],1-box[1],box[4]]).cuda()
else:
flag=0
inputBox=box
#Step 1 in Algorithm 1
topLeftBorders=self.findTopLeftPointBorders(inputBox, IoUSet[i], boxArea)
sampledBox[i,0],sampledBox[i,1]=self.samplePolygon(topLeftBorders, inputBox)
#Step 2 in Algorithm 1
limitLeftX,limitRightX,limitTopY,limitBottomY=self.findBottomRightMaxBorders(inputBox, IoUSet[i], boxArea,sampledBox[i,0],sampledBox[i,1])
bottomRightBorders=self.findBottomRightBorders(inputBox, IoUSet[i], boxArea, sampledBox[i,0], sampledBox[i,1], limitLeftX, limitRightX, limitTopY, limitBottomY)
sampledBox[i,2],sampledBox[i,3]=self.samplePolygon(bottomRightBorders, inputBox)
#If the box is reversed above then assign the reversed coordinates.
if flag==1:
sampledBox[i,:]=torch.tensor([1-sampledBox[i,2],1-sampledBox[i,3],1-sampledBox[i,0],1-sampledBox[i,1]]).cuda()
return sampledBox
def samplePolygon(self,P, box):
maxX=torch.max(P[:,0])
maxY=torch.max(1-P[:,1])
minX=torch.min(P[:,0])
minY=torch.min(1-P[:,1])
inpoly=0
while inpoly==0:
proposedx1, proposedy1=self.sampleRectangle([minX,minY,maxX,maxY])
#Next line is bottleneck
p = path.Path(P.cpu().numpy())
if p.contains_point([proposedx1,1-proposedy1]):
inpoly=1
return (proposedx1,proposedy1)
def sampleRectangle(self,B,numSamples=1):
x=torch.rand([numSamples])*(B[2]-B[0])+B[0]
y=torch.rand([numSamples])*(B[3]-B[1])+B[1]
return (x,y)
def computeBoxToBoxIoU(self,box_a,box_b):
max_xy = torch.min(box_a[:, 2:].unsqueeze(0), box_b[:, 2:].unsqueeze(0))
min_xy = torch.max(box_a[:, :2].unsqueeze(0), box_b[:, :2].unsqueeze(0))
interside = torch.clamp((max_xy - min_xy), min=0)
inter = interside[:, :, 0] * interside[:, :, 1]
area_a = ((box_a[:, 2]-box_a[:, 0]) * (box_a[:, 3]-box_a[:, 1])).unsqueeze(0)
area_b = ((box_b[:, 2]-box_b[:, 0]) * (box_b[:, 3]-box_b[:, 1])).unsqueeze(0)
union = area_a + area_b - inter
IoU=inter / union
return IoU
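# Minimal usage sketch for the sampler above (a sketch, not part of the
# original module): it assumes a CUDA device, since the class allocates
# torch.cuda tensors internally, and boxes carry a trailing class label.
if torch.cuda.is_available():
    sampler = BoxSampler(RoI_number=8)
    gt_boxes = torch.tensor([[ 10.,  20., 110., 220., 1.],
                             [ 50.,  60., 150., 160., 2.]]).cuda()
    boxes, labels, ious, gt_inds = sampler.sample(gt_boxes, imgSize=[640, 480])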
|
158748
|
CENTS_PER_DOLLAR = 100
class Order(object):
def __init__(self, id, owner, ticker, type, price, qty):
if int(id) < 0:
raise ValueError()
if round(float(price), 2) <= 0:
raise ValueError()
if int(qty) <= 0:
raise ValueError()
self.__id = id
self.__owner = owner
self.__ticker = str(ticker)
self.__type = type
self.__price = round(float(price), 2)
self.__qty = int(qty)
def __eq__(self, o):
if isinstance(o, Order):
return self.id == o.id and self.owner == o.owner and \
self.ticker == o.ticker and self.type == o.type and \
self.price == o.price and self.qty == o.qty
else:
return False
@property
def id(self):
return self.__id
@property
def owner(self):
return self.__owner
@owner.setter
def owner(self, owner):
self.__owner = owner
@property
def ticker(self):
return self.__ticker
@ticker.setter
def ticker(self, ticker):
self.__ticker = str(ticker)
@property
def type(self):
return self.__type
@property
def price(self):
return round(self.__price, 2)
    @price.setter
    def price(self, price):
        if round(float(price), 2) <= 0:
            raise ValueError()
        self.__price = round(float(price), 2)
@property
def qty(self):
return self.__qty
@qty.setter
def qty(self, qty):
if int(qty) <= 0:
raise ValueError()
self.__qty = int(qty)
def __str__(self):
return "{0}: {1} for {2} @ ${3} by {4}".format(self.ticker, self.type,
self.qty, self.price,
self.owner)
def __repr__(self):
s = "ID: " + str(self.id) + "\n"
s += "Owner: " + str(self.owner) + "\n"
s += "Ticker: " + self.ticker + "\n"
s += "Type: " + str(self.type) + "\n"
s += "Price: $" + str(self.price) + "\n"
s += "Quantity: " + str(self.qty) + "\n"
return s
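# Example usage of the class above (ticker/type/owner values are
# illustrative; the class itself does not restrict them):
if __name__ == '__main__':
    order = Order(1, "alice", "ACME", "BUY", 10.50, 100)
    print(order)    # ACME: BUY for 100 @ $10.5 by alice
    order.qty = 50  # setters re-validate their inputs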
|
158763
|
from os import path
from tinydb import Query, TinyDB
from nazurin.config import DATA_DIR
class Local(object):
"""Local database driver using TinyDB."""
def collection(self, key):
self.db = TinyDB(path.join(DATA_DIR, key + '.json'))
return self
def document(self, key):
self._key = key
return self
async def get(self):
Document = Query()
result = self.db.search(Document.key == self._key)
if result:
return result[0]
else:
return None
async def insert(self, key, data):
if key:
data['key'] = key
return self.db.insert(data)
async def update(self, data):
Document = Query()
return self.db.update(data, Document.key == self._key)
async def delete(self):
Document = Query()
return self.db.remove(Document.key == self._key)
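# A hedged usage sketch for the driver above (assumes DATA_DIR exists and
# is writable; collection and key names are illustrative):
import asyncio

async def _demo():
    db = Local().collection('users').document('42')
    await db.insert('42', {'name': 'alice'})
    print(await db.get())  # {'name': 'alice', 'key': '42'}
    await db.delete()

if __name__ == '__main__':
    asyncio.run(_demo())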
|
158772
|
from lessweb import Application
from lessweb.plugin import database
from controller import list_reply
database.init(uri='mysql+mysqlconnector://root:pwd@localhost/db')
app = Application()
app.add_get_mapping('/reply/list', list_reply)
if __name__ == '__main__':
app.run()
|
158775
|
from flask import Flask
from flask_cors import CORS
from flask import request
from flask import json
from db import write_event
# Initializing the Flask app
app = Flask(__name__)
# CORS wrapper to allow cross domain requests
CORS(app)
@app.route('/add_event', methods=['POST'])
def add_event():
"""
Recieve the data to write to the DB
"""
# data sent through the post request
event_data = request.get_json()
# Write to DB
write_event(event_data)
return "Called /post_example \n"
|
158790
|
import demistomock as demisto
import jwt
from Pcysys import Client, pentera_run_template_command, pentera_get_task_run_status_command, \
pentera_get_task_run_full_action_report_command, pentera_authentication
MOCK_PENTERA_FULL_ACTION_REPORT = 'penterascan-5e4530961deb8eda82b08730.csv'
with open('TestData/mock_csv_file', 'r') as mock_csv_file:
    MOCK_CSV = mock_csv_file.read()
MOCK_AUTHENTICATION = {
"token": "TOKEN",
"tgt": "TGT"
}
MOCK_AUTHENTICATION_EXP = 1579763364
MOCK_RUN_TEMPLATE = {
"taskRuns": [
{
"status": "Running",
"taskRunId": "5e41923cf24e1f99979b1cb4",
"taskRunName": "Test mock task run name",
"startTime": 1581348380358.0,
"endTime": 1581349123973.0,
}
],
}
MOCK_TASK_RUN_STATS = {
"taskRuns": [
{
"taskRunId": "5e41923cf24e1f99979b1cb4",
"taskRunName": "Test mock task run name",
"startTime": 1581348380358.0,
"endTime": 1581349123973.0,
"status": "Warning"
}
]
}
def test_pentera_get_task_run_full_action_report(mocker, requests_mock):
mocker.patch.object(demisto, 'params', return_value={
'url': 'https://pentera.com',
'port': '8181'
})
mocker.patch.object(demisto, 'getIntegrationContext', return_value={
'base_url': 'https://pentera.com',
'tgt': 'omgNewTGT',
'accessToken': '<PASSWORD>',
'expiry': MOCK_AUTHENTICATION_EXP
})
mocker.patch.object(demisto, 'args', return_value={
'task_run_id': '5e4530961deb8eda82b08730'
})
requests_mock.get('https://pentera.com:8181/api/v1/taskRun/5e4530961deb8eda82b08730/fullActionReportCSV',
text=MOCK_CSV)
client_id = demisto.params().get('clientId')
tgt = demisto.params().get('tgt')
base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
args = demisto.args()
client = Client(
base_url=base_url,
tgt=tgt,
verify=verify_certificate,
client_id=client_id,
proxy=proxy,
headers={'Accept': 'application/json'})
entries = pentera_get_task_run_full_action_report_command(client, args)
raw_csv_file_name = entries[0]['File']
assert raw_csv_file_name == MOCK_PENTERA_FULL_ACTION_REPORT
task_run_id = entries[1]['EntryContext']['Pentera.TaskRun(val.ID == obj.ID)']['ID']
assert task_run_id == '5e4530961deb8eda82b08730'
operation_type = entries[1]['EntryContext']['Pentera.TaskRun(val.ID == obj.ID)']['FullActionReport'][0][
'Operation Type']
assert operation_type == 'BlueKeep (CVE-2019-0708) Vulnerability Discovery'
def test_pentera_get_task_run_stats(mocker, requests_mock):
mocker.patch.object(demisto, 'params', return_value={
'url': 'https://pentera.com',
'port': '8181'
})
mocker.patch.object(demisto, 'getIntegrationContext', return_value={
'base_url': 'https://pentera.com',
'tgt': 'omgNewTGT',
'accessToken': 'omg<PASSWORD>',
'expiry': MOCK_AUTHENTICATION_EXP
})
mocker.patch.object(demisto, 'args', return_value={
'task_run_id': '5e41923cf24e1f99979b1cb4'
})
requests_mock.get('https://pentera.com:8181/api/v1/taskRun/5e41923cf24e1f99979b1cb4',
json=MOCK_RUN_TEMPLATE)
client_id = demisto.params().get('clientId')
tgt = demisto.params().get('tgt')
base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
args = demisto.args()
client = Client(
base_url=base_url,
tgt=tgt,
verify=verify_certificate,
client_id=client_id,
proxy=proxy,
headers={'Accept': 'application/json'})
readable, parsed, raw = pentera_get_task_run_status_command(client, args)
assert parsed['Pentera.TaskRun(val.ID == obj.ID)']['ID'] == MOCK_TASK_RUN_STATS['taskRuns'][0]['taskRunId']
def test_pentera_run_template(mocker, requests_mock):
mocker.patch.object(demisto, 'params', return_value={
'url': 'https://pentera.com',
'port': '8181'
})
mocker.patch.object(demisto, 'getIntegrationContext', return_value={
'base_url': 'https://pentera.com',
'tgt': 'omgNewTGT',
'accessToken': 'omgNewSecret',
'expiry': MOCK_AUTHENTICATION_EXP
})
mocker.patch.object(demisto, 'args', return_value={
'template_name': 'omgRunThisTemplate'
})
requests_mock.post('https://pentera.com:8181/api/v1/template/runBulk', json=MOCK_RUN_TEMPLATE)
client_id = demisto.params().get('clientId')
tgt = demisto.params().get('tgt')
base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
args = demisto.args()
client = Client(
base_url=base_url,
tgt=tgt,
verify=verify_certificate,
client_id=client_id,
proxy=proxy,
headers={'Accept': 'application/json'})
readable, parsed, raw = pentera_run_template_command(client, args)
assert parsed['Pentera.TaskRun(val.ID == obj.ID)']['Status'] == MOCK_RUN_TEMPLATE['taskRuns'][0]['status']
def test_pentera_authentication(mocker, requests_mock):
mocker.patch.object(demisto, 'params', return_value={
'clientId': 'mmtzv',
'tgt': 'omgSecretsWow',
'url': 'https://pentera.com',
'port': '8181'
})
mocker.patch.object(jwt, 'get_unverified_header',
return_value={'alg': 'HS256', 'exp': 1579763364, 'iat': 1579762464})
requests_mock.post('https://pentera.com:8181/auth/token', json=MOCK_AUTHENTICATION)
mocker.patch.object(demisto, 'args', return_value={})
mocker.patch.object(demisto, 'setIntegrationContext')
client_id = demisto.params().get('clientId')
tgt = demisto.params().get('tgt')
base_url = demisto.params()['url'].rstrip('/') + ':' + demisto.params()['port']
verify_certificate = not demisto.params().get('insecure', False)
proxy = demisto.params().get('proxy', False)
client = Client(
base_url=base_url,
tgt=tgt,
verify=verify_certificate,
client_id=client_id,
proxy=proxy,
headers={'Accept': 'application/json'})
pentera_authentication(client)
assert demisto.setIntegrationContext.call_count == 1
integration_context = demisto.setIntegrationContext.call_args[0][0]
assert isinstance(integration_context, dict)
assert integration_context['expiry'] == MOCK_AUTHENTICATION_EXP
assert integration_context['accessToken'] == MOCK_AUTHENTICATION['token']
|
158866
|
import similarity
from similarity.normalized_levenshtein import NormalizedLevenshtein
from similarity.jarowinkler import JaroWinkler
from similarity.metric_lcs import MetricLCS
from similarity.qgram import QGram
from similarity.jaccard import Jaccard
from similarity.cosine import Cosine
from fuzzywuzzy import fuzz
from fuzzywuzzy import process
import torch
normalized_levenshtein = NormalizedLevenshtein()
jarowinkler = JaroWinkler()
metric_lcs = MetricLCS()
qgram2 = QGram(2)
qgram3 = QGram(3)
qgram4 = QGram(4)
cosine = Cosine(2)
jaccard = Jaccard(2)
def extract_string_similarity_vector(instance: dict):
"""
Returns a vector encoding a variety of lexical similarity metrics given a dictionary containing keys
sentence_1,sentence_2
:return: a vector containing similarity scores
"""
s1 = instance['sentence_1']
s2 = instance['sentence_2']
return torch.tensor([
normalized_levenshtein.similarity(s1,s2),
jarowinkler.similarity(s1,s2),
metric_lcs.distance(s1,s2),
qgram2.distance(s1,s2),
qgram3.distance(s1,s2),
qgram4.distance(s1,s2),
jaccard.similarity(s1,s2),
cosine.similarity(s1,s2),
fuzz.partial_token_set_ratio(s1,s2),
fuzz.partial_token_sort_ratio(s1,s2),
fuzz.token_set_ratio(s1,s2),
fuzz.token_sort_ratio(s1,s2),
fuzz.QRatio(s1,s2),
fuzz.UQRatio(s1,s2),
fuzz.UWRatio(s1,s2),
fuzz.WRatio(s1,s2)
])
def string_similarity_features(data: list):
temp = extract_string_similarity_vector(data[0])
dataset = torch.empty((len(data), temp.shape[0]), dtype=torch.float)
for idx, instance in enumerate(data):
dataset[idx] = extract_string_similarity_vector(instance)
return dataset
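# Example usage of the helpers above on a single pair; note the 16 entries
# mix similarities (higher = closer) and distances (lower = closer).
if __name__ == '__main__':
    pair = {'sentence_1': 'the quick brown fox', 'sentence_2': 'a quick brown dog'}
    features = string_similarity_features([pair])
    print(features.shape)  # torch.Size([1, 16])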
|
158893
|
import tensorflow as tf
def kl_divergence_unit_gaussian(mu, log_sigma_sq, mean_batch=True, name='kl_divergence_unit_gaussian'):
# KL divergence between a multivariate Gaussian distribution with diagonal covariance and an
# isotropic Gaussian distribution
with tf.name_scope(name):
latent_loss = -0.5 * tf.reduce_sum(1 + log_sigma_sq - tf.square(mu) - tf.exp(log_sigma_sq), axis=1)
if mean_batch:
latent_loss = tf.reduce_mean(latent_loss, axis=0)
tf.losses.add_loss(latent_loss)
return latent_loss
def kl_divergence_gaussian(mu1, log_sigma_sq1, mu2, log_sigma_sq2, mean_batch=True, name='kl_divergence_gaussian'):
# KL divergence between two multivariate Gaussian distributions with diagonal covariance
# All inputs must be matrices of [batch size, number of features]
with tf.name_scope(name):
k = tf.cast(tf.shape(mu1), mu1.dtype)[1] # Number of features
        kl_div = 0.5 * (
            tf.reduce_sum(log_sigma_sq2, axis=1) - tf.reduce_sum(log_sigma_sq1, axis=1) - # log(|sigma2|/|sigma1|)
            k + # -k
            tf.reduce_sum(tf.exp(log_sigma_sq1 - log_sigma_sq2), axis=1) + # trace(inv(sigma2) sigma1)
            tf.einsum('bi,bi->b', (mu2 - mu1) ** 2, tf.exp(-log_sigma_sq2)) # (mu2 - mu1)^T inv(sigma2) (mu2 - mu1)
        )
if mean_batch:
return tf.reduce_mean(kl_div, axis=0)
else:
return kl_div
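# A small NumPy cross-check of the diagonal-Gaussian KL above (a sketch for
# intuition, not part of the original module). With log-variances ls, the
# closed form is 0.5 * sum(ls2 - ls1 - 1 + exp(ls1 - ls2) + (mu2 - mu1)^2 * exp(-ls2)).
import numpy as np

def kl_diag_np(mu1, ls1, mu2, ls2):
    return 0.5 * np.sum(ls2 - ls1 - 1 + np.exp(ls1 - ls2)
                        + (mu2 - mu1) ** 2 * np.exp(-ls2), axis=1)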
def kl_divergence_mv_gaussian(mu1, mu2, sigma1, sigma2, mean_batch=True, name='kl_divergence_mv_gaussian'):
# KL divergence between two multivariate Gaussian distributions
# KL(N(mu1, sigma1) | N(mu2, sigma2))
with tf.name_scope(name):
from mvg_distributions.covariance_representations import CovarianceFull
covar1 = CovarianceFull(covariance=sigma1)
covar2 = CovarianceFull(covariance=sigma2)
return kl_divergence_mv_gaussian_v2(sigma1=covar1, sigma2=covar2, mu1=mu1, mu2=mu2, mean_batch=mean_batch)
def kl_divergence_mv_gaussian_v2(sigma1, sigma2, mu1=None, mu2=None, mean_batch=True, name='kl_divergence_mv_gaussian'):
# KL divergence between two multivariate Gaussian distributions
# KL(N(mu1, sigma1) | N(mu2, sigma2))
# sigma1 and sigma2 are Covariance objects
# mu1 and mu2 tensors of [batch size, num features], if None, they are assumed to be zero
with tf.name_scope(name):
from mvg_distributions.covariance_representations import Covariance
assert isinstance(sigma1, Covariance)
assert isinstance(sigma2, Covariance)
if mu1 is None:
assert mu2 is None
if mu2 is None:
assert mu1 is None
# This is equivalent to
# tr_sig1_2 = tf.trace(tf.matmul(sigma2.precision, sigma1.covariance))
# but it avoids doing the matmul for the off-diagonal elements
tr_sig1_2 = tf.einsum('bij,bji->b', sigma2.precision, sigma1.covariance)
k = tf.cast(tf.shape(sigma1.covariance)[1], sigma1.covariance.dtype)
log_det = sigma2.log_det_covariance() - sigma1.log_det_covariance()
if mu1 is not None:
tf.assert_rank_at_least(mu1, 2) # [Batch size, num features]
tf.assert_rank_at_least(mu2, 2)
sq_error = sigma2.x_precision_x(mu2 - mu1)
kl_div = 0.5 * (tr_sig1_2 + sq_error - k + log_det)
else:
kl_div = 0.5 * (tr_sig1_2 - k + log_det)
if mean_batch:
kl_div = tf.reduce_mean(kl_div, axis=0)
return kl_div
|
158903
|
import fbuild.builders.ocaml.ocamlfind
from fbuild.path import Path
def build(ctx):
ocaml = fbuild.builders.ocaml.ocamlfind.Ocaml(ctx,
packages=['unix'])
libb = ocaml.ocamlc.build_lib('libb', Path.glob('b*.ml{,i}'),
packages=['num'])
liba = ocaml.ocamlc.build_lib('liba', Path.glob('a*.ml{,i}'), libs=[libb])
exe = ocaml.ocamlc.build_exe('exe.byte', ['exe.ml'],
libs=[libb, liba],
packages=['num'])
ctx.logger.log(' * running %s:' % exe)
ctx.execute([exe])
libb = ocaml.ocamlopt.build_lib('libb', Path.glob('b*.ml{,i}'),
packages=['num'])
liba = ocaml.ocamlopt.build_lib('liba', Path.glob('a*.ml{,i}'), libs=[libb])
exe = ocaml.ocamlopt.build_exe('exe.native', ['exe.ml'],
libs=[libb, liba],
packages=['num'])
ctx.logger.log(' * running %s:' % exe)
ctx.execute([exe])
# We can also build bytecode and native libraries at the same time.
libb = ocaml.build_lib('libb', Path.glob('b*.ml{,i}'),
packages=['num'])
liba = ocaml.build_lib('liba', Path.glob('a*.ml{,i}'), libs=[libb])
exe = ocaml.build_exe('exe', ['exe.ml'],
libs=[libb, liba],
packages=['num']).bytecode
|
158913
|
import re
import numpy as np
from numpy.core.einsumfunc import _parse_einsum_input
from paderbox.array.segment import segment_axis
__all__ = [
'split_complex_features',
'merge_complex_features',
'tbf_to_tbchw',
'morph',
]
def split_complex_features(X):
""" Split a complex valued input array into two stacked real parts.
    :param X: Complex input array with T times B times F features
:return: Real output array with T times B times 2*F features
"""
return np.concatenate((np.asarray(X.real), np.asarray(X.imag)), axis=2)
def merge_complex_features(X):
""" Merge a two stacked real parts into a complex array.
:param variable: Real input array with T times B times 2*F features
:return: Complex input array with T times B times F features
"""
bins = X.shape[-1]
return X[:, :, :bins // 2] + 1j * X[:, :, bins // 2:]
def tbf_to_tbchw(x, left_context, right_context, step_width,
pad_mode='symmetric', pad_kwargs=None):
""" Transfroms data from TxBxF format to TxBxCxHxW format
This is only relevant for training a neural network in frames mode.
The abbreviations stand for:
T: Time frames
B: Batch size
F: Feature size
C: Channel (almost always 1)
H: Height of the convolution filter
W: Width of the convolution filter
:param x: Data to be transformed
:param left_context: Context size left to current frame
:param right_context: Context size right to current frame
:param step_width: Step width for window
    :param pad_mode: Mode for padding. See :func:`numpy.pad` for details
:param pad_kwargs: Kwargs for pad call
:return: Transformed data
"""
if pad_kwargs is None:
pad_kwargs = dict()
x = np.pad(x,
((left_context, right_context), (0, 0), (0, 0)),
mode=pad_mode, **pad_kwargs)
window_size = left_context + right_context + 1
return segment_axis(
x, window_size, step_width, axis=0, end='cut'
).transpose(0, 2, 3, 1)[:, :, None, :, :]
def _normalize(op):
op = op.replace(',', '')
op = op.replace(' ', '')
op = ' '.join(c for c in op)
op = op.replace(' * ', '*')
op = op.replace('- >', '->')
op = op.replace('. . .', '...')
return op
def _shrinking_reshape(array, source, target):
source, target = source.split(), target.replace(' * ', '*').split()
if '...' in source:
assert '...' in target, (source, target)
independent_dims = array.ndim - len(source) + 1
import string
ascii_letters = [
s
for s in string.ascii_letters
if s not in source and s not in target
]
index = source.index('...')
source[index:index + 1] = ascii_letters[:independent_dims]
index = target.index('...')
target[index:index + 1] = ascii_letters[:independent_dims]
input_shape = {key: array.shape[index] for index, key in enumerate(source)}
output_shape = []
for t in target:
product = 1
if not t == '1':
t = t.split('*')
for t_ in t:
product *= input_shape[t_]
output_shape.append(product)
return array.reshape(output_shape)
def _expanding_reshape(array, source, target, **shape_hints):
try: # Check number of inputs for unflatten operations
assert len(re.sub(r'.\*', '', source.replace(' ', ''))) == array.ndim, \
(array.shape, source, target)
    except AssertionError:  # Check number of inputs for ellipsis operations
        assert len(re.sub(r'(\.\.\.)|(.\*)', '', source.replace(' ', ''))) <= \
            array.ndim, (array.shape, source, target)
def _get_source_grouping(source):
"""
Gets axis as alphanumeric.
"""
source = ' '.join(source)
source = source.replace(' * ', '*')
groups = source.split()
groups = [group.split('*') for group in groups]
return groups
if '*' not in source:
return array
source, target = source.split(), target.replace(' * ', '*').split()
if '...' in source:
assert '...' in target, (source, target)
independent_dims = array.ndim - len(source) + 1
import string
ascii_letters = [
s
for s in string.ascii_letters
if s not in source and s not in target
]
index = source.index('...')
source[index:index + 1] = ascii_letters[:independent_dims]
index = target.index('...')
target[index:index + 1] = ascii_letters[:independent_dims]
target_shape = []
for axis, group in enumerate(_get_source_grouping(source)):
if len(group) == 1:
target_shape.append(array.shape[axis:axis + 1])
else:
shape_wildcard_remaining = True
for member in group:
if member in shape_hints:
target_shape.append([shape_hints[member]])
else:
if shape_wildcard_remaining:
shape_wildcard_remaining = False
target_shape.append([-1])
else:
raise ValueError('Not enough shape hints provided.')
target_shape = np.concatenate(target_shape, 0)
array = array.reshape(target_shape)
return array
def morph(operation, array, reduce=None, **shape_hints):
""" This is an experimental version of a generalized reshape.
See test cases for examples.
"""
operation = _normalize(operation)
source, target = operation.split('->')
# Expanding reshape
array = _expanding_reshape(array, source, target, **shape_hints)
# Initial squeeze
squeeze_operation = operation.split('->')[0].split()
for axis, op in reversed(list(enumerate(squeeze_operation))):
if op == '1':
array = np.squeeze(array, axis=axis)
# Transpose
transposition_operation = operation.replace('1', ' ').replace('*', ' ')
try:
in_shape, out_shape, (array, ) = _parse_einsum_input([transposition_operation.replace(' ', ''), array])
if len(set(in_shape) - set(out_shape)) > 0:
assert reduce is not None, ('Missing reduce function', reduce, transposition_operation)
reduce_axis = tuple([i for i, s in enumerate(in_shape) if s not in out_shape])
array = reduce(array, axis=reduce_axis)
in_shape = ''.join([s for s in in_shape if s in out_shape])
array = np.einsum(f'{in_shape}->{out_shape}', array)
except ValueError as e:
msg = (
f'op: {transposition_operation} ({in_shape}->{out_shape}), '
f'shape: {np.shape(array)}'
)
if len(e.args) == 1:
e.args = (e.args[0] + '\n\n' + msg,)
else:
print(msg)
raise
# Final reshape
source = transposition_operation.split('->')[-1]
target = operation.split('->')[-1]
return _shrinking_reshape(array, source, target)
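# Example usage of morph() (a quick sketch; shapes follow from the code
# above): operations are einsum-like, '*' flattens/unflattens, and the
# `reduce` callable handles axes dropped between source and target.
if __name__ == '__main__':
    x = np.zeros((10, 4, 257))                                    # T x B x F
    assert morph('tbf->btf', x).shape == (4, 10, 257)             # pure transpose
    assert morph('t b f -> b t*f', x).shape == (4, 2570)          # flatten T and F
    assert morph('tbf->bf', x, reduce=np.mean).shape == (4, 257)  # mean over T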
|
158934
|
import os
import tkinter as tk
import tkinter.ttk as ttk
import pygubu
PROJECT_PATH = os.path.abspath(os.path.dirname(__file__))
PROJECT_UI = os.path.join(PROJECT_PATH, "newproject")
class NewprojectApp:
def __init__(self, master=None):
# build ui
self.frame_1 = ttk.Frame(master)
self.button_1 = ttk.Button(self.frame_1)
self.button_1.configure(text='button_1')
self.button_1.grid(column='0', row='0')
self.button_2 = ttk.Button(self.frame_1)
self.button_2.configure(text='button_2')
self.button_2.grid(column='0', row='1')
self.label_1 = ttk.Label(self.frame_1)
self.label_1.configure(text='<NAME>')
self.label_1.grid(column='0', row='2')
self.combobox_1 = ttk.Combobox(self.frame_1)
self.combobox_1.grid(column='0', row='3')
self.entry_1 = ttk.Entry(self.frame_1)
_text_ = '''entry_1'''
self.entry_1.delete('0', 'end')
self.entry_1.insert('0', _text_)
self.entry_1.grid(column='0', row='4')
self.label_2 = ttk.Label(self.frame_1)
self.label_2.configure(text='label_2')
self.label_2.grid(column='0', row='5')
self.menubutton_1 = ttk.Menubutton(self.frame_1)
self.menubutton_1.configure(text='menubutton_1')
self.menubutton_1.grid(column='0', row='6')
self.message_1 = tk.Message(self.frame_1)
self.message_1.configure(text='message_1')
self.message_1.grid(column='0', row='7')
self.__tkvar = tk.StringVar(value='')
__values = []
self.optionmenu_1 = tk.OptionMenu(self.frame_1, self.__tkvar, None, *__values, command=None)
self.optionmenu_1.grid(column='0', row='8')
self.progressbar_1 = ttk.Progressbar(self.frame_1)
self.progressbar_1.configure(orient='horizontal')
self.progressbar_1.grid(column='0', row='9')
self.radiobutton_1 = ttk.Radiobutton(self.frame_1)
self.radiobutton_1.configure(text='radiobutton_1')
self.radiobutton_1.grid(column='0', row='10')
self.scrollbar_1 = ttk.Scrollbar(self.frame_1)
self.scrollbar_1.configure(orient='horizontal')
self.scrollbar_1.grid(column='0', row='11')
self.spinbox_1 = ttk.Spinbox(self.frame_1)
_text_ = '''spinbox_1'''
self.spinbox_1.delete('0', 'end')
self.spinbox_1.insert('0', _text_)
self.spinbox_1.grid(column='0', row='12')
self.sizegrip_1 = ttk.Sizegrip(self.frame_1)
self.sizegrip_1.grid(column='0', row='13')
self.sizegrip_2 = ttk.Sizegrip(self.frame_1)
self.sizegrip_2.grid(column='0', row='14')
self.scrollbar_2 = ttk.Scrollbar(self.frame_1)
self.scrollbar_2.configure(orient='horizontal')
self.scrollbar_2.grid(column='0', row='15')
self.progressbar_2 = ttk.Progressbar(self.frame_1)
self.progressbar_2.configure(orient='horizontal')
self.progressbar_2.grid(column='0', row='16')
__values = []
self.optionmenu_2 = tk.OptionMenu(self.frame_1, self.__tkvar, None, *__values, command=None)
self.optionmenu_2.grid(column='0', row='17')
self.label_3 = ttk.Label(self.frame_1)
self.label_3.configure(text='label_3')
self.label_3.grid(column='0', row='18')
self.checkbutton_1 = ttk.Checkbutton(self.frame_1)
self.checkbutton_1.configure(text='checkbutton_1')
self.checkbutton_1.grid(column='0', row='19')
self.button_3 = ttk.Button(self.frame_1)
self.button_3.configure(text='button_3')
self.button_3.grid(column='0', row='20')
self.menubutton_2 = ttk.Menubutton(self.frame_1)
self.menu_1 = tk.Menu(self.menubutton_2)
self.menubutton_2.configure(text='menubutton_2')
self.menubutton_2.grid(column='0', row='21')
self.frame_1.configure(height='200', width='200')
self.frame_1.grid(column='0', row='0')
# Main widget
self.mainwindow = self.frame_1
def run(self):
self.mainwindow.mainloop()
if __name__ == '__main__':
root = tk.Tk()
app = NewprojectApp(root)
app.run()
|
159081
|
from __future__ import division
import numpy as np
import utils
import pdb
data = utils.load_dataset("credittest")
Xvalid, yvalid = data['X'], data['y']
def kappa(ww, delta):
ww = np.array(ww)
yhat = np.sign(np.dot(Xvalid, ww))
ww2 = np.array(ww + delta)
yhat2 = np.sign(np.dot(Xvalid, ww2))
P_A = np.sum(yhat == yhat2) / float(yvalid.size)
P_E = 0.5
return (P_A - P_E) / (1 - P_E)
def roni(ww, delta):
ww = np.array(ww)
yhat = np.sign(np.dot(Xvalid, ww))
ww2 = np.array(ww + delta)
yhat2 = np.sign(np.dot(Xvalid, ww2))
g_err = np.sum(yhat != yvalid) / float(yvalid.size)
new_err = np.sum(yhat2 != yvalid) / float(yvalid.size)
return new_err - g_err
# Returns the index of the row that should be used in Krum
def krum(deltas, clip):
# assume deltas is an array of size group * d
n = len(deltas)
deltas = np.array(deltas)
scores = get_krum_scores(deltas, n - clip)
good_idx = np.argpartition(scores, n - clip)[:(n - clip)]
print(good_idx)
return good_idx
# return np.mean(deltas[good_idx], axis=0)
def get_krum_scores(X, groupsize):
krum_scores = np.zeros(len(X))
# Calculate distances
distances = np.sum(X**2, axis=1)[:, None] + np.sum(
X**2, axis=1)[None] - 2 * np.dot(X, X.T)
for i in range(len(X)):
krum_scores[i] = np.sum(np.sort(distances[i])[1:(groupsize - 1)])
return krum_scores
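# Hedged example of the aggregation above (synthetic data, not from the
# original script): five client updates of dimension 3, tolerating one
# outlier (clip=1), averaged over the surviving rows.
def _krum_demo():
    deltas = np.random.randn(5, 3)
    good_idx = krum(deltas, clip=1)  # indices of the n - clip lowest scores
    return np.mean(deltas[good_idx], axis=0)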
if __name__ == "__main__":
pdb.set_trace()
|
159125
|
from Utils.Data.DatasetUtils import is_test_or_val_set, get_train_set_id_from_test_or_val_set
from Utils.Data.Dictionary.MappingDictionary import *
from Utils.Data.Features.Generated.GeneratedFeature import GeneratedFeaturePickle, GeneratedFeatureOnlyPickle
from Utils.Data.Features.MappedFeatures import MappedFeatureTweetLanguage
def top_popular_language(dataset_id: str, top_n: int = 5):
# if is_test_or_val_set(dataset_id):
# dataset_id = get_train_set_id_from_test_or_val_set(dataset_id)
#
# dataframe = TweetFeatureIsLanguage(dataset_id).load_or_create()
#
# popularity_list = [(dataframe[column].sum(), dataframe[column]) for column in dataframe.columns]
#
# popularity_list = sorted(popularity_list, key=lambda x: x[0], reverse=True)
#
# selected_column = [tuple[1] for tuple in popularity_list][: top_n]
#
# selected_column_id = [col.name.split("_")[2] for col in selected_column]
#
# return selected_column_id
return [0, 1, 2, 10]
class TweetFeatureIsLanguage(GeneratedFeatureOnlyPickle):
def __init__(self, dataset_id: str, selected_languages: list = []):
super().__init__("tweet_is_language_x", dataset_id)
self.pck_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/is_language/{self.feature_name}.pck.gz")
self.csv_path = pl.Path(
f"{Feature.ROOT_PATH}/{self.dataset_id}/generated/is_language/{self.feature_name}.csv.gz")
self.selected_languages = selected_languages
def load_feature(self):
dataframe = super().load_feature()
if len(self.selected_languages) > 0:
selected_columns = ["is_language_" + str(language) for language in self.selected_languages]
return dataframe[selected_columns]
else:
return dataframe
def create_feature(self):
# Load the languages
languages = MappingLanguageDictionary().load_or_create().values()
# Load the language column
language_feature = MappedFeatureTweetLanguage(self.dataset_id)
language_df = language_feature.load_or_create()
# Creating the dataframe
dataframe = pd.DataFrame()
# Populating the dataframe
for language in languages:
dataframe[f"is_language_{language}"] = (language_df[language_feature.feature_name] == language)
self.save_feature(dataframe)
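# Hedged usage sketch (requires the project's dataset layout under
# Feature.ROOT_PATH; the dataset id 'train' is illustrative):
#   feature = TweetFeatureIsLanguage('train', top_popular_language('train'))
#   dataframe = feature.load_or_create()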
|
159128
|
from python.enclave_interfaces import GlobalTensor as gt
from python.timer_utils import NamedTimerInstance
from python.torch_utils import compare_expected_actual
from python.tensor_loader import TensorLoader
import torch
class SecretLayerBase(TensorLoader):
PrevLayer = None
NextLayer = None
PlainForwardResult = None
    PlainBackwardResult = None
    LearnableParamsList = None
StoreInEnclave = True
IsDummyForS2 = True
ForwardFunc = None
BackwardFunc = None
PlainFunc = None
SecretOpList = None
GradFunction = None
grad_func_for_speed = None
def __init__(self, sid, LayerName):
super().__init__()
self.sid = sid
self.LayerName = LayerName
def set_eid(self, eid):
super().set_eid(eid)
# for f in self.SecretOpList:
# f.set_eid(eid)
def init_shape(self):
raise NotImplementedError
def init_params(self):
return
def name_modifier(self, name):
return self.LayerName + " - " + str(name)
def link_tensors(self):
if self.PrevLayer is not None:
gt.link_tags(self.get_tag("input", remap=False), self.PrevLayer.get_tag("output", remap=False))
gt.link_tags(self.get_tag("DerInput", remap=False), self.PrevLayer.get_tag("DerOutput", remap=False))
if self.NextLayer is not None:
gt.link_tags(self.get_tag("output", remap=False), self.NextLayer.get_tag("input", remap=False))
gt.link_tags(self.get_tag("DerOutput", remap=False), self.NextLayer.get_tag("DerInput", remap=False))
def register_next_layer(self, layer):
self.NextLayer = layer
def register_prev_layer(self, layer):
self.PrevLayer = layer
def forward_tensor_transfer(self, transfer_tensor="input"):
if self.PrevLayer is not None and self.PrevLayer.StoreInEnclave is True and self.StoreInEnclave is False:
self.transfer_enclave_to_cpu(transfer_tensor)
if self.PrevLayer is not None and self.PrevLayer.StoreInEnclave is False and self.StoreInEnclave is True:
self.transfer_cpu_to_enclave(transfer_tensor)
def backward_tensor_transfer(self, transfer_tensor="DerOutput"):
if self.NextLayer is not None and self.NextLayer.StoreInEnclave is True and self.StoreInEnclave is False:
self.transfer_enclave_to_cpu(transfer_tensor)
if self.NextLayer is not None and self.NextLayer.StoreInEnclave is False and self.StoreInEnclave is True:
self.transfer_cpu_to_enclave(transfer_tensor)
def set_tensor_with_name(self, name, t):
if t is not None:
self.set_cpu(name, t)
if self.StoreInEnclave:
self.set_tensor(name, t)
def forward_transfer_to_plain(self, name):
if self.PrevLayer is not None and self.PrevLayer.StoreInEnclave:
self.transfer_enclave_to_cpu(name)
def backward_transfer_to_plain(self, name):
if self.NextLayer is not None and self.NextLayer.StoreInEnclave:
self.transfer_enclave_to_cpu(name)
# If this layer is store in enclave, then load the tensor from enclave to plaintext cpu
def make_sure_cpu_is_latest(self, name):
if self.StoreInEnclave:
self.transfer_enclave_to_cpu(name)
def load_tensors(self, input_tensor, der_output_tensor):
self.set_tensor_with_name("input", input_tensor)
self.set_tensor_with_name("DerOutput", der_output_tensor)
def requires_grad_on_cpu(self, name):
tensor = self.get_cpu(name)
if tensor.is_leaf is False:
return
tensor.requires_grad = True
def plain_forward(self):
self.make_sure_cpu_is_latest("input")
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} PlainForward"):
torch.set_num_threads(1)
self.PlainForwardResult = self.PlainFunc(self.get_cpu("input"))
torch.set_num_threads(4)
def plain_backward(self):
self.make_sure_cpu_is_latest("DerOutput")
GradFunction = self.PlainForwardResult.grad_fn
with NamedTimerInstance(f"S{self.sid}: {self.LayerName} PlainBackward"):
torch.set_num_threads(1)
self.PlainBackwardResult = GradFunction(self.get_cpu("DerOutput"))
torch.set_num_threads(4)
def show_plain_error(self):
if self.StoreInEnclave:
self.transfer_enclave_to_cpu("output")
err = compare_expected_actual(self.PlainForwardResult, self.get_cpu("output"), get_relative=True)
print(f"S{self.sid}: {self.LayerName} Forward Error: {err}")
if self.PlainBackwardResult is None:
return
if self.StoreInEnclave:
self.transfer_enclave_to_cpu("DerInput")
err = compare_expected_actual(self.PlainBackwardResult, self.get_cpu("DerInput"), show_where_err=False, get_relative=True)
print(f"S{self.sid}: {self.LayerName} Backward Error {err}")
def inject_params(self, param):
return
|
159275
|
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
class FaceMeshBlock(nn.Module):
"""This is the main building block for architecture
which is just residual block with one dw-conv and max-pool/channel pad
in the second branch if input channels doesn't match output channels"""
def __init__(self, in_channels: int, out_channels: int, kernel_size: int = 3, stride: int = 1):
super(FaceMeshBlock, self).__init__()
self.stride = stride
self.channel_pad = out_channels - in_channels
if stride == 2:
self.max_pool = nn.MaxPool2d(kernel_size=stride, stride=stride)
padding = 0
else:
padding = (kernel_size - 1) // 2
self.convs = nn.Sequential(
nn.Conv2d(in_channels=in_channels, out_channels=in_channels,
kernel_size=(kernel_size, kernel_size), stride=(stride, stride), padding=(padding, padding),
groups=in_channels, bias=True),
nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=(1, 1), stride=(1, 1), padding=(0, 0), bias=True),
)
self.act = nn.PReLU(out_channels)
def forward(self, x):
if self.stride == 2:
h = F.pad(x, (0, 2, 0, 2), "constant", 0)
x = self.max_pool(x)
else:
h = x
if self.channel_pad > 0:
x = F.pad(x, (0, 0, 0, 0, 0, self.channel_pad), "constant", 0)
return self.act(self.convs(h) + x)
class FaceMesh(nn.Module):
"""The FaceMesh face landmark model from MediaPipe."""
def __init__(self):
super(FaceMesh, self).__init__()
self.num_coords = 468
self.x_scale = 192.0
self.y_scale = 192.0
self.min_score_thresh = 0.75
self._define_layers()
def _define_layers(self):
self.backbone = nn.Sequential(
nn.Conv2d(in_channels=3, out_channels=16, kernel_size=(3, 3), stride=(2, 2), padding=(0, 0), bias=True),
nn.PReLU(16),
FaceMeshBlock(16, 16),
FaceMeshBlock(16, 16),
FaceMeshBlock(16, 32, stride=2),
FaceMeshBlock(32, 32),
FaceMeshBlock(32, 32),
FaceMeshBlock(32, 64, stride=2),
FaceMeshBlock(64, 64),
FaceMeshBlock(64, 64),
FaceMeshBlock(64, 128, stride=2),
FaceMeshBlock(128, 128),
FaceMeshBlock(128, 128),
FaceMeshBlock(128, 128, stride=2),
FaceMeshBlock(128, 128),
FaceMeshBlock(128, 128),
)
self.coord_head = nn.Sequential(
FaceMeshBlock(128, 128, stride=2),
FaceMeshBlock(128, 128),
FaceMeshBlock(128, 128),
nn.Conv2d(128, 32, (1, 1)),
nn.PReLU(32),
FaceMeshBlock(32, 32),
nn.Conv2d(32, 1404, (3, 3))
)
self.conf_head = nn.Sequential(
FaceMeshBlock(128, 128, stride=2),
nn.Conv2d(128, 32, (1, 1)),
nn.PReLU(32),
FaceMeshBlock(32, 32),
nn.Conv2d(32, 1, (3, 3))
)
def forward(self, x):
x = nn.ReflectionPad2d((1, 0, 1, 0))(x)
b = x.shape[0] # batch size, needed for reshaping later
x = self.backbone(x) # (b, 128, 6, 6)
c = self.conf_head(x) # (b, 1, 1, 1)
c = c.view(b, -1) # (b, 1)
r = self.coord_head(x) # (b, 1404, 1, 1)
r = r.reshape(b, -1) # (b, 1404)
return [r, c]
def _device(self):
"""Which device (CPU or GPU) is being used by this model?"""
return self.conf_head[1].weight.device
def load_weights(self, path):
self.load_state_dict(torch.load(path))
self.eval()
@staticmethod
def _preprocess(x):
"""Converts the image pixels to the range [-1, 1]."""
return x.float() / 127.5 - 1.0
def predict_on_image(self, img):
"""Makes a prediction on a single image.
Arguments:
img: a NumPy array of shape (H, W, 3) or a PyTorch tensor of
shape (3, H, W). The image's height and width should be
128 pixels.
Returns:
A tensor with face detections.
"""
if isinstance(img, np.ndarray):
img = torch.from_numpy(img).permute((2, 0, 1))
return self.predict_on_batch(img.unsqueeze(0))[0]
def predict_on_batch(self, x):
"""Makes a prediction on a batch of images.
Arguments:
x: a NumPy array of shape (b, H, W, 3) or a PyTorch tensor of
shape (b, 3, H, W). The height and width should be 128 pixels.
Returns:
A list containing a tensor of face detections for each image in
the batch. If no faces are found for an image, returns a tensor
of shape (0, 17).
Each face detection is a PyTorch tensor consisting of 17 numbers:
- y-min, x-min, y-max, x-max
- x,y-coordinates for the 6 key-points
- confidence score
"""
if isinstance(x, np.ndarray):
x = torch.from_numpy(x).permute((0, 3, 1, 2))
assert x.shape[1] == 3
assert x.shape[2] == 192
assert x.shape[3] == 192
# 1. Preprocess the images into tensors:
x = x.to(self._device())
x = self._preprocess(x)
# 2. Run the neural network:
with torch.no_grad():
out = self.__call__(x)
# 3. Post-process the raw predictions:
detections, confidences = out
        detections[:, 0::3] *= self.x_scale
        detections[:, 1::3] *= self.y_scale
return detections.view(-1, 3), confidences
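# Minimal smoke test of the model above with randomly initialized weights
# (a real checkpoint would be loaded via load_weights(path); the path is
# user-supplied and not part of this snippet):
if __name__ == '__main__':
    net = FaceMesh()
    dummy = np.zeros((192, 192, 3), dtype=np.uint8)
    landmarks = net.predict_on_image(dummy)
    print(landmarks.shape)  # torch.Size([468, 3])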
|
159301
|
import os
import json
import argparse
from tensorflow.keras.utils import get_file
from sklearn.metrics.pairwise import cosine_similarity as measure
from paz.backend.camera import VideoPlayer, Camera
from scenes import DictionaryView
from model import AutoEncoder
from pipelines import ImplicitRotationPredictor
parser = argparse.ArgumentParser(description='Implicit orientation demo')
parser.add_argument('-c', '--camera_id', type=int, default=0,
help='Camera device ID')
parser.add_argument('-f', '--y_fov', type=float, default=3.14159 / 4.0,
help='field of view')
parser.add_argument('-v', '--viewport_size', type=int, default=128,
help='Size of rendered images')
parser.add_argument('-d', '--distance', type=float, default=0.3,
help='Distance between camera and 3D model')
parser.add_argument('-s', '--shift', type=float, default=0.01,
help='Shift')
parser.add_argument('-l', '--light', type=int, default=10,
help='Light intensity')
parser.add_argument('-b', '--background', type=int, default=0,
help='Plain background color')
parser.add_argument('-r', '--roll', type=float, default=3.14159,
help='Maximum roll')
parser.add_argument('-t', '--translate', type=float, default=0.01,
help='Maximum translation')
parser.add_argument('-p', '--top_only', type=int, default=0,
help='Rendering mode')
parser.add_argument('--theta_steps', type=int, default=10,
help='Amount of steps taken in the X-Y plane')
parser.add_argument('--phi_steps', type=int, default=10,
help='Amount of steps taken from the Z-axis')
parser.add_argument('--model_name', type=str,
default='VanillaAutoencoder128_128_035_power_drill',
help='Model directory name without root')
parser.add_argument('--model_path', type=str,
default=os.path.join(
os.path.expanduser('~'), '.keras/paz/models/'),
help='Root directory PAZ trained models')
args = parser.parse_args()
path = os.path.join(args.model_path, args.model_name)
parameters = json.load(open(os.path.join(path, 'hyperparameters.json'), 'r'))
size = parameters['image_size']
latent_dimension = parameters['latent_dimension']
weights_path = os.path.join(path, args.model_name + '_weights.hdf5')
obj_path = get_file('textured.obj', None,
cache_subdir='paz/datasets/ycb/models/035_power_drill/')
renderer = DictionaryView(
obj_path, (args.viewport_size, args.viewport_size), args.y_fov,
args.distance, bool(args.top_only), args.light, args.theta_steps,
args.phi_steps)
encoder = AutoEncoder((size, size, 3), latent_dimension, mode='encoder')
encoder.load_weights(weights_path, by_name=True)
decoder = AutoEncoder((size, size, 3), latent_dimension, mode='decoder')
decoder.load_weights(weights_path, by_name=True)
inference = ImplicitRotationPredictor(encoder, decoder, measure, renderer)
player = VideoPlayer((1280, 960), inference, camera=Camera(args.camera_id))
player.run()
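# Example invocation (script name and camera id are illustrative, not from the source):
#   python demo.py --camera_id 0 --theta_steps 10 --phi_steps 10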
|
159307
|
import sys
import os
UTILS_DIR = os.path.join(os.path.abspath(os.path.dirname(__file__)), '..', 'utils')
sys.path.insert(1, UTILS_DIR)
from training import train, test
if __name__ == '__main__':
BREAK_EARLY = False
BATCH_SIZE = 500
print("For this one, I just want to try out a different KL or two. Because we didn't get the competitive results we asked for the other way.")
for data_subdir in ['msd']:
actor_path = "VAE_ACTOR_TRAIN_{}_KL=0.1".format(data_subdir)
train(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=100,
n_epochs_ac_only=0,
n_epochs_pred_and_ac=0,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
logging_frequency=200,
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
path_to_save_actor=actor_path,
log_critic_training_error=False,
)
print("Now, hopefully on to testing...")
test(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=100,
n_epochs_ac_only=0,
n_epochs_pred_and_ac=0,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
)
print("On to round 2! Now we'll do the critic.")
train(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=0,
n_epochs_ac_only=50,
n_epochs_pred_and_ac=50,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
logging_frequency=200,
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
restore_trained_actor_path=actor_path,
)
print("Now, hopefully on to testing...")
test(
model_class='multi_vae',
data_subdir=data_subdir,
n_epochs_pred_only=0,
n_epochs_ac_only=50,
n_epochs_pred_and_ac=50,
# max_kl=0.2,
max_kl=0.1,
ac_reg_loss_scaler=0.0,
actor_reg_loss_scaler=0.01,
evaluation_metric="NDCG",
batch_size=BATCH_SIZE,
break_early=BREAK_EARLY,
verbose=False,
version_tag="FULL_RUN_ON_MSD_ONLY",
restore_trained_actor_path=actor_path,
)
print("Bye bye")
exit()
# train(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=200,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("On to round 2! Now we'll do the critic.")
# train(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# ac_reg_loss_scaler=0.0,
# # actor_reg_loss_scaler=0.00001,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# # path_to_save_last_actor="LAST_ACTOR_OF_200_epochs_HIS_KL_annealing",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Now, hopefully on to testing...")
# test(
# # model_class="wmf",
# model_class='multi_dae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=100,
# # min_kl=0.0001,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0,
# actor_reg_loss_scaler=0.01,
# # positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="TRAINING_DAE",
# # path_to_save_actor="200_epochs_HIS_DAE",
# restore_trained_actor_path="200_epochs_HIS_DAE",
# )
# print("Bye bye")
# exit()
# for positive_weights in [2.0, 5.0, 10.0, 30.0, 50.0, 100.0]:
# train(
# model_class="wmf",
# # model_class='multi_vae',
# # model_class='warp_encoder',
# n_epochs_pred_only=50,
# n_epochs_ac_only=0,
# n_epochs_pred_and_ac=0,
# epochs_to_anneal_over=50,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0005,
# actor_reg_loss_scaler=0.00001,
# positive_weights=positive_weights,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="making_one_trained_on_each_eval_metric",
# # path_to_save_actor="test_actor_save",
# # restore_trained_actor_path="last_actor_after_150_trained_epochs"
# )
# exit("Exiting gracefully!")
# # train(
# # model_class='multi_vae',
# # # model_class='warp_encoder',
# # n_epochs_pred_only=0,
# # n_epochs_ac_only=50,
# # n_epochs_pred_and_ac=100,
# # epochs_to_anneal_over=50,
# # max_kl=0.2,
# # ac_reg_loss_scaler=0.0005,
# # # evaluation_metric='AP',
# # evaluation_metric="NDCG",
# # # logging_frequency=25,
# # # logging_frequency=50,
# # logging_frequency=50,
# # batch_size=500,
# # # batch_size=25,
# # break_early=False,
# # verbose=False,
# # # path_to_save_actor="best_ndcg_trained_150_epochs",
# # # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# # version_tag="making_one_trained_on_each_eval_metric",
# # # path_to_save_actor="test_actor_save",
# # restore_trained_actor_path="last_actor_after_150_trained_epochs"
# # )
# print("Just for good measure, I'm going to run the test function too.")
# test(
# model_class='multi_vae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=100,
# epochs_to_anneal_over=50,
# max_kl=0.2,
# ac_reg_loss_scaler=0.0005,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# # logging_frequency=50,
# # logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# # path_to_save_actor="best_ndcg_trained_150_epochs",
# # path_to_save_last_actor="last_actor_after_150_trained_epochs",
# version_tag="making_one_trained_on_each_eval_metric",
# # path_to_save_actor="test_actor_save",
# restore_trained_actor_path="last_actor_after_150_trained_epochs"
# )
# exit("Bye bye now! I doubt it will make it here, but a man can dream.")
# for ac_reg_loss_scaler in [0.0, 1e-3, 1e-2, 1e-1]:
# train(
# model_class='multi_vae',
# # model_class='warp_encoder',
# n_epochs_pred_only=0,
# n_epochs_ac_only=50,
# n_epochs_pred_and_ac=50,
# epochs_to_anneal_over=50,
# max_kl=0.2,
# # evaluation_metric='AP',
# evaluation_metric="NDCG",
# # logging_frequency=25,
# logging_frequency=50,
# batch_size=500,
# # batch_size=25,
# break_early=False,
# verbose=False,
# ac_reg_loss_scaler=ac_reg_loss_scaler,
# version_tag="hyperparameter_ac_reg",
# # path_to_save_actor="best_ndcg_trained_100_epochs",
# # path_to_save_last_actor="last_actor_after_100_trained_epochs",
# # path_to_save_actor="test_actor_save",
# restore_trained_actor_path="best_ndcg_trained_100_epochs",
# )
|
159318
|
import pandas as pd
import pickle
import re
import matplotlib.pyplot as plt
import collections
from pprint import pprint
from daisylu_vectors import *
#
# merged here: from daisylu_new_dryrun import *
from daisylu_config import *
from daisylu_system import *
from sentences import *
import networkx as nx
import argparse
import subprocess
import threading
import time
import sys
import math
import numpy as np
#
# 1 merge daisylu_new_dryrun to here
# 2 daisylu_dryrun?
#
#
def addTorchNetworkResults(sents, dbrf, dbf, systemName, NoConceptThreshold = 0.5, conceptRejectionThreshold=0.0):
def logPMToProb(logPM):
pMass = [math.exp(float(z)) for z in logPM ]
probs = [pm/sum(pMass) for pm in pMass]
return probs
dbfn = getSystemPath('daisylu') + 'data/%s' % dbf
dbrfn = getSystemPath('daisylu') + 'results/%s' % dbrf
if (systemName == 'AMRL0'):
# 2c. add L0 nn output to data frames
tp, _, _, features, _ = getComparisonDFrames(dbfn, dbrfn)
for sentIX in range(len(sents['test'])):
sentence = sents['test'][sentIX]
singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
df = sentence.predictedDFrame
for _, row in singleSentDF.iterrows():
wordIX = row['wordIX']
result = row['result']
pVector = row['pVector']
df.loc[df.wordIX == wordIX, 'pVectorL0'] = pVector
lProb = np.array(floatCSVToList(pVector)[0])
if (True):
# With: F1 57.18, prec 59.71, recall 54.85
# With 0.65 threshold: F1 57.49, prec 58.09, recall 56.90
# Without: F1 57.35, prec 60.57, recall 54.46
feats = features['L0']['tokens']
lst = floatCSVToList(pVector)
logProbs, probs = normalizeLogProbs1d({0:lst[0]})
L0ToProb = dict(zip(feats,probs[0]))
sortedTuples = sorted(zip(feats,probs[0]), key=lambda x: x[1], reverse=True )
if sortedTuples[0][0] == 'O':
if sortedTuples[0][1] >= NoConceptThreshold:
result = 'O'
prob = sortedTuples[0][1]
else:
result = sortedTuples[1][0]
prob = sortedTuples[1][1]
else:
result = sortedTuples[0][0]
prob = sortedTuples[0][1]
if result=='UNKNOWN':
if not 'NEWPrediction' in df:
result = 'S_txPred-01' # a default
else:
result = 'S_txPred' # NEW default, was S_txPred-01
if (conceptRejectionThreshold > 0.0) and not (result=='txNamed'):
if prob <= conceptRejectionThreshold:
result='O'
dsfString = listToCSV(logPMToProb(lProb))
df.loc[df.wordIX == wordIX, 'distSG'] = dsfString
df.loc[df.wordIX == wordIX, 'txBIOES'] = result
df.loc[df.wordIX == wordIX, 'txBIOESProb'] = prob
# 2d. add wikification to data frames (alternate is to run wikification first and use NER as input to L0
# Wikification should happen here, creating the 'namedCategory'
# and the 'wiki' attribute
df['nameCategory'] = 'GetFromWikification'
df['wiki'] = 'GetFromWikification'
if not 'NEWPrediction' in df:
predictConceptKinds(df, None)
elif (systemName == 'AMRL0Args'):
tp, df, _, _, _ = getComparisonDFrames(dbfn, dbrfn, pVector2d=True)
for sentIX in range(len(sents['test'])):
sentence = sents['test'][sentIX]
singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
df = sentence.predictedDFrame
argsDict = {}
for _, row in singleSentDF.iterrows():
pWordIX = row['pWordIX']-1 # adjust for lua
wordIX = row['wordIX']
result = row['result']
pVector = row['pVector']
if pVector:
df.loc[df.wordIX == pWordIX, 'pVectorL0Args'] = pVector
if result != 'O':
if not pWordIX in argsDict:
argsDict[pWordIX] = []
argsDict[pWordIX].append([wordIX, result])
for pWordIX in argsDict.keys():
for i,rel in enumerate(argsDict[pWordIX]):
df.loc[df.wordIX == pWordIX, 'ar%d_ix' % i] = rel[0]
df.loc[df.wordIX == pWordIX, 'ar%d_arg' % i] = rel[1]
elif (systemName == 'AMRL0Nargs'):
# 2c. add L0 nn output to data frames
tp, df, _, _, _ = getComparisonDFrames(dbfn, dbrfn, pVector2d=True)
for sentIX in range(len(sents['test'])):
sentence = sents['test'][sentIX]
singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
df = sentence.predictedDFrame
nargsDict = {}
for _, row in singleSentDF.iterrows():
pWordIX = row['pWordIX']-1 # adjust for lua
wordIX = row['wordIX']
result = row['result']
pVector = row['pVector']
if pVector:
df.loc[df.wordIX == wordIX, 'pVectorL0Nargs'] = pVector
if result != 'O':
if not pWordIX in nargsDict:
nargsDict[pWordIX] = []
nargsDict[pWordIX].append([wordIX, result])
#print 'DEBUG', pWordIX, wordIX, result
for pWordIX in nargsDict.keys():
for i,rel in enumerate(nargsDict[pWordIX]):
df.loc[df.wordIX == pWordIX, 'nar%d_ix' % i] = rel[0]
df.loc[df.wordIX == pWordIX, 'nar%d_lbl' % i] = rel[1]
elif (systemName == 'AMRL0Attr'):
# 2c. add L0 nn output to data frames
tp, df, _, _, _ = getComparisonDFrames(dbfn, dbrfn)
for sentIX in range(len(sents['test'])):
sentence = sents['test'][sentIX]
singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
df = sentence.predictedDFrame
for _, row in singleSentDF.iterrows():
pWordIX = row['pWordIX']-1 # adjust for lua
wordIX = row['wordIX']
result = row['result']
pVector = row['pVector']
df.loc[df.wordIX == wordIX, 'pVectorL0Attr'] = pVector
i=0
if result == 'polarity':
df.loc[df.wordIX == wordIX, 'attr%d_val' % i] = '-'
df.loc[df.wordIX == wordIX, 'attr%d_lbl' % i] = 'polarity'
elif result == 'TOP':
df.loc[df.wordIX == wordIX, 'attr%d_val' % i] = df.loc[df.wordIX == wordIX, 'kind']
df.loc[df.wordIX == wordIX, 'attr%d_lbl' % i] = 'TOP'
elif result == 'quant':
                    print('skipping quant HMM')
#df.loc[df.wordIX == wordIX, 'attr%d_val' % i] = 'HMM'
#df.loc[df.wordIX == wordIX, 'attr%d_lbl' % i] = 'quant'
if (systemName == 'AMRL0Ncat'):
# 2c. add L0 nn output to data frames
tp, _, _, features, _ = getComparisonDFrames(dbfn, dbrfn)
for sentIX in range(len(sents['test'])):
sentence = sents['test'][sentIX]
singleSentDF = tp[ tp['sentIX']==(sentIX+1) ]
df = sentence.predictedDFrame
for _, row in singleSentDF.iterrows():
wordIX = row['wordIX']
result = row['result']
pVector = row['pVector']
df.loc[df.wordIX == wordIX, 'pVectorNcat'] = pVector
if (True):
# With: F1 57.18, prec 59.71, recall 54.85
# With 0.65 threshold: F1 57.49, prec 58.09, recall 56.90
# Without: F1 57.35, prec 60.57, recall 54.46
feats = features['ncat']['tokens']
lst = floatCSVToList(pVector)
logProbs, probs = normalizeLogProbs1d({0:lst[0]})
L0ToProb = dict(zip(feats,probs[0]))
sortedTuples = sorted(zip(feats,probs[0]), key=lambda x: x[1], reverse=True )
if sortedTuples[0][0] == 'O':
if sortedTuples[0][1] >= 0.95:
result = '-'
prob = sortedTuples[0][1]
else:
result = sortedTuples[1][0]
prob = sortedTuples[1][1]
else:
result = sortedTuples[0][0]
prob = sortedTuples[0][1]
if result=='UNKNOWN':
result = 'person' # a default
df.loc[df.wordIX == wordIX, 'NcatResult'] = result
df.loc[df.wordIX == wordIX, 'NcatProb'] = prob
def filenamesForNNTag(nnTag, modelInfo, sessionTag):
pid = modelInfo[nnTag]['id']
if isinstance(pid, int):
modelFn = '%05d_best_dev' % pid
else:
modelFn = pid
modelCreationDBFn = modelInfo[nnTag]['db']
if not sessionTag:
sessionTag = ''
z = sessionTag.split('/')
stag = z[-1]
testVectorDBFn = '%s_%stestVectors.db' % (nnTag, stag)
resultsDBFn = '%s_%sresults.db' % (nnTag, stag)
return (modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn)
def daisyluSystemEndToEnd(inputFn, sents=None, useNER=True, useCacheIfAvail=True, sessionTag = None,
modelInfo=None, debugSave=False, NoConceptThreshold=0.5, conceptRejectionThreshold=0.0,
NEWPrediction=False, L0OnlyFromFeaturesDB=False, useDistSG=False):
'''
Generate list of sentence objects with predicted dataframes from an input text file.
:param inputFn: Text input file with "::tags" including ::snt specification separated by blank lines
:param sents: Optional array of pre-processed sentence objects (could include golden info)
:param useNER: Use NER output from wikification
:param sessionTag: optional Prefix tag for generated files
:param modelInfo: Optional structure defining saved nn models and vector/architecture files
'''
#if NEWPrediction:
# keepSense=False
#else:
keepSense=True
modelDBFn=''
pid=0
# 1. create a standard data frame with one row per word and add to each sentence
if not sents:
sentsRaw = {'test':[]}
sentsRaw['test'], _ = readAllAMR(inputFn)
sents = sentsRaw
ixList = range(len(sents['test']))
initializePredictionDataFrames(sents, ixList, NEWPrediction=NEWPrediction)
# 2a. create vector db from the sentences
if (useNER):
nnTag = 'AMRL0'
else:
nnTag = 'AMRL0NoNER'
(modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
wordDF = createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense)
# 2b. run SG neural net
runNetwork('SG',testVectorDBFn, modelFn, resultsDBFn)
# 2c. add SG nn output to data frames
addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, 'AMRL0',
NoConceptThreshold = NoConceptThreshold,
conceptRejectionThreshold = conceptRejectionThreshold)
if debugSave: pickle.dump( sents, open( 'e2eDebug2.pcl', "wb" ) )
nnTag = 'AMRL0Ncat'
if nnTag in modelInfo: # named category is a new option
(modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
runNetwork('Cat',testVectorDBFn, modelFn, resultsDBFn)
addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
if debugSave: pickle.dump( sents, open( 'e2eDebugNcat.pcl', "wb" ) )
nnTag = 'AMRL0Args'
    if nnTag in modelInfo: # run this stage only when an Args model is configured
(modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
runNetwork('Args',testVectorDBFn, modelFn, resultsDBFn)
addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
if debugSave: pickle.dump( sents, open( 'e2eDebug3.pcl', "wb" ) )
nnTag = 'AMRL0Nargs'
    if nnTag in modelInfo: # run this stage only when an Nargs model is configured
(modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
runNetwork('Nargs',testVectorDBFn, modelFn, resultsDBFn)
addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
if debugSave: pickle.dump( sents, open( 'e2eDebug4.pcl', "wb" ) )
nnTag = 'AMRL0Attr'
    if nnTag in modelInfo: # run this stage only when an Attr model is configured
(modelCreationDBFn, modelFn, testVectorDBFn, resultsDBFn) = filenamesForNNTag(nnTag, modelInfo, sessionTag)
createVectorsFromDataFrames(sents, 'predictedDFrame', modelCreationDBFn, testVectorDBFn, nnTag, keepSense=keepSense,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB, useDistSG=useDistSG)
runNetwork('Attr',testVectorDBFn, modelFn, resultsDBFn)
addTorchNetworkResults(sents, resultsDBFn, testVectorDBFn, nnTag)
if debugSave: pickle.dump( sents, open( 'e2eDebug5.pcl', "wb" ) )
return sents, wordDF
def alignedInputDryrunFlow(amrSents, outFn, sessionTag,
modelInfo=None,
useCacheIfAvail=True,
useNER=False, debugSave=False, checkResults=False,
NoConceptThreshold=0.65,
conceptRejectionThreshold=0.0,
forceSubGroupConnectionThreshold=0.35,
NEWPrediction=False,
L0OnlyFromFeaturesDB=False,
useDistSG=False): # instead of just ::snt, read from alignments amr, try to use the same sentence boundaries in multi-sent
sents, wordDF = daisyluSystemEndToEnd(None, sents=amrSents, useNER=useNER,
sessionTag = sessionTag,
modelInfo=modelInfo,
useCacheIfAvail= useCacheIfAvail,
conceptRejectionThreshold=conceptRejectionThreshold,
debugSave=debugSave,
NoConceptThreshold=NoConceptThreshold,
NEWPrediction=NEWPrediction,
L0OnlyFromFeaturesDB=L0OnlyFromFeaturesDB,
useDistSG=useDistSG )
for i in range(len(sents['test'])):
s = sents['test'][i]
        print(i, s.source['metadata']['id'], s.multiSentIX, s.tokens)
createOutputTextFile(sents, outFn, modelInfo=modelInfo,
forceSubGroupConnectionThreshold=forceSubGroupConnectionThreshold)
if checkResults:
pickle.dump( sents, open( sessionTag + outFn + '_2.pcl', "wb" ) )
return sents
def removeWikiAttrs(sents):
for i,sentence in enumerate(sents['test']):
G = sentence.singleComponentGraph['graph']
for lbl in G.nodes():
            for a in list(G.node[lbl]['attributes'].keys()):
                if (a == 'wiki'):
                    print('removing wiki', G.node[lbl]['attributes'][a])
                    del G.node[lbl]['attributes'][a]
return sents
if __name__ == '__main__':
desc = """
python daisylu_main.py
"""
parser = argparse.ArgumentParser(description=desc)
parser.add_argument('-a','--aligned', help='aligned input', action='store_true', default=True)
parser.add_argument('-i','--infile', help='input file name', required=False, default='TINY_amr-bank-struct-v1.6-test.txt')
parser.add_argument('-o','--outfile', help='output file name', required=False, default='TINY_amr-bank-struct-v1.6-test.amr')
parser.add_argument('-g','--goldfile', help='gold file name', required=False, default='TINY_amr-bank-struct-v1.6-test')
parser.add_argument('-t','--tag', help='results and temp file tag', required=False, default='tmp_')
parser.add_argument('-m','--modelString', help='modelString, like REFERENCE_MODELS', required=False, default='REFERENCE_MODELS')
parser.add_argument('-pid','--pid', help='pid for AWS', required=False, default=-1, type=int)
parser.add_argument('-nct','--noConceptThreshold', help='no Concept Threshold prob', required=False, default=0.65, type=float)
parser.add_argument('-sgt','--subGroupThreshold', help='sub Group Threshold prob', required=False, default=0.55, type=float)
parser.add_argument('-noWiki','--noWiki', help='remove wiki references (LDC2014)', action='store_true', default=False )
args = vars(parser.parse_args())
pprint (args)
WordRepsFileLocations.init('../data/WORD_LIST.txt')
pd.set_option('display.width', 1000)
pd.set_option('display.max_rows', 2000)
useDistSG=True
mi = {}
mi['AMRL0NoNER'] = { 'id': 0, 'db': 'None' }
mi['AMRL0'] = { 'id': './models/SG.model@./models/SG.weights' , 'db': 'LDC15_G300ML_Concepts.db' }
mi['AMRL0Args'] = { 'id': './models/Args.model@./models/Args.weights' , 'db': 'LDC15_G300ML_SG_prob_Args.db' }
mi['AMRL0Nargs'] = { 'id': './models/Nargs.model@./models/Nargs.weights' , 'db': 'LDC15_G300ML_SG_prob_Nargs.db' }
mi['AMRL0Attr'] = { 'id': './models/Attr.model@./models/Attr.weights' , 'db': 'LDC15_G300ML_SG_prob_Attr.db' }
mi['AMRL0Ncat'] = { 'id': './models/Ncat.model@./models/Ncat.weights' , 'db': 'LDC15_G300ML_SG_prob_Cat.db' }
modelInfoDict = {'REFERENCE_MODELS': mi}
outfile1 = args['outfile']
outfile2 = 'corrected-' + outfile1
sList={}
sList['test'], _ = readAllAMR(args['infile'])
sents = alignedInputDryrunFlow(sList, outfile1,
args['tag'],
useNER=True,
modelInfo = modelInfoDict[args['modelString']],
conceptRejectionThreshold=0.20, # <------------------------ New
NoConceptThreshold=args['noConceptThreshold'],
forceSubGroupConnectionThreshold=args['subGroupThreshold'],
NEWPrediction=True,
useDistSG=useDistSG )
forceICorefs(sents)
removeQuantHMMAttrs(sents)
translateCountryCat(sents)
if args['noWiki']:
removeWikiAttrs(sents)
createOutputTextFile(sents, outfile2, modelInfo=modelInfoDict[args['modelString']], forceSubGroupConnectionThreshold=args['subGroupThreshold'] )
if args['goldfile']:
cmd = getSystemPath('smatchCommand') + ' -r 25 -f %s %s' % ( args['goldfile'], outfile2)
        print(cmd)
        res = subprocess.check_output(cmd, shell=True)
        print('result is ', res)
    print('Done')
exit(1)
|
159328
|
import re
def sort_urls(url_list,reverse=True):
return sorted(url_list, key=lambda k: k['bandwidth'], reverse=reverse)
def name_checker(name):
name = name.replace("'", "")
name = re.findall(r"([\w\d-]+)", name)
    return ' '.join(name)
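if __name__ == "__main__":
    # Quick sanity check with illustrative data (not from the source):
    urls = [{"url": "low", "bandwidth": 100}, {"url": "high", "bandwidth": 300}]
    assert sort_urls(urls)[0]["url"] == "high"           # highest bandwidth first
    assert name_checker("It's-a_test!") == "Its-a_test"  # apostrophe and '!' stripped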
|
159352
|
from amaranth.vendor.lattice_ice40 import *
from amaranth.vendor.lattice_ice40 import __all__
import warnings
warnings.warn("instead of nmigen.vendor.lattice_ice40, use amaranth.vendor.lattice_ice40",
DeprecationWarning, stacklevel=2)
|
159359
|
from __future__ import annotations
manylinux1_compatible = False
manylinux2010_compatible = False
manylinux2014_compatible = False
def manylinux_compatible(*_, **__): # PEP 600
return False
|
159388
|
from boa_test.tests.boa_test import BoaFixtureTest
from boa.compiler import Compiler
from neo.Core.TX.Transaction import Transaction
from neo.Prompt.Commands.BuildNRun import TestBuild
class TestContract(BoaFixtureTest):
def test_Account(self):
output = Compiler.instance().load('%s/boa_test/example/blockchain/AccountTest.py' % TestContract.dirname).default
out = output.write()
        string_output = output.to_s()
        self.assertGreater(len(string_output), 0)
account = self.wallet_1_script_hash.Data
bad_account = bytearray(b'S\xefB\xc8\xdf!^\xbeZ|z\xe8\x01\xcb\xc3\xac/\xacE)')
tx, results, total_ops, engine = TestBuild(out, ['get_hash', bad_account], self.GetWallet1(), '07', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBoolean(), True)
tx, results, total_ops, engine = TestBuild(out, ['get_hash', account], self.GetWallet1(), '07', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetByteArray(), account)
tx, results, total_ops, engine = TestBuild(out, ['get_votes', account], self.GetWallet1(), '07', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetArray(), [])
tx, results, total_ops, engine = TestBuild(out, ['get_balance_gas', account], self.GetWallet1(), '07', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 1399980000)
tx, results, total_ops, engine = TestBuild(out, ['get_balance_neo', account], self.GetWallet1(), '07', '05')
self.assertEqual(len(results), 1)
self.assertEqual(results[0].GetBigInteger(), 5000000000)
|
159480
|
import matplotlib
matplotlib.use('Agg') #display backend
import matplotlib.pyplot as plt
import numpy as np
import os
from scipy.spatial import KDTree
import scipy.stats as st
from scipy.optimize import curve_fit as cu
from astropy.io import fits
import astropy.cosmology as co
from legacyanalysis.pathnames import get_indir,get_outdir,make_dir
indir= get_indir('cosmos')
#CREATES THE CATALOG LIST
catList = ["catalog-R3-R4.fits",
"catalog-R2-R4.fits",
"catalog-R2-R3.fits",
"catalog-R1-R4.fits",
"catalog-R1-R3.fits",
"catalog-R1-R2.fits"]
for cnt,cat in enumerate(catList): catList[cnt]= os.path.join(indir,cat)
# EACH CATALOG NEEDS TO HAVE THE TYPICAL DECALS CATALOG ENTRIES WITH "_1" AND "_2" APPENDED FOR DR2 and DR3
# DEFINES THE GAUSSIAN FUNCTION
gfun = lambda x, m0, s0 : st.norm.pdf(x,loc=m0,scale=s0)
#OPENS A FILE TO WRITE OUTPUTS
f=open(os.path.join(get_outdir('cosmos'),"depth-comparisonp.txt"),"w")
f.write("20<g<21.5 \n")
# CREATES A FIGURE
plt.figure(2,(5,5))
plt.axes([0.17,0.15,0.75,0.75])
# PLOT THE EXPECTED NORMAL DISTRIBUTION
plt.plot(np.arange(-10,6,0.1), st.norm.pdf(np.arange(-10,6,0.1),loc=0,scale=1), 'k--', lw=2, label='N(0,1)')
# LOOPS OVER MATCHED CATALOGS
for ii, el in enumerate(catList):
hdu=fits.open(el)
dr2=hdu[1].data
# DEFINES MAGNITUDES TO SELECT A MAGNITUDE BIN
g_mag_dr2 = 22.5 - 2.5 * np.log10(dr2['decam_flux_2'].T[1] / dr2['decam_mw_transmission_2'].T[1])
r_mag_dr2 = 22.5 - 2.5 * np.log10(dr2['decam_flux_2'].T[2] / dr2['decam_mw_transmission_2'].T[2])
z_mag_dr2 = 22.5 - 2.5 * np.log10(dr2['decam_flux_2'].T[4] / dr2['decam_mw_transmission_2'].T[4])
# SELECT A POPULATION OF SOURCES
sel = (dr2['type_2'] == "PSF")&(g_mag_dr2>20)&(g_mag_dr2<21.5)
# COMPARES THE PHOTOMETRIC OUTPUTS
df_g = dr2[sel]['decam_flux_1'].T[1] / dr2[sel]['decam_mw_transmission_1'].T[1] - dr2[sel]['decam_flux_2'].T[1] / dr2[sel]['decam_mw_transmission_2'].T[1]
sigma_g = (1./dr2[sel]['decam_flux_ivar_1'].T[1] + 1./dr2[sel]['decam_flux_ivar_2'].T[1])**(-0.5)
# CREATES THE HISTOGRAM
area=1 #plot is normalized so does not matter
    nnn, bbb, ppp = plt.hist(df_g * sigma_g, bins=np.arange(-4, 4.5, 0.25), weights=np.ones_like(df_g)/area, histtype='step', label=str(ii), density=True)
#FITS A GAUSSIAN TO THE HISTOGRAM AND WRITES THE OUTPUT
out = cu(gfun,(bbb[1:]+bbb[:-1])/2.,nnn,p0=(0,1))
f.write(el + '\n')
f.write(str(ii) + " " + str(out[0]))
f.write('\n')
plt.xlabel('(g(Ri)-g(FD))/sqrt(var_g(Ri) + var_g(FD))')
plt.ylabel('Normed counts')
plt.xlim((-4,4))
plt.ylim((0,0.7))
gp = plt.legend(loc=2, fontsize=10)
gp.set_frame_on(False)
plt.title('20<g<21.5 type PSF')
plt.grid()
#SAVES THE PLOT AND CLOSES THE FILE WHERE THINGS WERE WRITTEN
path=os.path.join(get_outdir('cosmos'),"plotsRc")
make_dir(path)
plt.savefig(os.path.join(path, "comparison-depth-normed-g-20-215-cosmos.png"))
plt.clf()
f.close()
print('finished comparison: cosmos')
|
159527
|
import re
from typing import Any
from pytest import mark, warns
from omegaconf import OmegaConf
def test_legacy_env_is_cached(monkeypatch: Any) -> None:
monkeypatch.setenv("FOOBAR", "1234")
c = OmegaConf.create({"foobar": "${env:FOOBAR}"})
with warns(UserWarning):
before = c.foobar
monkeypatch.setenv("FOOBAR", "3456")
assert c.foobar == before
@mark.parametrize(
"value,expected",
[
# bool
("false", False),
("true", True),
# int
("10", 10),
("-10", -10),
# float
("10.0", 10.0),
("-10.0", -10.0),
# strings
("off", "off"),
("no", "no"),
("on", "on"),
("yes", "yes"),
(">1234", ">1234"),
(":1234", ":1234"),
("/1234", "/1234"),
# yaml strings are not getting parsed by the env resolver
("foo: bar", "foo: bar"),
("foo: \n - bar\n - baz", "foo: \n - bar\n - baz"),
# more advanced uses of the grammar
("ab \\{foo} cd", "ab \\{foo} cd"),
("ab \\\\{foo} cd", "ab \\\\{foo} cd"),
(" 1 2 3 ", " 1 2 3 "),
("\t[1, 2, 3]\t", "\t[1, 2, 3]\t"),
(" {a: b}\t ", " {a: b}\t "),
],
)
def test_legacy_env_values_are_typed(
monkeypatch: Any, value: Any, expected: Any
) -> None:
monkeypatch.setenv("MYKEY", value)
c = OmegaConf.create({"my_key": "${env:MYKEY}"})
with warns(UserWarning, match=re.escape("The `env` resolver is deprecated")):
assert c.my_key == expected
|
159559
|
import numpy as np
import matplotlib.pyplot as plt
from sklearn import svm, datasets
# import some data to play with
iris = datasets.load_iris()
X = iris.data[:, :2]  # we only take the first two features
y = iris.target
C = 1.0 # SVM regularization parameter
svc = svm.SVC(kernel='linear', C=C).fit(X, y)  # gamma is unused by the linear kernel
# create a mesh to plot in
x_min, x_max = X[:, 0].min() - 1, X[:, 0].max() + 1
y_min, y_max = X[:, 1].min() - 1, X[:, 1].max() + 1
h = (x_max - x_min) / 100  # mesh step size
xx, yy = np.meshgrid(np.arange(x_min, x_max, h),
np.arange(y_min, y_max, h))
plt.subplot(1, 1, 1)
Z = svc.predict(np.c_[xx.ravel(), yy.ravel()])
Z = Z.reshape(xx.shape)
plt.contourf(xx, yy, Z, cmap=plt.cm.Paired, alpha=0.8)
plt.scatter(X[:, 0], X[:, 1], c=y, cmap=plt.cm.Paired)
plt.xlabel('Sepal length')
plt.ylabel('Sepal width')
plt.xlim(xx.min(), xx.max())
plt.title('SVC with linear kernel')
plt.show()
|
159580
|
import random
import types
from .const import *
from .genetic import encoder
from .formatter import *
from .utils import longest_common_subseqence as lcs
__all__ = ['transform_column']
regex_table = {
INDEX_TABLE(0x00) : '\\d',
INDEX_TABLE(0x01) : '[A-Z]',
INDEX_TABLE(0x02) : '[a-z]',
INDEX_TABLE(0x03) : '[A-Za-z]',
INDEX_TABLE(0x04) : '[0-9A-F]',
INDEX_TABLE(0x05) : '[0-9a-f]',
INDEX_TABLE(0x06) : '\\w',
INDEX_TABLE(0x07) : '\\s',
INDEX_TABLE(0x08) : space_only_format,
INDEX_TABLE(0x09) : '.',
INDEX_TABLE(0x0a) : char_or_format,
INDEX_TABLE(0x0b) : char_or_format,
INDEX_TABLE(0x0c) : char_range_format,
INDEX_TABLE(0x0d) : char_range_format,
INDEX_TABLE(0x0e) : string_or_format,
INDEX_TABLE(0x0f) : char_range_format,
INDEX_TABLE(0x10) : col_char_range_format,
INDEX_TABLE(0x11) : col_char_range_format,
INDEX_TABLE(0x12) : col_char_range_format,
INDEX_TABLE(0x13) : col_char_range_format,
INDEX_TABLE(0x14) : col_char_range_format,
}
def find_sequence(seq, target):
    # Search from the back for the upper bound, so the remaining part of the
    # sequence can always be matched completely
    def find_remain(seq, target):
        t_idx = 0
        start = 0
        while t_idx < len(target):
            for i in range(start, len(seq)):
                if seq[i][0] == target[t_idx]:
                    t_idx += 1
                    start = i + 1
                    break
        return start
obj = {}
for idx, s in enumerate(seq):
sym = s[0]
if not sym in obj:
obj[sym] = []
obj[sym].append((s[1], idx))
output = []
lb = -1 # lower bound
for i in range(len(target)):
sym = target[i]
ub = len(seq) - find_remain(seq[::-1], target[i+1:][::-1])
selectable = obj[sym]
selectable = list(filter(lambda x: ub > x[1] > lb, selectable))
# best = sorted(selectable, key=lambda x: -x[0])[0]
# lb = best[1]
lb = random.choice(selectable)[1]
output.append(lb)
return output
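# Illustrative example (hypothetical data): with seq = [('a', 2), ('b', 3), ('a', 1)]
# and target = 'ab', find_sequence picks one index per target symbol, in order,
# choosing randomly among positions that still leave room for the remaining symbols.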
def decoder(subsequences, subsequence):
"""
輸出 Regex
"""
regex = ''
tmp = ''
for i in range(len(subsequences[0])):
cnts = [len(seq[i]) for seq in subsequences]
targets = [seq[i] for seq in subsequences]
        if i % 2 == 0:  # the variable part, not fixed across rows
            if len(set(targets)) > 3 or random.randint(0, 99) % 2 == 1:  # many distinct values
                _next = '.*'
            elif len(''.join(set(targets))) == 1:  # only a few are different
_next = char_range_format(targets, cnts)
else:
setstr = set()
for ss in targets:
if ss == '' : continue
setstr.add(escape_format(ss, False))
_next = f"({'|'.join(setstr)})"
if '' in targets :
_next += '?'
if sum(cnts) == 0 or (_next.startswith('.') and tmp.startswith('.')):
tmp = ''
else :
tmp = _next
else:
typ = regex_table[chr(ord(subsequence[i//2]) & 0x7f)]
if type(typ) == types.FunctionType :
tmp = typ(targets, cnts)
else :
tmp = typ
tmp += freq_counter(cnts)
regex += tmp
return regex
def transform_column(column, gene):
"""
全部搞在一起
"""
t_column = encoder(column, gene)
f_columns = type_counter(t_column)
type_list = [''.join(map(str, [idx for idx, count in col]))
for col in f_columns]
subsequence = lcs(type_list)
    # After encoding, if the columns share no common subsequence, this comparison is meaningless; bail out
if subsequence == '':
return '.*', INDEX_TABLE(9)
seq_count = []
for ff in f_columns:
targets = find_sequence(ff, subsequence)
l_count = []
cnt = 0
for i in range(0, len(ff)):
if i in targets:
if ff[i][0] != '9':
l_count.append(cnt)
l_count.append(ff[i][1])
else:
                    # if this one is also 'any', merge it in
l_count.append(0)
l_count.append(ff[i][1] + cnt)
cnt = 0
else:
cnt += ff[i][1]
seq_count.append(l_count)
strs = []
for str_idx, seq_c in enumerate(seq_count):
idx = 0
tmp = []
for cnt in seq_c:
tmp.append(column[str_idx][idx:idx+cnt])
idx += cnt
tmp.append(column[str_idx][idx:])
strs.append(tmp)
return decoder(strs, subsequence), subsequence
|
159587
|
import datetime
import json
import types
from uuid import UUID
import lazy_object_proxy
from future.utils import iteritems
from simpleflow.futures import Future
def serialize_complex_object(obj):
if isinstance(
obj, bytes
): # Python 3 only (serialize_complex_object not called here in Python 2)
return obj.decode("utf-8", errors="replace")
if isinstance(obj, datetime.datetime):
r = obj.isoformat()
if obj.microsecond:
r = r[:23] + r[26:] # milliseconds only
if r.endswith("+00:00"):
r = r[:-6] + "Z"
return r
elif isinstance(obj, datetime.date):
return obj.isoformat()
elif isinstance(obj, datetime.time):
r = obj.isoformat()
if obj.microsecond:
r = r[:12]
return r
elif isinstance(obj, types.GeneratorType):
return [i for i in obj]
elif isinstance(obj, Future):
return obj.result
elif isinstance(obj, UUID):
return str(obj)
elif isinstance(obj, lazy_object_proxy.Proxy):
return str(obj)
elif isinstance(obj, (set, frozenset)):
return list(obj)
raise TypeError(
"Type %s couldn't be serialized. This is a bug in simpleflow,"
" please file a new issue on GitHub!" % type(obj)
)
def _resolve_proxy(obj):
if isinstance(obj, dict):
return {k: _resolve_proxy(v) for k, v in iteritems(obj)}
if isinstance(obj, (list, tuple)):
return [_resolve_proxy(v) for v in obj]
if isinstance(obj, lazy_object_proxy.Proxy):
return str(obj)
return obj
def json_dumps(obj, pretty=False, compact=True, **kwargs):
"""
JSON dump to string.
:param obj:
:type obj: Any
:param pretty:
:type pretty: bool
:param compact:
:type compact: bool
:return:
:rtype: str
"""
if "default" not in kwargs:
kwargs["default"] = serialize_complex_object
if pretty:
kwargs["indent"] = 4
kwargs["sort_keys"] = True
kwargs["separators"] = (",", ": ")
elif compact:
kwargs["separators"] = (",", ":")
kwargs["sort_keys"] = True
try:
return json.dumps(obj, **kwargs)
except TypeError:
# lazy_object_proxy.Proxy subclasses basestring: serialize_complex_object isn't called on python2
# and some versions of pypy
obj = _resolve_proxy(obj)
return json.dumps(obj, **kwargs)
def json_loads_or_raw(data):
"""
Try to get a JSON object from a string.
If this isn't JSON, return the raw string.
:param data: string; should be in JSON format
:return: JSON-decoded object or raw data
"""
if not data:
return None
try:
return json.loads(data)
except Exception:
return data
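if __name__ == "__main__":
    # Illustrative round trip: datetimes and sets are handled by
    # serialize_complex_object, while non-JSON input falls through untouched.
    payload = {"when": datetime.datetime(2020, 1, 2, 3, 4, 5), "tags": {"a", "b"}}
    dumped = json_dumps(payload, pretty=True)
    assert json_loads_or_raw(dumped)["when"] == "2020-01-02T03:04:05"
    assert json_loads_or_raw("not json") == "not json"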
|
159596
|
from dependency_injector.wiring import Provide, inject
from fastapi.params import Depends
from server.container import AppContainer
from server.utils import make_router
from tarkov.profile.dependencies import with_profile
from tarkov.insurance.interfaces import IInsuranceService
from tarkov.models import TarkovSuccessResponse
from tarkov.offraid.requests import OffraidSaveRequest
from tarkov.offraid.services import OffraidSaveService
from tarkov.profile.profile import Profile
offraid_router = make_router(tags=["Offraid"])
@offraid_router.put("/raid/profile/save")
@inject
def singleplayer_raid_profile_save(
request: OffraidSaveRequest,
profile: Profile = Depends(with_profile),
offraid_service: OffraidSaveService = Depends(
Provide[AppContainer.offraid.service]
),
insurance_service: IInsuranceService = Depends(
Provide[AppContainer.insurance.service]
),
) -> TarkovSuccessResponse:
if request.is_player_scav:
raise NotImplementedError
insured_items = insurance_service.get_insurance(
profile=profile,
offraid_profile=request.profile,
is_alive=request.health.is_alive,
)
for trader_id, items in insured_items.items():
insurance_service.send_insurance_mail(
items=items,
trader_id=trader_id,
profile=profile,
)
insurance_service.remove_insurance(items=items, profile=profile)
offraid_service.update_profile(
profile=profile,
raid_profile=request.profile,
raid_health=request.health,
)
return TarkovSuccessResponse(data=None)
|
159603
|
from jwt.compat import constant_time_compare
from jwt.utils import force_bytes
class TestCompat:
def test_constant_time_compare_returns_true_if_same(self):
assert constant_time_compare(
force_bytes('abc'), force_bytes('abc')
)
def test_constant_time_compare_returns_false_if_diff_lengths(self):
assert not constant_time_compare(
force_bytes('abc'), force_bytes('abcd')
)
def test_constant_time_compare_returns_false_if_totally_different(self):
assert not constant_time_compare(
force_bytes('abcd'), force_bytes('efgh')
)
|
159615
|
import socket
import sys
import os
from control_flow_constants import *
print("HELLO")
from timeit import default_timer as timer
start = timer()
sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
server_address = (CHEETAH_MASTER_IP, CHEETAH_MASTER_PORT)
sock.bind(server_address)
for i in range(CHEETAH_WORKER_NODES):
sock.recvfrom(4096)
end = timer()
print("End to end delay:", end - start, "s")
|
159654
|
import os
import django
BASE_PATH = os.path.dirname(__file__)
if django.VERSION[:2] >= (1, 3):
DATABASES = {
'default': {
'ENGINE': 'django.db.backends.sqlite3',
'NAME': ':memory:',
}
}
else:
DATABASE_ENGINE = 'sqlite3'
DATABASE_NAME = ':memory:'
SITE_ID = 1
DEBUG = True
TEST_RUNNER = 'django_coverage.coverage_runner.CoverageRunner'
COVERAGE_MODULE_EXCLUDES = [
'tests$', 'settings$', 'urls$',
'common.views.test', '__init__', 'django',
'migrations', 'djcelery'
]
COVERAGE_REPORT_HTML_OUTPUT_DIR = os.path.join(BASE_PATH, 'coverage')
INSTALLED_APPS = [
'django.contrib.admin',
'django.contrib.auth',
'django.contrib.contenttypes',
'django.contrib.sessions',
'django.contrib.sites',
'app_metrics',
'app_metrics.tests',
'djcelery',
'django_coverage'
]
ROOT_URLCONF = 'app_metrics.tests.urls'
CELERY_ALWAYS_EAGER = True
APP_METRICS_BACKEND = 'app_metrics.backends.db'
APP_METRICS_MIXPANEL_TOKEN = None
APP_METRICS_DISABLED = False
SECRET_KEY = "herp-derp"
|
159673
|
import itertools
import requests
from geosolver import settings
import networkx as nx
__author__ = 'minjoon'
class SyntaxParse(object):
def __init__(self, words, directed, undirected, rank, score):
self.words = words
self.directed = directed
self.undirected = undirected
self.rank = rank
self.score = score
def get_words(self, span):
return tuple(self.words[idx] for idx in range(*span))
def get_word(self, index):
if index < 0:
return None
return self.words[index]
def get_pos_by_index(self, index):
if index not in self.undirected._node:
return None
tag = self.undirected._node[index]['tag']
return tag
def get_pos_by_span(self, span):
"""
        If the span covers more than one word, obtain the tag of the last word (highest plain index).
Usually the compound is the former.
"""
return self.get_pos_by_index(span[-1]-1)
def iterate_spans(self, maxlen=2):
for start in range(len(self.words)):
for spanlen in range(maxlen):
end = start + spanlen + 1
if end <= len(self.words):
yield (start, end)
def shortest_path_between_spans(self, s0, s1, directed=False):
paths = [self.shortest_path_between_indices(i0, i1, directed)
for i0, i1 in itertools.product(range(*s0), range(*s1))]
return min(paths, key=lambda path: len(path))
def shortest_path_between_indices(self, i0, i1, directed=False):
graph = self.undirected
if directed:
graph = self.directed
path = nx.shortest_path(graph, i0, i1)
return path
def distance_between_spans(self, s0, s1, directed=False):
distances = [self.distance_between_indices(i0, i1, directed)
for i0, i1 in itertools.product(range(*s0), range(*s1))]
return min(distances)
def plain_distance_between_spans(self, s0, s1, directed=False):
distances = [self.plain_distance_between_indices(i0, i1, directed)
for i0, i1 in itertools.product(range(*s0), range(*s1))]
return min(distances)
def distance_between_indices(self, i0, i1, directed=False):
graph = self.undirected
if directed:
graph = self.directed
d = nx.shortest_path_length(graph, i0, i1)
return d
def plain_distance_between_indices(self, i0, i1, directed=False):
if directed:
return i1 - i0
return abs(i1-i0)
def relation_between_spans(self, s0, s1, directed=False):
relations = [self.relation_between_indices(i0, i1, directed)
for i0, i1 in itertools.product(range(*s0), range(*s1))]
for relation in relations:
if relation is not None:
return relation
return None
def relation_between_indices(self, i0, i1, directed=False):
graph = self.undirected
if directed: graph = self.directed
if i1 in graph[i0]:
label = graph[i0][i1]['label']
return label
return None
def get_neighbors(self, span, directed=False):
graph = self.undirected
if directed: graph = self.directed
nbrs = {}
for from_ in range(*span):
for to in graph[from_]:
nbrs[to] = graph[from_][to]['label']
return nbrs
class SyntaxParser(object):
def get_syntax_parses(self, words, k, unique=True):
"""
Returns a list of (tree, score) pair in order of decreasing score
"""
        raise Exception("This function must be overridden!")
def get_best_syntax_parse(self, words, parser=True):
return self.get_syntax_parses(words, 1, parser=parser)[0]
class StanfordDependencyParser(SyntaxParser):
"""
Connects to stanford parser sever via http.
"""
def __init__(self, server_url):
self.server_url = server_url
def get_syntax_parses(self, words, k, unique=True, parser=True):
# FIXME : this should be fixed at geoserver level
words = {key: word.lstrip().rstrip() for key, word in words.items()}
if not parser:
return [SyntaxParse(words, None, None, None, None)]
sentence = [words[index] for index in sorted(words.keys())]
neutral_sentence = [_neutralize(word) for word in sentence]
params = {'words': '+'.join(neutral_sentence), 'k': k, 'paragraph': ' '.join(neutral_sentence)}
r = requests.get(self.server_url, params=params)
data = r.json()
trees = []
for rank, tree_data in enumerate(data):
score = tree_data['score']
tuples = tree_data['tuples']
graph = nx.DiGraph()
for label, from_, to, from_tag, to_tag in tuples:
from_ -= 1
to -= 1
if from_ < 0:
continue
graph.add_edge(from_, to, label=label)
if 'label' not in graph._node[from_]:
graph._node[from_]['label'] = "%s-%d" % (words[from_], from_)
graph._node[from_]['word'] = words[from_]
graph._node[from_]['tag'] = from_tag
if 'label' not in graph._node[to]:
graph._node[to]['label'] = "%s-%d" % (words[to], to)
graph._node[to]['word'] = words[to]
graph._node[to]['tag'] = to_tag
if unique and not any(_match_trees(syntax_tree.directed, graph) for syntax_tree in trees):
tree = SyntaxParse(words, graph, graph.to_undirected(), rank, score)
trees.append(tree)
return trees
def _neutralize(word):
if word.startswith("@v"):
return 'number'
if word.startswith("@s"):
return "statement"
return word
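# e.g. _neutralize('@v_1') -> 'number', _neutralize('@s_2') -> 'statement',
# and any other word passes through unchanged.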
def _match_trees(tree0, tree1, match_edge_label=False):
"""
Returns True if tree0 and tree1 are identical.
Edge labels are not considered unless match_edge_label is set to True.
:param tree0:
:param tree1:
:param match_edge_label:
:return:
"""
assert isinstance(tree0, nx.DiGraph)
assert isinstance(tree1, nx.DiGraph)
for u, v, data in tree0.edges(data=True):
if not tree1.has_edge(u, v):
return False
if match_edge_label and data['label'] != tree1[u][v]['label']:
return False
return True
stanford_parser = StanfordDependencyParser(settings.STANFORD_PARSER_SERVER_URL)
|
159712
|
import numpy as np
import matplotlib.pyplot as plt
filename = 'cyclic_test_data.txt'
data = np.loadtxt(filename, delimiter=',')
t = data[:,0]
v = data[:,1]
figsize = 12, 8
xlim = t.min(), t.max()
ylim = -0.2, 1.2
linewidth = 2
fig = plt.figure(1,figsize=figsize)
vpos_arrow_low = -0.15
# Quiet time fill
plt.fill([0.0,1.0,1.0,0.0],[-0.2, -0.2, 1.2, 1.2], color=(0.3,0.3,0.3), alpha=0.1)
# Quiet time label
plt.text(0.5,vpos_arrow_low,'quietTime',ha='center')
plt.arrow(0.2,vpos_arrow_low+0.01,-0.158, 0.0, ec='k', fc='k')
plt.arrow(0.8,vpos_arrow_low+0.01, 0.17, 0.0, ec='k', fc='k')
# 1st Period label
plt.text(2.0, vpos_arrow_low, 'period (cycle 1)', ha='center')
plt.arrow(1.6, vpos_arrow_low+0.01, -0.56, 0.0, fc='k', ec='k')
plt.arrow(2.4, vpos_arrow_low+0.01, 0.57, 0.0, fc='k', ec='k')
# 2nd Period label
plt.text(4.0, vpos_arrow_low, 'period (cycle 2)', ha='center')
plt.arrow(3.6, vpos_arrow_low+0.01, -0.56, 0.0, fc='k', ec='k')
plt.arrow(4.4, vpos_arrow_low+0.01, 0.57, 0.0, fc='k', ec='k')
# Midline
plt.plot([1.0, xlim[1]], [0.5, 0.5], 'k',linewidth=linewidth)
# Offset lines and labels
hpos_offset_arrow = 1.6
plt.plot([1.0, hpos_offset_arrow+0.03], [0.0, 0.0], 'k', linewidth=linewidth)
plt.text(hpos_offset_arrow,0.25,'offset',va='center',rotation=90)
plt.arrow(hpos_offset_arrow+0.034, 0.18, 0.0, -0.13, fc='k', ec='k')
plt.arrow(hpos_offset_arrow+0.034, 0.32, 0.0, 0.13, fc='k', ec='k')
# Amplitude lines and labels
hpos_amp_arrow = 1.4
plt.plot([hpos_amp_arrow-0.03,2.0], [1.1, 1.1], 'k', linewidth=linewidth)
plt.text(hpos_amp_arrow,0.8,'amplitude', va='center', rotation=90)
plt.arrow(hpos_amp_arrow+0.034, 0.7, 0.0, -0.15, fc='k', ec='k')
plt.arrow(hpos_amp_arrow+0.034, 0.9, 0.0, 0.15, fc='k', ec='k')
# Quiet value label
plt.figtext(0.05, 0.155, 'quietValue')
plt.plot(t,v,'b',linewidth=linewidth)
plt.xlabel('time (s)')
plt.ylabel('potential (V)')
plt.xlim(*xlim)
plt.ylim(*ylim)
plt.grid('on')
plt.title('Cyclic voltammetry parameters')
plt.savefig('cyclic_test_fig.png')
plt.show()
|
159713
|
import numpy as np
import torch.nn as nn
import torch
import math
import time
from visualDet3D.networks.backbones import resnet
class YoloMono3DCore(nn.Module):
"""Some Information about YoloMono3DCore"""
def __init__(self, backbone_arguments=dict()):
super(YoloMono3DCore, self).__init__()
        self.backbone = resnet(**backbone_arguments)
def forward(self, x):
x = self.backbone(x['image'])
x = x[0]
return x
|
159749
|
tutor = False
def pancakesort(array):
if len(array) <= 1:
return array
if tutor:
print()
for size in range(len(array), 1, -1):
        maxindex = max(range(size), key=lambda i: array[i])
if maxindex+1 != size:
if maxindex != 0:
if tutor:
print(
'With: %r doflip %i' % (
' '.join(str(x) for x in array), maxindex+1)
)
array[:maxindex+1] = reversed(array[:maxindex+1])
if tutor:
print(
'With: %r doflip %i' % (
' '.join(str(x) for x in array), size
)
)
array[:size] = reversed(array[:size])
if tutor:
print()
|
159759
|
import sys,os
import torch
import torch.nn as nn
import config
import numpy as np
from .smpl import SMPL
sys.path.append(os.path.abspath(__file__).replace('models/smpl_regressor.py',''))
from config import args
class SMPLR(nn.Module):
def __init__(self, use_gender=False):
super(SMPLR, self).__init__()
model_path = args().smpl_model_path
J_reg_extra_path = args().smpl_J_reg_extra_path
if use_gender:
self.smpl_female = SMPL(model_path, J_reg_extra9_path=args().smpl_J_reg_extra_path, J_reg_h36m17_path=args().smpl_J_reg_h37m_path, gender='female',create_transl=False)
self.smpl_male = SMPL(model_path,J_reg_extra9_path=args().smpl_J_reg_extra_path, J_reg_h36m17_path=args().smpl_J_reg_h37m_path, gender='male',create_transl=False)
self.smpls = {'f':self.smpl_female, 'm':self.smpl_male}
else:
self.smpl_neutral = SMPL(model_path, J_reg_extra9_path=args().smpl_J_reg_extra_path, J_reg_h36m17_path=args().smpl_J_reg_h37m_path, gender='neutral',create_transl=False)
self.smpls = {'n':self.smpl_neutral}
def forward(self, pose, betas, gender='n'):
if isinstance(pose, np.ndarray):
pose, betas = torch.from_numpy(pose).float(),torch.from_numpy(betas).float()
if len(pose.shape)==1:
pose, betas = pose.unsqueeze(0), betas.unsqueeze(0)
outputs = self.smpls[gender](poses=pose, betas=betas)
return outputs
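# Usage sketch (comment-only; requires the SMPL model files configured via
# config.args, and assumes the standard 72-dim axis-angle pose / 10-dim betas):
#   smplr = SMPLR(use_gender=False)
#   out = smplr(np.zeros(72, dtype=np.float32), np.zeros(10, dtype=np.float32))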
|
159807
|
import torch
import torch.nn as nn
class Actor(nn.Module):
def __init__(self, state_size, action_size, args):
super(Actor, self).__init__()
self.fc1 = nn.Linear(state_size, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, action_size)
def forward(self, x):
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
policy = self.fc3(x)
return policy
class Critic(nn.Module):
def __init__(self, state_size, action_size, args):
super(Critic, self).__init__()
self.fc1 = nn.Linear(state_size + action_size, args.hidden_size)
self.fc2 = nn.Linear(args.hidden_size, args.hidden_size)
self.fc3 = nn.Linear(args.hidden_size, 1)
def forward(self, states, actions):
x = torch.cat([states, actions], dim=1)
x = torch.relu(self.fc1(x))
x = torch.relu(self.fc2(x))
q_value = self.fc3(x)
return q_value
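if __name__ == "__main__":
    # Smoke test with hypothetical sizes and args (not from the source):
    from types import SimpleNamespace
    args = SimpleNamespace(hidden_size=64)
    actor, critic = Actor(4, 2, args), Critic(4, 2, args)
    states = torch.randn(8, 4)
    actions = actor(states)             # (8, 2) raw policy outputs
    q_values = critic(states, actions)  # (8, 1) state-action values
    print(actions.shape, q_values.shape)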
|
159860
|
import numpy as np
import cv2
import operator
from matplotlib import pyplot as plt
def plot_many_images(images, titles, rows=1, columns=2):
"""Plots each image in a given list as a grid structure. using Matplotlib."""
for i, image in enumerate(images):
plt.subplot(rows, columns, i+1)
plt.imshow(image, 'gray')
plt.title(titles[i])
plt.xticks([]), plt.yticks([]) # Hide tick marks
plt.show()
def show_image(img):
"""Shows an image until any key is pressed"""
# print(type(img))
# print(img.shape)
# cv2.imshow('image', img) # Display the image
# cv2.imwrite('images/gau_sudoku3.jpg', img)
# cv2.waitKey(0) # Wait for any key to be pressed (with the image window active)
# cv2.destroyAllWindows() # Close all windows
return img
def show_digits(digits, colour=255):
"""Shows list of 81 extracted digits in a grid format"""
rows = []
with_border = [cv2.copyMakeBorder(img.copy(), 1, 1, 1, 1, cv2.BORDER_CONSTANT, None, colour) for img in digits]
for i in range(9):
row = np.concatenate(with_border[i * 9:((i + 1) * 9)], axis=1)
rows.append(row)
img = show_image(np.concatenate(rows))
return img
def convert_when_colour(colour, img):
"""Dynamically converts an image to colour if the input colour is a tuple and the image is grayscale."""
if len(colour) == 3:
if len(img.shape) == 2:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
elif img.shape[2] == 1:
img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR)
return img
def display_points(in_img, points, radius=5, colour=(0, 0, 255)):
"""Draws circular points on an image."""
img = in_img.copy()
    # Dynamically change to a colour image if necessary
    img = convert_when_colour(colour, img)
for point in points:
img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1)
show_image(img)
return img
def display_rects(in_img, rects, colour=(0, 0, 255)):
"""Displays rectangles on the image."""
img = convert_when_colour(colour, in_img.copy())
for rect in rects:
img = cv2.rectangle(img, tuple(int(x) for x in rect[0]), tuple(int(x) for x in rect[1]), colour)
show_image(img)
return img
def display_contours(in_img, contours, colour=(0, 0, 255), thickness=2):
"""Displays contours on the image."""
img = convert_when_colour(colour, in_img.copy())
img = cv2.drawContours(img, contours, -1, colour, thickness)
show_image(img)
def pre_process_image(img, skip_dilate=False):
"""Uses a blurring function, adaptive thresholding and dilation to expose the main features of an image."""
    # Gaussian blur with a kernel size (height, width) of 9.
    # Note that kernel sizes must be positive and odd and the kernel must be square.
proc = cv2.GaussianBlur(img.copy(), (9, 9), 0)
# Adaptive threshold using 11 nearest neighbour pixels
proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2)
# Invert colours, so gridlines have non-zero pixel values.
# Necessary to dilate the image, otherwise will look like erosion instead.
proc = cv2.bitwise_not(proc, proc)
if not skip_dilate:
# Dilate the image to increase the size of the grid lines.
kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]],np.uint8)
proc = cv2.dilate(proc, kernel)
return proc
def find_corners_of_largest_polygon(img):
"""Finds the 4 extreme corners of the largest contour in the image."""
opencv_version = cv2.__version__.split('.')[0]
if opencv_version == '3':
_, contours, h = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find contours
else:
contours, h = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find contours
contours = sorted(contours, key=cv2.contourArea, reverse=True) # Sort by area, descending
    polygon = contours[0]  # Largest contour
# Use of `operator.itemgetter` with `max` and `min` allows us to get the index of the point
# Each point is an array of 1 coordinate, hence the [0] getter, then [0] or [1] used to get x and y respectively.
# Bottom-right point has the largest (x + y) value
    # Top-left point has smallest (x + y) value
# Bottom-left point has smallest (x - y) value
# Top-right point has largest (x - y) value
bottom_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
bottom_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
top_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1))
# Return an array of all 4 points using the indices
# Each point is in its own array of one coordinate
return [polygon[top_left][0], polygon[top_right][0], polygon[bottom_right][0], polygon[bottom_left][0]]
def distance_between(p1, p2):
"""Returns the scalar distance between two points"""
a = p2[0] - p1[0]
b = p2[1] - p1[1]
return np.sqrt((a ** 2) + (b ** 2))
def crop_and_warp(img, crop_rect):
"""Crops and warps a rectangular section from an image into a square of similar size."""
# Rectangle described by top left, top right, bottom right and bottom left points
top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3]
# Explicitly set the data type to float32 or `getPerspectiveTransform` will throw an error
src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32')
# Get the longest side in the rectangle
side = max([
distance_between(bottom_right, top_right),
distance_between(top_left, bottom_left),
distance_between(bottom_right, bottom_left),
distance_between(top_left, top_right)
])
# Describe a square with side of the calculated length, this is the new perspective we want to warp to
dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32')
# Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points
m = cv2.getPerspectiveTransform(src, dst)
# Performs the transformation on the original image
return cv2.warpPerspective(img, m, (int(side), int(side)))
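# Worked example of the geometry above (the corner coordinates are hypothetical,
# not from the original): for corners (10, 10), (90, 12), (95, 95), (8, 92) the
# longest side is the bottom edge, sqrt(87**2 + 3**2) ~= 87, so the warp
# produces an 87x87 square.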
def infer_grid(img):
"""Infers 81 cell grid from a square image."""
squares = []
    side = img.shape[0] / 9
# Note that we swap j and i here so the rectangles are stored in the list reading left-right instead of top-down.
for j in range(9):
for i in range(9):
p1 = (i * side, j * side) # Top left corner of a bounding box
p2 = ((i + 1) * side, (j + 1) * side) # Bottom right corner of bounding box
squares.append((p1, p2))
return squares
def cut_from_rect(img, rect):
"""Cuts a rectangle from an image using the top left and bottom right points."""
return img[int(rect[0][1]):int(rect[1][1]), int(rect[0][0]):int(rect[1][0])]
def scale_and_centre(img, size, margin=0, background=0):
"""Scales and centres an image onto a new background square."""
h, w = img.shape[:2]
def centre_pad(length):
"""Handles centering for a given length that may be odd or even."""
if length % 2 == 0:
side1 = int((size - length) / 2)
side2 = side1
else:
side1 = int((size - length) / 2)
side2 = side1 + 1
return side1, side2
def scale(r, x):
return int(r * x)
if h > w:
t_pad = int(margin / 2)
b_pad = t_pad
ratio = (size - margin) / h
w, h = scale(ratio, w), scale(ratio, h)
l_pad, r_pad = centre_pad(w)
else:
l_pad = int(margin / 2)
r_pad = l_pad
ratio = (size - margin) / w
w, h = scale(ratio, w), scale(ratio, h)
t_pad, b_pad = centre_pad(h)
img = cv2.resize(img, (w, h))
img = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad, r_pad, cv2.BORDER_CONSTANT, None, background)
return cv2.resize(img, (size, size))
def find_largest_feature(inp_img, scan_tl=None, scan_br=None):
"""
Uses the fact the `floodFill` function returns a bounding box of the area it filled to find the biggest
connected pixel structure in the image. Fills this structure in white, reducing the rest to black.
"""
img = inp_img.copy() # Copy the image, leaving the original untouched
height, width = img.shape[:2]
max_area = 0
seed_point = (None, None)
if scan_tl is None:
scan_tl = [0, 0]
if scan_br is None:
scan_br = [width, height]
# Loop through the image
for x in range(scan_tl[0], scan_br[0]):
for y in range(scan_tl[1], scan_br[1]):
# Only operate on light or white squares
if img.item(y, x) == 255 and x < width and y < height: # Note that .item() appears to take input as y, x
area = cv2.floodFill(img, None, (x, y), 64)
if area[0] > max_area: # Gets the maximum bound area which should be the grid
max_area = area[0]
seed_point = (x, y)
    # Colour everything grey (compensates for features outside of our middle scanning range)
for x in range(width):
for y in range(height):
if img.item(y, x) == 255 and x < width and y < height:
cv2.floodFill(img, None, (x, y), 64)
mask = np.zeros((height + 2, width + 2), np.uint8) # Mask that is 2 pixels bigger than the image
# Highlight the main feature
if all([p is not None for p in seed_point]):
cv2.floodFill(img, mask, seed_point, 255)
top, bottom, left, right = height, 0, width, 0
for x in range(width):
for y in range(height):
if img.item(y, x) == 64: # Hide anything that isn't the main feature
cv2.floodFill(img, mask, (x, y), 0)
# Find the bounding parameters
if img.item(y, x) == 255:
top = y if y < top else top
bottom = y if y > bottom else bottom
left = x if x < left else left
right = x if x > right else right
bbox = [[left, top], [right, bottom]]
return img, np.array(bbox, dtype='float32'), seed_point
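# Illustrative sketch of the floodFill bookkeeping this relies on (toy values,
# not part of the original pipeline):
#   blob = np.zeros((5, 5), np.uint8)
#   blob[1:4, 1:4] = 255
#   area = cv2.floodFill(blob, None, (2, 2), 64)
#   area[0] == 9  # pixel count of the filled region, used above to track the largest blob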
def extract_digit(img, rect, size):
"""Extracts a digit (if one exists) from a Sudoku square."""
digit = cut_from_rect(img, rect) # Get the digit box from the whole square
# Use fill feature finding to get the largest feature in middle of the box
# Margin used to define an area in the middle we would expect to find a pixel belonging to the digit
h, w = digit.shape[:2]
margin = int(np.mean([h, w]) / 2.5)
_, bbox, seed = find_largest_feature(digit, [margin, margin], [w - margin, h - margin])
digit = cut_from_rect(digit, bbox)
# Scale and pad the digit so that it fits a square of the digit size we're using for machine learning
w = bbox[1][0] - bbox[0][0]
h = bbox[1][1] - bbox[0][1]
# Ignore any small bounding boxes
if w > 0 and h > 0 and (w * h) > 100 and len(digit) > 0:
return scale_and_centre(digit, size, 4)
else:
return np.zeros((size, size), np.uint8)
def get_digits(img, squares, size):
"""Extracts digits from their cells and builds an array"""
digits = []
img = pre_process_image(img.copy(), skip_dilate=True)
# cv2.imshow('img', img)
for square in squares:
digits.append(extract_digit(img, square, size))
return digits
def parse_grid(path):
original = cv2.imread(path, cv2.IMREAD_GRAYSCALE)
processed = pre_process_image(original)
# cv2.namedWindow('processed',cv2.WINDOW_AUTOSIZE)
# processed_img = cv2.resize(processed, (500, 500)) # Resize image
# cv2.imshow('processed', processed_img)
corners = find_corners_of_largest_polygon(processed)
cropped = crop_and_warp(original, corners)
# cv2.namedWindow('cropped',cv2.WINDOW_AUTOSIZE)
# cropped_img = cv2.resize(cropped, (500, 500)) # Resize image
# cv2.imshow('cropped', cropped_img)
squares = infer_grid(cropped)
# print(squares)
digits = get_digits(cropped, squares, 28)
# print(digits)
final_image = show_digits(digits)
return final_image
def extract_sudoku(image_path):
final_image = parse_grid(image_path)
return final_image
#if __name__ == '__main__':
# main()
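# Hypothetical usage sketch (the image path is a placeholder):
#   grid = extract_sudoku('images/sudoku.jpg')
#   # `grid` is one image of all 81 cells, each scaled to 28px plus a 1px border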
|
159891
|
class ServiceTypes():
MachineLearning = "ml"
Vision = "vision"
ChatBot = "bot"
Speech = "speech"
LangIntent = "intent"
LangEntity = "entity"
|
159898
|
import os
import glob
import subprocess
import yaml
subfolder = os.listdir("models")[0]
folder = os.path.join("models", subfolder)
files = glob.glob("%s/[0-9]*.ckpt" % folder)
ids = sorted([int(f[len(folder)+1:-5]) for f in files])
config_path = glob.glob("*.yaml")[0]
config = yaml.safe_load(open(config_path, "r", encoding="utf-8"))
keep_last_ckpts = config["combiner_training"]["keep_last_ckpts"]
ids = ids[-keep_last_ckpts:]  # keep the most recent checkpoints, as the config key implies
inputs_str = " ".join(["%s/%d.ckpt" % (folder, id) for id in ids])
output_str = "%s/averaged.ckpt" % folder
subprocess.call("python3 scripts/average_checkpoints.py --inputs %s --output %s" % (inputs_str, output_str), shell=True)
subprocess.call("cp models/%s/best.ckpt ." % subfolder, shell=True)
subprocess.call("cp models/%s/averaged.ckpt ." % subfolder, shell=True)
|
159899
|
from gurobipy import Model, GRB, quicksum
from itertools import product
from networkx import non_edges
def create_model(G, clique_size):
r""" Create an ILP for the clique packing problem
:param G: an :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
:param clique_size: size of the clique to be packed
:return: a `gurobipy model <https://www.gurobi.com/documentation/9.1/refman/py_model.html>`_
ILP:
Let :math:`k` be the size of the cliques to be packed, :math:`\overline{E}` be the complement of
the edge set, and :math:`C = \{0, \ldots, \lfloor |V|/k \rfloor \}`
an index set for the possible cliques packed into :math:`G`.
The ILP formulation uses the following variables:
.. list-table::
:widths: 20 80
:header-rows: 0
* - :math:`y_{c}`
- Binary variables indicating whether a clique with index :math:`c` is used.
* - :math:`a_{cv}`
- Binary variables indicating whether vertex :math:`v` is part of the clique with index :math:`c`.
.. math::
:nowrap:
\begin{align*}
\max \sum_{c \in C} y_c\\
\text{s.t.} &&\\
\forall v \in V: \sum_{c \in C} a_{cv} \leq 1 && \text{(each vertex can be in at most one clique)}\\
\forall \{u, v\} \in \overline{E}: \forall c \in C:\\
a_{cu} + a_{cv} \leq 1
&& \text{(unconnected vertices cannot be in the same clique)}\\
\forall c \in C: \sum_{v \in V} a_{cv} - k y_c = 0 && \text{(chosen cliques need to have k members)}\\
\forall c \in C: \forall v \in V: y_c - a_{cv} \geq 0
        && \text{(cluster with } \geq 1 \text{ vertex needs to be chosen as clique)}\\
\end{align*}
Example:
.. list-table::
:widths: 50 50
:header-rows: 0
* - .. image:: images/example_clique_packing.png
- `Packing tetrahedra <https://github.com/VF-DE-CDS/GraphILP-API/blob/develop/graphilp/examples/CliquePackingExample.ipynb>`_
How many vertex disjoint tetrahedra can you pack in a grid graph?
"""
# create model
m = Model("graphilp_clique_packing")
# add variables for edges and nodes
max_clusters = 1 + G.G.number_of_nodes() // clique_size
# cluster choice variables
cluster_choice = m.addVars(max_clusters, vtype=GRB.BINARY)
# cluster assignment variables
cluster_assignment = m.addVars(product(range(max_clusters), G.G.nodes()), vtype=GRB.BINARY)
G.cluster_assignment = cluster_assignment
m.update()
# create constraints
# each vertex can be in at most one clique
for v in G.G.nodes():
m.addConstr(quicksum([cluster_assignment[(c, v)] for c in range(max_clusters)]) <= 1)
    # clique condition: vertices not connected by an edge cannot be in the same clique
for u, v in non_edges(G.G):
for c in range(max_clusters):
m.addConstr(cluster_assignment[(c, u)] + cluster_assignment[(c, v)] <= 1)
# chosen cliques need to have k members
for c in range(max_clusters):
m.addConstr(quicksum([cluster_assignment[(c, v)] for v in G.G.nodes()])
- clique_size * cluster_choice[c] == 0)
# each cluster with at least one vertex needs to be chosen as a clique
for c in range(max_clusters):
for v in G.G.nodes():
m.addConstr(cluster_choice[c] - cluster_assignment[(c, v)] >= 0)
m.update()
# set optimisation objective: pack as many cliques as possible
m.setObjective(quicksum(cluster_choice), sense=GRB.MAXIMIZE)
return m
def extract_solution(G, model):
""" Get a dictionary of vertex to clique assignments
If a vertex is not assigned to a clique, its value in the dictionary is zero.
:param G: an :py:class:`~graphilp.imports.ilpgraph.ILPGraph`
:param model: a solved Gurobi model for clique packing
:returns: a dictionary mapping vertices to cliques
"""
cliques = {v: 0 for v in G.G.nodes()}
for k, v in G.cluster_assignment.items():
if v.X > 0.5:
cliques[k[1]] = k[0]+1
return cliques
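# Rough usage sketch; converting a networkx graph into an ILPGraph via
# graphilp.imports.networkx.read is an assumption about the import layer,
# not verified here:
#   import networkx as nx
#   from graphilp.imports.networkx import read
#   G = read(nx.grid_graph(dim=[4, 4]))
#   m = create_model(G, clique_size=2)  # packing edges (2-cliques) in a grid graph
#   m.optimize()
#   print(extract_solution(G, m))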
|
159905
|
from edi_835_parser.elements import Element
# https://ushik.ahrq.gov/dr.ui.drValueDomain_View?system=mdr&ValueDomainID=4933000&CallingRoutine=$CallingRoutine$&OrganizationID=3&RecordOffset=11&Referer=ValueDomain
identification_code_qualifiers = {
'MI': 'member identification number',
'C': "insured's changed unique identification number",
'PC': 'provider commercial number',
'XX': 'national provider id',
}
class IdentificationCodeQualifier(Element):
def parser(self, value: str) -> str:
return identification_code_qualifiers.get(value, value)
|
159923
|
import unittest
from mock import Mock
from mock import patch
import subprocess
import logging
from twilio.rest import TwilioRestClient
from twilio.exceptions import TwilioException
from .context import configure
class ConfigureTest(unittest.TestCase):
def setUp(self):
self.configure = configure.Configure(account_sid="ACxxxxx",
auth_token="<PASSWORD>",
phone_number="+15555555555",
app_sid="APzzzzzzzzz")
self.configure.client = TwilioRestClient(self.configure.account_sid,
self.configure.auth_token)
class TwilioTest(ConfigureTest):
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_createNewTwiMLApp(self, MockApp, MockApps):
# Mock the Applications resource and its create method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
MockApp.return_value
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.createNewTwiMLApp(self.configure.voice_url,
self.configure.sms_url)
# Assert
app_create = self.configure.client.applications.create
app_create.assert_called_once_with(voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name="Hackpack for Heroku "
"and Flask")
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_createNewTwiMLAppNegativeInput(self, MockApp, MockApps):
# Mock the Applications resource and its create method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
MockApp.return_value
# Mock our input .
configure.raw_input = lambda _: 'n'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.createNewTwiMLApp,
self.configure.voice_url,
self.configure.sms_url)
@patch('twilio.rest.resources.Applications')
def test_createNewTwiMLAppException(self, MockApps):
# Mock the Applications resource and its create method.
self.configure.client.applications = MockApps.return_value
def raiseException(*args, **kwargs):
raise TwilioException("Test error.")
self.configure.client.applications.create.side_effect = raiseException
# Mock our input .
configure.raw_input = lambda _: 'y'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.createNewTwiMLApp,
self.configure.voice_url,
self.configure.sms_url)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_setAppSidRequestUrls(self, MockApp, MockApps):
# Mock the Applications resource and its update method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.update.return_value = \
MockApp.return_value
# Test
self.configure.setAppRequestUrls(self.configure.app_sid,
self.configure.voice_url,
self.configure.sms_url)
# Assert
app_create = self.configure.client.applications.update
app_create.assert_called_once_with(self.configure.app_sid,
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name='Hackpack for Heroku '
'and Flask')
@patch('twilio.rest.resources.Applications')
def test_setAppSidRequestUrls404Error(self, MockApps):
# Mock the Applications resource and its update method.
self.configure.client.applications.update = MockApps()
def raiseException(*args, **kwargs):
raise TwilioException("HTTP ERROR 404.")
self.configure.client.applications.update.side_effect = raiseException
# Test
self.assertRaises(configure.ConfigurationError,
self.configure.setAppRequestUrls,
self.configure.app_sid,
self.configure.voice_url,
self.configure.sms_url)
@patch('twilio.rest.resources.Applications')
def test_setAppSidRequestUrls500Error(self, MockApps):
# Mock the Applications resource and its update method.
self.configure.client.applications.update = MockApps()
def raiseException(*args, **kwargs):
raise TwilioException("HTTP ERROR 500.")
self.configure.client.applications.update.side_effect = raiseException
# Test
self.assertRaises(configure.ConfigurationError,
self.configure.setAppRequestUrls,
self.configure.app_sid,
self.configure.voice_url,
self.configure.sms_url)
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_retrievePhoneNumber(self, MockPhoneNumber, MockPhoneNumbers):
# Mock the PhoneNumbers resource and its list method.
mock_num = MockPhoneNumber.return_value
mock_num.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.list.return_value = [mock_num]
# Test
self.configure.retrievePhoneNumber(self.configure.phone_number)
# Assert
num_l = self.configure.client.phone_numbers.list
num_l.assert_called_once_with(phone_number=self.configure.phone_number)
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_purchasePhoneNumber(self, MockPhoneNumber, MockPhoneNumbers):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = mock_phone_number
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.purchasePhoneNumber()
# Assert
purchase = self.configure.client.phone_numbers.purchase
purchase.assert_called_once_with(area_code="646")
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
    def test_purchasePhoneNumberNegativeInput(self, MockPhoneNumber,
                                              MockPhoneNumbers):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = mock_phone_number
# Mock our input.
configure.raw_input = lambda _: 'n'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.purchasePhoneNumber)
@patch('twilio.rest.resources.PhoneNumbers')
def test_purchasePhoneNumberExceptionOnPurchase(self, MockPhoneNumbers):
# Mock the PhoneNumbers resource and its search and purchase methods
self.configure.client.phone_numbers.purchase = MockPhoneNumbers()
def raiseException(*args, **kwargs):
raise TwilioException("Test error.")
self.configure.client.phone_numbers.purchase.side_effect = \
raiseException
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.purchasePhoneNumber)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_configure(self, MockPhoneNumber, MockPhoneNumbers, MockApp,
MockApps):
# Mock the Applications resource and its update method.
mock_app = MockApp.return_value
mock_app.sid = self.configure.app_sid
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.update.return_value = \
mock_app
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.sid = "PN123"
mock_phone_number.friendly_name = "(555) 555-5555"
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.list.return_value = \
[mock_phone_number]
# Test
self.configure.configureHackpack(self.configure.voice_url,
self.configure.sms_url,
self.configure.app_sid,
self.configure.phone_number)
# Assert
apps = self.configure.client.applications.update
apps.assert_called_once_with(self.configure.app_sid,
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name='Hackpack for Heroku '
'and Flask')
update = self.configure.client.phone_numbers.update
app_sid = self.configure.app_sid
update.assert_called_once_with("PN123",
voice_application_sid=app_sid,
sms_application_sid=app_sid)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_configureNoApp(self, MockPhoneNumber, MockPhoneNumbers, MockApp,
MockApps):
# Mock the Applications resource and its update method.
mock_app = MockApp.return_value
mock_app.sid = self.configure.app_sid
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
mock_app
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.sid = "PN123"
mock_phone_number.friendly_name = "(555) 555-5555"
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.list.return_value = \
[mock_phone_number]
# Set AppSid to None
self.configure.app_sid = None
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.configureHackpack(self.configure.voice_url,
self.configure.sms_url,
self.configure.app_sid,
self.configure.phone_number)
# Assert
create = self.configure.client.applications.create
create.assert_called_once_with(voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name="Hackpack for Heroku "
"and Flask")
update = self.configure.client.phone_numbers.update
update.assert_called_once_with("PN123",
voice_application_sid=mock_app.sid,
sms_application_sid=mock_app.sid)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_configureNoPhoneNumber(self, MockPhoneNumber, MockPhoneNumbers,
MockApp, MockApps):
# Mock the Applications resource and its update method.
mock_app = MockApp.return_value
mock_app.sid = self.configure.app_sid
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.update.return_value = \
mock_app
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.sid = "PN123"
mock_phone_number.friendly_name = "(555) 555-5555"
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase.return_value = \
mock_phone_number
# Set AppSid to None
self.configure.phone_number = None
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.configure.configureHackpack(self.configure.voice_url,
self.configure.sms_url,
self.configure.app_sid,
self.configure.phone_number)
# Assert
update = self.configure.client.applications.update
update.assert_called_once_with(self.configure.app_sid,
voice_url=self.configure.voice_url,
sms_url=self.configure.sms_url,
friendly_name='Hackpack for Heroku '
'and Flask')
update = self.configure.client.phone_numbers.update
app_sid = self.configure.app_sid
update.assert_called_once_with("PN123",
voice_application_sid=app_sid,
sms_application_sid=app_sid)
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
def test_configureNoPhoneNumberTwilioError(self, MockPhoneNumber,
MockPhoneNumbers, MockApp,
MockApps):
# Mock the Applications resource and its update method.
mock_app = MockApp.return_value
mock_app.sid = self.configure.app_sid
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.update.return_value = \
mock_app
# Mock the PhoneNumbers resource and its list method.
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.sid = "PN123"
mock_phone_number.friendly_name = "(555) 555-5555"
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
def raiseException(*args, **kwargs):
raise TwilioException("Test error.")
self.configure.client.phone_numbers.update.side_effect = \
raiseException
# Mock our input.
configure.raw_input = lambda _: 'y'
# Test
self.assertRaises(configure.ConfigurationError,
self.configure.configureHackpack,
self.configure.voice_url,
self.configure.sms_url,
self.configure.app_sid,
self.configure.phone_number)
@patch.object(subprocess, 'call')
@patch.object(configure.Configure, 'configureHackpack')
def test_start(self, mock_configureHackpack, mock_call):
mock_call.return_value = None
self.configure.host = 'http://look-here-snacky-11211.herokuapp.com'
self.configure.start()
m = mock_configureHackpack
m.assert_called_once_with('http://look-here-snacky-11211.herokuapp.com'
'/voice',
'http://look-here-snacky-11211.herokuapp.com'
'/sms',
self.configure.app_sid,
self.configure.phone_number)
@patch.object(subprocess, 'call')
@patch.object(configure.Configure, 'configureHackpack')
@patch.object(configure.Configure, 'getHerokuHostname')
def test_startWithoutHostname(self, mock_getHerokuHostname,
mock_configureHackpack, mock_call):
mock_call.return_value = None
mock_getHerokuHostname.return_value = 'http://look-here-snacky-11211' \
'.herokuapp.com'
self.configure.start()
m = mock_configureHackpack
m.assert_called_once_with('http://look-here-snacky-11211.herokuapp.com'
'/voice',
'http://look-here-snacky-11211.herokuapp.com'
'/sms',
self.configure.app_sid,
self.configure.phone_number)
class HerokuTest(ConfigureTest):
def test_getHerokuHostname(self):
test = self.configure.getHerokuHostname(git_config_path='./tests'
'/test_assets'
'/good_git_'
'config')
self.assertEquals(test, 'http://look-here-snacky-11211.herokuapp.com')
def test_getHerokuHostnameNoSuchFile(self):
self.assertRaises(configure.ConfigurationError,
self.configure.getHerokuHostname,
git_config_path='/tmp')
def test_getHerokuHostnameNoHerokuRemote(self):
self.assertRaises(configure.ConfigurationError,
self.configure.getHerokuHostname,
git_config_path='./tests/test_assets/bad_git_config')
@patch.object(subprocess, 'call')
def test_setHerokuEnvironmentVariables(self, mock_call):
mock_call.return_value = None
configuration = {'TWILIO_ACCOUNT_SID': self.configure.account_sid,
'TWILIO_AUTH_TOKEN': self.configure.auth_token,
'TWILIO_APP_SID': self.configure.app_sid,
'TWILIO_CALLER_ID': self.configure.phone_number}
self.configure.setHerokuEnvironmentVariables(**configuration)
args, kwargs = mock_call.call_args
self.assertTrue("heroku" in args[0],
"Heroku toolbelt not present in call: "
"{0}".format(args[0]))
self.assertTrue("config:add" in args[0],
"Config:add not present in call: "
"{0}".format(args[0]))
config = ["{0}={1}".format(k, v) for k, v in configuration.items()]
for item in config:
            self.assertTrue(item in args[0],
                            "Missing config from call_args: {0} Instead got: "
                            "{1}".format(item, args[0]))
class MiscellaneousTest(unittest.TestCase):
def test_configureWithoutAccountSid(self):
test = configure.Configure(account_sid=None, auth_token=None,
phone_number=None, app_sid=None)
self.assertRaises(configure.ConfigurationError,
test.start)
def test_configureWithoutAuthToken(self):
test = configure.Configure(account_sid='ACxxxxxxx', auth_token=None,
phone_number=None, app_sid=None)
self.assertRaises(configure.ConfigurationError,
test.start)
class InputTest(ConfigureTest):
@patch('twilio.rest.resources.Applications')
@patch('twilio.rest.resources.Application')
def test_createNewTwiMLAppWtfInput(self, MockApp, MockApps):
# Mock the Applications resource and its create method.
self.configure.client.applications = MockApps.return_value
self.configure.client.applications.create.return_value = \
MockApp.return_value
# Mock our input
configure.raw_input = Mock()
configure.raw_input.return_value = 'wtf'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.createNewTwiMLApp,
self.configure.voice_url,
self.configure.sms_url)
count = configure.raw_input.call_count
        self.assertTrue(count == 3, "Prompt did "
                        "not appear three times, instead: %i" % count)
self.assertFalse(self.configure.client.applications.create.called,
"Unexpected request to create AppSid made.")
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
    def test_purchasePhoneNumberWtfInput(self, MockPhoneNumber,
                                         MockPhoneNumbers):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = mock_phone_number
# Mock our input.
configure.raw_input = Mock()
configure.raw_input.return_value = 'wtf'
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.purchasePhoneNumber)
self.assertTrue(configure.raw_input.call_count == 3, "Prompt did "
"not appear three times, instead: %i" %
configure.raw_input.call_count)
self.assertFalse(self.configure.client.phone_numbers.purchase.called,
"Unexpected request to create AppSid made.")
@patch('twilio.rest.resources.PhoneNumbers')
@patch('twilio.rest.resources.PhoneNumber')
    def test_purchasePhoneNumberWtfInputConfirm(self,
                                                MockPhoneNumber,
                                                MockPhoneNumbers):
# Mock the PhoneNumbers resource and its search and purchase methods
mock_phone_number = MockPhoneNumber.return_value
mock_phone_number.phone_number = self.configure.phone_number
self.configure.client.phone_numbers = MockPhoneNumbers.return_value
self.configure.client.phone_numbers.purchase = mock_phone_number
# Mock our input.
configure.raw_input = Mock()
configure.raw_input.side_effect = ['y', 'wtf', 'wtf', 'wtf']
# Test / Assert
self.assertRaises(configure.ConfigurationError,
self.configure.purchasePhoneNumber)
self.assertTrue(configure.raw_input.call_count == 4, "Prompt did "
"not appear three times, instead: %i" %
configure.raw_input.call_count)
self.assertFalse(self.configure.client.phone_numbers.purchase.called,
"Unexpectedly requested phone number purchase.")
class CommandLineTest(unittest.TestCase):
def test_account_sid(self):
parser = configure.parse_args(['-SACxxx'])
self.assertEquals(parser.account_sid, 'ACxxx')
def test_new_phone_number(self):
parser = configure.parse_args(['--new'])
self.assertEquals(parser.phone_number, None)
def test_custom_domain(self):
parser = configure.parse_args(['-dtwilio.com'])
self.assertEquals(parser.host, "twilio.com")
def test_debug(self):
parser = configure.parse_args(['-D'])
self.assertTrue(parser.logger.level, logging.DEBUG)
|
159942
|
from collections import OrderedDict
from django.db.models import Count, Case, When, IntegerField, Sum
from onadata.apps.fieldsight.models import Site
from onadata.apps.fsforms.models import FInstance, FieldSightXF
class BarGenerator(object):
def __init__(self, sites):
self.data = OrderedDict()
self.data['Unstarted'] = 0
self.data['< 20'] = 0
self.data['20 - 40'] = 0
self.data['40 - 60'] = 0
self.data['60 - 80'] = 0
self.data['80 <'] = 0
self.data['Completed'] = 0
for site in sites:
progress_range = self.get_range(site.progress())
self.data[progress_range] +=1
    def get_range(self, progress):
        keys = list(self.data.keys())
        if progress == 0: return keys[0]
        if 1 <= progress < 20: return keys[1]
        if 20 <= progress < 40: return keys[2]
        if 40 <= progress < 60: return keys[3]
        if 60 <= progress < 80: return keys[4]
        if 80 <= progress < 100: return keys[5]
        if progress == 100: return keys[6]
class ProgressBarGenerator(object):
def __init__(self, project):
self.data = OrderedDict()
data = Site.objects.filter(project_id = project.id).aggregate(
unstarted = Sum(
Case(When(current_progress = 0, then= 1),
output_field = IntegerField())
),
first = Sum(
Case(When(current_progress__gte = 1, current_progress__lt = 20, then= 1),
output_field = IntegerField())
),
second = Sum(
Case(When(current_progress__gte = 20, current_progress__lt= 40, then= 1),
output_field = IntegerField())
),
third = Sum(
Case(When(current_progress__gte = 40, current_progress__lt= 60, then= 1),
output_field = IntegerField())
),
fourth = Sum(
Case(When(current_progress__gte = 60, current_progress__lt= 80, then= 1),
output_field = IntegerField())
),
fifth = Sum(
Case(When(current_progress__gte = 80, current_progress__lt= 100, then= 1),
output_field = IntegerField())
),
sixth = Sum(
Case(When(current_progress = 100, then = 1),
output_field = IntegerField())
)
)
self.data['Unstarted'] = 0 if data['unstarted'] is None else data['unstarted']
self.data['< 20'] = 0 if data['first'] is None else data['first']
self.data['20 - 40'] = 0 if data['second'] is None else data['second']
self.data['40 - 60'] = 0 if data['third'] is None else data['third']
self.data['60 - 80'] = 0 if data['fourth'] is None else data['fourth']
self.data['80 <'] = 0 if data['fifth'] is None else data['fifth']
self.data['Completed'] = 0 if data['sixth'] is None else data['sixth']
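# The constructor above issues a single query with conditional counts. A minimal
# standalone sketch of the same Sum(Case(When(...))) pattern (two buckets only):
#   Site.objects.filter(project_id=project.id).aggregate(
#       low=Sum(Case(When(current_progress__lt=50, then=1),
#                    output_field=IntegerField())),
#       high=Sum(Case(When(current_progress__gte=50, then=1),
#                     output_field=IntegerField())),
#   )
# Summing 1s per matching row means an empty bucket aggregates to None,
# which is why the constructor falls back to 0 for each key.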
|
159948
|
full_dataset = [
{'name': 'Peach', 'items': ['green shell', 'banana', 'green shell',], 'finish': 3},
{'name': 'Peach', 'items': ['green shell', 'banana', 'green shell',], 'finish': 1},
{'name': 'Bowser', 'items': ['green shell',], 'finish': 1},
{'name': None, 'items': ['green shell',], 'finish': 2},
{'name': 'Bowser', 'items': ['green shell',], 'finish': 1},
{'name': None, 'items': ['red shell',], 'finish': 1},
{'name': 'Yoshi', 'items': ['banana', 'blue shell', 'banana'], 'finish': 7},
{'name': 'DK', 'items': ['blue shell', 'star',], 'finish': 1},
]
|
159988
|
import numpy as np
import tensorflow as tf
from tqdm import tqdm
from debias.squad_eval.squad_v1_official_evaluation import normalize_answer, f1_score
from debias.utils.ops import get_best_span
def eval_squad(predicted_ans, actual_answers, use_tqdm=False):
"""
:param predicted_ans: List of strings,
:param actual_answers: List of list of strings
:param use_tqdm: Show progress with tqdm
:return: ndarray of size [n_answers, 2] with the em/f1 scores
"""
if use_tqdm:
it = tqdm(list(zip(predicted_ans, actual_answers)), ncols=100, desc="eval")
else:
it = zip(predicted_ans, actual_answers)
scores = np.zeros((len(predicted_ans), 2), dtype=np.float32)
for i, (predicted_ans, actual) in enumerate(it):
em = 0
f1 = 0
if len(actual) > 0:
predicted_ans = normalize_answer(predicted_ans)
for ans in actual:
if len(ans) == 0:
continue
ans = normalize_answer(ans)
em = em or (ans == predicted_ans)
f1 = max(f1, f1_score(predicted_ans, ans))
else:
em = len(predicted_ans) == 0
f1 = len(predicted_ans) == 0
scores[i] = (em, f1)
return scores
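# Hand-worked toy check of the scoring (not from the original test suite):
#   eval_squad(["The cat"], [["the cat", "a dog"]]) -> [[1., 1.]]
# because normalize_answer lowercases and strips articles, so the prediction
# exact-matches the first reference answer (em = 1) with full token overlap (f1 = 1).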
def _eval_squad_from_spans(spans, invs, texts, actual_answers):
predicted_answers = []
actual_answers = [[x.decode("utf-8") for x in ans if len(x) > 0] for ans in actual_answers]
for i in range(len(spans)):
inv = invs[i]
text = texts[i].decode("utf-8")
ans = text[inv[spans[i, 0], 0]:inv[spans[i, 1], 1]]
predicted_answers.append(ans)
return eval_squad(predicted_answers, actual_answers)
def eval_squad_op(span_logits, inv, passage_text, actual_answers, max_bound):
"""Tensorflow op to compute em/f1 scores using SQuAD metrics"""
batch = inv.shape.as_list()[0]
predicted_span = get_best_span(span_logits, max_bound)
scores = tf.py_func(
_eval_squad_from_spans,
[predicted_span, inv, passage_text, actual_answers],
tf.float32, False)
scores.set_shape([batch, 2])
return scores
|
160037
|
import os
def data_info(type='train'):
dataset_directory = '../../../EgoGesture Dataset/'
folder_names = ['SingleOne', 'SingleTwo', 'SingleThree', 'SingleFour', 'SingleFive', 'SingleSix', 'SingleSeven',
'SingleEight']
    if type == 'train':
nTrain = 0
for folder in folder_names:
nTrain = nTrain + len(os.listdir(dataset_directory + folder + '/'))
return nTrain
    elif type == 'valid':
nValid = 0
for folder in folder_names:
nValid = nValid + len(os.listdir(dataset_directory + folder + '/'))
return nValid
|
160068
|
from guizero import App, Text
a = App()
a.font = "courier new"
t1 = Text(a)
t1.value = "{}, {}, {}".format(t1.font, t1.text_size, t1.text_color)
t2 = Text(a, font="arial")
t2.value = "{}, {}, {}".format(t2.font, t2.text_size, t2.text_color)
t3 = Text(a, color="red", size=8, font="verdana")
t3.value = "{}, {}, {}".format(t3.font, t3.text_size, t3.text_color)
a.display()
|
160138
|
from typing import Dict
from ray.rllib.agents.callbacks import DefaultCallbacks
from ray.rllib.env import BaseEnv
from ray.rllib.evaluation import MultiAgentEpisode, RolloutWorker
from ray.rllib.policy import Policy
from ray.rllib.utils.typing import PolicyID
class CustomCallbacks(DefaultCallbacks):
def __init__(self, legacy_callbacks_dict: Dict[str, callable] = None):
super(CustomCallbacks, self).__init__(legacy_callbacks_dict)
def on_episode_end(self, worker: "RolloutWorker", base_env: BaseEnv,
policies: Dict[PolicyID, Policy],
episode: MultiAgentEpisode, **kwargs):
env = base_env.get_unwrapped()[0]
if env.last_time_step != float('inf'):
episode.custom_metrics['make_span'] = env.last_time_step
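# Wiring the callback in (sketch; the trainer choice and env name are placeholders):
#   from ray.rllib.agents.ppo import PPOTrainer
#   trainer = PPOTrainer(config={"env": "YourEnv-v0", "callbacks": CustomCallbacks})
# RLlib instantiates the class itself, so the class (not an instance) is passed;
# `make_span` then shows up under custom_metrics in the training results.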
|
160161
|
import pytest
from api.base.settings.defaults import API_BASE
from osf_tests.factories import (
AuthUserFactory,
RegionFactory,
InstitutionFactory
)
@pytest.mark.django_db
class TestRegionList:
@pytest.fixture()
def region(self):
return RegionFactory(name='Frankfort', _id='eu-central-1')
@pytest.fixture()
def regions_url(self):
return '/{}regions/'.format(
API_BASE)
@pytest.fixture()
def user(self):
usr = AuthUserFactory()
inst = InstitutionFactory()
usr.affiliated_institutions.add(inst)
return usr
def test_region_list(self, app, region, regions_url, user):
# test length and not auth
res = app.get(regions_url, auth=user.auth)
data = res.json['data']
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(data) == 1
def test_custom_storage_region_list(self, app, region, regions_url, user):
RegionFactory(_id=user.affiliated_institutions.first()._id)
res = app.get(regions_url, auth=user.auth)
data = res.json['data']
assert res.status_code == 200
assert res.content_type == 'application/vnd.api+json'
assert len(data) == 1
|
160172
|
import os, time
cpp_directories = ['../cpp/state-engine/', '../cpp/interpolation-engine/']
cpp_files = [['SkyState.cpp',\
'autoexposure/LightingAnalyzer.cpp',\
'world_state/AstroTime.cpp',\
'world_state/Location.cpp',\
'astro_bodies/SkyManager.cpp',\
'astro_bodies/AstronomicalBody.cpp',\
'astro_bodies/Sun.cpp',\
'astro_bodies/Moon.cpp',\
'astro_bodies/Planet.cpp',\
'astro_bodies/planets/Earth.cpp',\
'astro_bodies/OtherPlanet.cpp',\
'astro_bodies/planets/Mercury.cpp',\
'astro_bodies/planets/Venus.cpp',\
'astro_bodies/planets/Mars.cpp',\
'astro_bodies/planets/Jupiter.cpp',\
'astro_bodies/planets/Saturn.cpp'],\
['SkyInterpolator.cpp',\
'color/ColorInterpolator.cpp'
]]
module_file = ['state-engine.js', 'interpolation-engine.js']
exported_functions = [['_main', '_setupSky', '_updateSky', '_initializeMeteringAndLightingDependencies',\
'_updateMeteringData', '_updateDirectLighting', '_updateHemisphericalLightingData'],\
['_main', '_setupInterpolators', '_updateFinalAstronomicalValues', '_updateAstronomicalTimeData',\
'_tick_astronomicalInterpolations', '_setSunAndMoonTimeTo', '_denormalizeSkyIntensity0', '_updateLightingValues',\
'_tick_lightingInterpolations', '_bolometricMagnitudeToLuminosity', '_luminosityToAtmosphericIntensity',\
'_initializeLightingValues']]
cpp_update_date = {}
def recursivelyWalkDirectories(absolute_cpp_directory, file_check_callback):
    #os.walk already descends into subdirectories, so a single pass covers the whole tree
    for root, dirs, files in os.walk(absolute_cpp_directory):
        for file in files:
            call_back_status = file_check_callback(root, file)
            if call_back_status:
                return True
    return False
for i, cpp_directory in enumerate(cpp_directories):
absolute_cpp_directory = os.path.abspath(cpp_directory)
def intializeFileUpdateTimes(root, file):
if file.endswith('.h') or file.endswith('.cpp'):
cpp_update_date[file] = os.path.getmtime('{}/{}'.format(root, file))
#Never break on this
return False
recursivelyWalkDirectories(absolute_cpp_directory, intializeFileUpdateTimes)
def CPPWatcher():
#This should run forever, repeatedly creating our files from the updated structure every time
for i, cpp_directory in enumerate(cpp_directories):
os.system('clear')
os.chdir(cpp_directory)
os.system("emcc {} -s WASM=1 -s EXPORTED_FUNCTIONS='[{}]' -o {} -s ALLOW_MEMORY_GROWTH=1;".format(' '.join(cpp_files[i]), ', '.join(exported_functions[i]), module_file[i]))
os.chdir('../../python')
print 'Watching for updates...'
#Watch loop
while True:
break_loops = True
for i, cpp_directory in enumerate(cpp_directories):
absolute_cpp_directory = os.path.abspath(cpp_directory)
def checkForUpdates(root, file):
#Only check cpp and header files and determine if the file had an update
absolute_file_path = '{}/{}'.format(root, file)
if (file.endswith('.h') or file.endswith('.cpp')) and (cpp_update_date[file] < os.path.getmtime(absolute_file_path)):
os.system('clear')
print 'Change found in file {}, updating.'.format(file)
cpp_update_date[file] = os.path.getmtime(absolute_file_path)
os.chdir(cpp_directory)
os.system("emcc {} -s WASM=1 -s EXPORTED_FUNCTIONS='[{}]' -O3 {} -s ALLOW_MEMORY_GROWTH=1;".format(' '.join(cpp_files[i]), ', '.join(exported_functions[i]), module_file[i]))
os.chdir('../../python')
print "WASM update at: {}".format(time.strftime('%H:%M %Y-%m-%d'))
print "-"*15
return True
return False
#Update our file list and check for any changes to any of our files
recursivelyWalkDirectories(absolute_cpp_directory, checkForUpdates)
    #And we only poll once a second because walking the tree is a little more costly
time.sleep(1)
#Run the main application! :D
CPPWatcher()
|
160174
|
import typing_extensions, typing
from dispike.creating.models import permissions
class PermissionGenerator(object):
"""A helper that helps you create proper permissions for a slash command."""
def __init__(
self,
restrict_to_role: bool = False,
restrict_to_user: bool = False,
        restricted_role_id: typing.Union[typing.List[int], int] = [],
        restricted_user_id: typing.Union[typing.List[int], int] = [],
allow_all_except_or_deny_all_except: typing.Union[
typing_extensions.Literal["allow_all"],
typing_extensions.Literal["deny_all"],
] = "allow_all",
):
"""A Helper that helps you create proper permissions for a slash command. Generates a NewApplicationPermission.
Args:
restrict_to_role (bool, optional): Whether to restrict to a specific Discord Role ID.. Defaults to False.
restrict_to_user (bool, optional): Whether to restrict to a specific Discord User ID. Defaults to False.
            restricted_role_id (Union[List[int], int], optional): The restricted role ID(s). Defaults to [].
            restricted_user_id (Union[List[int], int], optional): The restricted user ID(s). Defaults to [].
allow_all_except_or_deny_all_except (Union["allow_all", "deny_all"], optional): Whether to allow only X or allow everyone else other than X. Defaults to "allow_all".
Raises:
TypeError: If `allow_all_except_or_deny_all_except` is not a valid value.
ValueError: If you attempt to combine restrictions.
"""
self.__restrict_to_role = restrict_to_role
self.__restrict_to_user = restrict_to_user
        if restrict_to_role == False:
            self.__restricted_role_id = []
        elif isinstance(restricted_role_id, int):
            self.__restricted_role_id = [restricted_role_id]
        elif isinstance(restricted_role_id, list):
            if restricted_role_id == []:
                self.__restricted_role_id = []
            else:
                for _item in restricted_role_id:
                    if isinstance(_item, int) == False:
                        raise ValueError(
                            f"Value passed to restricted roles is not a valid type. Received {type(_item)}.. Need <int>!"
                        )
                self.__restricted_role_id = restricted_role_id
        else:
            raise TypeError(
                "Unknown type received for role.. If this is a string, go ahead and convert it to an int"
            )
        if restrict_to_user == False:
            self.__restricted_user_id = []
        elif isinstance(restricted_user_id, int):
            self.__restricted_user_id = [restricted_user_id]
        elif isinstance(restricted_user_id, list):
            if restricted_user_id == []:
                self.__restricted_user_id = []
            else:
                for _item in restricted_user_id:
                    if isinstance(_item, int) == False:
                        raise ValueError(
                            f"Value passed to restricted users is not a valid type. Received {type(_item)}.. Need <int>!"
                        )
                self.__restricted_user_id = restricted_user_id
        else:
            raise TypeError(
                "Unknown type received for user.. If this is a string, go ahead and convert it to an int"
            )
if allow_all_except_or_deny_all_except not in ["allow_all", "deny_all"]:
raise TypeError(
f"{allow_all_except_or_deny_all_except} is unknown. Cannot determine whether to deny or allow based on permission"
)
else:
if allow_all_except_or_deny_all_except == "deny_all":
self.__allow_all_except_or_deny_all_except = False
else:
self.__allow_all_except_or_deny_all_except = True
        if self.__restrict_to_role and self.__restricted_role_id == []:
            raise ValueError("Cannot restrict to role without a role id")
        if self.__restrict_to_user and self.__restricted_user_id == []:
            raise ValueError("Cannot restrict to user without a user id")
@property
def restricted_to_role(self) -> bool:
return self.__restrict_to_role
@property
def restricted_to_user(self) -> bool:
return self.__restrict_to_user
@property
def restricted_role_id(self) -> typing.List[int]:
return self.__restricted_role_id
@property
def restricted_user_id(self) -> typing.List[int]:
return self.__restricted_user_id
@property
def allow_all_except_or_deny_all_except(self) -> bool:
return self.__allow_all_except_or_deny_all_except
def remove_role_from_restricted_roles(self, role_id: int):
if role_id in self.__restricted_role_id:
self.__restricted_role_id.remove(role_id)
def add_role_to_restricted_roles(
self, role_id: int, ignore_if_already_present: bool = False
):
if role_id in self.__restricted_role_id:
if ignore_if_already_present:
return
else:
raise ValueError(
f"Role {role_id} is already in the restricted roles. Cannot add it again."
)
else:
self.__restricted_role_id.append(role_id)
    def remove_user_from_restricted_roles(self, user_id: int):
        if user_id in self.__restricted_user_id:
            self.__restricted_user_id.remove(user_id)
def add_user_to_restricted_users(
self, user_id: int, ignore_if_already_present: bool = False
):
        if user_id in self.__restricted_user_id:
            if ignore_if_already_present:
                return
            else:
                raise ValueError(
                    f"User {user_id} is already in the restricted users. Cannot add it again."
                )
        else:
            self.__restricted_user_id.append(user_id)
    @allow_all_except_or_deny_all_except.setter
    def allow_all_except_or_deny_all_except(self, new_value):
if new_value not in ["allow_all", "deny_all"]:
raise TypeError(
f"{new_value} is unknown. Cannot determine whether to deny or allow based on permission"
)
else:
if new_value == "deny_all":
self.__allow_all_except_or_deny_all_except = False
else:
self.__allow_all_except_or_deny_all_except = True
@property
def created(self) -> permissions.NewApplicationPermission:
"""Returns a NewApplicationPermission object based on the current attributes.
Returns:
permissions.NewApplicationPermission: A properly configured object.
"""
_permissions_generated = []
if self.restricted_to_role:
if self.__restricted_role_id == []:
raise ValueError("Cannot create restriction for roles with no role ids")
for role_to_restrict in self.restricted_role_id:
_permissions_generated.append(
permissions.ApplicationCommandPermissions(
id=role_to_restrict,
type=permissions.ApplicationCommandPermissionType.ROLE,
permission=self.__allow_all_except_or_deny_all_except,
)
)
        if self.restricted_to_user:
            if self.__restricted_user_id == []:
                raise ValueError("Cannot create restriction for users with no user ids")
            for user_to_restrict in self.restricted_user_id:
                _permissions_generated.append(
                    permissions.ApplicationCommandPermissions(
                        id=user_to_restrict,
                        type=permissions.ApplicationCommandPermissionType.USER,
                        permission=self.__allow_all_except_or_deny_all_except,
                    )
                )
return permissions.NewApplicationPermission(permissions=_permissions_generated)
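# Usage sketch (the snowflake ID is a placeholder):
#   perms = PermissionGenerator(
#       restrict_to_role=True,
#       restricted_role_id=[123456789012345678],
#       allow_all_except_or_deny_all_except="allow_all",
#   )
#   application_permission = perms.created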
|
160176
|
from server.models import PluginScriptRow
import sal.plugin
class ARDInfo(sal.plugin.DetailPlugin):
description = "Apple Remote Desktop's Computer Information Fields"
supported_os_families = [sal.plugin.OSFamilies.darwin]
def get_context(self, machine, **kwargs):
context = self.super_get_context(machine, **kwargs)
ard_info = {}
for i in range(1, 5):
key = 'ARD_Info_{}'.format(i)
row = PluginScriptRow.objects.filter(
submission__machine=machine,
submission__plugin='ARD_Info',
pluginscript_name=key)
try:
val = row.first().pluginscript_data
except AttributeError:
val = ""
ard_info[key] = val
context['data'] = ard_info
return context
|
160181
|
import wx
if __name__ == '__main__':
from tests.testapp import testapp
from gui.uberwidgets.umenu import UMenuBar, UMenu
from gui import skin
app = a = testapp(skinname = 'jeffrey')
wx.InitAllImageHandlers()
wx.UpdateUIEvent.SetMode(wx.UPDATE_UI_PROCESS_SPECIFIED)
f = wx.Frame(None, -1, 'menu test')
f.Bind(wx.EVT_MENU, lambda e: msg('%s %s %s' % (e, e.EventType, e.Id)))
f.Bind(wx.EVT_CLOSE, lambda e: app.ExitMainLoop())
p = wx.Panel(f)
f.CenterOnScreen()
p.Sizer = wx.BoxSizer(wx.VERTICAL)
bar = UMenuBar(p, p.Sizer)
bmps = [wx.Bitmap('..\\..\\..\\res\\%s.png' % aa) for aa in ('online', 'away', 'offline')]
m = UMenu(f)
m.AddItem('&Preferences\tCtrl+P', callback = lambda: msg('prefs'), bitmap = skin.get('serviceicons.aim'))
accounts_item = m.AddItem('&Accounts\tCtrl+A', callback = lambda: msg('show accounts!'))
m.AddSep()
sub = UMenu(f)
g = sub.AddItem('one', callback = lambda: msg('one!'))
sub.AddItem('two\tCtrl+T', bitmap = bmps[1])
three = sub.AddItem('three', bitmap = bmps[2])
sub4 = UMenu(f)
sub4.AddItem('foo1')
sub4.AddItem('&foo')
sub4.AddItem('&foo2')
sub4.AddItem('bar')
sub4.AddItem('another &foo')
sub4.AddItem('meep')
sub4.AddItem('fooness')
sub.AddSubMenu(sub4, 'foobarmeep')
g.SetBitmap(bmps[0])
sub2 = UMenu(f); add = sub2.AddCheckItem
add('four')
add('five\tCtrl+F')
add('six')
sub3 = UMenu(f); add = sub3.AddRadioItem
add('seven')
add('eight\tCtrl+F')
add('nine')
def msg(msg):
print msg
m.AddSubMenu(sub, 'Submenu', onshow = lambda: g.SetText('one shown!'))
m.AddSubMenu(sub2, 'Checks', onshow = lambda: msg('submenu 2 onshow'))
m.AddSubMenu(sub3, 'Radios')
m.AddItem('&Close\tCtrl+W', callback = lambda: f.Close())
m2 = UMenu(f, onshow = lambda menu: msg('wut')); add = m2.AddItem
add('&Undo\tCtrl+Z')
add('&Redo\tCtrl+Y')
m2.AddSep()
add('Cu&t\tCtrl+X')
add('&Copy\tCtrl+C')
add('&Paste\tCtrl+V')
bar.Append(m, '&File')
bar.Append(m2, '&Edit')
def menu_open(e):
print vars(e)
def popup(e):
m.PopupMenu()
p.Bind(wx.EVT_RIGHT_UP, popup)
#p.Bind(wx.EVT_PAINT, lambda e: wx.PaintDC(p).DrawBitmap(bmps[0], 10, 10, True))
button = wx.Button(p, -1, 'toggle skin')
button2 = wx.Button(p, -1, 'events')
def showevents(e):
from gui.uberwidgets.umenu import menuEventHandler
from pprint import pprint
from util import funcinfo
for id, cb in menuEventHandler(f).cbs.iteritems():
print id, funcinfo(cb)
button2.Bind(wx.EVT_BUTTON, showevents)
wut = False
def toggle(e):
global wut
wut = not wut
mb = wx.GetApp().skin.tree['menubar']
mb.mode = 'skin' if mb.get('mode', 'skin').lower() == 'native' else 'native'
from gui.skin.skintree import refresh_wx_tree
refresh_wx_tree()
p.Sizer.Layout()
button.Bind(wx.EVT_BUTTON, toggle)
p.Sizer.Add(bar.SizableWindow)
p.Sizer.Add((30, 140), 0)
p.Sizer.Add(button)
p.Sizer.Add(button2)
f.Show()
def wutcapture():
win =wx.Window.GetCapture()
if win:
print 'capture', wx.Window.GetCapture(),'with',wx.Window.GetCapture().menu[0]
print 'focus ', wx.Window.FindFocus()
print
a.timer = wx.PyTimer(wutcapture)
a.timer.Start(3000, False)
a.MainLoop()
#from util import profile; profile(a.MainLoop)
#a.MainLoop()
|
160190
|
from __future__ import unicode_literals
from django import forms
from django.db.models import Q
from django.utils.translation import gettext_lazy as _
from symposion.proposals.models import SupportingDocument
# @@@ generic proposal form
class AddSpeakerForm(forms.Form):
email = forms.EmailField(
label=_(
"Email address of new speaker (use their email address, not yours)"
)
)
def __init__(self, *args, **kwargs):
self.proposal = kwargs.pop("proposal")
super(AddSpeakerForm, self).__init__(*args, **kwargs)
def clean_email(self):
value = self.cleaned_data["email"]
# Verify whether this email address is the user's own
# email address.
if self.proposal.speaker.user.email == value:
raise forms.ValidationError(
_("You can't invite yourself to this proposal")
)
exists = self.proposal.additional_speakers.filter(
Q(user=None, invite_email=value) | Q(user__email=value)
).exists()
if exists:
raise forms.ValidationError(
_(
"This email address has already been "
"invited to your talk proposal"
)
)
return value
class SupportingDocumentCreateForm(forms.ModelForm):
class Meta:
model = SupportingDocument
fields = ["file", "description"]
|
160236
|
SOC_IRAM_LOW = 0x40020000
SOC_IRAM_HIGH = 0x40070000
SOC_DRAM_LOW = 0x3ffb0000
SOC_DRAM_HIGH = 0x40000000
SOC_RTC_DRAM_LOW = 0x3ff9e000
SOC_RTC_DRAM_HIGH = 0x3ffa0000
SOC_RTC_DATA_LOW = 0x50000000
SOC_RTC_DATA_HIGH = 0x50002000
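# A small helper one might layer on top of these ranges (a sketch, not part of
# the original constants file); the half-open [low, high) convention is assumed.
def soc_memory_region(addr):
    """Return the name of the SoC memory region containing addr, or None."""
    regions = (
        ('IRAM', SOC_IRAM_LOW, SOC_IRAM_HIGH),
        ('DRAM', SOC_DRAM_LOW, SOC_DRAM_HIGH),
        ('RTC_DRAM', SOC_RTC_DRAM_LOW, SOC_RTC_DRAM_HIGH),
        ('RTC_DATA', SOC_RTC_DATA_LOW, SOC_RTC_DATA_HIGH),
    )
    for name, low, high in regions:
        if low <= addr < high:
            return name
    return None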
|
160243
|
import datetime
import os
import copy
import json
import numpy as np
from pytz import timezone
from gamified_squad import GamifiedSquad
from agent import CustomAgent
import generic
import evaluate
SAVE_CHECKPOINT = 100000
def train():
time_1 = datetime.datetime.now()
config = generic.load_config()
env = GamifiedSquad(config)
env.split_reset("train")
agent = CustomAgent(config, env.has_token_set)
if config["general"]["visdom"]:
# visdom
import visdom
viz = visdom.Visdom()
plt_win = None
eval_plt_win = None
plt_q_value_win = None
plt_steps_win = None
eval_plt_steps_win = None
viz_avg_ig_acc, viz_avg_qa_acc = [], []
viz_avg_ig_q_value = []
viz_eval_ig_acc, viz_eval_qa_acc, viz_eval_steps = [], [], []
viz_avg_steps = []
step_in_total = 0
batch_no = 0
episode_no = 0
running_avg_qa_acc = generic.HistoryScoreCache(capacity=50)
running_avg_ig_acc = generic.HistoryScoreCache(capacity=50)
running_avg_qa_loss = generic.HistoryScoreCache(capacity=50)
running_avg_ig_loss = generic.HistoryScoreCache(capacity=50)
running_avg_ig_q_value = generic.HistoryScoreCache(capacity=50)
running_avg_steps = generic.HistoryScoreCache(capacity=50)
output_dir = "."
data_dir = "."
json_file_name = agent.experiment_tag.replace(" ", "_")
best_qa_acc_so_far = 0.0
prev_performance = 0.0
i_am_patient = 0
# load model from checkpoint
if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
print("checkpoint already exist.")
exit(0)
if os.path.exists(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt"):
agent.load_pretrained_graph_generation_model(data_dir + "/" + agent.load_graph_generation_model_from_tag + ".pt")
if agent.load_pretrained:
if os.path.exists(data_dir + "/" + agent.load_from_tag + ".pt"):
agent.load_pretrained_model(data_dir + "/" + agent.load_from_tag + ".pt") # load partial graph
agent.update_target_net()
while(True):
if episode_no > agent.max_episode:
break
np.random.seed(episode_no)
env.seed(episode_no)
obs, infos = env.reset()
batch_size = len(obs)
report = agent.report_frequency > 0 and (episode_no % agent.report_frequency <= max(episode_no - batch_size, 0) % agent.report_frequency)
__save__ = episode_no % SAVE_CHECKPOINT <= max(episode_no - batch_size, 0) % SAVE_CHECKPOINT
if report:
print("====================================================================================", episode_no)
print("-- Q: %s" % (agent.bert_tokenizer.decode(infos[0]["q"]).encode('utf-8')))
print("-- A: %s" % (infos[0]["a_string"][0].encode('utf-8')))
agent.train()
agent.init(obs, infos)
quest_list = agent.get_game_quest_info(infos)
agent.kg.push_batch_question(quest_list, [item["q_srl"] for item in infos])
previous_dynamics = None
previous_belief = None
input_quest, input_quest_mask, quest_id_list = agent.get_agent_inputs(quest_list)
tmp_replay_buffer = []
print_cmds = []
prev_commands = ["restart" for _ in range(batch_size)]
belief_buffer = []
act_randomly = False if agent.noisy_net else episode_no < agent.learn_start_from_this_episode
for _ in range(agent.max_nb_steps_per_episode):
# generate commands
if agent.noisy_net:
agent.reset_noise() # Draw a new set of noisy weights
commands, replay_info, current_dynamics, current_belief = agent.act(obs, infos, input_quest, input_quest_mask, quest_id_list, prev_commands, previous_dynamics, previous_belief, random=act_randomly)
tmp_replay_buffer.append(replay_info)
obs, infos = env.step(commands)
prev_commands = commands
previous_dynamics = current_dynamics
previous_belief = current_belief
belief_buffer.append(current_belief)
if agent.noisy_net and step_in_total % agent.update_per_k_game_steps == 0:
agent.reset_noise() # Draw a new set of noisy weights
if episode_no >= agent.learn_start_from_this_episode and step_in_total % agent.update_per_k_game_steps == 0:
interaction_loss, interaction_q_value = agent.update_interaction()
if interaction_loss is not None:
running_avg_ig_loss.push(interaction_loss)
running_avg_ig_q_value.push(interaction_q_value)
qa_loss = agent.update_qa()
if qa_loss is not None:
running_avg_qa_loss.push(qa_loss)
step_in_total += 1
still_running = generic.to_np(replay_info[-1])
print_cmds.append(commands[0] if still_running[0] else "--")
if np.sum(still_running) == 0:
break
if report:
print(" / ".join(print_cmds).encode('utf-8'))
# The agent has exhausted all steps, now answer question.
chosen_head_tails = agent.answer_question_act(agent.naozi.get(), quest_list, current_belief) # batch
chosen_head_tails_np = generic.to_np(chosen_head_tails)
chosen_answer_strings = generic.get_answer_strings(agent.naozi.get(), chosen_head_tails_np, agent.bert_tokenizer, agent.special_token_ids)
answer_strings = [item["a_string"] for item in infos]
answer_token_ids = [item["a"] for item in infos]
qa_reward_np = generic.get_qa_reward(chosen_answer_strings, answer_strings)
obs_strings = [agent.bert_tokenizer.decode(agent.naozi.get(i)) for i in range(batch_size)]
ig_reward_np = generic.get_sufficient_info_reward(agent.naozi.get(), answer_token_ids)
ig_reward = generic.to_pt(ig_reward_np, enable_cuda=False, type='float') # batch
# push qa experience into qa replay buffer
replay_node_vocab = agent.kg.get_node_vocabulary()
replay_relation_vocab = agent.kg.get_relation_vocabulary()
replay_triplets = agent.kg.get_triplets()
for b in range(batch_size): # data points in batch
is_prior = qa_reward_np[b] > agent.qa_reward_prior_threshold * agent.qa_replay_memory.avg_rewards()
# if the agent is not in the correct state, do not push it into replay buffer
if np.mean(ig_reward_np[b]) == 0.0:
continue
agent.qa_replay_memory.push(is_prior, qa_reward_np[b], agent.naozi.get_sentence_lists(b), quest_list[b], replay_node_vocab[b], replay_relation_vocab[b], replay_triplets[b], answer_token_ids[b], belief_buffer[-1][b].cpu() if belief_buffer[-1][b] is not None else None)
# small positive reward whenever it answers question correctly
masks_np = [generic.to_np(item[-1]) for item in tmp_replay_buffer]  # per-step "still running" masks (1 while a game is alive, 0 once it ends)
command_rewards_np = []
for i in range(len(tmp_replay_buffer)):
if i == len(tmp_replay_buffer) - 1:
r = ig_reward * tmp_replay_buffer[i][-1]
r_np = ig_reward_np * masks_np[i]
else:
# give reward only at that one game step, not all
r = ig_reward * (tmp_replay_buffer[i][-1] - tmp_replay_buffer[i + 1][-1])
r_np = ig_reward_np * (masks_np[i] - masks_np[i + 1])
tmp_replay_buffer[i].append(r)
command_rewards_np.append(r_np)
command_rewards_np = np.array(command_rewards_np)
if report:
print(command_rewards_np[:, 0])
# push experience into replay buffer
for b in range(len(ig_reward_np)):
is_prior = np.sum(command_rewards_np, 0)[b] > 0.0
mem = []
for i in range(len(tmp_replay_buffer)):
batch_description_list, batch_chosen_indices, batch_chosen_ctrlf_indices, batch_graph_node_vocabulary, batch_graph_relation_vocabulary, batch_graph_triplets, _, batch_rewards = tmp_replay_buffer[i]
mem.append([copy.deepcopy(batch_description_list[b]),
copy.deepcopy(quest_list[b]),
batch_chosen_indices[b],
batch_chosen_ctrlf_indices[b],
copy.deepcopy(batch_graph_node_vocabulary[b]),
copy.deepcopy(batch_graph_relation_vocabulary[b]),
copy.deepcopy(batch_graph_triplets[b]),
copy.deepcopy(belief_buffer[i][b].cpu()) if belief_buffer[i][b] is not None else None,
batch_rewards[b]])
if masks_np[i][b] == 0.0:
break
agent.replay_memory.push(is_prior, mem)
qa_acc = np.mean(qa_reward_np)
ig_acc = np.mean(ig_reward_np)
step_masks_np = np.sum(np.array(masks_np), 0) # batch
for i in range(len(qa_reward_np)):
# if the answer is totally wrong, we assume it used all steps
if qa_reward_np[i] == 0.0:
step_masks_np[i] = agent.max_nb_steps_per_episode
used_steps = np.mean(step_masks_np)
running_avg_qa_acc.push(qa_acc)
running_avg_ig_acc.push(ig_acc)
running_avg_steps.push(used_steps)
print_rewards = np.sum(np.mean(command_rewards_np, -1))
if report:
print("-- OBS: %s" % (obs_strings[0].encode('utf-8')))
print("-- PRED: %s" % (chosen_answer_strings[0].encode('utf-8')))
# finish game
agent.finish_of_episode(episode_no, batch_no, batch_size)
time_2 = datetime.datetime.now()
eastern_time = datetime.datetime.now(timezone('US/Eastern')).strftime("%b %d %Y %H:%M:%S")
if report:
print("Episode: {:3d} | {:s} | time spent: {:s} | interaction loss: {:2.3f} | interaction qvalue: {:2.3f} | qa loss: {:2.3f} | rewards: {:2.3f} | qa acc: {:2.3f}/{:2.3f} | sufficient info: {:2.3f}/{:2.3f} | used steps: {:2.3f}".format(episode_no, eastern_time, str(time_2 - time_1).rsplit(".")[0], running_avg_ig_loss.get_avg(), running_avg_ig_q_value.get_avg(), running_avg_qa_loss.get_avg(), print_rewards, qa_acc, running_avg_qa_acc.get_avg(), ig_acc, running_avg_ig_acc.get_avg(), running_avg_steps.get_avg()))
if __save__:
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_ep" + str(episode_no) + "_model.pt")
if not report or episode_no < agent.learn_start_from_this_episode:
episode_no += batch_size
batch_no += 1
continue
eval_qa_acc, eval_ig_acc, eval_used_steps = 0.0, 0.0, 0.0
# evaluate
if agent.run_eval:
eval_qa_acc, eval_ig_acc, eval_used_steps = evaluate.evaluate(env, agent, "valid")
env.split_reset("train")
# if run eval, then save model by eval accuracy
if eval_qa_acc >= best_qa_acc_so_far:
best_qa_acc_so_far = eval_qa_acc
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
curr_performance = eval_qa_acc
else:
if running_avg_qa_acc.get_avg() >= best_qa_acc_so_far:
best_qa_acc_so_far = running_avg_qa_acc.get_avg()
agent.save_model_to_path(output_dir + "/" + agent.experiment_tag + "_model.pt")
curr_performance = running_avg_qa_acc.get_avg()
if prev_performance <= curr_performance:
i_am_patient = 0
else:
i_am_patient += 1
prev_performance = curr_performance
# if we have gone i_am_patient >= patience reports without improvement, reload the best checkpoint
if agent.patience > 0 and i_am_patient >= agent.patience:
if os.path.exists(output_dir + "/" + agent.experiment_tag + "_model.pt"):
print('reload from a good checkpoint...')
agent.load_pretrained_model(output_dir + "/" + agent.experiment_tag + "_model.pt", load_partial_graph=False)
agent.update_target_net()
i_am_patient = 0
# plot using visdom
if config["general"]["visdom"] and not agent.debug_mode:
viz_avg_ig_acc.append(running_avg_ig_acc.get_avg())
viz_avg_qa_acc.append(running_avg_qa_acc.get_avg())
viz_avg_ig_q_value.append(running_avg_ig_q_value.get_avg())
viz_eval_ig_acc.append(eval_ig_acc)
viz_eval_qa_acc.append(eval_qa_acc)
viz_eval_steps.append(eval_used_steps)
viz_avg_steps.append(running_avg_steps.get_avg())
viz_x = np.arange(len(viz_avg_ig_acc)).tolist()
if plt_win is None:
plt_win = viz.line(X=viz_x, Y=viz_avg_ig_acc,
opts=dict(title=agent.experiment_tag + "_train"),
name="sufficient info")
viz.line(X=viz_x, Y=viz_avg_qa_acc,
opts=dict(title=agent.experiment_tag + "_train"),
win=plt_win, update='append', name="qa")
else:
viz.line(X=[len(viz_avg_ig_acc) - 1], Y=[viz_avg_ig_acc[-1]],
opts=dict(title=agent.experiment_tag + "_train"),
win=plt_win,
update='append', name="sufficient info")
viz.line(X=[len(viz_avg_qa_acc) - 1], Y=[viz_avg_qa_acc[-1]],
opts=dict(title=agent.experiment_tag + "_train"),
win=plt_win,
update='append', name="qa")
if plt_q_value_win is None:
plt_q_value_win = viz.line(X=viz_x, Y=viz_avg_ig_q_value,
opts=dict(title=agent.experiment_tag + "_train_q_value"),
name="sufficient info")
else:
viz.line(X=[len(viz_avg_ig_q_value) - 1], Y=[viz_avg_ig_q_value[-1]],
opts=dict(title=agent.experiment_tag + "_train_q_value"),
win=plt_q_value_win,
update='append', name="sufficient info")
if plt_steps_win is None:
plt_steps_win = viz.line(X=viz_x, Y=viz_avg_steps,
opts=dict(title=agent.experiment_tag + "_train_step"),
name="used steps")
else:
viz.line(X=[len(viz_avg_steps) - 1], Y=[viz_avg_steps[-1]],
opts=dict(title=agent.experiment_tag + "_train_step"),
win=plt_steps_win,
update='append', name="used steps")
if agent.run_eval:
if eval_plt_win is None:
eval_plt_win = viz.line(X=viz_x, Y=viz_eval_ig_acc,
opts=dict(title=agent.experiment_tag + "_eval"),
name="sufficient info")
viz.line(X=viz_x, Y=viz_eval_qa_acc,
opts=dict(title=agent.experiment_tag + "_eval"),
win=eval_plt_win, update='append', name="qa")
else:
viz.line(X=[len(viz_eval_ig_acc) - 1], Y=[viz_eval_ig_acc[-1]],
opts=dict(title=agent.experiment_tag + "_eval"),
win=eval_plt_win,
update='append', name="sufficient info")
viz.line(X=[len(viz_eval_qa_acc) - 1], Y=[viz_eval_qa_acc[-1]],
opts=dict(title=agent.experiment_tag + "_eval"),
win=eval_plt_win,
update='append', name="qa")
if eval_plt_steps_win is None:
eval_plt_steps_win = viz.line(X=viz_x, Y=viz_eval_steps,
opts=dict(title=agent.experiment_tag + "_eval_step"),
name="used steps")
else:
viz.line(X=[len(viz_eval_steps) - 1], Y=[viz_eval_steps[-1]],
opts=dict(title=agent.experiment_tag + "_eval_step"),
win=eval_plt_steps_win,
update='append', name="used steps")
# write accuracies down into file
_s = json.dumps({"time spent": str(time_2 - time_1).rsplit(".")[0],
"sufficient info": str(running_avg_ig_acc.get_avg()),
"qa": str(running_avg_qa_acc.get_avg()),
"sufficient qvalue": str(running_avg_ig_q_value.get_avg()),
"eval sufficient info": str(eval_ig_acc),
"eval qa": str(eval_qa_acc),
"eval steps": str(eval_used_steps),
"used steps": str(running_avg_steps.get_avg())})
with open(output_dir + "/" + json_file_name + '.json', 'a+') as outfile:
outfile.write(_s + '\n')
outfile.flush()
episode_no += batch_size
batch_no += 1
if __name__ == '__main__':
train()
|
160327
|
from .concept_extractor.extractor import TextblobTfIdfExtractStrategy
from .models import Article, GraphArticle
import logging
logger = logging.getLogger()
logger.setLevel(logging.INFO)
logger.addHandler(logging.StreamHandler())
class ConceptRunner:
_tfidf_extractor = None
@classmethod
def _get_tfidf_extractor(cls):
if not cls._tfidf_extractor:
cls._tfidf_extractor = TextblobTfIdfExtractStrategy()
return cls._tfidf_extractor
@classmethod
def _extract_and_save(cls, article, disconnectAll=False):
article_node = GraphArticle.nodes.get_or_none(uid=article.id)
if not article_node:
article_node = GraphArticle(
uid=article.id, name=article.title).save()
if disconnectAll:
article_node.related.disconnect_all()
keywords = cls._get_tfidf_extractor().extract_keyphrases(article.text)
logger.info(f"Extracted keywords {keywords}")
for keyword in keywords:
related_title = keyword['word']
related_article = Article.objects.filter(
title__iexact=related_title).first()
if not related_article:
related_article = Article(title=related_title, workspace=article.workspace)
related_article.save()
related_article_node = GraphArticle.nodes.get_or_none(
uid=related_article.id)
if not related_article_node:
related_article_node = GraphArticle(
uid=related_article.id, name=related_article.title)
article_node.save()
related_article_node.save()
logger.info(
f"Set {article_node.name} as related to {related_article_node.name}")
if article_node.related.is_connected(related_article_node):
rel = article_node.related.relationship(related_article_node)
rel.tf_idf = keyword['tf-idf']
else:
article_node.related.connect(
related_article_node, {'tf_idf': keyword['tf-idf']})
@classmethod
def generate_graph(cls):
print("[ConceptRunner] generating graph for all articles")
articles = Article.objects.all()
for article in articles:
cls._extract_and_save(article)
@classmethod
def generate_concepts_for_article(cls, articleId):
article = Article.objects.get(pk=articleId)
if article:
cls._extract_and_save(article, disconnectAll=False)
|
160328
|
from .object import Object
from .vuex_instance import VuexInstance
class VueInstance(Object):
@staticmethod
def __can_wrap__(obj):
return hasattr(obj, "_isVue") and obj._isVue
@property
def store(self):
store = self.__getattr__("store")
return VuexInstance(
state=store.state,
getters=store.getters,
commit=store.commit,
dispatch=store.dispatch,
)
def __getattr__(self, item):
try:
return Object.from_js(getattr(self._js, item))
except AttributeError:
if not item.startswith("$"):
return self.__getattr__("${}".format(item))
raise
def __setattr__(self, key, value):
if key in ["_js"]:
object.__setattr__(self, key, value)
elif hasattr(getattr(self, key), "__set__"):
getattr(self, key).__set__(value)
else:
if key not in dir(getattr(self._js, "$props", [])):
setattr(self._js, key, value)
Object.SubClasses.append(VueInstance)
|
160343
|
from ..service.autodiscover import Autodiscover, Authentication
class GetUserSettings(Autodiscover):
"""GetUserSettings EWS Autodiscover endpoint
retrieves the authenticated or provided users settings
"""
RESULTS_KEY = 'UserSettings'
def __init__(self, user=None):
"""Retrieves the user settings for the authenticated or provided user.
Args:
user (str, optional): A user to retrieve user settings for. Defaults to None.
"""
self.user = user
def soap(self):
if not self.user:
self.user = Authentication.credentials[0]
return self.A_NAMESPACE.GetUserSettingsRequestMessage(
self.A_NAMESPACE.Request(
self.A_NAMESPACE.Users(
self.A_NAMESPACE.User(
self.A_NAMESPACE.Mailbox(self.user)
)
),
self.A_NAMESPACE.RequestedSettings(
self.A_NAMESPACE.Setting('InternalEwsUrl'),
self.A_NAMESPACE.Setting('ExternalEwsUrl'),
self.A_NAMESPACE.Setting('UserDisplayName'),
self.A_NAMESPACE.Setting('UserDN'),
self.A_NAMESPACE.Setting('UserDeploymentId'),
self.A_NAMESPACE.Setting('InternalMailboxServer'),
self.A_NAMESPACE.Setting('MailboxDN'),
self.A_NAMESPACE.Setting('ActiveDirectoryServer'),
self.A_NAMESPACE.Setting('EwsSupportedSchemas'),
self.A_NAMESPACE.Setting('InternalRpcClientServer'),
self.A_NAMESPACE.Setting('InternalEcpUrl'),
self.A_NAMESPACE.Setting('InternalEcpVoicemailUrl'),
self.A_NAMESPACE.Setting('InternalEcpEmailSubscriptionsUrl'),
self.A_NAMESPACE.Setting('InternalEcpTextMessagingUrl'),
self.A_NAMESPACE.Setting('InternalEcpDeliveryReportUrl'),
self.A_NAMESPACE.Setting('InternalEcpRetentionPolicyTagsUrl'),
self.A_NAMESPACE.Setting('InternalEcpPublishingUrl'),
self.A_NAMESPACE.Setting('InternalOABUrl'),
self.A_NAMESPACE.Setting('InternalUMUrl'),
self.A_NAMESPACE.Setting('InternalWebClientUrls'),
self.A_NAMESPACE.Setting('PublicFolderServer'),
self.A_NAMESPACE.Setting('ExternalMailboxServer'),
self.A_NAMESPACE.Setting('ExternalMailboxServerRequiresSSL'),
self.A_NAMESPACE.Setting('ExternalMailboxServerAuthenticationMethods'),
self.A_NAMESPACE.Setting('EcpVoicemailUrlFragment'),
self.A_NAMESPACE.Setting('EcpEmailSubscriptionsUrlFragment'),
self.A_NAMESPACE.Setting('EcpTextMessagingUrlFragment'),
self.A_NAMESPACE.Setting('EcpDeliveryReportUrlFragment'),
self.A_NAMESPACE.Setting('EcpRetentionPolicyTagsUrlFragment'),
self.A_NAMESPACE.Setting('ExternalEcpUrl'),
self.A_NAMESPACE.Setting('EcpPublishingUrlFragment'),
self.A_NAMESPACE.Setting('ExternalEcpVoicemailUrl'),
self.A_NAMESPACE.Setting('ExternalEcpEmailSubscriptionsUrl'),
self.A_NAMESPACE.Setting('ExternalEcpTextMessagingUrl'),
self.A_NAMESPACE.Setting('ExternalEcpDeliveryReportUrl'),
self.A_NAMESPACE.Setting('ExternalEcpRetentionPolicyTagsUrl'),
self.A_NAMESPACE.Setting('ExternalEcpPublishingUrl'),
self.A_NAMESPACE.Setting('ExternalOABUrl'),
self.A_NAMESPACE.Setting('ExternalUMUrl'),
self.A_NAMESPACE.Setting('ExternalWebClientUrls'),
self.A_NAMESPACE.Setting('CrossOrganizationSharingEnabled'),
self.A_NAMESPACE.Setting('AlternateMailboxes'),
self.A_NAMESPACE.Setting('CasVersion'),
self.A_NAMESPACE.Setting('InternalPop3Connections'),
self.A_NAMESPACE.Setting('ExternalPop3Connections'),
self.A_NAMESPACE.Setting('InternalImap4Connections'),
self.A_NAMESPACE.Setting('ExternalImap4Connections'),
self.A_NAMESPACE.Setting('InternalSmtpConnections'),
self.A_NAMESPACE.Setting('ExternalSmtpConnections'),
self.A_NAMESPACE.Setting('InternalServerExclusiveConnect'),
self.A_NAMESPACE.Setting('ExternalServerExclusiveConnect'),
self.A_NAMESPACE.Setting('ExchangeRpcUrl'),
self.A_NAMESPACE.Setting('ShowGalAsDefaultView'),
self.A_NAMESPACE.Setting('AutoDiscoverSMTPAddress'),
self.A_NAMESPACE.Setting('InteropExternalEwsUrl'),
self.A_NAMESPACE.Setting('ExternalEwsVersion'),
self.A_NAMESPACE.Setting('InteropExternalEwsVersion'),
self.A_NAMESPACE.Setting('MobileMailboxPolicyInterop'),
self.A_NAMESPACE.Setting('GroupingInformation'),
self.A_NAMESPACE.Setting('UserMSOnline'),
self.A_NAMESPACE.Setting('MapiHttpEnabled')
)
),
)
|
160357
|
import uuid
class Node:
def toRow(self, headers):
row = []
for field in headers: row.append(str(getattr(self, field)))
return '|'.join(row)
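# A minimal usage sketch (not in the original file): any object whose
# attributes match the requested header names serializes to a '|'-joined row.
if __name__ == '__main__':
    class _Demo(Node):
        def __init__(self):
            self.uid = uuid.uuid4()
            self.name = 'example'
    print(_Demo().toRow(['name', 'uid']))  # e.g. "example|1b4e28ba-..."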
|
160364
|
import sys
sys.path.insert(0, '../../../src_python')
import nmpccodegen as nmpc
import nmpccodegen.tools as tools
import nmpccodegen.models as models
import nmpccodegen.controller as controller
import nmpccodegen.controller.obstacles as obstacles
import nmpccodegen.Cfunctions as cfunctions
import nmpccodegen.example_models as example_models
import math
import numpy as np
import matplotlib.pyplot as plt
import time
def init_controller_files(controller_name):
## -- GENERATE STATIC FILES --
# start by generating the static files and folder of the controller
trailer_controller_location = "../../../test_controller_builds/" + controller_name
tools.Bootstrapper.bootstrap(trailer_controller_location, simulation_tools=True)
return trailer_controller_location
## -----------------------------------------------------------------
def generate_controller_with_obs(trailer_controller_location,reference_state,Q,R,rectangular_obstacle_1,obstacle_weight,horizon,display_figure=True,index_figure=0):
# get the continuous system equations
(system_equations,number_of_states,number_of_inputs,coordinates_indices) = example_models.get_trailer_model(L=0.5)
step_size = 0.05
# simulation_time = 10
# number_of_steps = math.ceil(simulation_time / step_size)
integrator = "RK44"
constraint_input = cfunctions.IndicatorBoxFunction([-1,-1],[1,1]) # inputs must stay within these bounds
model = models.Model_continious(system_equations, constraint_input, step_size, number_of_states,\
number_of_inputs,coordinates_indices, integrator)
# reference_state=np.array([2,2,0])
stage_cost = controller.Stage_cost_QR(model, Q, R)
# define the controller
trailer_controller = controller.Nmpc_panoc(trailer_controller_location,model,stage_cost)
trailer_controller.horizon = horizon
trailer_controller.step_size = step_size
trailer_controller.integrator_casadi = True
trailer_controller.panoc_max_steps= 1000
trailer_controller._lbgfs_buffer_size = 20
trailer_controller.min_residual = -5
# add an obstacle
trailer_controller.add_obstacle(rectangular_obstacle_1)
# generate the code
trailer_controller.generate_code()
# -- simulate controller --
# setup a simulator to test
sim = tools.Simulator(trailer_controller.location)
initial_state=np.array([0.01,0.,0.])
state=initial_state
state_history = np.zeros((number_of_states,horizon))
sim.set_weight_obstacle(0,obstacle_weight)
reference_input = np.array([0, 0])
(sim_data, full_solution) = sim.simulate_nmpc_multistep_solution(initial_state, reference_state, reference_input,
number_of_inputs * horizon)
inputs = np.reshape(full_solution, (horizon, number_of_inputs))
print("solved NMPC problem time="+ sim_data.time_string + " number of panoc iterations=" + str(
sim_data.panoc_interations))
for i in range(0,horizon):
state = model.get_next_state_numpy(state,inputs[i,:])
state_history[:,i] = np.reshape(state[:],number_of_states)
print("Reference state:")
print(reference_state)
print("Final state:")
print(state)
if(display_figure==True):
plt.figure(index_figure)
example_models.trailer_print(state_history)
rectangular_obstacle_1.plot()
plt.xlim([-2.2, 2.2])
plt.ylim([-0.1, 2.2])
# plt.clf()
return state
def main():
# create static files
trailer_move_diag_obs_location_ = init_controller_files("trailer_move_diag_obs")
trailer_move_right_obs_location_ = init_controller_files("trailer_move_right_obs")
trailer_move_move_up_obs_location_ = init_controller_files("trailer_move_up_obs")
# Start simulating:
# TEST 1
rectangular_center_coordinates = np.array([0.75, 0.45])
rectangular_width = 0.5
rectangular_height = 0.3
rectangular_obstacle_1 = obstacles.Obstacle_rectangular(rectangular_center_coordinates, \
rectangular_width, rectangular_height)
Q = np.diag([10., 10., 1.])
R = np.diag([1., 1.]) * 0.01
obstacle_weight = 10000.
horizon = 50
reference_state = np.array([2, 0.5, 0])
current_state = generate_controller_with_obs(trailer_move_diag_obs_location_, reference_state, Q,R, \
rectangular_obstacle_1 , obstacle_weight,\
horizon,display_figure=True,index_figure=0)
# TEST 2
rectangular_center_coordinates_2 = np.array([1, 0.])
rectangular_width_2 = 0.5
rectangular_height_2 = 0.2
rectangular_obstacle_2 = obstacles.Obstacle_rectangular(rectangular_center_coordinates_2, \
rectangular_width_2, rectangular_height_2)
Q = np.diag([10., 10., 1.])*1.
R = np.diag([1., 1.]) * 0.01
obstacle_weight = 1000.
horizon = 50
reference_state = np.array([2, 0, 0])
current_state = generate_controller_with_obs(trailer_move_right_obs_location_, reference_state, Q, R,\
rectangular_obstacle_2,obstacle_weight,horizon,\
display_figure=True,index_figure=1)
# TEST 3
rectangular_center_coordinates = np.array([0.6, 0.5])
rectangular_width = 1.2
rectangular_height = 0.2
rectangular_obstacle_3 = obstacles.Obstacle_rectangular(rectangular_center_coordinates, \
rectangular_width, rectangular_height)
Q = np.diag([10., 10., 0.1])
R = np.diag([1., 1.]) * 0.01
obstacle_weight = 10000.
horizon = 50
reference_state = np.array([0, 2, 0])
current_state = generate_controller_with_obs(trailer_move_move_up_obs_location_, reference_state, Q, R,\
rectangular_obstacle_3,obstacle_weight,horizon,\
display_figure=True,index_figure=2)
plt.show()
if __name__ == '__main__':
main()
|
160387
|
from vectorhub.bi_encoders.qa.tfhub import LAReQA2Vec
from ....test_utils import assert_encoder_works
def test_lare_qa_works():
"""
Testing for LAReQA works
"""
encoder = LAReQA2Vec()
assert_encoder_works(encoder, data_type='text', model_type='bi_encoder')
|
160449
|
from menpo.base import Vectorizable
from menpo.landmark import Landmarkable
from menpo.transform.base import Transformable
from menpo.visualize import LandmarkableViewable, Viewable
class Shape(Vectorizable, Transformable, Landmarkable, LandmarkableViewable, Viewable):
"""
Abstract representation of shape. Shapes are :map:`Transformable`,
:map:`Vectorizable`, :map:`Landmarkable`, :map:`LandmarkableViewable` and
:map:`Viewable`. This base class handles transforming landmarks when the
shape is transformed. Therefore, implementations of :map:`Shape` have to
implement the abstract :meth:`_transform_self_inplace` method that handles
transforming the :map:`Shape` itself.
"""
def _transform_inplace(self, transform):
"""
Transform the landmarks and the shape itself.
Parameters
----------
transform : `function`
A function to transform the spatial data with.
Returns
-------
self : `type(self)`
A pointer to `self` (the result of :meth:`_transform_self_inplace`).
"""
if self.has_landmarks:
self.landmarks._transform_inplace(transform)
return self._transform_self_inplace(transform)
def _transform_self_inplace(self, transform):
"""
Implement this method to transform the concrete implementation of a
shape. This is then called by the Shape's :meth:`_transform_inplace`
method, which will have updated the landmarks beforehand.
Parameters
----------
transform : `function`
A function to transform the spatial data with.
Returns
-------
self : `type(self)`
A pointer to `self`.
"""
pass
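# A minimal sketch (an assumption, not part of menpo) of a concrete Shape:
# the spatial data is a plain numpy array and only the abstract hook is
# implemented; landmarks are handled by _transform_inplace above.
import numpy as np
class PointShape(Shape):
    def __init__(self, points):
        super(PointShape, self).__init__()
        self.points = np.asarray(points)
    def _transform_self_inplace(self, transform):
        self.points = transform(self.points)
        return self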
|
160459
|
from math import *
import numpy as np
import mpi  # Spheral's MPI wrapper, used below to stitch per-rank profiles
from NodeGeneratorBase import *
from Spheral import (Vector1d, Tensor1d, SymTensor1d,
Vector2d, Tensor2d, SymTensor2d, rotationMatrix2d, testPointInBox2d,
Vector3d, Tensor3d, SymTensor3d, rotationMatrix3d, testPointInBox3d)
from SpheralTestUtilities import fuzzyEqual
#-------------------------------------------------------------------------------
# Class to generate 1-D node positions for a fixed node mass to fit the given
# density profile in a range (xmin, xmax).
#-------------------------------------------------------------------------------
class GenerateNodeProfile1d(NodeGeneratorBase):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
nx, # number of points to generate
rho, # density profile
xmin,
xmax,
nNodePerh = 2.01,
numbins = 10000):
assert nx > 0
assert xmin < xmax
assert nNodePerh > 0.0
# If the user provided a constant for rho, then use the constantRho
# class to provide this value.
if type(rho) == type(1.0):
self.rhofunc = ConstantRho(rho)
# In the constant rho case, no need to kill ourselves figuring out complicated fits...
dx = (xmax - xmin)/nx
mi = dx*rho
self.x = [xmin + (i+0.5)*dx for i in xrange(nx)]
self.H = [SymTensor1d(1.0/(nNodePerh*dx)) for i in xrange(nx)]
self.m = [mi]*nx
self.rho = [rho]*nx
else:
self.rhofunc = rho
# Build the evenly sampled cumulative mass as a function of position.
ok = False
while not ok:
dx = (xmax - xmin)/numbins
mcum = np.cumsum(np.array([0.0] + [0.5*dx*(self.rhofunc(xmin + i*dx) + self.rhofunc(xmin + (i + 1)*dx)) for i in xrange(numbins)]))
# Find the target mass per node.
mi = mcum[-1]/nx
# Do we need to have a finer binning?
if mcum[-1]/mi > 0.5*numbins:
numbins = int(2*mcum[-1]/mi)
print "Warning, boosting numbins to %i to increase mass resolution for interpolation" % numbins
else:
ok = True
# Now go through and bisect for positions to get the mass per point we want.
xi = xmin
self.x = []
self.rho = []
mtarget = -0.5*mi
while xi < xmax:
mtarget += mi
if mtarget <= mcum[-1]:
i = np.searchsorted(mcum, mtarget) - 1
assert mtarget >= mcum[i] and mtarget <= mcum[i+1]
xi = xmin + (i + (mtarget - mcum[i])/(mcum[i+1] - mcum[i]))*dx
assert (xi >= xmin + i*dx) and (xi <= xmin + (i+1)*dx)
self.x.append(xi)
self.rho.append(self.rhofunc(xi))
else:
xi = xmax
n = len(self.x)
print "Generated %i 1D points." % n
self.m = [mi]*n
# Figure out the H.
self.H = []
for i in xrange(n):
if i == 0:
dxavg = self.x[i+1] - self.x[i]
elif i == n-1:
dxavg = self.x[i] - self.x[i-1]
else:
dxavg = 0.5*(self.x[i+1] - self.x[i-1])
self.H.append(SymTensor1d(1.0/(nNodePerh*dxavg)))
# Have the base class break up the serial node distribution
# for parallel cases.
NodeGeneratorBase.__init__(self, True,
self.x, self.m, self.rho, self.H)
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
return Vector1d(self.x[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
assert i >= 0 and i < len(self.x)
return self.rho[i]
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
#-------------------------------------------------------------------------------
# Similarly generate a 1D profile in 2D along the x-direction.
#-------------------------------------------------------------------------------
class GeneratePlanarNodeProfile2d(NodeGeneratorBase):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
nx, # target number of points in x
ny, # target number of points in y
rho, # density profile, must be 1D function
xmin, # (xmin, ymin) coordinates
xmax, # (xmax, ymax) coordinates
nNodePerh = 2.01,
numbins = 10000,
SPH = False):
assert nx > 0
assert ny > 0
assert xmin[0] < xmax[0]
assert xmin[1] < xmax[1]
assert nNodePerh > 0.0
# First use the 1D generator to generate a 1D slice profile along x.
gen1d = GenerateNodeProfile1d(nx = nx,
rho = rho,
xmin = xmin[0],
xmax = xmax[0],
nNodePerh = nNodePerh,
numbins = numbins)
# Stitch the 1D profiles back into serial data.
gen1d.x = mpi.allreduce(gen1d.x, mpi.SUM)
gen1d.m = mpi.allreduce(gen1d.m, mpi.SUM)
gen1d.rho = mpi.allreduce(gen1d.rho, mpi.SUM)
gen1d.H = mpi.allreduce(gen1d.H, mpi.SUM)
n1d = len(gen1d.x)
# Replicate the 1D slices into the full 2D data.
self.x = []
self.y = []
self.m = []
self.rho = []
self.H = []
dy = (xmax[1] - xmin[1])/ny
hyinv = 1.0/(nNodePerh*dy)
for iy in xrange(ny):
self.x += gen1d.x
self.y += [xmin[1] + (iy + 0.5)*dy]*n1d
self.m += [mi*(xmax[1] - xmin[1])/ny for mi in gen1d.m]
self.rho += gen1d.rho
self.H += [SymTensor2d(H1d.xx, 0.0, 0.0, hyinv) for H1d in gen1d.H]
# Have the base class break up the serial node distribution
# for parallel cases.
NodeGeneratorBase.__init__(self, True,
self.x, self.y, self.m, self.rho, self.H)
# If we're forcing round H tensors, do it.
if SPH:
self.makeHround()
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
assert len(self.x) == len(self.y)
return Vector2d(self.x[i], self.y[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
assert i >= 0 and i < len(self.x)
return self.rho[i]
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
#-------------------------------------------------------------------------------
# Similarly generate a 1D profile in 3D along the x-direction.
#-------------------------------------------------------------------------------
class GeneratePlanarNodeProfile3d(NodeGeneratorBase):
#---------------------------------------------------------------------------
# Constructor
#---------------------------------------------------------------------------
def __init__(self,
nx, # target number of points in x
ny, # target number of points in y
nz, # target number of points in z
rho, # density profile, must be 1D function
xmin, # (xmin, ymin, zmin) coordinates
xmax, # (xmax, ymax, zmax) coordinates
nNodePerh = 2.01,
numbins = 10000,
SPH = False):
assert nx > 0
assert ny > 0
assert nz > 0
assert xmin[0] < xmax[0]
assert xmin[1] < xmax[1]
assert xmin[2] < xmax[2]
assert nNodePerh > 0.0
# First use the 1D generator to generate a 1D slice profile along x.
gen1d = GenerateNodeProfile1d(nx = nx,
rho = rho,
xmin = xmin[0],
xmax = xmax[0],
nNodePerh = nNodePerh,
numbins = numbins)
# Stitch the 1D profiles back into serial data.
gen1d.x = mpi.allreduce(gen1d.x, mpi.SUM)
gen1d.m = mpi.allreduce(gen1d.m, mpi.SUM)
gen1d.rho = mpi.allreduce(gen1d.rho, mpi.SUM)
gen1d.H = mpi.allreduce(gen1d.H, mpi.SUM)
n1d = len(gen1d.x)
# Replicate the 1D slices into the full 3D data.
self.x = []
self.y = []
self.z = []
self.m = []
self.rho = []
self.H = []
dy = (xmax[1] - xmin[1])/ny
dz = (xmax[2] - xmin[2])/nz
hyinv = 1.0/(nNodePerh*dy)
hzinv = 1.0/(nNodePerh*dz)
for iz in xrange(nz):
for iy in xrange(ny):
self.x += gen1d.x
self.y += [xmin[1] + (iy + 0.5)*dy]*n1d
self.z += [xmin[2] + (iz + 0.5)*dz]*n1d
self.m += [mi*(xmax[1] - xmin[1])*(xmax[2] - xmin[2])/(ny*nz) for mi in gen1d.m]
self.rho += gen1d.rho
self.H += [SymTensor3d(H1d.xx, 0.0, 0.0,
0.0, hyinv, 0.0,
0.0, 0.0, hzinv) for H1d in gen1d.H]
# Have the base class break up the serial node distribution
# for parallel cases.
NodeGeneratorBase.__init__(self, True,
self.x, self.y, self.z, self.m, self.rho, self.H)
# If we're forcing round H tensors, do it.
if SPH:
self.makeHround()
return
#---------------------------------------------------------------------------
# Get the position for the given node index.
#---------------------------------------------------------------------------
def localPosition(self, i):
assert i >= 0 and i < len(self.x)
assert len(self.x) == len(self.y)
assert len(self.x) == len(self.z)
return Vector3d(self.x[i], self.y[i], self.z[i])
#---------------------------------------------------------------------------
# Get the mass for the given node index.
#---------------------------------------------------------------------------
def localMass(self, i):
assert i >= 0 and i < len(self.m)
return self.m[i]
#---------------------------------------------------------------------------
# Get the mass density for the given node index.
#---------------------------------------------------------------------------
def localMassDensity(self, i):
assert i >= 0 and i < len(self.x)
return self.rho[i]
#---------------------------------------------------------------------------
# Get the H tensor for the given node index.
#---------------------------------------------------------------------------
def localHtensor(self, i):
assert i >= 0 and i < len(self.H)
return self.H[i]
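# A pure-numpy sketch (illustrative only, not part of Spheral) of the idea
# behind GenerateNodeProfile1d: place points at equal-mass quantiles of a
# density profile by inverting the trapezoidally integrated cumulative mass.
# Unlike the class above, `rhofunc` here is assumed to accept numpy arrays.
def equal_mass_positions(rhofunc, x0, x1, npts, nbins=10000):
    xs = np.linspace(x0, x1, nbins + 1)
    dxb = xs[1] - xs[0]
    mcum = np.concatenate(([0.0],
                           np.cumsum(0.5*dxb*(rhofunc(xs[:-1]) + rhofunc(xs[1:])))))
    mi = mcum[-1]/npts                     # target mass per point
    targets = (np.arange(npts) + 0.5)*mi   # mass midpoints
    return np.interp(targets, mcum, xs), mi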
|
160485
|
import open3d as o3d
image_rgb = o3d.io.read_image("image.png")
image_depth = o3d.io.read_image("image_depth.png")
width = int(image_rgb.get_max_bound()[0])
height = int(image_rgb.get_max_bound()[1])
camera_intrinsic = o3d.camera.PinholeCameraIntrinsic(width=width, height=height, fx=500, fy=500,cx=width/2, cy=height/2)
image_rgbd = o3d.geometry.RGBDImage.create_from_color_and_depth(image_rgb, image_depth, convert_rgb_to_intensity=False)
pcd = o3d.geometry.PointCloud.create_from_rgbd_image(image_rgbd, camera_intrinsic)
pcd.estimate_normals(search_param=o3d.geometry.KDTreeSearchParamHybrid(radius=0.1, max_nn=30))
# Flip it, otherwise the pointcloud will be upside down
pcd.transform([[1, 0, 0, 0], [0, -1, 0, 0], [0, 0, -1, 0], [0, 0, 0, 1]])
o3d.visualization.draw_geometries([pcd], width=640, height=480)
o3d.io.write_point_cloud("output.ply", pcd)
|
160621
|
from distutils.core import setup
setup(
name = 'py-geohash-any',
packages = ['py_geohash_any', 'py_geohash_any.tests'],
version = '1.1',
description = 'Python geohash library designed to use any encoding',
author = '<NAME>',
author_email = '<EMAIL>',
url = 'https://github.com/kylebebak/py-geohash-any',
download_url = 'https://github.com/kylebebak/py-geohash-any/tarball/1.1',
keywords = ['geohash', 'encoding', 'base', 'map'],
classifiers = [],
license = 'CC BY-SA 4.0'
)
|
160645
|
from setuptools import setup, find_packages
def parse_requirements(requirement_file):
with open(requirement_file) as f:
return f.readlines()
setup(
name='graphish',
version='1.3.0',
packages=find_packages(exclude=['tests*']),
license='MIT',
description='A Python package to search & delete & move emails using Microsoft Graph API',
long_description=open('README.md').read(),
long_description_content_type="text/markdown",
install_requires=parse_requirements('./requirements.txt'),
keywords=['graph', 'microsoft', 'office365', 'email', 'ediscovery', 'phish', 'phishing', 'swimlane'],
url='https://github.com/swimlane/graphish',
author='Swimlane',
author_email='<EMAIL>',
python_requires='>=2.6, !=3.0.*, !=3.1.*, !=3.2.*, <4'
)
|
160708
|
import h5py
import random
import numpy as np
import pdb
import torch
class DataLoaderSimple(object):
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Does not use expert rewards.
"""
def __init__(self, opts):
"""
Loads the dataset and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, h5_path_unseen (optional)
mask_path (optional)
"""
# ---- Load the dataset ----
self.h5_file = h5py.File(opts.h5_path, 'r')
self.data = {}
self.data['train'] = np.array(self.h5_file['train'])
self.data['val'] = np.array(self.h5_file['val'])
self.data['test'] = np.array(self.h5_file['test'])
if 'val_highres' in self.h5_file.keys():
self.data['val_highres'] = np.array(self.h5_file['val_highres'])
self.data['test_highres'] = np.array(self.h5_file['test_highres'])
# ---- Load the unseen classes ----
if opts.h5_path_unseen != '':
h5_file_unseen = h5py.File(opts.h5_path_unseen, 'r')
self.data['test_unseen'] = np.array(h5_file_unseen['test'])
# ---- Save settings needed for batching operations ----
# Dataset statistics
self.train_count = self.h5_file['train'].shape[0]
self.val_count = self.h5_file['val'].shape[0]
self.test_count = self.h5_file['test'].shape[0]
if opts.h5_path_unseen != '':
self.test_unseen_count = self.data['test_unseen'].shape[0]
if hasattr(opts, 'mask_path') and opts.mask_path != '':
mask_file = h5py.File(opts.mask_path, 'r')
self.masks = {}
self.masks['test'] = np.array(mask_file['test_mask'])
if opts.h5_path_unseen != '':
self.masks['test_unseen'] = np.array(mask_file['test_unseen_mask'])
self.hasmasks = True
else:
self.hasmasks = False
self.pano_shape = self.h5_file['train'].shape[1:]
# Iteration tracker
self.train_idx = 0
self.val_idx = 0
self.test_idx = 0
if opts.h5_path_unseen != '':
self.test_unseen_idx = 0
self.batch_size = opts.batch_size
# Shuffle the training data indices and access them in the shuffled order
self.shuffle = opts.shuffle
self.shuffled_idx = list(range(self.h5_file['train'].shape[0]))
if self.shuffle:
random.shuffle(self.shuffled_idx)
# Debug mode
self.debug = opts.debug
self.N = self.data['train'].shape[1]
self.M = self.data['train'].shape[2]
self.C = self.data['train'].shape[3]
self.H = self.data['train'].shape[4]
self.W = self.data['train'].shape[5]
if 'val_highres' in self.data:
self.H_highres = self.data['val_highres'].shape[4]
self.W_highres = self.data['val_highres'].shape[5]
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, depleted
def next_batch_val(self, highres=False):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_highres: BxNxMxCx448x448 (optional)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = np.array(self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :])
if highres:
out_highres = np.array(self.data['val_highres'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :])
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if highres:
assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
if not highres:
return out, depleted
else:
return out, out_highres, depleted
def next_batch_test(self, highres=False):
"""
Returns the next testing batch
out: BxNxMxCx32x32
out_highres: BxNxMxCx448x448 (optional)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.test_count - self.test_idx)
out = np.array(self.data['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if highres:
out_highres = np.array(self.data['test_highres'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.debug:
assert((batch_size == self.batch_size) or (self.test_idx + batch_size == self.test_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if highres:
assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
if self.hasmasks:
assert(out_masks.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.test_idx + batch_size == self.test_count:
depleted = True
self.test_idx = 0
else:
depleted = False
self.test_idx = self.test_idx + batch_size
if not highres:
return out, out_masks, depleted
else:
return out, out_highres, out_masks, depleted
def next_batch_test_unseen(self):
"""
Returns the next unseen classes testing batch
out: BxNxMxCx32x32
"""
batch_size = min(self.batch_size, self.test_unseen_count - self.test_unseen_idx)
out = np.array(self.data['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.debug:
assert((batch_size == self.batch_size) or (self.test_unseen_idx + batch_size == self.test_unseen_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.test_unseen_idx + batch_size == self.test_unseen_count:
depleted = True
self.test_unseen_idx = 0
else:
depleted = False
self.test_unseen_idx = self.test_unseen_idx + batch_size
return out, out_masks, depleted
class DataLoaderExpert(DataLoaderSimple):
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Uses expert rewards.
"""
def __init__(self, opts):
"""
Loads the dataset, rewards and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, rewards_h5_path
"""
# ---- Load the dataset, save settings ----
super(DataLoaderExpert, self).__init__(opts)
# ---- Load the rewards ----
rewards_file = h5py.File(opts.rewards_h5_path, 'r')
self.rewards = {}
# These are KxNxM arrays containing rewards corresponding to each views of
# all panoramas in the train and val splits
self.rewards['train'] = np.array(rewards_file['train/nms'])
self.rewards['val'] = np.array(rewards_file['val/nms'])
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
out_rewards = self.rewards['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :]
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, out_rewards, depleted
def next_batch_val(self):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :]
out_rewards = self.rewards['val'][self.val_idx:(self.val_idx+batch_size), :, :]
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
return out, out_rewards, depleted
class DataLoaderExpertPolicy(DataLoaderSimple):
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Uses expert trajectories.
"""
def __init__(self, opts):
"""
Loads the dataset, utility maps and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, utility_h5_path, h5_path_unseen, debug
"""
# ---- Load the dataset, save the settings ----
super(DataLoaderExpertPolicy, self).__init__(opts)
self.trajectories_type = opts.trajectories_type
if opts.trajectories_type == 'utility_maps':
# ---- Load the utility maps ----
utility_file = h5py.File(opts.utility_h5_path, 'r')
self.utility_maps = {}
# These are KxNxMxNxM arrays
for split in utility_file.keys():
self.utility_maps[split] = np.array(utility_file[split]['utility_maps'])
elif opts.trajectories_type == 'expert_trajectories':
# ---- Load the trajectories ----
# {'train': #train_samples x T-1 numpy array, 'val': #val_samples x T-1 numpy array}
self.trajectories = torch.load(opts.utility_h5_path)
else:
raise ValueError('Wrong trajectories_type!')
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)]]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
out_maps[(i, j)] = self.trajectories['train'][(i, j)][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :]
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, out_maps, depleted
def next_batch_val(self):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :]
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['val'][self.val_idx:(self.val_idx+batch_size)]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
out_maps[(i, j)] = self.trajectories['val'][(i, j)][self.val_idx:(self.val_idx+batch_size), :]
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
return out, out_maps, depleted
def next_batch_test(self, highres=False):
"""
Returns the next testing batch
out: BxNxMxCx32x32
out_masks: BxNxMxCx32x32 (or None if no masks were loaded)
out_maps: BxNxMxNxM
out_highres: BxNxMxCx448x448 (optional)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.test_count - self.test_idx)
out = np.array(self.data['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if highres:
out_highres = np.array(self.data['test_highres'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test'][self.test_idx:(self.test_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['test'][self.test_idx:(self.test_idx+batch_size)]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
out_maps[(i, j)] = self.trajectories['test'][(i, j)][self.test_idx:(self.test_idx+batch_size), :]
if self.debug:
assert((batch_size == self.batch_size) or (self.test_idx + batch_size == self.test_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if highres:
assert(out_highres.shape == (batch_size, self.N, self.M, self.C, self.H_highres, self.W_highres))
if self.hasmasks:
assert(out_masks.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.test_idx + batch_size == self.test_count:
depleted = True
self.test_idx = 0
else:
depleted = False
self.test_idx = self.test_idx + batch_size
if not highres:
return out, out_masks, out_maps, depleted
else:
return out, out_highres, out_masks, out_maps, depleted
def next_batch_test_unseen(self):
"""
Returns the next unseen classes testing batch
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
out_masks: BxNxMxCx32x32 (or None if no masks were loaded)
depleted: is the epoch over?
"""
batch_size = min(self.batch_size, self.test_unseen_count - self.test_unseen_idx)
out = np.array(self.data['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :])
if self.hasmasks:
out_masks = self.masks['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :, :, :, :, :]
else:
out_masks = None
if self.trajectories_type == 'utility_maps':
out_maps = self.utility_maps['test_unseen'][self.test_unseen_idx:(self.test_unseen_idx + batch_size)]
else:
out_maps = {}
for i in range(self.N):
for j in range(self.M):
out_maps[(i, j)] = self.trajectories['test_unseen'][(i, j)][self.test_unseen_idx:(self.test_unseen_idx+batch_size), :]
if self.debug:
assert((batch_size == self.batch_size) or (self.test_unseen_idx + batch_size == self.test_unseen_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
if self.trajectories_type == 'utility_maps':
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
else:
assert(len(out_maps.keys()) == self.M * self.N)
assert(out_maps[(0, 0)].shape[0] == batch_size)
if self.test_unseen_idx + batch_size == self.test_unseen_count:
depleted = True
self.test_unseen_idx = 0
else:
depleted = False
self.test_unseen_idx = self.test_unseen_idx + batch_size
return out, out_masks, out_maps, depleted
class DataLoaderExpertBoth(DataLoaderSimple):
# TODO: Need to update trajectories_type here
# TODO: Add next_batch_test with expert trajectories option here
"""
DataLoader class for abstracting the reading, batching and shuffling operations
Uses expert trajectories and rewards.
"""
def __init__(self, opts):
"""
Loads the dataset, utility maps and saves settings needed:
(1) dataset statistics (2) shuffle (3) debug statistics (4) iteration tracker
Opts required: seed, h5_path, shuffle, batch_size, utility_h5_path, rewards_h5_path, h5_path_unseen, debug
"""
# ---- Load the dataset, save the settings ----
super(DataLoaderExpertBoth, self).__init__(opts)
# ---- Load the utility maps and rewards ----
utility_file = h5py.File(opts.utility_h5_path, 'r')
rewards_file = h5py.File(opts.rewards_h5_path, 'r')
self.rewards = {}
self.utility_maps = {}
# These are KxNxMxNxM arrays
self.utility_maps['train'] = np.array(utility_file['train/utility_maps'])
self.utility_maps['val'] = np.array(utility_file['val/utility_maps'])
# These are KxNxM arrays containing rewards corresponding to each views of
# all panoramas in the train and val splits
self.rewards['train'] = np.array(rewards_file['train/nms'])
self.rewards['val'] = np.array(rewards_file['val/nms'])
def next_batch_train(self):
"""
Returns the next training batch (indexed by self.shuffled_idx and starting at self.train_idx)
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.train_count - self.train_idx)
out = np.array(self.data['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :, :, :, :])
out_maps = self.utility_maps['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)]]
out_rewards = self.rewards['train'][self.shuffled_idx[self.train_idx:(self.train_idx+batch_size)], :, :]
if self.debug:
assert((batch_size == self.batch_size) or (self.train_idx + batch_size == self.train_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.train_idx + batch_size == self.train_count:
depleted = True
self.train_idx = 0
else:
depleted = False
self.train_idx = self.train_idx + batch_size
return out, out_maps, out_rewards, depleted
def next_batch_val(self):
"""
Returns the next validation batch
out: BxNxMxCx32x32
out_maps: BxNxMxNxM
out_rewards: BxNxM
"""
batch_size = min(self.batch_size, self.val_count - self.val_idx)
out = self.data['val'][self.val_idx:(self.val_idx+batch_size), :, :, :, :, :]
out_maps = self.utility_maps['val'][self.val_idx:(self.val_idx+batch_size)]
out_rewards = self.rewards['val'][self.val_idx:(self.val_idx+batch_size)]
if self.debug:
assert((batch_size == self.batch_size) or (self.val_idx + batch_size == self.val_count))
assert(out.shape == (batch_size, self.N, self.M, self.C, self.H, self.W))
assert(out_maps.shape == (batch_size, self.N, self.M, self.N, self.M))
assert(out_rewards.shape == (batch_size, self.N, self.M))
if self.val_idx + batch_size == self.val_count:
depleted = True
self.val_idx = 0
else:
depleted = False
self.val_idx = self.val_idx + batch_size
return out, out_maps, out_rewards, depleted
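# A minimal usage sketch (not in the original file) for DataLoaderSimple:
# every next_batch_* method returns a `depleted` flag that flips to True on
# the last batch of an epoch and resets the internal index, so an epoch is a
# plain while-loop. The Expert variants additionally return rewards/maps.
def run_one_epoch(loader, step_fn):
    depleted = False
    while not depleted:
        out, depleted = loader.next_batch_train()  # out: BxNxMxCx32x32
        step_fn(out)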
|
160762
|
from arbitrage.public_markets._bitfinex import Bitfinex
class BitfinexUSD(Bitfinex):
def __init__(self):
super().__init__("USD", "btcusd")
|
160795
|
import pygame
import time
scoreDataA = []
scoreDataB = []
scoreDataC = []
scoreDataD = []
def saveFile():
index = input("Enter your song INDEX:")
with open("SongData.h", "w+") as f:
f.writelines("const int LENGTH_" + str(index) + " = " + str(len(scoreDataA)) + ";")
f.write("static const bool ScoreData_" + str(index) + "[4][" + str(len(scoreDataA)) + "] PROGMEM = {" +
str(scoreDataA).replace('[', '{').replace(']', '}') + ",\n" +
str(scoreDataB).replace('[', '{').replace(']', '}') + ",\n" +
str(scoreDataC).replace('[', '{').replace(']', '}') + ",\n" +
str(scoreDataD).replace('[', '{').replace(']', '}') + "\n};")
def appender(keys, target, store):
if(keys[target]):
store.append(1)
else:
store.append(0)
def main():
print("Now we are starting creating songs.")
print(
"Key [A] [S] [D] [F] represents four keys on the keyboard. [Enter] to end.")
input(" Please do this in the pygame window, or we won't be able to detect keyboard events. Press any key to start.\n")
pygame.init()
screen = pygame.display.set_mode((640, 480))
pygame.display.set_caption('Song Creator')
while(1):
keys = pygame.key.get_pressed()
appender(keys, pygame.K_a, scoreDataA)
appender(keys, pygame.K_s, scoreDataB)
appender(keys, pygame.K_d, scoreDataC)
appender(keys, pygame.K_f, scoreDataD)
print(str(keys[pygame.K_a]) + str(keys[pygame.K_s]) +
str(keys[pygame.K_d]) + str(keys[pygame.K_f]))
time.sleep(0.1)
if(keys[pygame.K_RETURN]):
pygame.quit()
break
pygame.event.pump()
print(scoreDataA)
print(scoreDataB)
print(scoreDataC)
print(scoreDataD)
saveFile()
input("End. Song length: " + str(len(scoreDataA)))
main()
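# For reference, a hedged sketch of the header saveFile() emits
# (the index "1" and the 3-tick recording are made-up values):
#   const int LENGTH_1 = 3;
#   static const bool ScoreData_1[4][3] PROGMEM = {{1, 0, 0},
#   {0, 0, 0},
#   {0, 1, 0},
#   {0, 0, 1}
#   };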
|
160828
|
import os, sys, vcs, cdms2
f = cdms2.open(os.path.join(vcs.sample_data,"clt.nc"))
V = f("clt")
x = vcs.init()
x.plot(V, bg=1)
|
160845
|
expected_output = {
"interface": {
"GigabitEthernet1/0/1": {
"out": {
"mcast_pkts": 188396,
"bcast_pkts": 0,
"ucast_pkts": 124435064,
"name": "GigabitEthernet1/0/1",
"octets": 24884341205,
},
"in": {
"mcast_pkts": 214513,
"bcast_pkts": 0,
"ucast_pkts": 15716712,
"name": "GigabitEthernet1/0/1",
"octets": 3161931167,
},
}
}
}
|
160884
|
from random import randint
class Temporales:
def __init__(self, tablaSimbolos={}, variables={}, funciones={}, temp= 0, etiqueta = 0, parametro=0, funcion = 1, retorno = 0):
self.variables = variables.copy()
self.parametro = parametro
self.funciones = funciones.copy()
self.tablaSimbolos = tablaSimbolos.copy()
self.temp = temp
self.etiqueta = etiqueta
self.funcion = funcion
self.retorno = retorno
def limpiar(self):
self.temp = 0
self.parametro = 0
self.etiqueta = 0
def varTemporal(self):
variable = "t" + str(self.temp)
self.temp += 1
return str(variable)
def varParametro(self):
variable = "p" + str(self.parametro)
self.parametro += 1
return str(variable)
def varRetorno(self):
variable = "r" + str(self.retorno)
self.retorno += 1
return str(variable)
def varFuncion(self):
variable = "F" + str(self.funcion)
self.funcion += 1
return str(variable)
def varFuncionAnterior(self):
variable = self.funcion
return variable
def etiquetaT(self):
variable = "L" + str(self.etiqueta)
self.etiqueta += 1
return variable
def agregarVar(self, varT, variableObjeto):
self.variables[varT] = variableObjeto
def agregarSimbolo(self, simbolo):
rand = randint(1, 25000)
self.tablaSimbolos[str(simbolo.nombre)+str(rand)] = simbolo
def obtenerSimbolo(self, simbolo):
        if simbolo not in self.tablaSimbolos:
            return None
        else:
            return self.tablaSimbolos[simbolo]
def actualizarSimbolo(self, simbolo, nuevoSi):
        if simbolo not in self.tablaSimbolos:
            print("Symbol not found; nothing was updated.")
        else:
            self.tablaSimbolos[simbolo] = nuevoSi
class tipoSimbolo():
def __init__(self, temporal, nombre, tipo, tam, pos, rol, ambito):
self.temporal = temporal
self.nombre = nombre
self.tipo = tipo
self.tam = tam
self.pos = pos
self.rol = rol
self.ambito = ambito
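# Hedged usage sketch: how the temporary/label counters behave.
def _demo_temporales():
    t = Temporales()
    assert t.varTemporal() == "t0"
    assert t.varTemporal() == "t1"
    assert t.etiquetaT() == "L0"
    t.limpiar()  # resets the temp, parametro and etiqueta counters
    assert t.varTemporal() == "t0"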
|
160900
|
import sys
import math
import numpy as np
from datetime import datetime
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
def ortho_weight(ndim):
"""
Random orthogonal weights
Used by norm_weights(below), in which case, we
are ensuring that the rows are orthogonal
(i.e W = U \Sigma V, U has the same
# of rows, V has the same # of cols)
"""
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype('float32')
def norm_weight(nin, nout=None, scale=0.01, ortho=True):
"""
Random weights drawn from a Gaussian
"""
if nout is None:
nout = nin
if nout == nin and ortho:
W = ortho_weight(nin)
else:
W = scale * np.random.randn(nin, nout)
return W.astype('float32')
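# Hedged sanity check for the initializers above: an orthogonal matrix
# times its transpose is the identity.
def _demo_weight_init():
    W = ortho_weight(4)
    assert np.allclose(W.dot(W.T), np.eye(4), atol=1e-5)
    V = norm_weight(4, 6, ortho=False)
    assert V.shape == (4, 6)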
class LSTM_Cell(nn.Module):
def __init__(self, device, in_dim, mem_dim):
super(LSTM_Cell, self).__init__()
self.device = device
self.in_dim = in_dim
self.mem_dim = mem_dim
def new_gate():
h = nn.Linear(self.mem_dim, self.mem_dim, bias=False)
h.weight.data.copy_(torch.from_numpy(ortho_weight(self.mem_dim)))
return h
def new_W():
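            # NOTE: copy_ below pastes an (mem_dim x mem_dim) orthogonal
            # matrix onto a weight of shape (mem_dim x in_dim), which only
            # works when in_dim == mem_dim.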
w = nn.Linear(self.in_dim, self.mem_dim)
w.weight.data.copy_(torch.from_numpy(ortho_weight(self.mem_dim)))
return w
self.ih = new_gate()
self.fh = new_gate()
self.oh = new_gate()
self.ch = new_gate()
self.cx = new_W()
self.ox = new_W()
self.fx = new_W()
self.ix = new_W()
def forward(self, input, h, c):
        u = torch.tanh(self.cx(input) + self.ch(h))
        i = torch.sigmoid(self.ix(input) + self.ih(h))
        f = torch.sigmoid(self.fx(input) + self.fh(h))
        c = i*u + f*c
        o = torch.sigmoid(self.ox(input) + self.oh(h))
        h = o * torch.tanh(c)
return c, h
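# Hedged smoke test for a single recurrent step; in_dim equals mem_dim
# here because of the weight-shape constraint noted in new_W above.
def _demo_lstm_cell():
    cell = LSTM_Cell('cpu', 8, 8)
    x = torch.randn(4, 8)     # batch x in_dim
    h = torch.zeros(4, 8)
    c = torch.zeros(4, 8)
    c, h = cell(x, h, c)
    assert h.shape == (4, 8) and c.shape == (4, 8)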
class LSTM(nn.Module):
def __init__(self, device, in_dim, mem_dim):
super(LSTM, self).__init__()
self.device = device
self.in_dim = in_dim
self.mem_dim = mem_dim
self.TreeCell = LSTM_Cell(device, in_dim, mem_dim)
self.output_module = None
def forward(self, x, x_mask):
"""
:param x: #step x #sample x dim_emb
:param x_mask: #step x #sample
        :return: #step x #sample x dim (stacked hidden states)
"""
h = Variable(torch.zeros(x.size(1), x.size(2)))
c = Variable(torch.zeros(x.size(1), x.size(2)))
if torch.cuda.is_available():
h=h.to(self.device)
c=c.to(self.device)
all_hidden=[]
for step in range(x.size(0)):
input=x[step] # #sample x dim_emb
step_c, step_h=self.TreeCell(input, h, c)
h=x_mask[step][:,None] * step_h + (1. - x_mask[step])[:,None] * h
c = x_mask[step][:, None] * step_c + (1. - x_mask[step])[:, None] * c
all_hidden.append(torch.unsqueeze(h,0))
return torch.cat(all_hidden,0)
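# Hedged smoke test: unrolling the masked LSTM over a short sequence.
def _demo_lstm():
    lstm = LSTM('cpu', 8, 8)
    x = torch.randn(5, 4, 8)      # step x sample x dim_emb
    mask = torch.ones(5, 4)
    hidden = lstm(x, mask)
    assert hidden.shape == (5, 4, 8)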
class ESIM(nn.Module):
"""
Implementation of the multi feed forward network model described in
the paper "A Decomposable Attention Model for Natural Language
Inference" by <NAME> al., 2016.
It applies feedforward MLPs to combinations of parts of the two sentences,
without any recurrent structure.
"""
def __init__(self, num_units, num_classes, embedding_size, dropout, device=0,
training=True, project_input=True,
use_intra_attention=False, distance_biases=10, max_sentence_length=30):
"""
        Create the ESIM model.
:param num_units: size of the networks
:param num_classes: number of classes in the problem
:param embedding_size: size of each word embedding
:param use_intra_attention: whether to use intra-attention model
:param training: whether to create training tensors (optimizer)
:param project_input: whether to project input embeddings to a
different dimensionality
:param distance_biases: number of different distances with biases used
in the intra-attention model
"""
super(ESIM, self).__init__()
self.arch = "ESIM"
self.num_units = num_units
self.num_classes = num_classes
self.project_input = project_input
self.embedding_size=embedding_size
self.distance_biases=distance_biases
self.max_sentence_length=max_sentence_length
self.device = device
self.dropout = nn.Dropout(p=dropout)
self.lstm_intra=LSTM(device, embedding_size, num_units)
self.linear_layer_compare = nn.Sequential(nn.Linear(4*num_units*2, num_units), nn.ReLU(), nn.Dropout(p=dropout))
# nn.Dropout(p=0.2), nn.Linear(num_units, num_units), nn.ReLU())
self.lstm_compare=LSTM(device, embedding_size, num_units)
self.linear_layer_aggregate = nn.Sequential(nn.Dropout(p=dropout), nn.Linear(4*num_units*2, num_units), nn.ReLU(),
nn.Dropout(p=dropout), nn.Linear(num_units, num_classes))
self.init_weight()
def ortho_weight(self):
"""
Random orthogonal weights
Used by norm_weights(below), in which case, we
are ensuring that the rows are orthogonal
(i.e W = U \Sigma V, U has the same
# of rows, V has the same # of cols)
"""
ndim=self.num_units
W = np.random.randn(ndim, ndim)
u, s, v = np.linalg.svd(W)
return u.astype('float32')
def initialize_lstm(self):
if torch.cuda.is_available():
init=torch.Tensor(np.concatenate([self.ortho_weight(),self.ortho_weight(),self.ortho_weight(),self.ortho_weight()], 0)).to(self.device)
else:
init = torch.Tensor(
np.concatenate([self.ortho_weight(), self.ortho_weight(), self.ortho_weight(), self.ortho_weight()], 0))
return init
def init_weight(self):
#nn.init.normal(self.linear_layer_project,mean=0,std=0.1)
#print(self.linear_layer_attend[3])
#self.linear_layer_attend[1].weight.data.normal_(0, 0.01)
#self.linear_layer_attend[1].bias.data.fill_(0)
#self.linear_layer_attend[4].weight.data.normal_(0, 0.01)
#self.linear_layer_attend[4].bias.data.fill_(0)
self.linear_layer_compare[0].weight.data.normal_(0, 0.01)
self.linear_layer_compare[0].bias.data.fill_(0)
#self.linear_layer_compare[4].weight.data.normal_(0, 0.01)
#self.linear_layer_compare[4].bias.data.fill_(0)
self.linear_layer_aggregate[1].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[1].bias.data.fill_(0)
self.linear_layer_aggregate[4].weight.data.normal_(0, 0.01)
self.linear_layer_aggregate[4].bias.data.fill_(0)
def attention_softmax3d(self,raw_attentions):
reshaped_attentions = raw_attentions.view(-1, raw_attentions.size(2))
out=nn.functional.softmax(reshaped_attentions, dim=1)
return out.view(raw_attentions.size(0),raw_attentions.size(1),raw_attentions.size(2))
def _transformation_input(self,embed_sent, x1_mask):
embed_sent = self.word_embedding(embed_sent)
embed_sent = self.dropout(embed_sent)
hidden=self.lstm_intra(embed_sent, x1_mask)
return hidden
def aggregate(self,v1, v2):
"""
Aggregate the representations induced from both sentences and their
representations
:param v1: tensor with shape (batch, time_steps, num_units)
:param v2: tensor with shape (batch, time_steps, num_units)
:return: logits over classes, shape (batch, num_classes)
"""
v1_mean = torch.mean(v1, 0)
v2_mean = torch.mean(v2, 0)
v1_max, _ = torch.max(v1, 0)
v2_max, _ = torch.max(v2, 0)
out = self.linear_layer_aggregate(torch.cat((v1_mean, v1_max, v2_mean, v2_max), 1))
#v1_sum=torch.sum(v1,1)
#v2_sum=torch.sum(v2,1)
#out=self.linear_layer_aggregate(torch.cat([v1_sum,v2_sum],1))
return out
def cosine_interaction(self, tensor1, tensor2):
"""
:param tensor1: #step1 * dim
:param tensor2: #step2 * dim
:return: #step1 * #step2
"""
simCube_0=tensor1[0].view(1,-1)
simCube_1=tensor2[0].view(1,-1)
for i in range(tensor1.size(0)):
for j in range(tensor2.size(0)):
if not(i==0 and j==0):
simCube_0=torch.cat((simCube_0, tensor1[i].view(1,-1)))
simCube_1=torch.cat((simCube_1, tensor2[j].view(1,-1)))
simCube=F.cosine_similarity(simCube_0, simCube_1)
return simCube.view(tensor1.size(0),tensor2.size(0))
def create_mask(self, sent):
masks = []
sent_lengths = [len(s.split(" ")) for s in sent]
max_len = max(sent_lengths)
for s_length in sent_lengths:
pad_mask = np.zeros(max_len)
pad_mask[:s_length] = 1
masks.append(pad_mask)
masks = np.array(masks)
return torch.from_numpy(masks).float().to(self.device)
#def forward(self, x1, x1_mask, x2, x2_mask):
def forward(self, sent1, sent2, ext_feats=None, word_to_doc_count=None, raw_sent1=None, raw_sent2=None, visualize=False):
# idx = [i for i in range(embed_sent.size(1) - 1, -1, -1)]
# if torch.cuda.is_available():
# idx = torch.cuda.LongTensor(idx)
# else:
# idx = torch.LongTensor(idx)
sent1 = sent1.permute(2, 0, 1) # from [B * D * T] to [T * B * D]
sent2 = sent2.permute(2, 0, 1)
x1_mask = self.create_mask(raw_sent1)
x2_mask = self.create_mask(raw_sent2)
x1_mask = x1_mask.permute(1, 0)
x2_mask = x2_mask.permute(1, 0)
#x1 = self.word_embedding(x1)
x1 = self.dropout(sent1)
#x2 = self.word_embedding(x2)
x2 = self.dropout(sent2)
idx_1 = [i for i in range(x1.size(0) - 1, -1, -1)]
idx_1 = Variable(torch.LongTensor(idx_1))
if torch.cuda.is_available():
idx_1 = idx_1.to(self.device)
x1_r=torch.index_select(x1,0,idx_1)
x1_mask_r=torch.index_select(x1_mask,0,idx_1)
idx_2=[i for i in range(x2.size(0) -1, -1, -1)]
idx_2 = Variable(torch.LongTensor(idx_2))
if torch.cuda.is_available():
idx_2 = Variable(torch.LongTensor(idx_2)).to(self.device)
x2_r=torch.index_select(x2,0,idx_2)
x2_mask_r=torch.index_select(x2_mask, 0, idx_2)
proj1=self.lstm_intra(x1, x1_mask)
proj1_r=self.lstm_intra(x1_r, x1_mask_r)
proj2=self.lstm_intra(x2, x2_mask)
proj2_r=self.lstm_intra(x2_r, x2_mask_r)
ctx1=torch.cat((proj1, torch.index_select(proj1_r,0,idx_1)),2)
ctx2=torch.cat((proj2, torch.index_select(proj2_r, 0, idx_2)),2)
# ctx1: #step1 x #sample x #dimctx
# ctx2: #step2 x #sample x #dimctx
ctx1 = ctx1 * x1_mask[:, :, None]
ctx2 = ctx2 * x2_mask[:, :, None]
# weight_matrix: #sample x #step1 x #step2
weight_matrix = torch.matmul(ctx1.permute(1, 0, 2), ctx2.permute(1, 2, 0))
if visualize:
return weight_matrix
weight_matrix_1 = torch.exp(weight_matrix - weight_matrix.max(1, keepdim=True)[0]).permute(1, 2, 0)
weight_matrix_2 = torch.exp(weight_matrix - weight_matrix.max(2, keepdim=True)[0]).permute(1, 2, 0)
# weight_matrix_1: #step1 x #step2 x #sample
weight_matrix_1 = weight_matrix_1 * x1_mask[:, None, :]
weight_matrix_2 = weight_matrix_2 * x2_mask[None, :, :]
alpha = weight_matrix_1 / weight_matrix_1.sum(0, keepdim=True)
beta = weight_matrix_2 / weight_matrix_2.sum(1, keepdim=True)
self.alpha=alpha
self.beta=beta
ctx2_ = (torch.unsqueeze(ctx1,1) * torch.unsqueeze(alpha,3)).sum(0)
ctx1_ = (torch.unsqueeze(ctx2, 0) * torch.unsqueeze(beta,3)).sum(1)
# cosine distance and Euclidean distance
'''
tmp_result=[]
for batch_i in range(ctx1.size(1)):
tmp_result.append(torch.unsqueeze(self.cosine_interaction(ctx1[:,batch_i,:], ctx2[:,batch_i,:]), 0))
weight_matrix=torch.cat(tmp_result)
weight_matrix_1 = torch.exp(weight_matrix - weight_matrix.max(1, keepdim=True)[0]).permute(1, 2, 0)
weight_matrix_2 = torch.exp(weight_matrix - weight_matrix.max(2, keepdim=True)[0]).permute(1, 2, 0)
# weight_matrix_1: #step1 x #step2 x #sample
weight_matrix_1 = weight_matrix_1 * x1_mask[:, None, :]
weight_matrix_2 = weight_matrix_2 * x2_mask[None, :, :]
alpha = weight_matrix_1 / weight_matrix_1.sum(0, keepdim=True)
beta = weight_matrix_2 / weight_matrix_2.sum(1, keepdim=True)
ctx2_cos_ = (torch.unsqueeze(ctx1, 1) * torch.unsqueeze(alpha, 3)).sum(0)
ctx1_cos_ = (torch.unsqueeze(ctx2, 0) * torch.unsqueeze(beta, 3)).sum(1)
'''
inp1 = torch.cat([ctx1, ctx1_, ctx1 * ctx1_, ctx1 - ctx1_], 2)
inp2 = torch.cat([ctx2, ctx2_, ctx2 * ctx2_, ctx2 - ctx2_], 2)
#inp1 = torch.cat([ctx1, ctx1_, ctx1_cos_, ctx1 * ctx1_, ctx1 * ctx1_cos_, ctx1 - ctx1_, ctx1 - ctx1_cos_], 2)
#inp2 = torch.cat([ctx2, ctx2_, ctx2_cos_, ctx2 * ctx2_, ctx2 * ctx2_cos_, ctx2 - ctx2_, ctx2 - ctx2_cos_], 2)
inp1=self.dropout(self.linear_layer_compare(inp1))
inp2=self.dropout(self.linear_layer_compare(inp2))
inp1_r=torch.index_select(inp1, 0, idx_1)
inp2_r=torch.index_select(inp2, 0, idx_2)
v1=self.lstm_compare(inp1, x1_mask)
v2=self.lstm_compare(inp2, x2_mask)
v1_r = self.lstm_compare(inp1_r, x1_mask)
v2_r = self.lstm_compare(inp2_r, x2_mask)
v1=torch.cat((v1, torch.index_select(v1_r, 0, idx_1)),2)
v2=torch.cat((v2, torch.index_select(v2_r, 0, idx_2)),2)
out = self.aggregate(v1, v2)
out = F.log_softmax(out, dim=1)
return out
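# Hedged smoke test for the full model. All sizes are made up; the
# embedding size must equal num_units because of the LSTM_Cell
# weight-shape constraint noted above.
def _demo_esim():
    model = ESIM(num_units=8, num_classes=3, embedding_size=8,
                 dropout=0.1, device='cpu')
    raw1 = ['a b c', 'd e f']                 # 2 sentences, 3 tokens each
    raw2 = ['g h i', 'j k l']
    sent1 = torch.randn(2, 8, 3)              # batch x dim x time
    sent2 = torch.randn(2, 8, 3)
    out = model(sent1, sent2, raw_sent1=raw1, raw_sent2=raw2)
    assert out.shape == (2, 3)                # log-probabilities per class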
|
160913
|
from rq import get_current_job
from app import db
from app.models import Tasks
def _set_task_progress(progress: int) -> None:
"""
A helper function which updates the progress status of a background task
Parameters
----------
progress : int
The percentage of the task progress
"""
job = get_current_job()
if job:
job.meta["progress"] = progress
job.save_meta()
if progress >= 100:
            task = Tasks.query.filter_by(task_id=job.get_id()).first()
            if task is not None:
                task.complete = True
db.session.commit()
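# Hedged usage sketch: a worker-side task body reporting its progress.
# `example_task` is illustrative and not part of this application.
def example_task(n_items: int) -> None:
    for i in range(n_items):
        # ... one unit of work would go here ...
        _set_task_progress(100 * (i + 1) // n_items)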
|
160921
|
import track_utils as utils
import pickle
import re
import os
import scipy.signal as signal
import multiprocessing
import time
import numpy as np
import copy
import ipdb
import collections
import glob
global global_tracks
def smooth_padding(scores, smooth_window, smoothing):
# define the window size in which we pool both start/end pad values
pad_window = max(min(smooth_window, 10), 1)
# get start and end pad values
sz = min(pad_window, scores.shape[0])
pad_start = np.median(scores[:sz])
pad_end = np.median(scores[-sz:])
# pad original scores
scores_padded = np.concatenate((np.repeat(pad_start, smooth_window),
scores, np.repeat(pad_end, smooth_window)))
# apply smoothing and truncate to the original size
    if smoothing == 'median':
        smoothed = signal.medfilt(scores_padded, smooth_window)
    elif smoothing == 'average':
        smoothed = np.convolve(
            scores_padded,
            np.ones((smooth_window, )) / smooth_window,
            mode='same')
    else:
        raise ValueError('unknown smoothing: %s' % smoothing)
    return smoothed[smooth_window:-smooth_window]
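def _demo_smooth_padding():
    # Hedged sketch: median smoothing keeps the original length and
    # damps isolated spikes (all values here are made up).
    scores = np.array([0., 0., 5., 0., 0.])
    out = smooth_padding(scores, 3, 'median')
    assert out.shape[0] == scores.shape[0]
    assert out[2] == 0.  # the lone spike is filtered out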
def _process_scores(args):
tracks, smooth_window, smoothing, tscore_th = args
del_idx = []
for idx, track in enumerate(tracks):
if not tscore_th is None:
if track['det_track_score'] < tscore_th:
del_idx.append(idx) # rm track with score below th
continue
if smooth_window > 1:
track['scores'] = smooth_padding(track['scores'], smooth_window,
smoothing)
for idx in del_idx[::-1]:
del tracks[idx]
return tracks
def track_scoring(scores, N=40):
# compute on top N scores
scores = np.sort(scores)[::-1] # sort by descending order
return scores[:N].mean()
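def _demo_track_scoring():
    # Hedged sketch: the track score is the mean of the top-N frame scores.
    s = np.arange(100, dtype=float)
    assert track_scoring(s, N=10) == np.arange(90, 100).mean()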
def tpfp(iou_matrix, th, ignoreGT=None):
nDet, nGT = iou_matrix.shape
tp = np.zeros(nDet, dtype=bool)
fp = np.zeros(nDet, dtype=bool)
if ignoreGT is None:
ignoreGT = np.zeros(nGT, dtype=bool)
else:
assert len(ignoreGT) == nGT
best_gt = iou_matrix.argmax(1)
best_iou = iou_matrix[range(len(best_gt)), best_gt]
gt_covered = np.zeros(nGT, dtype=bool)
for i in range(nDet):
if best_iou[i] >= th:
i_best = best_gt[i]
if not ignoreGT[i_best]:
if gt_covered[i_best]:
fp[i] = 1 # duplicate
else:
tp[i] = 1
gt_covered[i_best] = 1
else:
fp[i] = 1
return tp, fp
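def _demo_tpfp():
    # Hedged sketch: two detections against one GT at IoU threshold 0.5.
    # The first match is a TP; the second match is a duplicate, hence FP.
    iou = np.array([[0.8], [0.6]])
    tp, fp = tpfp(iou, 0.5)
    assert tp.tolist() == [True, False]
    assert fp.tolist() == [False, True]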
def ap(rec, prec):
    # From Online Real-time Multiple Spatiotemporal Action Localisation and Prediction, <NAME> et al. (ICCV 2017)
# and Deep Learning for Detecting Multiple Space-Time Action Tubes in Videos, S.Saha et al. (BMVC16)
# following the PASCAL VOC 2011 devkit
if rec.shape[0] == 0 or prec.shape[0] == 0:
return 0
if prec[0] == 0:
assert rec[0] == 0
else:
assert prec[0] == 1
if rec[0] > 0: assert prec[0] == 1
else: assert prec[0] == 0
# compute the precision envelope
# interpolate precision: given a recall r
# retain the highest precision at recall >= r
prec = np.maximum.accumulate(prec[::-1])[::-1]
# insert dummy 0 for first delta
rec = np.insert(rec, 0, 0)
prec = np.insert(prec, 0, 0)
# to calculate area under PR curve, look for cutoff points
# where X axis (recall) changes value
cut = rec[1:] != rec[:-1]
# and sum (\Delta recall) * prec
ap = ((rec[1:][cut] - rec[:-1][cut]) * prec[1:][cut]).sum()
return ap
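def _demo_ap():
    # Hedged sketch: a perfect ranking of two positives yields AP = 1.
    rec = np.array([0.5, 1.0])
    prec = np.array([1.0, 1.0])
    assert abs(ap(rec, prec) - 1.0) < 1e-9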
def _temporal_localization(args):
tracks, loc_th, min_length = args
new_tracks = []
optional_fields = [
'subname'
] # copy these fields to the subtracks if they exist in the track
for track in tracks:
# init
scores = track['scores']
T = track['N_frames']
start_frame = track['tbound'][0] # frame count starts at 1
v = track['videoname']
assert scores.shape[0] == T
track_used = np.zeros(track['N_frames'], dtype=bool)
positions = np.array(range(track['N_frames']), dtype=long)
while ((~track_used).any()):
max_pos = scores[~track_used].argmax(
) # get max score on remaining frames
max_pos = positions[~track_used][
max_pos] # get the position on the whole track
max_value = scores[max_pos]
if max_value >= loc_th:
_start, _end = max_pos, max_pos
while _end < T - 1 and scores[_end + 1] >= loc_th:
_end += 1
while _start > 0 and scores[_start - 1] >= loc_th:
_start -= 1
else:
break
track_used[_start:_end + 1] = True
tlen = _end - _start + 1
if tlen >= min_length:
new_t = {
'videoname': v,
'N_frames': tlen,
'tbound': (_start + start_frame, _end + start_frame)
}
for ff in ['boxes', 'scores']:
new_t[ff] = track[ff][_start:_end + 1]
for ff in optional_fields:
if ff in track:
new_t[ff] = track[ff]
new_t['track_score'] = track_scoring(new_t['scores'])
new_tracks.append(new_t)
return new_tracks
def _nms(args):
tracks, nms = args
iou_matrix, _, _ = utils.st_overlap_tracksets(tracks)
t_scores = [x['track_score'] for x in tracks]
idx = utils.nms(t_scores, iou_matrix, nms)
return [tracks[t] for t in idx]
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
nclasses = x.shape[1]
_max = np.repeat(np.max(x, 1)[:, None], nclasses,
1) # for numrical stability
e_x = np.exp(x - _max)
_sum = np.repeat(e_x.sum(1)[:, None], nclasses, 1)
return np.divide(e_x, _sum)
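def _demo_softmax():
    # Hedged sketch: rows sum to one even for large inputs, thanks to
    # the max subtraction above.
    x = np.array([[1000., 1001.], [0., 1.]])
    s = softmax(x)
    assert np.allclose(s.sum(1), 1.0)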
def _loadvideotracks(args):
v, trackpath, nclasses, regress, linear_regressor, track_class_agnostic, scale, normalization_fn = args
if not linear_regressor is None:
assert not regress
proc = multiprocessing.current_process().name
if proc == "PoolWorker-1":
print v, proc
vidpath = '%s/%s' % (trackpath[0], v)
numExp = len(trackpath)
orignal_file_path = vidpath + '.pkl'
if os.path.exists(orignal_file_path):
# the original track path has been detected (tracks are not scored) useful for recall computation
from_original_file = True
with open(orignal_file_path) as f:
original_file = pickle.load(f)
ntracks = len(original_file['tracks'])
assert numExp == 1
else:
from_original_file = False
if os.path.exists(vidpath):
tracklist = os.listdir(vidpath)
ntracks = len(tracklist)
else:
ntracks = 0
if ntracks == 0:
        print 'Warning: no input track found in %s' % vidpath
loadedtracks = [{v: []} for i in range(nclasses)]
for t in range(ntracks):
for exp in range(numExp):
if from_original_file:
tfile = original_file['tracks'][t]
tfile['scores'] = np.zeros([tfile['N_frames'],
nclasses + 1]) # make dummy scores
else:
vidpath = '%s/%s' % (trackpath[exp], v)
tpath = '%s/track%05d.pkl' % (vidpath, t + 1)
with open(tpath) as f:
tfile = pickle.load(f)
nframes = tfile['tbound'][1] - tfile['tbound'][0] + 1
assert nframes == tfile['boxes'].shape[0] and nframes == tfile[
'N_frames']
if 'box_label' in tfile:
tlabel = tfile['box_label']
else:
assert track_class_agnostic, 'if there is no label for the track we must be in class agnostic mode'
tlabel = -1
if not track_class_agnostic:
assert tlabel > 0, 'if we do not consider all tracks (not class agnostic) then tracks have to be labeled'
allscore = tfile['scores']
if not (type(tfile['scores']) == np.ndarray):
allscore = allscore.numpy()
if normalization_fn is not None:
if normalization_fn == 'softmax':
allscore = softmax(allscore)
else:
raise ValueError('unknown normalization function')
for c in range(nclasses):
if (not track_class_agnostic and tlabel != c + 1):
continue
cscore = allscore[:, c] / numExp
if cscore.shape[0] != nframes:
# there is one score per feature, we have to duplicate
f2c = tfile['frame2chunk']
nChunks = cscore.shape[0]
assert nChunks == (f2c[-1] - f2c[0] +
1), 'there is not one pred per chunk!'
c0 = f2c[0]
_dupl = [(i_c + c0 == f2c).sum() for i_c in range(nChunks)]
cscore = np.repeat(cscore, _dupl)
assert cscore.shape[0] == nframes
if exp == 0:
track = {}
fields = ['N_frames', 'tbound']
if regress:
track['boxes'] = tfile['reg_boxes'][c]
elif not linear_regressor is None:
track['boxes'] = utils.apply_lin_regressor(
tfile['boxes'], linear_regressor[c],
tfile['WH_size'])
else:
fields.append('boxes')
if 'track_score' in tfile:
track['det_track_score'] = tfile['track_score']
for ff in fields:
track[ff] = tfile[ff]
track['videoname'] = v
track['scores'] = cscore
loadedtracks[c][v].append(track)
cmpboxes = tfile['boxes']
if scale > 0:
# scale boxes if one scale has been passed
boxes = np.array(track['boxes']) # clone it
utils.scaleboxes_(boxes, scale)
if 'WH_size' in tfile:
WHmax = tfile['WH_size']
else:
WHmax = (320, 240)
boxes = utils.clipboxes(boxes, WHmax)
track['boxes'] = boxes
else:
track = loadedtracks[c][v][-1]
track['scores'] += cscore
assert (cmpboxes - tfile['boxes']).sum(
) == 0, 'Tracks to combine do not have same boxes'
return loadedtracks
def append_tpfp(tp,
fp,
scores,
tracks,
gttracks,
i_det,
iou_th,
on_keyframes,
overlap_only=False,
eval_recall=False):
if eval_recall:
assert overlap_only
N = len(tracks)
if overlap_only:
t_scores = [1 for x in tracks]
else:
t_scores = [x['track_score'] for x in tracks]
if gttracks:
if on_keyframes:
ignoreGT = np.zeros(len(gttracks), dtype=bool)
for ig, cgt in enumerate(gttracks):
if cgt['isAmbiguous']:
ignoreGT[ig] = True
if eval_recall:
sub_gttracks = [] # skip ignored
for i_gt, skip in enumerate(ignoreGT):
if not skip:
sub_gttracks.append(gttracks[i_gt])
iou_matrix, _, _, per_gtframe_sp = utils.st_recall_trackset_keyframes(
tracks, sub_gttracks)
else:
iou_matrix, _, _ = utils.st_overlap_trackset_keyframes(
tracks, gttracks)
else:
ignoreGT = None
if eval_recall:
iou_matrix, _, _, per_gtframe_sp = utils.st_recall_tracksets(
tracks, gttracks)
else:
iou_matrix, _, _ = utils.st_overlap_tracksets(tracks, gttracks)
if eval_recall:
n_annot = per_gtframe_sp.shape[1]
if N == 0:
return 0, 0, n_annot
max_iou_per_gt = iou_matrix.max(0)
max_iou_per_annot = per_gtframe_sp.max(0)
recalled_gt = (max_iou_per_gt >= iou_th).sum()
recalled_annot = (max_iou_per_annot >= iou_th).sum()
return recalled_gt, recalled_annot, n_annot
if overlap_only:
return 0, iou_matrix
t_tp, t_fp = tpfp(iou_matrix, iou_th, ignoreGT)
else: # all false if no GT of this class
if eval_recall:
return 0, 0, 0
assert not overlap_only
t_tp, t_fp = False, True
tp[i_det:i_det + N] = t_tp
fp[i_det:i_det + N] = t_fp
scores[i_det:i_det + N] = t_scores
return i_det + N
def get_class_gt(gts, c):
# get GT for the current class
gttracks = []
for g in gts['gts']:
if g['label'] == c + 1:
gttracks.append(g)
return gttracks
def get_ap(tp, fp, scores, i_det, num_positives):
# truncate
tp = tp[:i_det]
fp = fp[:i_det]
scores = scores[:i_det]
# sort according to score
sidx = scores.argsort()[::-1]
tp = tp[sidx]
fp = fp[sidx]
scores = scores[sidx]
# AP
tp = tp.cumsum(dtype=float)
fp = fp.cumsum()
if len(fp) > 0 and fp[0] == 0 and tp[0] == 0:
# at least the first detection was assigned to a ignored GT
_ze = np.logical_and(fp == 0, tp == 0)
_max_ze = len(_ze) - _ze[::-1].argmax() - 1
assert _ze[:_max_ze + 1].sum() == _max_ze + 1
# ignore these firsts elements
tp = tp[_max_ze + 1:]
fp = fp[_max_ze + 1:]
scores = scores[_max_ze + 1:]
rec = tp / num_positives
if num_positives == 0:
print 'WARNING THERE ARE NO POSITIVES IN GT!'
rec[:] = 0
prec = tp / (fp + tp)
c_ap = ap(rec, prec)
# recall
recv = 0 if rec.shape[0] == 0 else rec[-1]
issame = scores.shape[0] > 1 and scores[0] == scores[
1] # check if (first) scores are exactly equal
return c_ap, recv, issame
def get_num_positives(gttracks):
ngts = len(gttracks)
for cgt in gttracks:
if 'isAmbiguous' in cgt and cgt['isAmbiguous']:
ngts -= 1
return ngts
def _sub_st_loc_map(args):
testlist, iou_th, i_th, nclasses, gt, on_keyframes, eval_recall = args
testtracks = global_tracks[i_th]
_MAXDET = int(1e3 * len(testlist))
log_str = ''
proc = multiprocessing.current_process().name
log_str += '\n\nST-loc @IoU=%.02f' % (iou_th)
if eval_recall:
final_str = '\nMEAN:\n ST-RECALL = %.3f / ANNOT-RECALL = %.3f'
else:
final_str = '\nmAP = %.3f / rec = %.3f'
aps = np.zeros(nclasses)
recs = np.zeros(nclasses)
for c in range(nclasses):
nodetstr = ''
num_positives = 0
num_annotations = 0
ctracks = testtracks[c]
tp = np.zeros(_MAXDET, dtype=bool)
fp = np.zeros(_MAXDET, dtype=bool)
scores = np.zeros(_MAXDET, dtype=float)
i_det = 0 # next idx to fill
for v in testlist:
gts = gt[v]
assert len(gts['gts']
) > 0, 'This was supposed to be handled in loadtracks'
gttracks = get_class_gt(gts, c)
ngts = get_num_positives(gttracks)
num_positives += ngts
if not v in ctracks:
if ngts > 0:
nodetstr += '%s (%d GTs) ' % (v, ngts)
continue
tracks = testtracks[c][v]
ntracks = len(tracks)
prev_i_det = i_det
if eval_recall:
recalled_gt, recalled_annot, n_annot = append_tpfp(
tp,
fp,
scores,
tracks,
gttracks,
i_det,
iou_th,
on_keyframes,
overlap_only=True,
eval_recall=eval_recall)
num_annotations += n_annot
aps[c] += recalled_gt
recs[c] += recalled_annot
continue
i_det = append_tpfp(tp, fp, scores, tracks, gttracks, i_det,
iou_th, on_keyframes)
assert num_positives > 0, 'No GT for class %d' % (c + 1)
if eval_recall:
aps[c] = aps[c] / num_positives # ST recall
recs[c] = recs[c] / num_annotations # annot recall
log_str += '\nClass %d ST-recall = %.3f - Annot-recall = %.3f' % (
c + 1, aps[c], recs[c])
continue
c_ap, rec, issame = get_ap(tp, fp, scores, i_det, num_positives)
aps[c] = c_ap
recs[c] = rec
# disp info
if nodetstr != '':
log_str += '\nNo detection for %s' % (nodetstr)
if issame:
log_str += '\nWARNING SOME SCORES ARE EXACTLY EQUAL, ORDER MIGHT MATTER!'
# compute nb of FP in the first 50% of TP
s_tp = tp[:i_det]
        tp_idx = np.arange(i_det)
tp_idx = tp_idx[s_tp]
tpnum = len(tp_idx)
if tpnum >= 50:
fp_at_50 = fp[:tp_idx[49]].sum() # FP in first TOP50
else:
fp_at_50 = fp.sum()
log_str += '\nClass %d AP = %.3f (max recall = %.3f) - P/TP/FP/FP@50 %d/%d/%d/%d' % (
c + 1, c_ap, rec, num_positives, tpnum, fp.sum(), fp_at_50)
log_str += final_str % (aps.mean(), recs.mean())
return iou_th, i_th, aps, log_str
class Evaluation():
def __init__(self,
datasetname,
exppath,
testlistpath,
iou,
loc_th=0.1,
smooth_window=-1,
nms=0.2,
nthreads=5,
min_length=22,
regress=False,
smoothing='median',
track_class_agnostic=False,
force_no_regressor=False,
tscore_th=None,
eval_recall=False,
from_original_track_files=None,
scale=-1,
normalization_fn=None,
one_th_per_iou=False,
cachedir='.'):
# params specific to datasets
if datasetname == 'UCF101':
self.nclasses = 24
self.gtpath = '/sequoia/data2/gcheron/UCF101/detection/gtfile.py'
self.on_keyframes = False
self.linear_regressor = None
elif datasetname == 'DALY':
self.nclasses = 10
self.gtpath = '/sequoia/data2/gcheron/DALY/gtfile.pkl'
self.on_keyframes = True
if force_no_regressor:
self.linear_regressor = None
else:
with open('/sequoia/data2/gcheron/DALY/reg_matrices.pkl',
'r') as f:
self.linear_regressor = np.load(f)
else:
raise ValueError('Unknown dataset %s', (datasetname))
self.tscore_th = tscore_th # threshold for a track to be selected
self.track_class_agnostic = track_class_agnostic # for each class, consider tracks from all detection classes
assert not (regress and (not self.linear_regressor is None))
# if eval_recall:
# if on_keyframes, get the % of keyframes recovered by the input tracks @ at spatial th: iou_th
# otherwise, get the % of spatio-temporal GT intervals recovered by the input tracks @ at S-T th: iou_th
self.eval_recall = eval_recall
self.scale = scale # scale boxes by this factor if > 0
self.normalization_fn = normalization_fn
# data paths
self.datasetname = datasetname
self.from_original_track_files = from_original_track_files
if self.from_original_track_files is not None:
exppath = self.from_original_track_files
if type(exppath) != list:
self.trackpath = [exppath]
else:
self.trackpath = exppath
self.cachedir = cachedir
self.cachepath = self.cachedir + '/evaluation_cache/'
for i in range(len(self.trackpath)):
expname = re.sub('(.*[^/])(.*)', r'\1',
self.trackpath[i]) # remove eventual last /
expname = re.sub('.*/', '', expname)
self.cachepath += '__++' + expname
if from_original_track_files is None:
self.trackpath[i] += '/tracks'
self.testlistpath = testlistpath
# track post-processing params
self.loc_th = loc_th # array of localization th (D x N), N number of sets
if type(self.loc_th) == np.ndarray:
assert self.loc_th.ndim == 2
if self.loc_th.shape[0] == 1:
self.loc_th = self.loc_th.repeat(self.nclasses, 0)
else:
assert self.loc_th.shape[0] == self.nclasses
else: # this is just a scalar
self.loc_th = np.zeros((self.nclasses, 1))
self.loc_th[:] = loc_th
self.smooth_window = smooth_window
self.smoothing = smoothing
self.nms = nms
self.min_length = min_length
# eval params
self.iou = iou # list of different iou of evaluation
if type(self.iou) != list:
self.iou = [self.iou]
self.ap = np.zeros(
(len(self.iou), self.loc_th.shape[1], self.nclasses)) - 1
self.one_th_per_iou = one_th_per_iou # evaluate only on th list per iou (loc_th[i] ---> iou[i])
if self.one_th_per_iou:
assert self.loc_th.shape[1] == len(self.iou)
# other
self.nthreads = nthreads
self.regress = regress # if there are several track paths (exps) the regression from the first one is considered
with open(self.gtpath) as f:
self.gt = pickle.load(f)
#exec("for v in self.gt:\n\tfor t in self.gt[v]['gts']:\n\t\tt['boxes']-=1") # ONE LINE DEBUG :)
if self.from_original_track_files is not None:
assert self.eval_recall
if self.eval_recall:
# do not need it
self.loc_th = self.loc_th[:, 0, None] # keep only one
self.loc_th[:] = -1
# check params
if isinstance(loc_th, list):
            assert len(loc_th) == self.nclasses
def eval(self):
c_time = time.time()
self.loadtracks()
print 'Loading time %d s' % (time.time() - c_time)
c_time = time.time()
if self.eval_recall:
self.testtracks = []
self.testtracks.append(
self.loadedtracks) # mimic testtracks at only on det th
else:
self.post_process_tracks()
print 'Post proc time %d s' % (time.time() - c_time)
c_time = time.time()
calibration = self.st_loc_map()
print 'mAP time %d s' % (time.time() - c_time)
print self.trackpath
return calibration
def get_test_list(self):
# get test list
vlist = []
self.testlist = []
with open(self.testlistpath) as f:
vlist = f.readlines()
vlist = [re.sub(' .*', '', x.strip()) for x in vlist]
for v in vlist:
if self.gt[v]['N_gts'] < 1:
print 'Discard %s (no GT available)' % v
continue
self.testlist.append(v)
def loadtracks(self):
self.load_cache = self.cachepath + '/tracks_%s_N-1_reg%d' % (re.sub(
'.*/', '', self.testlistpath), self.regress)
if not self.linear_regressor is None:
self.load_cache += '_linreg'
if self.track_class_agnostic:
self.load_cache += '_trackCAgno'
if self.scale > 0:
self.load_cache += '_sc%.3f' % self.scale
if self.normalization_fn is not None:
self.load_cache += '_' + self.normalization_fn
this_cache = self.load_cache + '/loaded.pkl'
cache_fields = ['testlist', 'loadedtracks']
if os.path.exists(this_cache):
# load cache
print 'loading file: %s' % this_cache
with open(this_cache, 'rb') as f:
cachefile = pickle.load(f)
for ff in cache_fields:
setattr(self, ff, cachefile[ff])
t_total = cachefile['t_total']
else:
self.get_test_list() # get test list
# multithreading: load track files
tlist = self.testlist
#tlist=tlist[0:15] # DEBUG
#for v in tlist:
# _loadvideotracks((v, self.trackpath, self.nclasses, self.regress, self.linear_regressor,
# self.track_class_agnostic, self.scale, self.normalization_fn))
res = self.run_multiprocessing(
_loadvideotracks,
[(v, self.trackpath, self.nclasses, self.regress,
self.linear_regressor, self.track_class_agnostic, self.scale,
self.normalization_fn) for v in tlist])
# reorder tracks
t_total = 0
d_total = 0
self.loadedtracks = [{} for i in range(self.nclasses)]
for vid in res:
for c in range(self.nclasses):
v = vid[c].keys()
assert len(v) == 1
v = v[0]
assert not v in self.loadedtracks[c]
self.loadedtracks[c][v] = vid[c][v]
t_total += len(vid[c][v])
# save cache
if not os.path.exists(self.load_cache):
os.makedirs(self.load_cache)
cachefile = {}
for ff in cache_fields:
cachefile[ff] = getattr(self, ff)
cachefile['t_total'] = t_total
with open(this_cache, 'w') as f:
pickle.dump(cachefile, f)
d_total = self.getNumDets(self.loadedtracks)
print '%d tracks have been loaded (%d detections)' % (t_total, d_total)
def getNumDets(self, loadedtracks):
d = 0
for ctracks in loadedtracks:
for vtracks in ctracks:
for track in ctracks[vtracks]:
d += track['boxes'].shape[0]
return d
def post_process_tracks(self):
self.testtracks = []
self.pproc_cache = []
for i_th in range(self.loc_th.shape[1]):
self.sub_post_process_tracks(i_th)
def get_sw(self, loc_ths):
mw = np.array(loc_ths)
mw[:] = self.smooth_window
mw[loc_ths <= 0] = -1 # no smoothing
return mw
def sub_post_process_tracks(self, i_th):
assert len(self.testtracks) == i_th
loc_ths = self.loc_th[:, i_th]
assert len(loc_ths) == self.nclasses
mw = self.get_sw(loc_ths)
# per-class cache
this_cache = []
str_cache = self.load_cache + '/pproc_nms%.4f' % (self.nms)
if self.smoothing == 'median':
smstr = 'med'
else:
smstr = self.smoothing
if not self.tscore_th is None:
str_cache += '_tsth%.3f' % self.tscore_th
for c, lt in enumerate(loc_ths):
cstr = '_Class%d_lt%.4f_%s%d' % (c, lt, smstr, mw[c])
if lt > 0:
ml = self.min_length
else:
ml = -1
cstr = str_cache + cstr + '_mlen%d' % ml
this_cache.append(cstr)
self.pproc_cache.append(this_cache)
pproc_cache = [] # already post-proc tracks
class_tracks = [] # tracks to post-proc
self.testtracks.append([]) # final tracks
some_missing = False
for c in range(self.nclasses):
self.testtracks[i_th].append({})
pproc_cache.append([])
c_cache = this_cache[c]
if os.path.exists(c_cache):
if i_th > 0 and c_cache == self.pproc_cache[0][c]:
# if the cache path is equal to the first one (not dependent of i_th)
pproc_cache[c] = self.testtracks[0][
c] # take the same videos instead of loading again
else:
with open(c_cache, 'rb') as f:
print 'loading %s' % (c_cache)
pproc_cache[c] = pickle.load(f)
ctracks = [] # no need to post-proc
else:
ctracks = self.loadedtracks[c]
some_missing = True
class_tracks.append(ctracks)
if some_missing:
class_tracks = self.run_multiprocessing(
_process_scores,
[(ctracks[v], int(mw[c]), self.smoothing, self.tscore_th, c)
for c, ctracks in enumerate(class_tracks)
for v in ctracks], True, True)
class_tracks = self.run_multiprocessing(
_temporal_localization,
[(v, loc_ths[c], self.min_length, c)
for c, ctracks in enumerate(class_tracks)
for v in ctracks], True, True)
class_tracks = self.run_multiprocessing(
_nms, [(v, self.nms, c)
for c, ctracks in enumerate(class_tracks)
for v in ctracks], True) # no track1by1 for NMS
for c, ctracks in enumerate(class_tracks):
for vid in ctracks:
if len(vid) > 0:
v = vid[0]['videoname']
assert not v in self.testtracks[i_th][c]
self.testtracks[i_th][c][v] = vid
# save cache
c_cache = this_cache[c]
if not os.path.exists(c_cache):
with open(c_cache, 'w') as f:
pickle.dump(self.testtracks[i_th][c], f)
# merge eventual cache
for c, ctracks in enumerate(pproc_cache):
if len(ctracks) > 0:
assert len(self.testtracks[i_th][c]) == 0
self.testtracks[i_th][c] = ctracks
def run_multiprocessing(self,
_fun,
arglist,
class_format=False,
track1by1=False):
pool = multiprocessing.Pool(self.nthreads)
if class_format: # last argument is the class
classes = [x[-1] for x in arglist]
arglist = [x[:-1] for x in arglist]
_la = len(arglist)
if track1by1:
# split all video tracks (create videos with only 1 track)
_tmp = []
vididx = [
            ]  # assign an idx to a video (same for each of its tracks)
v_count = -1
for V in arglist:
v = V[0]
assert type(
v
) == list, 'with track1by1, first arg is supposed to contain the tracks!'
v_count += 1
if len(v) == 0: # add a dummy track for video with no tracks
_tmp.append(
tuple([[]] + list(V[1:]))) # copy video parameters
vididx.append(v_count)
else:
for t in range(len(v)): # for all video tracks
_tmp.append(
tuple([[v[t]]] +
list(V[1:]))) # copy video parameters
vididx.append(v_count)
arglist = _tmp
res = pool.map(_fun, arglist)
pool.terminate()
pool.join()
if track1by1: # reshape
_tmp = []
assert type(
res[0]
) == list, 'with track1by1, result is supposed to be a list of tracks'
for i, tracks in enumerate(res):
idx = vididx[i]
if len(_tmp) <= idx:
_tmp.append([])
for t in tracks: # one track could generate several outputs
_tmp[idx].append(t) # merge them to the video outputs
res = _tmp
_lr = len(res)
assert _la == _lr, 'number of results (%d) is different from number of inputs (%d)' % (
_lr, _la)
if class_format: # reshape
_tmp = [[] for i in range(self.nclasses)]
for i, c in enumerate(classes):
_tmp[c].append(res[i])
res = _tmp
classes = np.array(classes)
for i in range(self.nclasses):
assert len(res[i]) == (classes == i).sum()
return res
def st_loc_map(self):
global global_tracks
global_tracks = self.testtracks
n_iou = len(self.iou)
n_th = self.loc_th.shape[1]
all_aps = self.run_multiprocessing(
_sub_st_loc_map,
[(self.testlist, self.iou[i_iou], i_th, self.nclasses, self.gt,
self.on_keyframes, self.eval_recall) for i_iou in range(n_iou)
for i_th in range(n_th)])
for i_iou in range(n_iou):
for i_th in range(n_th):
_iou, _ith, _aps, _log = all_aps[i_iou * n_th + i_th]
assert (self.ap[i_iou, i_th, :] != -1).sum() == 0
assert self.iou[i_iou] == _iou and i_th == _ith
self.ap[i_iou, i_th, :] = _aps
print _log
if self.eval_recall:
print '\nIoU / ST-RECALL'
iou_str = ''
strec_str = ''
for i in range(n_iou):
iou_str += '%.3f ' % self.iou[i]
strec_str += '%.3f ' % self.ap[i, 0, :].mean()
print iou_str
print strec_str + '\n'
return
# print summary
allth = not self.one_th_per_iou
print '\n\n=============================='
print 'Thresholds:'
_str = ''
for i_th in range(n_th):
_str += '\nSet %d:' % (i_th)
for c in range(self.nclasses):
if c % 5 == 0:
_str += '\n'
_str += '%d: %.1E - ' % (c, self.loc_th[c, i_th])
print _str
for i_iou in range(n_iou):
th_means = self.ap[i_iou].mean(1)
best_i_th = th_means.argmax()
loc_th = self.loc_th[0, best_i_th]
print '\nST-loc @IoU=%.02f --> Best mAP = %.3f [loc_th = %.3f (%d)] ' % (
self.iou[i_iou], th_means[best_i_th], loc_th, best_i_th)
# print ap per class
for c in range(self.nclasses):
_apstr = ''
if allth:
# get ap from all TH
for i_th in range(n_th):
_apstr += '%.3f ' % (self.ap[i_iou, i_th, c])
else:
# get ap from the corresponding TH
assert n_iou == n_th
_apstr += '%.3f ' % (self.ap[i_iou, i_iou, c])
print _apstr
# print mAP
_mapstr = ''
if allth:
# get map from all TH
for i_th in range(n_th):
_mapstr += '%.3f ' % (self.ap[i_iou, i_th, :].mean())
else:
# get map from the corresponding TH
_mapstr += '%.3f ' % (self.ap[i_iou, i_iou, :].mean())
print '|\n|___> mAP = %s' % (_mapstr)
if n_th == 1:
print '\nIoU / mAP'
iou_str = ''
map_str = ''
for i in range(n_iou):
iou_str += '%.3f ' % self.iou[i]
map_str += '%.3f ' % self.ap[i, 0, :].mean()
print iou_str
print map_str + '\n'
if allth:
return self.run_validation()
def track_postproc(self, multiclass_tracks, do_nms=True):
assert self.loc_th.shape == (self.nclasses,
1), 'only one loc th set must be defined'
class_tracks = [] # tracks to post-proc
mw = self.get_sw(self.loc_th)
# multiclass_tracks: vid x tracks x (scores)classes (each tracks has info for all classes)
class_vid_tracks = [{} for c in range(self.nclasses)]
for vtracks in multiclass_tracks:
for c, ctracks in vtracks.iteritems():
class_vtracks = []
if len(ctracks) > 0:
for track in ctracks:
vid = track['videoname']
if not vid in class_vid_tracks[c]:
class_vid_tracks[c][vid] = []
class_vtracks.append(track)
# add all tracks of vid for class c
class_tracks.append((class_vtracks, int(mw[c]),
self.smoothing, self.tscore_th, c))
class_tracks = self.run_multiprocessing(_process_scores, class_tracks,
True, True)
class_tracks = self.run_multiprocessing(
_temporal_localization, [(v, self.loc_th[c], self.min_length, c)
for c, ctracks in enumerate(class_tracks)
for v in ctracks], True, True)
if do_nms:
class_tracks = self.run_multiprocessing(
_nms, [(v, self.nms, c)
for c, ctracks in enumerate(class_tracks)
for v in ctracks], True) # no track1by1 for NMS
# fill 'class_vid_tracks' correspondances
for c, ctracks in enumerate(class_tracks):
for i, vid in enumerate(ctracks):
if len(vid) > 0:
v = vid[0]['videoname']
if v in class_vid_tracks[c] and 'subname' in vid[0]:
# if subname field is there we append subtracks from all tracks of this video
class_vid_tracks[c][v] += vid
else:
# otherwise, only one set of subtracks is expected per video
assert not class_vid_tracks[c][v]
class_vid_tracks[c][
v] = vid # note the videos with no track anymore are still evaluated
return class_vid_tracks
def add_tracks2eval(self,
class_vid_tracks,
i_dets,
class_npos,
tpfpscores,
on_keyframes,
overlap_only=False):
iou = self.iou
if overlap_only:
ov = [{} for c in range(self.nclasses)]
iou = [self.iou[0]] # use only one dummy IoU
for c in range(self.nclasses):
ctracks = class_vid_tracks[c]
for v in ctracks:
i_det = i_dets[c]
gts = self.gt[v]
if len(gts['gts']) == 0:
if c == 0: print 'no GT: discard %s' % (v)
assert not overlap_only
continue
gttracks = get_class_gt(gts, c)
ngts = get_num_positives(gttracks)
tracks = ctracks[v]
if not overlap_only:
class_npos[c] += ngts
elif ngts == 0 or not tracks:
ov[c][v] = np.zeros((len(tracks), ngts), dtype=float)
continue
if not tracks:
#if ngts > 0:
# print '%s (%d GTs) ' % (v,ngts)
continue
for i in range(len(iou)):
if overlap_only:
tp, fp, scores = None, None, None
else:
tp, fp, scores = tpfpscores[i][c]
new_i_det, ov_ = append_tpfp(tp, fp, scores, tracks,
gttracks, i_det, iou[i],
on_keyframes, overlap_only)
if overlap_only:
ov[c][v] = ov_
if i == len(iou) - 1:
i_dets[c] = new_i_det
if overlap_only:
return ov
def get_mAP(self, i_dets, class_npos, tpfpscores):
aps = np.zeros((len(self.iou), self.nclasses))
recs = np.zeros((len(self.iou), self.nclasses))
for i in range(len(self.iou)):
for c in range(self.nclasses):
i_det = i_dets[c]
num_positives = class_npos[c]
tp, fp, scores = tpfpscores[i][c]
aps[i][c], recs[i][c], _ = get_ap(tp, fp, scores, i_det,
num_positives)
return aps, recs, self.iou
def run_validation(self):
n_iou = len(self.iou)
calibration = {}
print 'Per-class validation:'
_map_str = ''
for i_iou in range(n_iou):
_str = ''
aps, best_th = self.validate_perclass_th(i_iou)
calibration[self.iou[i_iou]] = best_th
_ap = aps.mean()
_str += 'ST-loc @IoU=%.02f -- %.3f mAP\nTH:' % (self.iou[i_iou],
_ap)
_map_str += '%.3f ' % _ap
for th in best_th:
_str += ' %.3f' % th
print _str
print _map_str
return calibration
def validate_perclass_th(self, i_iou):
n_th = self.loc_th.shape[1]
aps = self.ap[i_iou].max(0)
best_th = self.ap[i_iou].argmax(0)
best_th = self.loc_th[range(self.nclasses), best_th].tolist()
return aps, best_th
|
160925
|
import FWCore.ParameterSet.Config as cms
tdcZeros = cms.VPSet(cms.PSet(
endRun = cms.int32(31031),
tdcZero = cms.double(1050.5),
startRun = cms.int32(27540)
),
cms.PSet(
endRun = cms.int32(999999),
tdcZero = cms.double(1058.5),
startRun = cms.int32(31032)
))
|
160969
|
import json
from unittest import mock
import alteia
from tests.core.resource_test_base import ResourcesTestBase
DEFAULT_MOCK_CONTENT = {'url': 'some url',
'connection': {
'max_retries': 1,
'disable_ssl_certificate': True}}
class TestSDK(ResourcesTestBase):
def test_resource_attributes(self):
self.assertIsInstance(self.sdk.missions,
alteia.apis.client.projectmngt.missionsimpl.MissionsImpl)
attrs = dir(self.sdk)
self.assertIn('annotations', attrs)
self.assertIn('flights', attrs)
self.assertIn('missions', attrs)
self.assertIn('projects', attrs)
self.assertIn('datasets', attrs)
@mock.patch('alteia.core.config.read_file')
def test_missing_url(self, mock_read_file):
mock_read_file.return_value = json.dumps({})
with self.assertRaises(alteia.core.errors.ConfigError):
alteia.SDK()
@mock.patch('alteia.core.config.read_file')
def test_missing_credentials(self, mock_read_file):
mock_read_file.return_value = json.dumps({'url': 'some url'})
with self.assertRaises(alteia.core.errors.ConfigError):
alteia.SDK()
@mock.patch('alteia.core.config.read_file')
def test_wrong_config(self, mock_read_file):
mock_read_file.return_value = '{"content":"content"}'
with self.assertRaises(Exception):
alteia.SDK(user='username', password='password')
@mock.patch('alteia.core.connection.token.TokenManager.renew_token')
def test_renew_token_at_init(self, mock):
alteia.SDK(user='username', password='password')
self.assertTrue(mock.called)
|
161050
|
import os
import sys
sys.path.insert(0, os.path.abspath(
os.path.join(os.path.dirname(__file__), '..')))
import numpy as np
import util
from ebnn.utils import binary_util
if __name__ == '__main__':
parser = util.default_parser('MLP Example')
args = parser.parse_args()
# get the dataset (default is MNIST)
train, test = util.get_dataset(args.dataset)
x = train._datasets[0][0:20]
if 'binary' in args.dataset:
x[x==-1] = 0
x = x.astype(np.uint8)
x_str = binary_util.np_to_packed_uint8C(x, 'train_data', 'row_major')
else:
x_str = binary_util.np_to_floatC(x, 'train_data', 'row_major')
y_str = binary_util.np_to_floatC(train._datasets[1][0:20], 'train_labels', 'row_major')
with open(args.dataset, 'w+') as fp:
fp.write(x_str)
fp.write(y_str)
|
161072
|
import glob
import logging
from . import Reader
LOG = logging.getLogger(__name__)
def load_all(path, recursive=True, scene_type=None, sample=None):
"""Parsed scenes at the given path returned as a generator.
Each scene contains a list of `Row`s where the first pedestrian is the
pedestrian of interest.
The path supports `**` when the `recursive` argument is True (default).
:param scene_type: see Reader
"""
LOG.info('loading dataset from %s', path)
filenames = glob.iglob(path, recursive=recursive)
for filename in filenames:
sample_rate = None
if sample is not None:
for k, v in sample.items():
if k in filename:
sample_rate = v
yield from Reader(filename, scene_type=scene_type).scenes(sample=sample_rate)
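# Hedged usage sketch; the glob pattern and sampling rate are assumptions:
def _demo_load_all():
    # Subsample files whose name contains 'crowds' at rate 0.5.
    for scene in load_all('data/train/**/*.ndjson', sample={'crowds': 0.5}):
        pass  # each scene is a list of Rows, pedestrian of interest first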
|
161090
|
from kafka.producer import KafkaProducer
import sys
import os
sys.path.insert(0, os.path.abspath(os.path.join(os.path.dirname(__file__), os.pardir)))
from lib.commonsplunk import check_events_from_splunk
from lib.commonkafka import *
from lib.helper import *
from datetime import datetime
import threading
import logging.config
import yaml
import subprocess
import logging
import time
logging.config.fileConfig(os.path.join(get_test_folder(), "logging.conf"))
logger = logging.getLogger('connector_upgrade')
_config_path = os.path.join(get_test_folder(), 'config.yaml')
with open(_config_path, 'r') as yaml_file:
    config = yaml.safe_load(yaml_file)
now = datetime.now()
_time_stamp = str(datetime.timestamp(now))
_topic = 'kafka_connect_upgrade'
_connector = 'kafka_connect'
_connector_ack = 'kafka_connect_ack'
def start_old_connector():
cmds = ["test -f {0}/{1} && echo {0}/{1}".format(config["connector_path"], config["old_connector_name"]),
"cd {}".format(config["kafka_home"]),
"sudo {0}/bin/connect-distributed.sh {1}/config/connect-distributed-quickstart.properties &".
format(config["kafka_home"], os.environ.get('GITHUB_WORKSPACE'))]
cmd = "\n".join(cmds)
try:
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
proc.wait()
except OSError as e:
logger.error(e)
def generate_kafka_events(num):
# Generate message data
topics = [_topic]
connector_content = {
"name": _connector,
"config": {
"connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
"tasks.max": "1",
"splunk.indexes": config["splunk_index"],
"topics": _topic,
"splunk.hec.ack.enabled": "false",
"splunk.hec.uri": config["splunk_hec_url"],
"splunk.hec.ssl.validate.certs": "false",
"splunk.hec.token": config["splunk_token"],
"splunk.sources": _connector
}
}
create_kafka_connector(config, connector_content)
connector_content_ack = {
"name": _connector_ack,
"config": {
"connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
"tasks.max": "1",
"splunk.indexes": config["splunk_index"],
"topics": _topic,
"splunk.hec.ack.enabled": "true",
"splunk.hec.uri": config["splunk_hec_url"],
"splunk.hec.ssl.validate.certs": "false",
"splunk.hec.token": config["splunk_token_ack"],
"splunk.sources": _connector_ack
}
}
create_kafka_connector(config, connector_content_ack)
client = KafkaAdminClient(bootstrap_servers=config["kafka_broker_url"], client_id='test')
broker_topics = client.list_topics()
logger.info(broker_topics)
if _topic not in broker_topics:
create_kafka_topics(config, topics)
producer = KafkaProducer(bootstrap_servers=config["kafka_broker_url"],
value_serializer=lambda v: json.dumps(v).encode('utf-8'))
for _ in range(num):
msg = {"timestamp": _time_stamp}
producer.send(_topic, msg)
time.sleep(0.05)
producer.flush()
def upgrade_connector_plugin():
cmds = ["sudo kill $(sudo lsof -t -i:8083) && sleep 2",
"sudo rm {}/{} && sleep 2".format(config["connector_path"], config["old_connector_name"]),
"sudo cp {0}/splunk-kafka-connect*.jar {1} && sleep 2".format(config["connector_build_target"],
config["connector_path"]),
"sudo {0}/bin/connect-distributed.sh {1}/config/connect-distributed-quickstart.properties &".
format(config["kafka_home"], os.environ.get('GITHUB_WORKSPACE'))]
cmd = "\n".join(cmds)
try:
proc = subprocess.Popen(cmd, shell=True, stdout=subprocess.PIPE,
stderr=subprocess.STDOUT)
output, error = proc.communicate()
logger.debug(output)
time.sleep(2)
update_kafka_connectors()
except OSError as e:
logger.error(e)
def update_kafka_connectors():
logger.info("Update kafka connectors ...")
connector_content = {
"name": _connector,
"config": {
"connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
"tasks.max": "1",
"splunk.indexes": config["splunk_index"],
"topics": _topic,
"splunk.hec.ack.enabled": "false",
"splunk.hec.uri": config["splunk_hec_url"],
"splunk.hec.ssl.validate.certs": "false",
"splunk.hec.token": config["splunk_token"],
"splunk.sources": _connector,
"splunk.hec.json.event.formatted": "true",
"splunk.hec.raw": True
}
}
create_kafka_connector(config, connector_content)
connector_content_ack = {
"name": _connector_ack,
"config": {
"connector.class": "com.splunk.kafka.connect.SplunkSinkConnector",
"tasks.max": "1",
"splunk.indexes": config["splunk_index"],
"topics": _topic,
"splunk.hec.ack.enabled": "true",
"splunk.hec.uri": config["splunk_hec_url"],
"splunk.hec.ssl.validate.certs": "false",
"splunk.hec.token": config["splunk_token_ack"],
"splunk.sources": _connector_ack,
"splunk.hec.json.event.formatted": "true",
"splunk.hec.raw": True
}
}
create_kafka_connector(config, connector_content_ack)
if __name__ == '__main__':
logger.info("Start old Kafka connector ...")
thread_old_connect = threading.Thread(target=start_old_connector, daemon=True)
thread_old_connect.start()
time.sleep(10)
logger.info("Generate Kafka events ...")
thread_gen = threading.Thread(target=generate_kafka_events, args=(2000,), daemon=True)
thread_gen.start()
time.sleep(50)
logger.info("Upgrade Kafka connector ...")
thread_upgrade = threading.Thread(target=upgrade_connector_plugin, daemon=True)
thread_upgrade.start()
time.sleep(100)
search_query_1 = "index={0} | search timestamp=\"{1}\" source::{2}".format(config['splunk_index'], _time_stamp,
_connector)
logger.debug(search_query_1)
events_1 = check_events_from_splunk(start_time="-15m@m",
url=config["splunkd_url"],
user=config["splunk_user"],
query=["search {}".format(search_query_1)],
password=config["splunk_password"])
logger.info("Splunk received %s events in the last 15m", len(events_1))
assert len(events_1) == 2000
search_query_2 = "index={0} | search timestamp=\"{1}\" source::{2}".format(config['splunk_index'], _time_stamp,
_connector_ack)
logger.debug(search_query_2)
events_2 = check_events_from_splunk(start_time="-15m@m",
url=config["splunkd_url"],
user=config["splunk_user"],
query=["search {}".format(search_query_2)],
password=config["splunk_password"])
logger.info("Splunk received %s events in the last 15m", len(events_2))
assert len(events_2) == 2000
|
161099
|
import os
from functools import partial
import argparse
from absl import logging
from lib import settings, train, model, utils
from tensorflow.python.eager import profiler
import tensorflow.compat.v2 as tf
tf.enable_v2_behavior()
""" Enhanced Super Resolution GAN.
Citation:
@article{DBLP:journals/corr/abs-1809-00219,
author = {<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME> and
<NAME>},
title = {{ESRGAN:} Enhanced Super-Resolution Generative Adversarial Networks},
journal = {CoRR},
volume = {abs/1809.00219},
year = {2018},
url = {http://arxiv.org/abs/1809.00219},
archivePrefix = {arXiv},
eprint = {1809.00219},
timestamp = {Fri, 05 Oct 2018 11:34:52 +0200},
biburl = {https://dblp.org/rec/bib/journals/corr/abs-1809-00219},
bibsource = {dblp computer science bibliography, https://dblp.org}
}
"""
def main(**kwargs):
""" Main function for training ESRGAN model and exporting it as a SavedModel2.0
Args:
config: path to config yaml file.
log_dir: directory to store summary for tensorboard.
data_dir: directory to store / access the dataset.
manual: boolean to denote if data_dir is a manual directory.
model_dir: directory to store the model into.
"""
for physical_device in tf.config.experimental.list_physical_devices("GPU"):
tf.config.experimental.set_memory_growth(physical_device, True)
strategy = utils.SingleDeviceStrategy()
scope = utils.assign_to_worker(kwargs["tpu"])
sett = settings.Settings(kwargs["config"])
Stats = settings.Stats(os.path.join(sett.path, "stats.yaml"))
tf.random.set_seed(10)
if kwargs["tpu"]:
cluster_resolver = tf.distribute.cluster_resolver.TPUClusterResolver(
kwargs["tpu"])
tf.config.experimental_connect_to_host(cluster_resolver.get_master())
tf.tpu.experimental.initialize_tpu_system(cluster_resolver)
strategy = tf.distribute.experimental.TPUStrategy(cluster_resolver)
with tf.device(scope), strategy.scope():
summary_writer_1 = tf.summary.create_file_writer(
os.path.join(kwargs["log_dir"], "phase1"))
summary_writer_2 = tf.summary.create_file_writer(
os.path.join(kwargs["log_dir"], "phase2"))
# profiler.start_profiler_server(6009)
discriminator = model.VGGArch(batch_size=sett["batch_size"], num_features=64)
if not kwargs["export_only"]:
generator = model.RRDBNet(out_channel=3)
logging.debug("Initiating Convolutions")
generator.unsigned_call(tf.random.normal([1, 128, 128, 3]))
training = train.Trainer(
summary_writer=summary_writer_1,
summary_writer_2=summary_writer_2,
settings=sett,
model_dir=kwargs["model_dir"],
data_dir=kwargs["data_dir"],
manual=kwargs["manual"],
strategy=strategy)
phases = list(map(lambda x: x.strip(),
kwargs["phases"].lower().split("_")))
if not Stats["train_step_1"] and "phase1" in phases:
logging.info("starting phase 1")
training.warmup_generator(generator)
Stats["train_step_1"] = True
if not Stats["train_step_2"] and "phase2" in phases:
logging.info("starting phase 2")
training.train_gan(generator, discriminator)
Stats["train_step_2"] = True
if Stats["train_step_1"] and Stats["train_step_2"]:
# Attempting to save "Interpolated" Model as SavedModel2.0
interpolated_generator = utils.interpolate_generator(
partial(model.RRDBNet, out_channel=3, first_call=False),
discriminator,
sett["interpolation_parameter"],
[720, 1080],
basepath=kwargs["model_dir"])
tf.saved_model.save(
interpolated_generator, os.path.join(
kwargs["model_dir"], "esrgan"))
if __name__ == '__main__':
parser = argparse.ArgumentParser()
parser.add_argument(
"--config",
default="config/config.yaml",
help="path to configuration file. (default: %(default)s)")
parser.add_argument(
"--data_dir",
default=None,
help="directory to put the data. (default: %(default)s)")
parser.add_argument(
"--manual",
default=False,
help="specify if data_dir is a manual directory. (default: %(default)s)",
action="store_true")
parser.add_argument(
"--model_dir",
default=None,
help="directory to put the model in.")
parser.add_argument(
"--log_dir",
default=None,
help="directory to story summaries for tensorboard.")
parser.add_argument(
"--phases",
default="phase1_phase2",
help="phases to train for seperated by '_'")
parser.add_argument(
"--export_only",
default=False,
action="store_true",
help="Exports to SavedModel")
parser.add_argument(
"--tpu",
default="",
help="Name of the TPU to be used")
parser.add_argument(
"-v",
"--verbose",
action="count",
default=0,
help="each 'v' increases vebosity of logging.")
FLAGS, unparsed = parser.parse_known_args()
log_levels = [logging.WARNING, logging.INFO, logging.DEBUG]
log_level = log_levels[min(FLAGS.verbose, len(log_levels) - 1)]
logging.set_verbosity(log_level)
main(**vars(FLAGS))
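# Example invocation (a sketch; the script name and paths here are
# placeholders, not taken from the repository):
#   python main.py --config config/config.yaml --data_dir ./data \
#       --log_dir ./logs --model_dir ./models --phases phase1_phase2 -vv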
|
161105
|
description = 'FRM II FAK40 information (cooling water system)'
group = 'lowlevel'
tango_base = 'tango://ictrlfs.ictrl.frm2:10000/frm2/'
devices = dict(
FAK40_Cap = device('nicos.devices.entangle.AnalogInput',
tangodevice = tango_base +'fak40/CF001',
description = 'The capacity of the cooling water system',
pollinterval = 60,
maxage = 120,
),
FAK40_Press = device('nicos.devices.entangle.AnalogInput',
tangodevice = tango_base +'fak40/CP001',
description = 'The pressure inside the cooling water system',
pollinterval = 60,
maxage = 120,
),
)
|
161132
|
import numpy as np
from scipy.ndimage import imread
from scipy.misc import imsave
from itertools import izip
from collections import deque
from pylibs.spatialfunclib import projection_onto_line
import sqlite3
import math
import sys
import os
import pickle
# globals
min_lat, min_lon, max_lat, max_lon = None, None, None, None
height = None
width = None
xscale = None
yscale = None
def douglas_peucker(segment, epsilon):
dmax = 0
index = 0
for i in range(1, len(segment) - 1):
(_, _, d) = projection_onto_line(segment[0].latitude, segment[0].longitude, segment[-1].latitude, segment[-1].longitude, segment[i].latitude, segment[i].longitude)
if (d > dmax):
index = i
dmax = d
if (dmax >= epsilon):
        rec_results1 = douglas_peucker(segment[0:index + 1], epsilon)
        rec_results2 = douglas_peucker(segment[index:], epsilon)
        # drop the duplicated split point when joining the two halves
        smoothed_segment = rec_results1[:-1]
        smoothed_segment.extend(rec_results2)
else:
smoothed_segment = [segment[0], segment[-1]]
return smoothed_segment
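# A minimal sketch of the simplification above (made-up coordinates; Node is
# defined later in this module): when every interior point lies within epsilon
# of the chord, only the two endpoints survive.
#   pts = [Node((0.0, 0.0), 0), Node((0.5, 0.01), 0), Node((1.0, 0.0), 0)]
#   douglas_peucker(pts, 0.1)  # -> [pts[0], pts[2]]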
def pixels_to_coords((i, j)):
return ((((height - i) / yscale) + min_lat), ((j / xscale) + min_lon))
class Node:
def __init__(self, (latitude, longitude), weight):
self.id = None
self.latitude = latitude
self.longitude = longitude
self.weight = weight
class MainCrossing:
def __init__(self, crossing_stack):
self.component_crossings = []
self.i = 0
self.j = 0
for crossing in crossing_stack:
self.component_crossings.append(crossing)
self.i += crossing[0]
self.j += crossing[1]
self.i /= float(len(crossing_stack))
self.j /= float(len(crossing_stack))
@property
def location(self):
return (self.i, self.j)
class Graph:
def __init__(self):
pass
def extract(self, skeleton, density_estimate, sqlite_filename, output_filename):
skeleton = self.identify_crossing_points(skeleton)
main_crossings, segments = self.find_main_crossings_and_segments(skeleton)
self.create_graph(main_crossings, segments, density_estimate, sqlite_filename, output_filename)
def create_graph(self, main_crossings, segments, density_estimate, sqlite_filename, output_filename):
nodes, new_segments, intersections = self.create_nodes_and_new_segments(main_crossings, segments, density_estimate)
my_nodes = {}
my_edges = {}
my_segments = {}
my_intersections = {}
try:
os.remove(sqlite_filename)
except OSError:
pass
conn = sqlite3.connect(sqlite_filename)
cur = conn.cursor()
cur.execute("CREATE TABLE nodes (id INTEGER, latitude FLOAT, longitude FLOAT, weight FLOAT)")
cur.execute("CREATE TABLE edges (id INTEGER, in_node INTEGER, out_node INTEGER, weight FLOAT)")
cur.execute("CREATE TABLE segments (id INTEGER, edge_ids TEXT)")
cur.execute("CREATE TABLE intersections (node_id INTEGER)")
conn.commit()
node_id = 0
edge_id = 0
segment_id = 0
for segment in new_segments:
segment_weight = 0
if (len(segment) > 2):
for i in range(1, len(segment) - 1):
segment_weight += segment[i].weight
segment_weight /= float(len(segment) - 2)
else:
segment_weight = float(segment[0].weight + segment[1].weight) / 2.0
# remove unnecessary intermediate points with Douglas-Peucker
# smoothed_segment = douglas_peucker(segment, 10)
smoothed_segment = douglas_peucker(segment, 3)
for node in smoothed_segment:
if (node.id is None):
node.id = node_id
my_nodes[node.id]=[node.latitude, node.longitude]
cur.execute("INSERT INTO nodes VALUES (" + str(node.id) + "," + str(node.latitude) + "," + str(node.longitude) + "," + str(node.weight) + ")")
node_id += 1
outbound_segment_edge_ids = []
for i in range(0, len(smoothed_segment) - 1):
my_edges[edge_id] = [smoothed_segment[i].id, smoothed_segment[i + 1].id]
cur.execute("INSERT INTO edges VALUES (" + str(edge_id) + "," + str(smoothed_segment[i].id) + "," + str(smoothed_segment[i + 1].id) + "," + str(segment_weight) + ")")
outbound_segment_edge_ids.append(edge_id)
edge_id += 1
inbound_segment_edge_ids = []
for i in range(0, len(smoothed_segment) - 1):
#my_edges[edge_id] = [smoothed_segment[i+1].id, smoothed_segment[i].id] # One Way
cur.execute("INSERT INTO edges VALUES (" + str(edge_id) + "," + str(smoothed_segment[i + 1].id) + "," + str(smoothed_segment[i].id) + "," + str(segment_weight) + ")")
inbound_segment_edge_ids.append(edge_id)
#edge_id += 1
inbound_segment_edge_ids.reverse()
# sanity check
if (len(outbound_segment_edge_ids) != len(inbound_segment_edge_ids)):
print "ERROR!! Number of inbound and outbound edges are not equal!"
print len(outbound_segment_edge_ids)
print len(inbound_segment_edge_ids)
exit()
my_segments[segment_id] = outbound_segment_edge_ids
cur.execute("INSERT INTO segments VALUES (" + str(segment_id) + ",'" + str(outbound_segment_edge_ids) + "')")
segment_id += 1
my_segments[segment_id] = inbound_segment_edge_ids
cur.execute("INSERT INTO segments VALUES (" + str(segment_id) + ",'" + str(inbound_segment_edge_ids) + "')")
segment_id += 1
for intersection in intersections:
my_intersections[intersection.id] = 1
cur.execute("INSERT INTO intersections VALUES (" + str(intersection.id) + ")")
conn.commit()
conn.close()
my_map = [my_nodes, my_edges, my_segments, my_intersections]
pickle.dump(my_map, open( output_filename, "wb" ) )
def create_nodes_and_new_segments(self, main_crossings, segments, density_estimate):
density_map = [2**x for x in range(16, 3, -1)] + range(15, 0, -1)
density_map.reverse()
nodes = {}
new_segments = []
intersections = set()
for segment in segments:
new_segment = []
head_node = main_crossings[segment[0]].location
if (head_node not in nodes):
#nodes[head_node] = Node(pixels_to_coords(head_node), density_map[density_estimate[segment[0][0], segment[0][1]] - 1])
nodes[head_node] = Node(pixels_to_coords(head_node), 0)
new_segment = [nodes[head_node]]
intersections.add(nodes[head_node])
for i in range(1, len(segment) - 1):
if (segment[i] not in nodes):
#nodes[segment[i]] = Node(pixels_to_coords(segment[i]), density_map[density_estimate[segment[i][0], segment[i][1]] - 1])
nodes[segment[i]] = Node(pixels_to_coords(segment[i]), 0)
new_segment.append(nodes[segment[i]])
tail_node = main_crossings[segment[-1]].location
if (tail_node not in nodes):
#nodes[tail_node] = Node(pixels_to_coords(tail_node), density_map[density_estimate[segment[-1][0], segment[-1][1]] - 1])
nodes[tail_node] = Node(pixels_to_coords(tail_node), 0)
new_segment.append(nodes[tail_node])
intersections.add(nodes[tail_node])
new_segments.append(new_segment)
return nodes, new_segments, intersections
def find_main_crossings_and_segments(self, skeleton):
crossing_pixels = np.where(skeleton == 2)
print "crossing_pixels: " + str(len(crossing_pixels[0]))
curr_count = 1
total_count = len(crossing_pixels[0])
main_crossings = {}
segments = []
for (i, j) in izip(crossing_pixels[0], crossing_pixels[1]):
if ((curr_count % 100 == 0) or (curr_count == total_count)):
sys.stdout.write("\r" + str(curr_count) + "/" + str(total_count) + "... ")
sys.stdout.flush()
curr_count += 1
#
# begin extended combustion (to consume adjacent intersection pixels)
#
crossing_stack = []
combusting_queue = deque([])
if (skeleton[i][j] == 2):
skeleton[i][j] = 3
combusting_queue.appendleft((i, j))
else:
if ((i, j) not in main_crossings):
print "ERROR!! (" + str(i) + "," + str(j) + ") not in main_crossings!"
exit()
while (len(combusting_queue) > 0):
current_crossing = combusting_queue.pop()
crossing_stack.append(current_crossing)
(m, n) = current_crossing
# north
if (skeleton[m - 1][n] == 2):
skeleton[m - 1][n] = 3
combusting_queue.appendleft((m - 1, n))
# north-east
if (skeleton[m - 1][n + 1] == 2):
skeleton[m - 1][n + 1] = 3
combusting_queue.appendleft((m - 1, n + 1))
# east
if (skeleton[m][n + 1] == 2):
skeleton[m][n + 1] = 3
combusting_queue.appendleft((m, n + 1))
# south-east
if (skeleton[m + 1][n + 1] == 2):
skeleton[m + 1][n + 1] = 3
combusting_queue.appendleft((m + 1, n + 1))
# south
if (skeleton[m + 1][n] == 2):
skeleton[m + 1][n] = 3
combusting_queue.appendleft((m + 1, n))
# south-west
if (skeleton[m + 1][n - 1] == 2):
skeleton[m + 1][n - 1] = 3
combusting_queue.appendleft((m + 1, n - 1))
# west
if (skeleton[m][n - 1] == 2):
skeleton[m][n - 1] = 3
combusting_queue.appendleft((m, n - 1))
# north-west
if (skeleton[m - 1][n - 1] == 2):
skeleton[m - 1][n - 1] = 3
combusting_queue.appendleft((m - 1, n - 1))
if (len(crossing_stack) > 0):
new_main_crossing = MainCrossing(crossing_stack)
for crossing in crossing_stack:
main_crossings[crossing] = new_main_crossing
#
# end extended combustion (all adjacent intersection pixels consumed)
#
# mark current crossing point as "do not return"
skeleton[i][j] = -1
# north
if (skeleton[i - 1][j] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i - 1, j), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# north-east
if (skeleton[i - 1][j + 1] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i - 1, j + 1), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# east
if (skeleton[i][j + 1] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i, j + 1), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# south-east
if (skeleton[i + 1][j + 1] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i + 1, j + 1), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# south
if (skeleton[i + 1][j] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i + 1, j), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# south-west
if (skeleton[i + 1][j - 1] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i + 1, j - 1), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# west
if (skeleton[i][j - 1] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i, j - 1), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# north-west
if (skeleton[i - 1][j - 1] == 1):
edge_nodes, skeleton = self.find_edge_nodes((i - 1, j - 1), skeleton, [(i, j)])
if edge_nodes != []: segments.append(edge_nodes)
# reset crossing point value
skeleton[i][j] = 3
print "done."
#imsave("no_edges_skeleton.png", skeleton)
return main_crossings, segments
def find_edge_nodes(self, start_location, skeleton, edge_nodes):
queue = deque([])
queue.appendleft(start_location)
(i, j) = start_location
skeleton[i][j] = 0
while (len(queue) > 0):
curr_location = queue.pop()
edge_nodes.append(curr_location)
(i, j) = curr_location
# north
if (skeleton[i - 1][j] == 1):
skeleton[i - 1][j] = 0
queue.appendleft((i - 1, j))
# east
if (skeleton[i][j + 1] == 1):
skeleton[i][j + 1] = 0
queue.appendleft((i, j + 1))
# south
if (skeleton[i + 1][j] == 1):
skeleton[i + 1][j] = 0
queue.appendleft((i + 1, j))
# west
if (skeleton[i][j - 1] == 1):
skeleton[i][j - 1] = 0
queue.appendleft((i, j - 1))
# north-east
if (skeleton[i - 1][j + 1] == 1):
skeleton[i - 1][j + 1] = 0
queue.appendleft((i - 1, j + 1))
# south-east
if (skeleton[i + 1][j + 1] == 1):
skeleton[i + 1][j + 1] = 0
queue.appendleft((i + 1, j + 1))
# south-west
if (skeleton[i + 1][j - 1] == 1):
skeleton[i + 1][j - 1] = 0
queue.appendleft((i + 1, j - 1))
# north-west
if (skeleton[i - 1][j - 1] == 1):
skeleton[i - 1][j - 1] = 0
queue.appendleft((i - 1, j - 1))
# find intersection at end of segment
for k in range(-1, (-1 * len(edge_nodes)), -1):
(i, j) = edge_nodes[k]
# north
if (skeleton[i - 1][j] >= 2):
edge_nodes.append((i - 1, j))
# east
elif (skeleton[i][j + 1] >= 2):
edge_nodes.append((i, j + 1))
# south
elif (skeleton[i + 1][j] >= 2):
edge_nodes.append((i + 1, j))
# west
elif (skeleton[i][j - 1] >= 2):
edge_nodes.append((i, j - 1))
# north-east
elif (skeleton[i - 1][j + 1] >= 2):
edge_nodes.append((i - 1, j + 1))
# south-east
elif (skeleton[i + 1][j + 1] >= 2):
edge_nodes.append((i + 1, j + 1))
# south-west
elif (skeleton[i + 1][j - 1] >= 2):
edge_nodes.append((i + 1, j - 1))
# north-west
elif (skeleton[i - 1][j - 1] >= 2):
edge_nodes.append((i - 1, j - 1))
# sanity check -- segment is bookended by two different intersections
(i, j) = edge_nodes[-1]
#print(len(edge_nodes))
        if (skeleton[i][j] < 2):
            print len(edge_nodes)
            print "ERROR!! No intersection at segment end!"
            return [], skeleton
return edge_nodes, skeleton
def identify_crossing_points(self, skeleton):
fg_pixels = np.where(skeleton == 1)
print "fg_pixels: " + str(len(fg_pixels[0]))
curr_count = 1
total_count = len(fg_pixels[0])
crossing_skeleton = np.copy(skeleton)
for (i, j) in izip(fg_pixels[0], fg_pixels[1]):
if ((curr_count % 100 == 0) or (curr_count == total_count)):
sys.stdout.write("\r" + str(curr_count) + "/" + str(total_count) + "... ")
sys.stdout.flush()
curr_count += 1
p = [skeleton[i - 1][j], skeleton[i - 1][j + 1], skeleton[i][j + 1], skeleton[i + 1][j + 1], skeleton[i + 1][j], skeleton[i + 1][j - 1], skeleton[i][j - 1], skeleton[i - 1][j - 1], skeleton[i - 2][j], skeleton[i - 2][j + 1], skeleton[i - 2][j + 2], skeleton[i - 1][j + 2], skeleton[i][j + 2], skeleton[i + 1][j + 2], skeleton[i + 2][j + 2], skeleton[i + 2][j + 1], skeleton[i + 2][j], skeleton[i + 2][j - 1], skeleton[i + 2][j - 2], skeleton[i + 1][j - 2], skeleton[i][j - 2], skeleton[i - 1][j - 2], skeleton[i - 2][j - 2], skeleton[i - 2][j - 1]]
fringe = [bool(p[8] and bool(p[7] or p[0] or p[1])), bool(p[9] and bool(p[0] or p[1])), bool(p[10] and p[1]), bool(p[11] and bool(p[1] or p[2])), bool(p[12] and bool(p[1] or p[2] or p[3])), bool(p[13] and bool(p[2] or p[3])), bool(p[14] and p[3]), bool(p[15] and bool(p[3] or p[4])), bool(p[16] and bool(p[3] or p[4] or p[5])), bool(p[17] and bool(p[4] or p[5])), bool(p[18] and p[5]), bool(p[19] and bool(p[5] or p[6])), bool(p[20] and bool(p[5] or p[6] or p[7])), bool(p[21] and bool(p[6] or p[7])), bool(p[22] and p[7]), bool(p[23] and bool(p[7] or p[0]))]
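            # p holds the 8-neighborhood of (i, j) (indices 0-7, clockwise from
            # north) followed by the radius-2 ring (indices 8-23, also clockwise
            # starting two pixels north). fringe flags outer-ring pixels that
            # connect back to the inner ring; counting 0->1 transitions around
            # the fringe below approximates how many distinct branches meet at
            # (i, j).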
connected_component_count = 0
for k in range(0, len(fringe)):
connected_component_count += int(not bool(fringe[k]) and bool(fringe[(k + 1) % len(fringe)]))
if (connected_component_count == 0):
crossing_skeleton[i][j] = 0
elif ((connected_component_count == 1) or (connected_component_count > 2)):
crossing_skeleton[i][j] = 2
print "done."
#imsave("crossing_skeleton.png", crossing_skeleton)
return crossing_skeleton
import sys, time
if __name__ == '__main__':
#
    # usage: python graph_extract.py skeletons/skeleton_7m.png bounding_boxes/bounding_box_7m.txt skeleton_maps/skeleton_map_7m.db skeleton_maps/skeleton_map_7m.pickle
#
skeleton_filename = str(sys.argv[1])
bounding_box_filename = str(sys.argv[2])
sqlite_filename = str(sys.argv[3])
output_filename = str(sys.argv[4])
print "skeleton filename: " + str(skeleton_filename)
print "bounding box filename: " + str(bounding_box_filename)
print "output filename: " + str(output_filename)
skeleton = imread(skeleton_filename)
# set up globals
bounding_box_file = open(bounding_box_filename, 'r')
bounding_box_values = bounding_box_file.readline().strip("\n").split(" ")
bounding_box_file.close()
min_lat, min_lon, max_lat, max_lon = float(bounding_box_values[0]), float(bounding_box_values[1]), float(bounding_box_values[2]), float(bounding_box_values[3])
#dlat = (max_lat - min_lat)/20
#dlon = (max_lon - min_lon)/20
print min_lat, min_lon, max_lat, max_lon
#min_lat = min_lat - dlat
#min_lon = min_lon - dlon
#max_lat = max_lat + dlat
#max_lon = max_lon + dlon
#print min_lat, min_lon, max_lat, max_lon
height = len(skeleton)
width = len(skeleton[0])
yscale = height / (max_lat - min_lat)
xscale = width / (max_lon - min_lon)
g = Graph()
start_time = time.time()
g.extract(skeleton.astype(np.bool).astype(np.int), skeleton, sqlite_filename, output_filename)
print "total elapsed time: " + str(time.time() - start_time) + " seconds"
|
161204
|
import numpy as np
from sklearn.metrics import r2_score as sklearn_r2_score
from tensorflow import convert_to_tensor
from scikeras.wrappers import KerasRegressor
from .mlp_models import dynamic_regressor
def test_kerasregressor_r2_correctness():
"""Test custom R^2 implementation against scikit-learn's."""
n_samples = 50
datasets = []
y_true = np.arange(n_samples, dtype=float)
y_pred = y_true + 1
datasets.append((y_true.reshape(-1, 1), y_pred.reshape(-1, 1)))
y_true = np.random.random_sample(size=y_true.shape)
y_pred = np.random.random_sample(size=y_true.shape)
datasets.append((y_true.reshape(-1, 1), y_pred.reshape(-1, 1)))
def keras_backend_r2(y_true, y_pred):
"""Wrap Keras operations to numpy."""
y_true = convert_to_tensor(y_true)
y_pred = convert_to_tensor(y_pred)
return KerasRegressor.r_squared(y_true, y_pred).numpy()
for (y_true, y_pred) in datasets:
np.testing.assert_almost_equal(
keras_backend_r2(y_true, y_pred),
sklearn_r2_score(y_true, y_pred),
decimal=5,
)
def test_kerasregressor_r2_as_metric():
"""Test custom R^2 implementation against scikit-learn's."""
est = KerasRegressor(
dynamic_regressor, metrics=[KerasRegressor.r_squared], epochs=10, random_state=0
)
y = np.random.randint(low=0, high=2, size=(1000,))
X = y.reshape((-1, 1))
est.fit(X, y)
current_score = est.score(X, y)
last_hist = est.history_["r_squared"][-1]
np.testing.assert_almost_equal(current_score, last_hist, decimal=3)
current_eval = est.model_.evaluate(X, y, return_dict=True)["r_squared"]
np.testing.assert_almost_equal(current_score, current_eval, decimal=3)
|
161235
|
from __future__ import division
import logging
import traceback
from sqlalchemy.sql import select
from database import get_engine, engine_disposal, anomalies_table_meta
# @added 20210420 - Task #4022: Move mysql_select calls to SQLAlchemy
# Add a global method to query the DB for the latest_anomalies
def latest_anomalies(current_skyline_app):
"""
Return the latest anomalies as a list of tuples, each tuple a DB row.
"""
function_str = 'database_queries.latest_anomalies'
current_skyline_app_logger = current_skyline_app + 'Log'
current_logger = logging.getLogger(current_skyline_app_logger)
    anomalies = []
    # initialise so the engine checks below cannot raise a NameError if
    # get_engine itself fails
    engine = None
    try:
        engine, fail_msg, trace = get_engine(current_skyline_app)
except Exception as e:
trace = traceback.format_exc()
current_logger.error(trace)
fail_msg = 'error :: %s :: could not get a MySQL engine - %s' % (function_str, e)
current_logger.error('%s' % fail_msg)
if engine:
engine_disposal(current_skyline_app, engine)
if current_skyline_app == 'webapp':
# Raise to webapp
raise
return anomalies
try:
anomalies_table, fail_msg, trace = anomalies_table_meta(current_skyline_app, engine)
current_logger.info(fail_msg)
except Exception as e:
trace = traceback.format_exc()
current_logger.error('%s' % trace)
        fail_msg = 'error :: %s :: failed to get anomalies_table meta - %s' % (function_str, e)
current_logger.error('%s' % fail_msg)
if engine:
engine_disposal(current_skyline_app, engine)
if current_skyline_app == 'webapp':
# Raise to webapp
raise
return anomalies
try:
connection = engine.connect()
# Replacing panorama query
# query = 'select id, metric_id, anomalous_datapoint, anomaly_timestamp, full_duration, created_timestamp, anomaly_end_timestamp from anomalies ORDER BY id DESC LIMIT 10'
stmt = select([anomalies_table.c.id, anomalies_table.c.metric_id,
anomalies_table.c.anomalous_datapoint,
anomalies_table.c.anomaly_timestamp,
anomalies_table.c.full_duration,
anomalies_table.c.created_timestamp,
anomalies_table.c.anomaly_end_timestamp]).\
where(anomalies_table.c.id > 0).order_by(anomalies_table.c.id.desc()).\
limit(10)
results = connection.execute(stmt)
anomalies = []
if results is not None:
for row in results:
if row is not None:
anomalies.append(row)
if not anomalies:
anomalies = []
connection.close()
current_logger.info('%s :: determined %s latest anomalies' % (
function_str, str(len(anomalies))))
except Exception as e:
trace = traceback.format_exc()
current_logger.error(trace)
fail_msg = 'error :: %s :: could not determine latest anomalies - %s' % (
function_str, e)
current_logger.error('%s' % fail_msg)
if engine:
engine_disposal(current_skyline_app, engine)
if current_skyline_app == 'webapp':
# Raise to webapp
raise
return anomalies
if engine:
engine_disposal(current_skyline_app, engine)
return anomalies
|
161251
|
from __future__ import print_function
import pickle
import numpy
import theano
numpy.random.seed(42)
def prepare_data(seqs, labels):
"""Create the matrices from the datasets.
This pad each sequence to the same lenght: the lenght of the
longuest sequence or maxlen.
if maxlen is set, we will cut all sequence to this maximum
lenght.
This swap the axis!
"""
# x: a list of sentences
lengths = [len(s) for s in seqs]
n_samples = len(seqs)
maxlen = numpy.max(lengths)
x = numpy.zeros((maxlen, n_samples)).astype('int64')
x_mask = numpy.ones((maxlen, n_samples)).astype(theano.config.floatX)
for idx, s in enumerate(seqs):
x[:lengths[idx], idx] = s
x_mask *= (1 - (x == 0))
return x, x_mask, labels
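# A small illustration (made-up item ids) of the padding and masking above:
#   x, mask, y = prepare_data([[1, 2, 3], [4, 5]], [7, 8])
# x is time-major with shape (3, 2); the second column is zero-padded at its
# last step and mask zeroes out exactly that padded position.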
def load_data(valid_portion=0.1, maxlen=19, sort_by_len=False):
    '''Loads the dataset (here RSC2015, from the hard-coded paths below).
    :type valid_portion: float
    :param valid_portion: The proportion of the full train set used for
        the validation set.
    :type maxlen: None or positive int
    :param maxlen: the max sequence length we use in the train/valid set.
    :type sort_by_len: bool
    :param sort_by_len: Sort by sequence length for the train,
        valid and test set. This allows faster execution as it causes
        less padding per minibatch. Another mechanism must be used to
        shuffle the train set at each epoch.
    '''
#############
# LOAD DATA #
#############
# Load the dataset
path_train_data = '/content/gdrive/My Drive/Colab Notebooks/RSC15-Raw/NARM/train.pk'
path_test_data = '/content/gdrive/My Drive/Colab Notebooks/RSC15-Raw/NARM/test.pk'
f1 = open(path_train_data, 'rb')
train_set = pickle.load(f1)
f1.close()
f2 = open(path_test_data, 'rb')
test_set = pickle.load(f2)
f2.close()
if maxlen:
new_train_set_x = []
new_train_set_y = []
for x, y in zip(train_set[0], train_set[1]):
if len(x) < maxlen:
new_train_set_x.append(x)
new_train_set_y.append(y)
else:
new_train_set_x.append(x[:maxlen])
new_train_set_y.append(y)
train_set = (new_train_set_x, new_train_set_y)
del new_train_set_x, new_train_set_y
new_test_set_x = []
new_test_set_y = []
for xx, yy in zip(test_set[0], test_set[1]):
if len(xx) < maxlen:
new_test_set_x.append(xx)
new_test_set_y.append(yy)
else:
new_test_set_x.append(xx[:maxlen])
new_test_set_y.append(yy)
test_set = (new_test_set_x, new_test_set_y)
del new_test_set_x, new_test_set_y
# split training set into validation set
train_set_x, train_set_y = train_set
n_samples = len(train_set_x)
sidx = numpy.arange(n_samples, dtype='int32')
numpy.random.shuffle(sidx)
n_train = int(numpy.round(n_samples * (1. - valid_portion)))
valid_set_x = [train_set_x[s] for s in sidx[n_train:]]
valid_set_y = [train_set_y[s] for s in sidx[n_train:]]
train_set_x = [train_set_x[s] for s in sidx[:n_train]]
train_set_y = [train_set_y[s] for s in sidx[:n_train]]
train_set = (train_set_x, train_set_y)
valid_set = (valid_set_x, valid_set_y)
test_set_x, test_set_y = test_set
valid_set_x, valid_set_y = valid_set
train_set_x, train_set_y = train_set
def len_argsort(seq):
return sorted(range(len(seq)), key=lambda x: len(seq[x]))
if sort_by_len:
sorted_index = len_argsort(test_set_x)
test_set_x = [test_set_x[i] for i in sorted_index]
test_set_y = [test_set_y[i] for i in sorted_index]
sorted_index = len_argsort(valid_set_x)
valid_set_x = [valid_set_x[i] for i in sorted_index]
valid_set_y = [valid_set_y[i] for i in sorted_index]
train = (train_set_x, train_set_y)
valid = (valid_set_x, valid_set_y)
test = (test_set_x, test_set_y)
return train, valid, test
|
161257
|
import math
from collections import OrderedDict
import logging
logger = logging.getLogger(__name__)
import numpy as np
import torch
import tensorflow as tf
from paragen.generators import AbstractGenerator, register_generator
from paragen.utils.io import remove
from paragen.utils.runtime import Environment
@register_generator
class LightseqTransformerGenerator(AbstractGenerator):
"""
    SequenceGenerator is a combination of a model and a search algorithm.
    It processes in a multi-step fashion while the model processes only one step.
    It is usually separated into an encoder and a search-with-decoder, and is
    exported and loaded as encoder and search modules.
    Args:
        batch_size: maximum batch size for LightSeq inference
        path: path to export or load generator
"""
def __init__(self,
batch_size,
path=None, ):
super().__init__(path)
self._batch_size = batch_size
env = Environment()
self._maxlen = getattr(env.configs, 'maxlen', 512)
self._model = None
self._src_special_tokens, self._tgt_special_tokens = None, None
self._lightseq_model = None
def build_from_model(self, model, src_special_tokens, tgt_special_tokens):
"""
Build generator from model and search.
Args:
model (paragen.models.EncoderDecoder): an encoder-decoder model to be wrapped
src_special_tokens (dict): source special token dict
tgt_special_tokens (dict): target special token dict
"""
self._model = model
self._src_special_tokens = src_special_tokens
self._tgt_special_tokens = tgt_special_tokens
def forward(self, encoder, decoder, search=None):
"""
Infer a sample as model in evaluation mode.
Compute encoder output first and decode results with search module
Args:
encoder (tuple): encoder inputs
decoder (tuple): decoder inputs
search (tuple): search states
Returns:
decoder_output: results inferred by search algorithm on decoder
"""
src = encoder[0].cpu().numpy()
output, _ = self._lightseq_model.infer(src)
output = torch.from_numpy(output)
output = output[:, 0, :]
return output
def export(self,
path,
net_input,
lang='en',
**kwargs):
"""
        Export self to `path` by exporting the model directly
Args:
path: path to store serialized model
net_input: fake net_input for tracing the model
lang: language
**kwargs:
- beam_size: beam search size
- lenpen: length penalty
- extra_decode_length: maximum_generation_length = min(src_length + extra_decode_length, max_step)
- generation_method: generation method
- topk: top-k candidates
- topp:
- diverse_lambda: lambda in diverse
"""
assert self._model.encoder._normalize_before and self._model.decoder._normalize_before, 'only pre-norm arch can be exported by LightSeq'
from .transformer_pb2 import Transformer
transformer = Transformer()
encoder_state_dict, decoder_state_dict = self._extract_weight()
self._fill_weight(transformer, encoder_state_dict, decoder_state_dict, lang=lang)
self._fill_in_conf(transformer, self._model.encoder._n_head, **kwargs)
self._write(transformer, path)
def _fill_weight(self, transformer, encoder_state_dict, decoder_state_dict, lang='en'):
dec_var_name_list = list(decoder_state_dict.keys())
enc_var_name_list = list(encoder_state_dict.keys())
# fill each encoder layer's params
enc_tensor_names = {}
for name in enc_var_name_list:
name_split = name.split(".")
if len(name_split) <= 2 or not name_split[2].isdigit():
continue
layer_id = int(name_split[2])
enc_tensor_names.setdefault(layer_id, []).append(name)
for layer_id in sorted(enc_tensor_names.keys()):
fill_layer(
enc_tensor_names[layer_id],
encoder_state_dict,
transformer.encoder_stack.add(),
enc_layer_mapping_dict,
)
# fill each decoder layer's params
dec_tensor_names = {}
for name in dec_var_name_list:
name_split = name.split(".")
            if len(name_split) <= 2 or not name_split[2].isdigit():
                continue
            layer_id = int(name_split[2])
dec_tensor_names.setdefault(layer_id, []).append(name)
for layer_id in sorted(dec_tensor_names.keys()):
fill_layer(
dec_tensor_names[layer_id],
decoder_state_dict,
transformer.decoder_stack.add(),
dec_layer_mapping_dict,
)
# fill src_embedding
fill_layer(
enc_var_name_list,
encoder_state_dict,
transformer.src_embedding,
src_emb_mapping_dict,
)
src_tb = _gather_token_embedding(
enc_var_name_list, encoder_state_dict, "_embed"
)
transformer.src_embedding.token_embedding[:] = src_tb.flatten().tolist()
pos_emb = _get_position_encoding(length=self._maxlen, hidden_size=src_tb.shape[-1])
pos_emb_list = pos_emb.numpy().reshape([-1]).tolist()
transformer.src_embedding.position_embedding[:] = pos_emb_list
logger.info(
"model.encoder.embed_positions.weight -> src_embedding.position_embedding, shape: {}, conversion finished!".format(
(pos_emb.shape)
)
)
# fill trg_embedding
encode_output_mapping_dict = _get_encode_output_mapping_dict(len(dec_tensor_names))
trg_emb_mapping_dict.update(encode_output_mapping_dict)
fill_layer(
dec_var_name_list,
decoder_state_dict,
transformer.trg_embedding,
trg_emb_mapping_dict,
)
# assert lang in LANG2ID
trg_tb = _gather_token_embedding(
dec_var_name_list, decoder_state_dict, "_embed", lang=lang
)
transformer.trg_embedding.token_embedding[:] = trg_tb.transpose().flatten().tolist()
logger.info(
"token_embedding.weight -> trg_embedding.token_embedding, shape: {}, conversion finished!".format(
trg_tb.transpose().shape
)
)
pos_emb = _get_position_encoding(length=self._maxlen, hidden_size=trg_tb.shape[-1])
pos_emb_list = pos_emb.numpy().reshape([-1]).tolist()
transformer.trg_embedding.position_embedding[:] = pos_emb_list
logger.info(
"model.decoder.embed_positions.weight -> trg_embedding.position_embedding, shape: {}, conversion finished!".format(
(pos_emb.shape)
)
)
def _extract_weight(self):
reloaded = self._model.state_dict()
encoder_state_dict = {}
decoder_state_dict = {}
for k in reloaded:
if k.startswith("_encoder."):
encoder_state_dict[k] = reloaded[k]
if k.startswith("_decoder."):
decoder_state_dict[k] = reloaded[k]
decoder_state_dict = split_qkv(decoder_state_dict)
decoder_state_dict['_decoder.shared_bias'] = decoder_state_dict.pop('_decoder._out_proj_bias')
return encoder_state_dict, decoder_state_dict
def _fill_in_conf(self,
transformer,
nhead,
beam_size=4,
length_penalty=0.6,
extra_decode_length=50,
generation_method='beam_search',
topk=1,
topp=0.75,
diverse_lambda=0.,):
# fill in conf to transformer
transformer.model_conf.head_num = nhead
transformer.model_conf.beam_size = beam_size
transformer.model_conf.length_penalty = length_penalty
transformer.model_conf.extra_decode_length = extra_decode_length
transformer.model_conf.src_padding_id = self._src_special_tokens['pad']
transformer.model_conf.trg_start_id = self._tgt_special_tokens['bos']
transformer.model_conf.trg_end_id = self._tgt_special_tokens['eos']
transformer.model_conf.sampling_method = generation_method
transformer.model_conf.topk = topk
transformer.model_conf.topp = topp
transformer.model_conf.diverse_lambda = diverse_lambda
transformer.model_conf.is_post_ln = False
transformer.model_conf.no_scale_embedding = False
transformer.model_conf.use_gelu = False
def _write(self, transformer, path):
logger.info("Writing to {0}".format(path))
try:
with tf.io.gfile.GFile(path, "wb") as fout:
fout.write(transformer.SerializeToString())
except Exception:
logger.info('Saving PB fails. Save HDF5 instead!')
remove(path)
path = path.replace('pb', 'hdf5')
import h5py
f = h5py.File(path, "w")
save_bart_proto_to_hdf5(transformer, f)
f.close()
def load(self):
"""
Load generator from path
"""
import lightseq.inference as lsi
self._lightseq_model = lsi.Transformer(self._path, self._batch_size)
""" key是proto参数的值,value是一个强大的表达式,每个&&分割tensor name的匹配路径或表达式,每个匹配
路径的子pattern用空格分隔,表达式用expression_开头,可以对每个tensor进行单独操作,支持多个表达式。多个匹配路径
和表达式最后会concat,axis=-1 """
enc_layer_mapping_dict = OrderedDict(
{
"multihead_norm_scale": "self_attn_norm.weight",
"multihead_norm_bias": "self_attn_norm.bias",
"multihead_project_kernel_qkv": "self_attn.in_proj_weight&&expression_.transpose(0, 1)",
"multihead_project_bias_qkv": "self_attn.in_proj_bias",
"multihead_project_kernel_output": "self_attn.out_proj.weight&&expression_.transpose(0, 1)",
"multihead_project_bias_output": "self_attn.out_proj.bias",
"ffn_norm_scale": "ffn_norm.weight",
"ffn_norm_bias": "ffn_norm.bias",
"ffn_first_kernel": "ffn._fc1.weight&&expression_.transpose(0, 1)",
"ffn_first_bias": "ffn._fc1.bias",
"ffn_second_kernel": "ffn._fc2.weight&&expression_.transpose(0, 1)",
"ffn_second_bias": "ffn._fc2.bias",
}
)
dec_layer_mapping_dict = OrderedDict(
{
"self_norm_scale": "self_attn_norm.weight",
"self_norm_bias": "self_attn_norm.bias",
"self_project_kernel_qkv": "self_attn.in_proj_weight&&expression_.transpose(0, 1)",
"self_project_bias_qkv": "self_attn.in_proj_bias",
"self_project_kernel_output": "self_attn.out_proj.weight&&expression_.transpose(0, 1)",
"self_project_bias_output": "self_attn.out_proj.bias",
"encdec_norm_scale": "multihead_attn_norm.weight",
"encdec_norm_bias": "multihead_attn_norm.bias",
"encdec_project_kernel_q": "multihead_attn.q_proj_weight&&expression_.transpose(0, 1)",
"encdec_project_bias_q": "multihead_attn.q_proj_bias",
"encdec_project_kernel_output": "multihead_attn.out_proj.weight&&expression_.transpose(0, 1)",
"encdec_project_bias_output": "multihead_attn.out_proj.bias",
"ffn_norm_scale": "ffn_norm.weight",
"ffn_norm_bias": "ffn_norm.bias",
"ffn_first_kernel": "ffn._fc1.weight&&expression_.transpose(0, 1)",
"ffn_first_bias": "ffn._fc1.bias",
"ffn_second_kernel": "ffn._fc2.weight&&expression_.transpose(0, 1)",
"ffn_second_bias": "ffn._fc2.bias",
}
)
src_emb_mapping_dict = OrderedDict(
{
"norm_scale": "_norm.weight",
"norm_bias": "_norm.bias",
}
)
trg_emb_mapping_dict = OrderedDict(
{
"norm_scale": "_norm.weight",
"norm_bias": "_norm.bias",
"shared_bias": "shared_bias",
}
)
def check_rule(tensor_name, rule):
if "Adam" in tensor_name or "adam" in tensor_name:
return False
assert isinstance(rule, str) and rule
r_size = len(rule.split('.'))
t = tensor_name.split('.')
if len(t) < r_size:
return False
return rule == '.'.join(t[-r_size:])
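# A quick illustration of check_rule (hypothetical tensor names): the dotted
# rule must match the tail of the state-dict key exactly.
#   check_rule("_encoder.layers.0.self_attn_norm.weight", "self_attn_norm.weight")  # True
#   check_rule("_encoder.layers.0.ffn._fc1.bias", "self_attn_norm.weight")          # False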
def fill_layer(tensor_names, state_dict, layer, mapping_dict):
for proto_name, ckpt_rule in mapping_dict.items():
expression = [
ele for ele in ckpt_rule.split("&&") if ele.startswith("expression_")
]
ckpt_rule = [
ele for ele in ckpt_rule.split("&&") if not ele.startswith("expression_")
]
assert (len(ckpt_rule) > 0 and len(expression) < 2) or (
len(ckpt_rule) == 0 and len(expression) > 0
)
if len(expression) < 2:
expression = "" if not expression else expression[0].split("_")[1]
else:
expression = [exp.split("_")[1] for exp in expression]
target_tn = []
for cr in ckpt_rule:
tmp = []
for tn in tensor_names:
if check_rule(tn, cr):
tmp.append(tn)
if len(tmp) != 1:
logger.info(f'{tmp} {cr}')
assert len(tmp) == 1
target_tn.extend(tmp)
target_tensor = [state_dict[name] for name in target_tn]
tt = {}
if target_tensor:
exec("tt['save'] = [ele%s for ele in target_tensor]" % expression)
else:
if not isinstance(expression, list):
expression = [expression]
exec("tt['save'] = [%s]" % ",".join(expression))
target_tensor = np.concatenate(tt["save"], axis=-1)
logger.info(
"%s -> %s, shape: %s, convert finished."
% (target_tn if target_tn else "created", proto_name, target_tensor.shape)
)
exec("layer.%s[:]=target_tensor.flatten().tolist()" % proto_name)
def _get_encode_output_mapping_dict(dec_layer_num):
encode_output_kernel_pattern = [
"{0}.multihead_attn.k_proj_weight&&{0}.multihead_attn.v_proj_weight".format(ele)
for ele in range(dec_layer_num)
]
encode_output_bias_pattern = [
"{0}.multihead_attn.k_proj_bias&&{0}.multihead_attn.v_proj_bias".format(ele)
for ele in range(dec_layer_num)
]
return {
"encode_output_project_kernel_kv": "&&".join(
encode_output_kernel_pattern + ["expression_.transpose(0, 1)"]
),
"encode_output_project_bias_kv": "&&".join(encode_output_bias_pattern),
}
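# For a hypothetical 2-layer decoder the kernel rule above expands to the
# single string
# "0.multihead_attn.k_proj_weight&&0.multihead_attn.v_proj_weight&&
#  1.multihead_attn.k_proj_weight&&1.multihead_attn.v_proj_weight&&
#  expression_.transpose(0, 1)", i.e. fill_layer matches all per-layer k/v
# kernels, transposes each, and concatenates them along the last axis.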
def _get_position_encoding(length, hidden_size, min_timescale=1.0, max_timescale=1.0e4):
"""Return positional encoding.
Calculates the position encoding as a mix of sine and cosine functions with
geometrically increasing wavelengths.
Defined and formulized in Attention is All You Need, section 3.5.
Args:
length: Sequence length.
hidden_size: Size of the
min_timescale: Minimum scale that will be applied at each position
max_timescale: Maximum scale that will be applied at each position
Returns:
Tensor with shape [length, hidden_size]
"""
with tf.device("/cpu:0"):
position = tf.cast(tf.range(length), tf.float32)
num_timescales = hidden_size // 2
log_timescale_increment = math.log(
float(max_timescale) / float(min_timescale)
) / (tf.cast(num_timescales, tf.float32) - 1)
inv_timescales = min_timescale * tf.exp(
tf.cast(tf.range(num_timescales), tf.float32) * -log_timescale_increment
)
scaled_time = tf.expand_dims(position, 1) * tf.expand_dims(inv_timescales, 0)
signal = tf.concat([tf.math.sin(scaled_time), tf.math.cos(scaled_time)], axis=1)
return signal
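# Shape sketch: _get_position_encoding(length=4, hidden_size=8) returns a
# (4, 8) tensor whose first four columns are sines and last four are cosines
# of the geometrically scaled positions.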
def _gather_token_embedding(tensor_names, name2var_dict, tn_pattern, lang="en"):
""" use pattern to diff source and target. """
target_tn = []
for tn in tensor_names:
if (tn_pattern in tn.split(".")) and ("weight" in tn.split(".")):
target_tn.append(tn)
continue
target_tensor = [name2var_dict[name] for name in target_tn]
target_tensor = np.concatenate(target_tensor, axis=0)
target_tensor = target_tensor * (target_tensor.shape[1] ** 0.5)
logger.info(
"token embedding shape is %s, scaled by %s"
% (target_tensor.shape, target_tensor.shape[1] ** 0.5))
logger.info("token embedding shape is {}".format(target_tensor.shape))
return target_tensor
def split_qkv(decoder_state_dict):
state_dict = OrderedDict()
for key, val in decoder_state_dict.items():
if 'multihead_attn.in_proj' in key:
dim = val.size(0) // 3
state_dict[key.replace('multihead_attn.in_proj', 'multihead_attn.q_proj')] = val[:dim]
state_dict[key.replace('multihead_attn.in_proj', 'multihead_attn.k_proj')] = val[dim:dim * 2]
state_dict[key.replace('multihead_attn.in_proj', 'multihead_attn.v_proj')] = val[dim * 2:]
else:
state_dict[key] = val
return state_dict
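# Shape sketch (toy sizes, hypothetical key): a fused in_proj weight of shape
# (3 * d, d) is split into equal q/k/v thirds along dim 0, e.g.
#   w = torch.zeros(12, 4)  # d = 4
#   split_qkv({"layers.0.multihead_attn.in_proj_weight": w})
# yields q_proj/k_proj/v_proj entries of shape (4, 4) each.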
def save_bart_proto_to_hdf5(transformer, f):
"""Convert bart protobuf to hdf5 format to support larger weight."""
MODEL_CONF_KEYS = [
# model_conf
"head_num",
"beam_size",
"extra_decode_length",
"length_penalty",
"src_padding_id",
"trg_start_id",
"diverse_lambda",
"sampling_method",
"topp",
"topk",
"trg_end_id",
"is_post_ln",
"no_scale_embedding",
"use_gelu",
"is_multilingual",
]
EMBEDDING_KEYS = [
# src_embedding
# trg_embedding
"token_embedding",
"position_embedding",
"norm_scale",
"norm_bias",
"encode_output_project_kernel_kv",
"encode_output_project_bias_kv",
"shared_bias",
"lang_emb",
"trg_vocab_mask",
]
ENCODER_LAYER_KEYS = [
# encoder_stack/{i}
"multihead_norm_scale",
"multihead_norm_bias",
"multihead_project_kernel_qkv",
"multihead_project_bias_qkv",
"multihead_project_kernel_output",
"multihead_project_bias_output",
"ffn_norm_scale",
"ffn_norm_bias",
"ffn_first_kernel",
"ffn_first_bias",
"ffn_second_kernel",
"ffn_second_bias",
]
DECODER_LAYER_KEYS = [
# decoder_stack/{i}
"self_norm_scale",
"self_norm_bias",
"self_project_kernel_qkv",
"self_project_bias_qkv",
"self_project_kernel_output",
"self_project_bias_output",
"encdec_norm_scale",
"encdec_norm_bias",
"encdec_project_kernel_q",
"encdec_project_bias_q",
"encdec_project_kernel_output",
"encdec_project_bias_output",
"ffn_norm_scale",
"ffn_norm_bias",
"ffn_first_kernel",
"ffn_first_bias",
"ffn_second_kernel",
"ffn_second_bias",
]
base_attr_to_keys = {
"src_embedding": EMBEDDING_KEYS,
"trg_embedding": EMBEDDING_KEYS,
"model_conf": MODEL_CONF_KEYS,
}
from operator import attrgetter
logger.info(f"start converting protobuf to hdf5 format.")
# load src_embedding, trg_embedding, model_conf
for base_attr, keys in base_attr_to_keys.items():
for key in keys:
hdf5_key = f"{base_attr}/{key}"
proto_attr = f"{base_attr}.{key}"
if key not in dir(attrgetter(base_attr)(transformer)):
logger.info(f"key {key} not found in {base_attr}, skipping")
continue
logger.info(f"loading transformer {proto_attr} -> {hdf5_key}")
_data = attrgetter(proto_attr)(transformer)
            if type(_data) is str:
                logger.info(
                    "found type str, explicitly converting string to ascii encoded array."
                )
                # explicit conversion to an array of chars (int8) to avoid
                # issues when reading strings in C
                _data = np.array([ord(c) for c in _data]).astype(np.int8)
f.create_dataset(hdf5_key, data=_data)
# save number of layers metadata
f.create_dataset("model_conf/n_encoder_stack", data=len(transformer.encoder_stack))
f.create_dataset("model_conf/n_decoder_stack", data=len(transformer.decoder_stack))
# load encoder_stack
for layer_id, layer in enumerate(transformer.encoder_stack):
for key in ENCODER_LAYER_KEYS:
hdf5_key = f"encoder_stack/{layer_id}/{key}"
proto_attr = key
logger.info(f"loading transformer.encoder_stack {proto_attr} -> {hdf5_key}")
f.create_dataset(hdf5_key, data=attrgetter(proto_attr)(layer))
# load decoder_stack
for layer_id, layer in enumerate(transformer.decoder_stack):
for key in DECODER_LAYER_KEYS:
hdf5_key = f"decoder_stack/{layer_id}/{key}"
proto_attr = key
logger.info(f"loading transformer.decoder_stack {proto_attr} -> {hdf5_key}")
f.create_dataset(hdf5_key, data=attrgetter(proto_attr)(layer))
logger.info(f"proto to hdf5 conversion completed.")
|
161299
|
import discord
from discord.ext import commands
class Info:
"""Info is a class within Pixie that is only for accessing data from discords built in things (Although we add Pixie's status command here)"""
def __init__(self, bot):
self.bot = bot
@commands.command(name="userinfo", pass_context=True)
async def user_info(self, ctx, user: discord.Member = None):
"""Gets information about the desired user (defaults to the message sender)"""
if not user:
user = ctx.message.author
msg = "```\n"
msg += "User: %s\n" % user.name
msg += "Nickname %s\n" % user.nick
msg += "ID: %s\n" % user.id
msg += "Created at: %s\n" % user.created_at
msg += "Joined on: %s\n" % user.joined_at
msg += "Game: %s\n" % user.game
msg += "Roles: %s\n" % ", ".join([role.name for role in user.roles if role.name != "@everyone"])
msg += "```\n"
msg += "Avatar: %s" % user.avatar_url
await self.bot.send_message(ctx.message.channel, msg)
@commands.command(name="guildinfo", pass_context=True)
async def guild_info(self, ctx):
"""Gets information about the current server"""
await self.bot.say("```xl\n"
"Guild: {0}\n"
"ID: {0.id}\n"
"Region: {0.region}\n"
"Member Count: {1}\n"
"Owner: {0.owner}\n"
"Icon: {0.icon_url}\n"
"Roles: {2}"
"```".format(ctx.message.server, sum(1 for x in ctx.message.server.members),
", ".join([x.name for x in ctx.message.server.roles])))
@commands.command(name="status")
async def status(self):
"""Gives some general information about Pixie's current situation"""
await self.bot.say("```xl\n"
"I'm in {0} guilds\n"
"I can currently see {1} people, {2} of which are unique\n"
"I'm also in {3} voice channels"
"```".format(len(self.bot.servers),
sum(1 for x in self.bot.get_all_members()),
len(set(self.bot.get_all_members())),
len(self.bot.voice_clients)))
@commands.command(name="info")
async def info(self):
await self.bot.say("```xl\n"
"Hiya, I'm Pixie; I'm a bot built for weebs by Recchan.\n"
"Check me out on Github, where you can see my codebase: https://github.com/GetRektByMe/Pixie\n"
"Here's my invite link: https://discordapp.com/oauth2/authorize?client_id=175319652073734144&scope=bot&permissions=536083519```")
def setup(bot):
bot.add_cog(Info(bot))
|
161310
|
import json
from policyglass import Condition
def test_json():
subject = Condition("Key", "Operator", ["Value"])
assert subject.json() == json.dumps({"key": "Key", "operator": "Operator", "values": ["Value"]})
|
161354
|
import requests
from bs4 import BeautifulSoup
from fake_useragent import UserAgent
import re
from datetime import datetime
from time import sleep
from random import randint
FILE_PATH = './'
host = 'https://www.cableav.tv/'
proxies = {
'http': 'http://127.0.0.1:7890',
'https': 'http://127.0.0.1:7890'
}
ua = UserAgent()
headers = {
"accept": "text/html,application/xhtml+xml,application/xml;q=0.9,image/avif,image/webp,image/apng,*/*;q=0.8,application/signed-exchange;v=b3;q=0.9",
"accept-encoding": "gzip, deflate, br",
"accept-language": "zh-CN,zh;q=0.9",
"cache-control": "max-age=0",
"dnt":"1",
"referer":"https://cableav.tv/playlist/",
"user-agent": ua.random
}
def open_page(url):
    sleep(randint(1,3))
    print('\n{} - [INFO]: requests at {}'.format(
        datetime.now().strftime("%Y-%m-%d %H:%M:%S"), url))
    try:
        # without an explicit timeout, requests never raises a Timeout error
        req = requests.get(url, headers=headers, proxies=proxies, timeout=30)
        if req.status_code == 200 or req.status_code == 304:
            req.encoding = 'utf-8'
            return req
    except requests.exceptions.Timeout:
        print("Timeout:")
        # retry up to three times, returning the first successful response
        for _ in range(3):
            result = open_page(url)
            if result is not None:
                return result
    return None
def parse_playlist(html):
if html != None:
page = BeautifulSoup(html.text,'lxml')
video_urls = page.select('div.listing-content > h3 > a')
for i in video_urls:
data = i.get('href')
yield data
else:
print("Result is None! \n")
pass
def parse_video(html):
PATTERN_URL = r'.*\"single_media_sources\":(\[\{.*\}\])'
if html != None:
page = BeautifulSoup(html.text,'lxml')
m3u8 = page.find("meta", {"property": "og:video:url"})["content"]
video_tags = page.find_all("meta", {"property": "video:tag"})
best_quality = max([int(tag["content"][: -1]) for tag in video_tags])
title = page.find("title").text.replace(' - CableAV','')
for line in html.text.split('\n'):
match = re.match(PATTERN_URL, line)
if match:
quality_lists = eval(match.group(1))
for quality in quality_lists:
if str(best_quality) in quality['source_label']:
m3u8 = quality['source_file'].replace('\/', '/')
break
# return [title,m3u8]
save_file(title,m3u8)
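# A sketch of the page line the regex above targets (values are made up):
#   ... "single_media_sources":[{"source_file":"https:\/\/cdn\/v720.m3u8","source_label":"720p"}] ...
# eval() on the captured group yields a list of dicts; the entry whose
# source_label contains the best advertised quality wins.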
def save_file(title,m3u8):
try:
with open(FILE_PATH + 'test.txt','ab+') as f:
result = '{},{}\r\n'.format(title,m3u8)
f.write(result.encode('utf-8'))
f.close()
except IOError as e:
print(e)
pass
def run(url):
page = open_page(url)
play_list = parse_playlist(page)
for i in play_list:
video_page = open_page(i)
parse_video(video_page)
if __name__ == '__main__':
while True:
start_url = input("Input page URL: \n")
page_num = int(input('Input page list num:\n'))
if page_num <= 1:
run(start_url)
else:
urls = [start_url + "page/" + "{}/".format(x) for x in range(2,page_num+1)]
run(start_url)
for url in urls:
run(url)
|
161369
|
from gempy.eti_core.etiparam import ETIParam
PARAMETERS = "parameters"
__PARAMETERS_SET_INTERNALLY__ = ["FLAG_IMAGE",
"CHECKIMAGE_NAME",
"CATALOG_NAME"]
class SExtractorETIParam(ETIParam):
def __init__(self, params):
super().__init__(params=params)
    def prepare(self):
        # Delete any parameters from the dict that need to be set internally
if self.params:
for param in __PARAMETERS_SET_INTERNALLY__:
try:
self.params.pop(param)
except KeyError:
pass
|
161424
|
import time
import numpy as np
from airobot import Robot
from airobot import log_warn
from airobot.utils.common import euler2quat
def main():
"""
This function shows an example of block stacking.
"""
np.set_printoptions(precision=4, suppress=True)
robot = Robot('franka')
success = robot.arm.go_home()
if not success:
log_warn('Robot go_home failed!!!')
ori = euler2quat([0, 0, np.pi / 2])
robot.pb_client.load_urdf('table/table.urdf',
[.6, 0, 0.4],
ori,
scaling=0.9)
box_size = 0.03
box_id1 = robot.pb_client.load_geom('box', size=box_size,
mass=0.1,
base_pos=[.5, 0.12, 1.0],
rgba=[1, 0, 0, 1])
box_id2 = robot.pb_client.load_geom('box',
size=box_size,
mass=0.1,
base_pos=[0.3, 0.12, 1.0],
rgba=[0, 0, 1, 1])
robot.arm.eetool.open()
obj_pos = robot.pb_client.get_body_state(box_id1)[0]
move_dir = obj_pos - robot.arm.get_ee_pose()[0]
move_dir[2] = 0
eef_step = 0.025
# an example of using IK with nullspace enabled
ik_kwargs = dict(ns=True)
    robot.arm.move_ee_xyz(move_dir, eef_step=eef_step, ik_kwargs=ik_kwargs)
move_dir = np.zeros(3)
move_dir[2] = obj_pos[2] - robot.arm.get_ee_pose()[0][2]
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
robot.arm.eetool.close(wait=False)
robot.arm.move_ee_xyz([0, 0, 0.3], eef_step=eef_step)
obj_pos = robot.pb_client.get_body_state(box_id2)[0]
move_dir = obj_pos - robot.arm.get_ee_pose()[0]
move_dir[2] = 0
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
move_dir = obj_pos - robot.arm.get_ee_pose()[0]
move_dir[2] += box_size * 2
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
robot.arm.eetool.open()
move_dir[2] = 0.2
robot.arm.move_ee_xyz(move_dir, eef_step=eef_step)
time.sleep(10)
if __name__ == '__main__':
main()
|
161458
|
import os
import re
xrdb_regex = re.compile("(.+)\.[xX][rR][dD][bB]$")
class Xrdb(object):
def __init__(self, filename):
"""Parse an xrdb file"""
self.name = xrdb_regex.match(os.path.basename(filename)).group(1)
self.colors = [None] * 16
        color_regex = re.compile(r"#define +Ansi_(\d+)_Color +(#[A-Fa-f0-9]{6})")
        named_color = re.compile(r"#define +(\S+) +(#[A-Fa-f0-9]{6})")
with open(filename) as f:
for line in f:
m = color_regex.match(line)
if m:
self.colors[int(m.group(1))] = m.group(2)
continue
m = named_color.match(line)
if m:
prop_name = m.group(1)
setattr(self, prop_name, m.group(2))
@classmethod
def parse_all(cls, xrdb_dir):
"""Parse all of the xrdb files in the provided dir"""
for name in filter(lambda x: xrdb_regex.match(x), os.listdir(xrdb_dir)):
filename = os.path.join(xrdb_dir, name)
yield cls(filename)
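# Example (hypothetical theme file "mytheme.xrdb"):
#   #define Ansi_1_Color #dc322f      -> parsed.colors[1] == "#dc322f"
#   #define Background_Color #002b36  -> parsed.Background_Color == "#002b36"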
|
161531
|
import tensorflow as tf
import numpy as np
from models.tf_model import TFModel
class RNN(TFModel):
def input_layer(self):
'''
Data and Hyperparameters
'''
with tf.variable_scope("input_layer"):
# Tensor containing word ids
# shape = (batch size, max length of sentence in batch)
self.word_ids = tf.placeholder(tf.int32, shape=[None, None],
name="word_ids")
# Tensor containing the real length of each sentence
# shape = (batch size)
self.sentence_lengths = tf.placeholder(tf.int32, shape=[None],
name="sentence_lengths")
# Tensor containing char ids
# shape = (batch size, max length of sentence, max length of word)
self.char_ids = tf.placeholder(tf.int32, shape=[None, None, None],
name="char_ids")
            # Tensor containing the real length of each word
            # shape = (batch size, max length of sentence in batch)
            self.word_lengths = tf.placeholder(tf.int32, shape=[None, None],
                                               name="word_lengths")
            # Tensor containing the labels
            # shape = (batch size, max length of sentence in batch)
            self.labels = tf.placeholder(tf.int32, shape=[None, None],
                                         name="labels")
# Dropout tensors
self.char_drop_input = tf.placeholder_with_default(
input=1.0, shape=(), name="char_drop_input")
self.char_drop_state = tf.placeholder_with_default(
input=1.0, shape=(), name="char_drop_state")
self.char_drop_output = tf.placeholder_with_default(
input=1.0, shape=(), name="char_drop_output")
self.word_drop_input = tf.placeholder_with_default(
input=1.0, shape=(), name="word_drop_input")
self.word_drop_state = tf.placeholder_with_default(
input=1.0, shape=(), name="word_drop_state")
self.word_drop_output = tf.placeholder_with_default(
input=1.0, shape=(), name="word_drop_output")
# Training variables
self.global_step = tf.Variable(0, name="global_step", trainable=False)
# Using a decaying learning rate
self.lr = tf.train.exponential_decay(
learning_rate=self.config.learning["rate"],
global_step=self.global_step,
decay_steps=self.config.learning["decay_steps"],
decay_rate=self.config.learning["decay"],
staircase=self.config.learning["staircase"])
# Create the optimizer/trainer
# I initialize it here for multi-gpu training
self.optimizer = tf.train.AdamOptimizer(self.lr)
def embedding_layer(self):
'''
Embedding matrices
'''
with tf.variable_scope("embedding_layer"):
if self.config.pretrained is None:
# Using randomly initialized vectors
# Word embedding matrix
word_embedding = tf.get_variable(
name="word_embedding",
dtype=tf.float32,
initializer=tf.random_uniform(
shape=[self.config.n_words, self.config.dim_word],
minval=-0.25, maxval=0.25))
else:
word_embedding = tf.get_variable(
name="word_embedding",
initializer=np.asarray(self.config.wordvec_matrix, dtype=np.float32),
dtype=tf.float32,
trainable=self.config.non_static)
if self.config.use_chars:
# Char embedding matrix
char_embedding = tf.get_variable(
name="char_embedding",
dtype=tf.float32,
initializer=tf.random_uniform(
shape=[self.config.n_chars, self.config.dim_char],
minval=-0.25, maxval=0.25))
self.word_vectors = tf.nn.embedding_lookup(
word_embedding, self.word_ids, name="word_matrix")
if self.config.use_chars:
self.char_vectors = tf.nn.embedding_lookup(
char_embedding, self.char_ids, name="char_matrix")
'''
word_embedding = (batch size, max length of sentence in batch, self.config.dim_word)
char_embedding = (batch size, max length of sentence in batch, max length of word, self.config.dim_char)
'''
def RNN_layer(self):
'''
Recurrent Layer
'''
def Cells(num_units, char_cell=False):
'''
Function to build cells
'''
# TODO: Wrappers
if self.config.cells == "rnn":
self.cell_fw = tf.contrib.rnn.BasicRNNCell(num_units=num_units)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.BasicRNNCell(num_units=num_units)
elif self.config.cells == "lstm":
self.cell_fw = tf.contrib.rnn.LSTMCell(num_units=num_units)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.LSTMCell(num_units=num_units)
else:
self.cell_fw = tf.contrib.rnn.GRUCell(num_units=num_units)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.GRUCell(num_units=num_units)
if char_cell:
self.cell_fw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_fw, input_keep_prob=self.char_drop_input, output_keep_prob=self.char_drop_output, state_keep_prob=self.char_drop_state)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_bw, input_keep_prob=self.char_drop_input, output_keep_prob=self.char_drop_output, state_keep_prob=self.char_drop_state)
else:
self.cell_fw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_fw, input_keep_prob=self.word_drop_input, output_keep_prob=self.word_drop_output, state_keep_prob=self.word_drop_state)
if self.config.bidirectional:
self.cell_bw = tf.contrib.rnn.DropoutWrapper(
cell=self.cell_bw, input_keep_prob=self.word_drop_input, output_keep_prob=self.word_drop_output, state_keep_prob=self.word_drop_state)
# Word Level Network
if self.config.use_chars:
with tf.variable_scope("word_layer"):
# Put the word length in the axis 1 (time dimension)
s = tf.shape(self.char_vectors)
# new shape = [batch*sentence_length,word_length,char_dim]
self.char_vectors = tf.reshape(self.char_vectors,
shape=[s[0] * s[1], s[-2], self.config.dim_char])
word_lengths = tf.reshape(self.word_lengths, shape=[s[0] * s[1]])
# CELLS
Cells(self.config.cell_char)
# Bidirectional
if self.config.bidirectional:
_, (output_state_fw, output_state_bw) = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self.cell_fw, cell_bw=self.cell_bw, inputs=self.char_vectors,
sequence_length=word_lengths, dtype=tf.float32)
if self.config.cells == "lstm":
output_state_fw, output_state_bw = output_state_fw[1], output_state_bw[1]
self.char_output = tf.concat([output_state_fw, output_state_bw], axis=-1)
# Unidirectional
else:
_, output_state_fw = tf.nn.dynamic_rnn(
cell=self.cell_fw, inputs=self.char_vectors,
sequence_length=word_lengths, dtype=tf.float32)
                    if self.config.cells == "lstm":
output_state_fw = output_state_fw[1]
self.char_output = output_state_fw
# shape = (batch size, max sentence length, char hidden size)
self.h = self.char_output.shape[1].value
self.char_output = tf.reshape(self.char_output, shape=[s[0], s[1], self.h])
self.word_vectors = tf.concat([self.word_vectors, self.char_output], axis=-1)
# Sentence Level Network
with tf.variable_scope("sentence_layer"):
# Create Cells
Cells(self.config.cell_word)
# Bidirectional
if self.config.bidirectional:
(output_fw, output_bw), _ = tf.nn.bidirectional_dynamic_rnn(
cell_fw=self.cell_fw, cell_bw=self.cell_bw, inputs=self.word_vectors,
sequence_length=self.sentence_lengths, dtype=tf.float32)
self.lstm_output = tf.concat([output_fw, output_bw], axis=-1)
# Unidirectional
else:
                output_fw, _ = tf.nn.dynamic_rnn(
                    cell=self.cell_fw, inputs=self.word_vectors,
                    sequence_length=self.sentence_lengths, dtype=tf.float32)
                self.lstm_output = output_fw
# tf.shape() gets us the dynamic shape of a tensor
# Save the max sentence length
self.nsteps = tf.shape(self.lstm_output)[1]
# .shape on the other hand provides the static shape of a tensor
# Save the hidden length
self.h = self.lstm_output.shape[2].value
            # current shape = [batch, max sentence length, hidden size]
            # after shape = [batch * max sentence length, hidden size]
self.layer_output = tf.reshape(self.lstm_output, [-1, self.h])
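            # flattening to 2-D lets a single dense layer score every timestep
            # of every sentence with one matmul in the output layer below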
def output_layer(self):
with tf.variable_scope("output_layer"):
layer = {
'weights': tf.get_variable(name="W", initializer=tf.truncated_normal([self.h, self.config.n_tags])),
'biases': tf.get_variable(name="b", initializer=tf.truncated_normal([self.config.n_tags]))
}
self.pred = tf.nn.xw_plus_b(
self.layer_output, layer["weights"], layer["biases"], name="preds")
self.logits = tf.reshape(
self.pred, [-1, self.nsteps, self.config.n_tags], name="logits")
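        # logits has shape [batch, max sentence length, n_tags]; these per-step
        # tag scores feed either the CRF or the masked softmax loss below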
def loss_function(self):
with tf.variable_scope("loss_layer"):
if self.config.use_crf:
log_likelihood, trans_params = tf.contrib.crf.crf_log_likelihood(
self.logits, self.labels, self.sentence_lengths)
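                # wrap trans_params in a named Variable so load_model can
                # retrieve it from the restored graph by name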
self.trans_params = tf.Variable(trans_params, name="trans_params")
self.loss = tf.reduce_mean(-log_likelihood)
else:
losses = tf.nn.sparse_softmax_cross_entropy_with_logits(
logits=self.logits, labels=self.labels)
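                # mask out the padded timesteps so they do not contribute to the loss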
mask = tf.sequence_mask(self.sentence_lengths)
losses = tf.boolean_mask(losses, mask)
self.loss = tf.reduce_mean(losses)
    def add_train_op(self):
        # named add_train_op so the method is not shadowed by the
        # self.train_op attribute it creates below
        with tf.variable_scope("train_step"):
self.gradient = self.optimizer.compute_gradients(loss=self.loss)
self.train_op = self.optimizer.apply_gradients(grads_and_vars=self.gradient,
global_step=self.global_step)
def build(self):
self.input_layer()
self.embedding_layer()
self.RNN_layer()
self.output_layer()
self.loss_function()
        # Generic functions that add the training op and initialize the session
        self.add_train_op()
self.initialize_session() # now self.sess is defined and vars are init
    def load_model(self, path):
        self.initialize_session()
        self.saver = tf.train.import_meta_graph("{}.meta".format(path))
        self.saver.restore(self.sess, path)
# Get the operations easily
graph = tf.get_default_graph()
# INPUT_LAYER
self.word_ids = graph.get_operation_by_name("input_layer/word_ids").outputs[0]
self.sentence_lengths = graph.get_operation_by_name(
"input_layer/sentence_lengths").outputs[0]
self.char_ids = graph.get_operation_by_name("input_layer/char_ids").outputs[0]
self.word_lengths = graph.get_operation_by_name("input_layer/word_lengths").outputs[0]
self.labels = graph.get_operation_by_name("input_layer/labels").outputs[0]
# OUTPUT_LAYER
self.logits = graph.get_operation_by_name("output_layer/logits").outputs[0]
# CRF
if self.config.use_crf:
self.trans_params = graph.get_operation_by_name("loss_layer/trans_params").outputs[0]
def __init__(self, config):
super(RNN, self).__init__(config)
def predict_batch(self, feed):
# Batch Prediction
# CRF Prediction
if self.config.use_crf:
# get tag scores and transition params of CRF
viterbi_sequences = []
logits, trans_params = self.sess.run(
[self.logits, self.trans_params], feed_dict=feed)
            # iterate over the sentences: viterbi_decode has no batch support
            # (it runs in plain numpy, outside the TF graph)
for logit, sentence_length in zip(logits, feed[self.sentence_lengths]):
logit = logit[:sentence_length] # keep only the valid steps
viterbi_seq, _ = tf.contrib.crf.viterbi_decode(
logit, trans_params)
viterbi_sequences.append(viterbi_seq)
return viterbi_sequences
# Softmax Prediction
else:
labels_pred = self.sess.run(self.logits, feed_dict=feed)
# labels_pred = tf.cast(tf.argmax(self.logits, axis=-1), tf.int32)
labels_pred = np.argmax(labels_pred, axis=-1)
return labels_pred
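    # A minimal usage sketch (hypothetical: assumes a Config object providing
    # the attributes referenced above, e.g. cells, bidirectional, use_chars,
    # use_crf, n_tags, dim_word, dim_char, cell_word, cell_char):
    #
    #   config = Config()
    #   model = RNN(config)
    #   model.build()                      # graph, train op, session
    #   feed = {model.word_ids: word_ids_batch,
    #           model.sentence_lengths: lengths_batch,
    #           model.labels: labels_batch}
    #   # (with config.use_chars, also feed model.char_ids and model.word_lengths)
    #   _, loss = model.sess.run([model.train_op, model.loss], feed_dict=feed)
    #   preds = model.predict_batch(feed)  # predicted tag ids per sentence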
|
161532
|
import vcr
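# vcr records the first live HTTP exchange to a cassette file and replays it
# on subsequent runs, keeping this test deterministic and network-free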
@vcr.use_cassette
def test_esearchresult(client):
r = client.esearch(db="pubmed", term="hart rk[author]")
assert 5 < r.count
assert 0 == r.retstart
assert isinstance(r.ids, list)
assert 27814769 in r.ids
|