428045
|
import numpy as np
import torch
from torch import nn
from torch.nn import functional as F
from .resnet import resnet18
from copy import deepcopy
class UniPlanner(nn.Module):
def __init__(self, bev_planner,
pixels_per_meter=2, crop_size=64, x_offset=0, y_offset=0.75,
feature_x_jitter=1, feature_angle_jitter=10,
num_plan=10, k=16, num_input_feature=96, num_out_feature=64, num_cmds=6, max_num_cars=4,
num_plan_iter=1,
):
super().__init__()
self.num_cmds = num_cmds
self.num_plan = num_plan
self.num_plan_iter = num_plan_iter
self.max_num_cars = max_num_cars
self.bev_planner = bev_planner
self.num_out_feature = num_out_feature
self.pixels_per_meter = pixels_per_meter
self.crop_size = crop_size
self.feature_x_jitter = feature_x_jitter
self.feature_angle_jitter = np.deg2rad(feature_angle_jitter)
self.offset_x = nn.Parameter(torch.tensor(x_offset).float(), requires_grad=False)
self.offset_y = nn.Parameter(torch.tensor(y_offset).float(), requires_grad=False)
self.lidar_conv_emb = nn.Sequential(
resnet18(num_channels=num_input_feature),
nn.AdaptiveAvgPool2d((1,1)),
nn.Flatten(),
)
self.plan_gru = nn.GRU(4,512,batch_first=True)
self.plan_mlp = nn.Linear(512,2)
self.cast_grus_ego = nn.ModuleList([nn.GRU(512, 64, batch_first=True) for _ in range(self.num_cmds)])
self.cast_mlps_ego = nn.ModuleList([nn.Linear(64, 2) for _ in range(self.num_cmds)])
self.cast_grus_other = nn.ModuleList([nn.GRU(512, 64, batch_first=True) for _ in range(self.num_cmds)])
self.cast_mlps_other = nn.ModuleList([nn.Linear(64, 2) for _ in range(self.num_cmds)])
self.cast_cmd_pred = nn.Sequential(
nn.Linear(512,self.num_cmds),
nn.Sigmoid(),
)
def forward(self, features, bev, ego_locs, locs, oris, nxps, typs):
self.bev_planner.eval()
ego_oris = oris[:,:1]
locs = locs[:,1:]
oris = oris[:,1:]
typs = (typs[:,1:]==1) # 1 is for vehicles
N = locs.size(1)
typs = filter_cars(ego_locs, locs, typs)
# Other vehicles
if int(typs.float().sum()) > 0:
# Guard against OOM: randomly sample cars to train on
typs = random_sample(typs, size=self.max_num_cars)
# Flatten the locs
flat_features = features.expand(N,*features.size()).permute(1,0,2,3,4).contiguous()[typs]
flat_bev = bev.expand(N,*bev.size()).permute(1,0,2,3,4).contiguous()[typs]
flat_locs = (locs[:,:,1:]-locs[:,:,:1])[typs]
flat_rel_loc0 = (locs[:,:,0]-ego_locs[:,None,0])[typs]
flat_rel_ori0 = (oris-ego_oris)[typs]
K = flat_locs.size(0)
locs_jitter = (torch.rand((K,2))*2-1).float().to(locs.device) * self.feature_x_jitter
locs_jitter[:,1] = 0
oris_jitter = (torch.rand((K,))*2-1).float().to(oris.device) * self.feature_angle_jitter
cropped_other_features = self.crop_feature(flat_features, flat_rel_loc0+locs_jitter, flat_rel_ori0+oris_jitter, pixels_per_meter=self.pixels_per_meter/2, crop_size=self.crop_size)
cropped_other_bev = self.bev_planner.crop_feature(flat_bev, flat_rel_loc0+locs_jitter, flat_rel_ori0+oris_jitter, pixels_per_meter=self.pixels_per_meter, crop_size=self.crop_size*2)
other_locs = transform_points(flat_locs-locs_jitter[:,None], -flat_rel_ori0-oris_jitter)
other_embd = self.lidar_conv_emb(cropped_other_features)
other_cast_locs = self.cast(other_embd, mode='other')
other_cast_cmds = self.cast_cmd_pred(other_embd)
with torch.no_grad():
other_bev_embd = self.bev_planner.bev_conv_emb(cropped_other_bev)
other_cast_locs_expert = self.bev_planner.cast(other_bev_embd)
other_cast_cmds_expert = self.bev_planner.cast_cmd_pred(other_bev_embd)
else:
dtype = features.dtype
device = features.device
other_locs = torch.zeros((N,self.num_plan,2), dtype=dtype, device=device)
other_cast_locs = torch.zeros((N,self.num_cmds,self.num_plan,2), dtype=dtype, device=device)
other_cast_cmds = torch.zeros((N,self.num_cmds), dtype=dtype, device=device)
other_cast_locs_expert = torch.zeros((N,self.num_cmds,self.num_plan,2), dtype=dtype, device=device)
other_cast_cmds_expert = torch.zeros((N,self.num_cmds), dtype=dtype, device=device)
B = features.size(0)
locs_jitter = (torch.rand((B,2))*2-1).float().to(locs.device) * self.feature_x_jitter
locs_jitter[:,1] = 0
oris_jitter = (torch.rand((B,))*2-1).float().to(oris.device) * self.feature_angle_jitter
ego_locs = transform_points(ego_locs[:,1:]-locs_jitter[:,None], -oris_jitter)
nxps = transform_points(nxps[:,None]-locs_jitter[:,None], -oris_jitter)[:,0]
cropped_ego_features = self.crop_feature(features, locs_jitter, oris_jitter, pixels_per_meter=self.pixels_per_meter/2, crop_size=self.crop_size)
cropped_ego_bev = self.bev_planner.crop_feature(bev, locs_jitter, oris_jitter, pixels_per_meter=self.pixels_per_meter, crop_size=self.crop_size*2)
ego_embd = self.lidar_conv_emb(cropped_ego_features)
with torch.no_grad():
ego_bev_embd = self.bev_planner.bev_conv_emb(cropped_ego_bev)
ego_cast_locs_expert = self.bev_planner.cast(ego_bev_embd)
ego_plan_locs_expert = self.bev_planner.plan(
ego_bev_embd, nxps,
cast_locs=ego_cast_locs_expert,
pixels_per_meter=self.pixels_per_meter,
crop_size=self.crop_size*2
)
ego_cast_locs = self.cast(ego_embd, mode='ego')
ego_plan_locs = self.plan(
ego_embd, nxps,
cast_locs=ego_cast_locs,
pixels_per_meter=self.pixels_per_meter,
crop_size=self.crop_size*2
)
ego_cast_cmds = self.cast_cmd_pred(ego_embd)
return (
other_locs, other_cast_locs, other_cast_cmds, other_cast_locs_expert, other_cast_cmds_expert,
ego_locs, ego_plan_locs, ego_cast_locs, ego_cast_cmds, ego_cast_locs_expert, ego_plan_locs_expert
)
@torch.no_grad()
def ego_infer(self, features, pixels_per_meter=4, num_sample=50):
cropped_ego_features = self.crop_feature(features[None], torch.zeros((1,2),dtype=features.dtype,device=features.device), torch.zeros((1,),dtype=features.dtype,device=features.device))
pred_ego_locs = self.predict(cropped_ego_features, num_sample=num_sample)
return pred_ego_locs[:,0]
@torch.no_grad()
def infer(self, features, det, cmd, nxp):
"""
B (batch size) is 1.
Note: the pixels_per_meter here is on the original input scale;
self.pixels_per_meter is on the feature map's scale.
"""
H = features.size(1)*2
W = features.size(2)*2
center_x = float(W/2 + self.offset_x*W/2)
center_y = float(H/2 + self.offset_y*H/2)
# Construct locs and oris
locs, oris = [], []
for X, Y, h, w, cos, sin in det:
if np.linalg.norm([X-center_x,Y-center_y]) <= 4:
continue
x = (X - center_x) / self.pixels_per_meter
y = (Y - center_y) / self.pixels_per_meter
o = float(np.arctan2(sin, cos))
locs.append([x,y])
oris.append(o)
# relative locations and orientations
locs = torch.tensor(locs, dtype=torch.float32).to(features.device)
oris = torch.tensor(oris, dtype=torch.float32).to(features.device)
N = len(locs)
N_features = features.expand(N, *features.size())
if N > 0:
cropped_other_features = self.crop_feature(
N_features, locs, oris,
pixels_per_meter=self.pixels_per_meter/2,
crop_size=self.crop_size
)
other_embd = self.lidar_conv_emb(cropped_other_features)
other_cast_locs = self.cast(other_embd, mode='other')
other_cast_cmds = self.cast_cmd_pred(other_embd)
other_cast_locs = transform_points(other_cast_locs, oris[:,None].repeat(1,self.num_cmds))
other_cast_locs += locs.view(N,1,1,2)
else:
other_cast_locs = torch.zeros((N,self.num_cmds,self.num_plan,2))
other_cast_cmds = torch.zeros((N,self.num_cmds))
cropped_ego_features = self.crop_feature(
features[None],
torch.zeros((1,2),dtype=features.dtype,device=features.device),
torch.zeros((1,),dtype=features.dtype,device=features.device),
pixels_per_meter=self.pixels_per_meter/2, crop_size=self.crop_size
)
ego_embd = self.lidar_conv_emb(cropped_ego_features)
ego_cast_locs = self.cast(ego_embd, mode='ego')
ego_plan_locs = self.plan(
ego_embd, nxp[None],
cast_locs=ego_cast_locs,
pixels_per_meter=self.pixels_per_meter,
crop_size=self.crop_size*2
)[0,-1,cmd]
return ego_plan_locs, ego_cast_locs[0,cmd], other_cast_locs, other_cast_cmds
def _plan(self, embd, nxp, cast_locs, pixels_per_meter=4, crop_size=96):
B = embd.size(0)
h0, u0 = embd, nxp*pixels_per_meter/crop_size*2-1
self.plan_gru.flatten_parameters()
locs = []
for i in range(self.num_cmds):
u = torch.cat([
u0.expand(self.num_plan, B, -1).permute(1,0,2),
cast_locs[:,i]
], dim=2)
out, _ = self.plan_gru(u, h0[None])
locs.append(torch.cumsum(self.plan_mlp(out), dim=1))
return torch.stack(locs, dim=1) + cast_locs
def plan(self, embd, nxp, cast_locs=None, pixels_per_meter=4, crop_size=96):
if cast_locs is None:
plan_loc = self.cast(embd).detach()
else:
plan_loc = cast_locs.detach()
plan_locs = []
for i in range(self.num_plan_iter):
plan_loc = self._plan(embd, nxp, plan_loc, pixels_per_meter=pixels_per_meter, crop_size=crop_size)
plan_locs.append(plan_loc)
return torch.stack(plan_locs, dim=1)
def cast(self, embd, mode='ego'):
B = embd.size(0)
u = embd.expand(self.num_plan, B, -1).permute(1,0,2)
if mode == 'ego':
cast_grus = self.cast_grus_ego
cast_mlps = self.cast_mlps_ego
elif mode == 'other':
cast_grus = self.cast_grus_other
cast_mlps = self.cast_mlps_other
locs = []
for gru, mlp in zip(cast_grus, cast_mlps):
gru.flatten_parameters()
out, _ = gru(u)
locs.append(torch.cumsum(mlp(out), dim=1))
return torch.stack(locs, dim=1)
def crop_feature(self, features, rel_locs, rel_oris, pixels_per_meter=4, crop_size=96):
B, C, H, W = features.size()
# Defensive reshape: flatten any leading batch dims so rel_locs is (N, 2)
rel_locs = rel_locs.view(-1,2)
rel_locs = rel_locs * pixels_per_meter/torch.tensor([H/2,W/2]).type_as(rel_locs).to(rel_locs.device)
cos = torch.cos(rel_oris)
sin = torch.sin(rel_oris)
rel_x = rel_locs[...,0]
rel_y = rel_locs[...,1]
k = crop_size / H
rot_x_offset = -k*self.offset_x*cos+k*self.offset_y*sin+self.offset_x
rot_y_offset = -k*self.offset_x*sin-k*self.offset_y*cos+self.offset_y
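# theta packs a per-sample 2x3 affine matrix for F.affine_grid: scale by
# k (the crop-to-feature-map ratio), rotate by rel_ori, and translate by
# the configured offset plus rel_loc, in normalized [-1, 1] coordinates.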
theta = torch.stack([
torch.stack([k*cos, k*-sin, rot_x_offset+rel_x], dim=-1),
torch.stack([k*sin, k*cos, rot_y_offset+rel_y], dim=-1)
], dim=-2)
grids = F.affine_grid(theta, torch.Size((B,C,crop_size,crop_size)), align_corners=True)
cropped_features = F.grid_sample(features, grids, align_corners=True)
return cropped_features
def _make_downsample(self, num_in, num_out, stride=2):
return nn.Sequential(
nn.Conv2d(num_in,num_out,1,stride=stride),
nn.BatchNorm2d(num_out),
)
def transform_points(locs, oris):
cos, sin = torch.cos(oris), torch.sin(oris)
R = torch.stack([
torch.stack([ cos, sin], dim=-1),
torch.stack([-sin, cos], dim=-1),
], dim=-2)
return locs @ R
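# Quick sanity check for transform_points (hypothetical values, not part of
# the training flow): rotating the x-axis unit vector by +90 degrees maps it
# onto the y-axis.
#   locs = torch.tensor([[[1.0, 0.0]]])   # (B=1, T=1, 2)
#   oris = torch.tensor([np.pi / 2])      # (B=1,)
#   transform_points(locs, oris)          # -> approx [[[0.0, 1.0]]]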
def filter_cars(ego_locs, locs, typs):
rel_locs = locs[:,:,0] - ego_locs[:,0:1]
return typs & (rel_locs[...,1] < 0)
def random_sample(binaries, size):
cut_binaries = torch.zeros_like(binaries)
for i in range(binaries.size(0)):
if binaries[i].sum() <= size:
cut_binaries[i] = binaries[i]
else:
nonzero = torch.nonzero(binaries[i]).squeeze(1)
nonzero_idx = torch.multinomial(torch.ones_like(nonzero).float(), size)
nonzero = nonzero[nonzero_idx]
cut_binaries[i,nonzero] = binaries[i,nonzero]
return cut_binaries
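# Sketch of random_sample on a hypothetical mask with size=2: a row such as
# [1, 1, 1, 0] keeps a random two of its three set entries, while a row with
# two or fewer set entries (e.g. [1, 0, 0, 0]) is returned unchanged.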
|
428059
|
import copy, re
# NOTE THAT ALL FUNCTIONS HERE WORK BY RETURNING A NEW ARRAY, THERE ARE NO IN-PLACE MODIFICATION METHODS
# ALSO KEEP IN MIND THAT COLUMNS AND ROW INDICES START FROM ZERO (ie. column A -> 0, E -> 4, Z -> 25, etc)
# Scans a CSV line, keeping track of separators and quotes
def scanline(ln,sp=','):
arr = []
inq = False
ind = 0
buff = ""
while ind < len(ln):
if ln[ind] == '"':
inq = not inq
ind += 1
elif ln[ind] == '\\':
buff += ln[ind+1]
ind += 2
elif ln[ind] == sp and not inq:
arr.append(buff)
buff = ""
ind += 1
else:
buff += ln[ind]
ind += 1
arr.append(buff)
return arr
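# e.g. scanline('a,"b,c",d') -> ['a', 'b,c', 'd']: quotes toggle whether the
# separator is treated literally, and a backslash escapes the next character.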
# Load a CSV into a 2D array, automatically filling in unevenly wide rows to make the array square
def load(f,sp=','):
array = [scanline(x,sp) for x in open(f,'r').readlines()]
maxlen = 0
for i in range(len(array)):
if len(array[i]) > maxlen: maxlen = len(array[i])
for i in range(len(array)):
if len(array[i]) < maxlen: array[i] += [''] * (maxlen - len(array[i]))
return array
# Apply a Porter stemmer to every cell in a given range of cols in an array (calling stem with just a list and no cols argument stems _every_ cell)
# Example outputs: manage, management, manager, managing -> manag; pony, ponies -> poni; reincarnate, reincarnated, reincarnation -> reincarn
def stem(li,cols=0):
if cols == 0: cols = range(len(li[0]))
import porter
pstemmer = porter.PorterStemmer()
newlist = copy.deepcopy(li)
for i in range(len(li)):
for j in cols:
string = str(li[i][j])
for ch in "'"+'"+[]?!\n': string = string.replace(ch,'')
words = string.split(' ')
newlist[i][j] = ' '.join([pstemmer.stem(x.strip().lower(),0,len(x.strip())-1) for x in words])
return newlist
# Is a string a number?
def isnumber(s):
t = re.findall(r'^-?[0-9,]*\.[0-9,]*$',s)
return len(t) > 0
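# e.g. isnumber('1,234.56') -> True, isnumber('-0.5') -> True,
# isnumber('42') -> False (the pattern requires a decimal point).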
# Declutters (removes special characters, numerifies numbers) every cell, rules same as those for stem(li,cols=0)
def declutter(li,cols=0):
if cols == 0: cols = range(len(li[0]))
newlist = copy.deepcopy(li)
for i in range(len(li)):
for j in cols:
string = str(li[i][j])
for ch in "'"+'"+[]?!\n': string = string.replace(ch,'')
words = string.split(' ')
newlist[i][j] = ' '.join([x.strip().lower() for x in words])
if isnumber(newlist[i][j]):
newlist[i][j] = float(newlist[i][j])
return newlist
# Generate a list of individual words occurring in a given column in a given array; useful for generating source lists to do n-grams from
def wordlist(li,col):
wlist = []
for i in range(len(li)):
words = li[i][col].split(' ')
for w in words:
if w not in wlist: wlist.append(w)
return wlist
# Generates a list of phrases (complete cell entries)
def phraselist(li,col):
wlist = []
for i in range(len(li)):
phrase = li[i][col]
if phrase not in wlist: wlist.append(phrase)
return wlist
# Retrieve just a few columns from a given array to make a smaller (narrower) array
def cols(li,cols):
result = []
for i in range(len(li)):
newline = []
for c in cols:
if c >= 0: newline.append(li[i][c])
else: newline.append(1)
result.append(newline)
return result
# Combine two possibly unsorted arrays matching rows by heading in headingcol1 in li1 and headingcol2 in li2
# setting linclusive = True makes sure every row in li1 makes it into the output, same with rinclusive and li2
# Recommended to do some kind of sort after splice is done
def splice(li1,li2,headingcol1,headingcol2,linclusive=False,rinclusive=False):
s1 = sorted(li1,key=lambda x:x[headingcol1],reverse=True)
s2 = sorted(li2,key=lambda x:x[headingcol2],reverse=True)
l1 = len(s1[0])
l2 = len(s2[0])
ind1 = 0
ind2 = 0
output = []
while ind1 < len(s1) and ind2 < len(s2):
if cmp(s2[ind2][headingcol2],s1[ind1][headingcol1]) == 1:
if rinclusive: output.append([s2[ind2][headingcol2]] + [''] * (l1-1) + s2[ind2][:headingcol2] + s2[ind2][headingcol2 + 1:])
ind2 += 1
elif cmp(s2[ind2][headingcol2],s1[ind1][headingcol1]) == -1:
if linclusive: output.append([s1[ind1][headingcol1]] + s1[ind1][:headingcol1] + s1[ind1][headingcol1 + 1:] + [''] * (l2-1))
ind1 += 1
else:
output.append([s1[ind1][headingcol1]] + s1[ind1][:headingcol1] + s1[ind1][headingcol1 + 1:] + s2[ind2][:headingcol2] + s2[ind2][headingcol2 + 1:])
ind1, ind2 = ind1 + 1, ind2 + 1
while ind1 < len(s1) and linclusive:
output.append([s1[ind1][headingcol1]] + s1[ind1][:headingcol1] + s1[ind1][headingcol1 + 1:] + [''] * l2)
ind1 += 1
while ind2 < len(s2) and rinclusive:
output.append([s2[ind2][headingcol2]] + [''] * l1 + s2[ind2][:headingcol2] + s2[ind2][headingcol2 + 1:])
ind2 += 1
return output
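# e.g. splice([['a', 1]], [['a', 2]], 0, 0) -> [['a', 1, 2]]: the shared heading
# comes first, followed by the remaining columns of the left row, then the right.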
# Creates a wordlist sorted according to function f taken of an array with the results in the addcols in order
# eg. sorted_wordlist with addcols = [2,4,6] on the row 1 2 4 8 16 32 64 builds entries [word, 4, 16, 64], so f=lambda x:x[3]+x[2]+1.01*x[1] yields sorting key 84.04
def sorted_wordlist(li,wcol,addcols,f=lambda x:x[1],rev=True):
return [x[0] for x in sorted(onegrams(li,wcol,addcols),key=f,reverse=rev)]
# Utility function, used by twograms, threegrams and fourgrams
def compose(arg):
return ' '.join(sorted(set(arg)))
# Calculate a total sum for every desired column for different exact matches in wcol, column -1 is implied to be 1 for every row
# for example, consider the array
# dog 20 3
# dog house 15 28
# cat 25 31
# cat 10 7
# dog 40 0
# house 10 14
# Doing pivot(li,0,[1,-1]) gives you the list:
# dog 60 2
# dog house 15 1
# cat 35 2
# house 10 1
# wlist allows you to restrict the table to a given wordlist
def pivot(li, wcol, addcols,wlist=0,sortkey=lambda x:1):
if wlist == 0: wlist = phraselist(li,wcol)
result = {}
for i in range(len(wlist)):
result[wlist[i]] = [0] * len(addcols)
for i in range(len(li)):
nums = []
for ac in addcols:
if ac >= 0:
num = str(li[i][ac]).replace(',','').replace(' ','')
if num == '': num = 0
elif num[-1] == '%': num = float(num[:-1]) * 0.01
else: num = float(num)
else: num = 1
nums.append(num)
if li[i][wcol] in result: result[li[i][wcol]] = [pair[0] + pair[1] for pair in zip(result[li[i][wcol]],nums)]
array = []
for word in result.keys():
array.append([word] + result[word])
return sorted(array,key=sortkey,reverse=True)
# Similar to a pivot table but looks at individual keywords. The example list above will return with onegrams(li,0,[1,-1]):
# dog 75 3
# cat 35 2
# house 25 2
def onegrams(li, wcol, addcols,wlist=0,sortkey=lambda x: 1):
if wlist == 0: wlist = wordlist(li,wcol)
result = {}
for i in range(len(wlist)):
result[wlist[i]] = [0] * len(addcols)
for i in range(len(li)):
words = [x.strip() for x in li[i][wcol].split(' ')]
nums = []
for ac in addcols:
if ac >= 0:
num = str(li[i][ac]).replace(',','').replace(' ','')
if num == '': num = 0
elif num[-1] == '%': num = float(num[:-1] * 0.01)
else: num = float(num)
else: num = 1
nums.append(num)
for i in range(len(words)):
if words[i] in result: result[words[i]] = [pair[0] + pair[1] for pair in zip(result[words[i]],nums)]
array = []
for word in result.keys():
array.append([word] + result[word])
return sorted(array,key=sortkey,reverse=True)
# Calculate a total sum for every column in addcols and for every word pair in wcol
# words do not need to be beside each other or in any particular order, so "buy a dog house", "good house for dog owners", "dog in my house" all go under "dog house"
def twograms(li,wcol,addcols,wlist=0,sortkey=lambda x:1,allindices=False):
if wlist == 0: wlist = wordlist(li,wcol)
result = {}
if allindices:
for i in range(len(wlist)):
for j in range(len(wlist)):
if i != j: result[compose([wlist[i],wlist[j]])] = [0] * len(addcols)
for i in range(len(li)):
if len(li) >= 10 and i % int(len(li)/10) == (int(len(li)/10) - 1): print "Two grams: " + str(i) + " / " + str(len(li))
words = [x.strip() for x in li[i][wcol].split(' ')]
nums = []
for ac in addcols:
if ac >= 0:
num = str(li[i][ac]).replace(',','').replace(' ','')
if num == '': num = 0
elif num[-1] == '%': num = float(num[:-1]) * 0.01
else: num = float(num)
else: num = 1
nums.append(num)
for i in range(len(words)):
if words[i] in wlist:
for j in range(i+1,len(words)):
if words[j] in wlist:
comb = compose([words[i],words[j]])
if comb in result: result[comb] = [pair[0] + pair[1] for pair in zip(result[comb],nums)]
elif allindices == False: result[comb] = nums
array = []
for words in result.keys():
array.append([words] + result[words])
return sorted(array,key=sortkey,reverse=True)
# Calculate a total sum for every column in addcols and for every word triplet in wcol (do not need to be beside each other or in any particular order)
# setting allindices to True slows down the calculation a lot but gives you a CSV with all possible combinations of words, making it convenient for
# working with the same word list on different data
def threegrams(li,wcol,addcols,wlist=0,sortkey=lambda x:1,allindices=False):
if wlist == 0: wlist = wordlist(li,wcol)
result = {}
if allindices:
for i in range(len(wlist)):
for j in range(len(wlist)):
for k in range(len(wlist)):
if i != j and i != k and j != k: result[compose([wlist[i],wlist[j],wlist[k]])] = [0] * len(addcols)
for i in range(len(li)):
if len(li) >= 10 and i % int(len(li)/10) == (int(len(li)/10) - 1): print "Three grams: " + str(i) + " / " + str(len(li))
words = [x.strip() for x in li[i][wcol].split(' ')]
nums = []
for ac in addcols:
if ac >= 0:
num = str(li[i][ac]).replace(',','').replace(' ','')
if num == '': num = 0
elif num[-1] == '%': num = float(num[:-1]) * 0.01
else: num = float(num)
else: num = 1
nums.append(num)
for i in range(len(words)):
if words[i] in wlist:
for j in range(i+1,len(words)):
if words[j] in wlist:
for k in range(j+1,len(words)):
if words[k] in wlist:
comb = compose([words[i],words[j],words[k]])
if comb in result:
result[comb] = [pair[0] + pair[1] for pair in zip(result[comb],nums)]
elif allindices == False: result[comb] = nums
array = []
for words in result.keys():
array.append([words] + result[words])
return sorted(array,key=sortkey,reverse=True)
# Calculate a total sum for every column in addcols and for every word quadruplet in wcol
def fourgrams(li,wcol,addcols,wlist=0,sortkey=lambda x:1):
if wlist == 0: wlist = wordlist(li,wcol)
result = {}
for i in range(len(li)):
if len(li) >= 10 and i % int(len(li)/10) == (int(len(li)/10) - 1): print "Four grams: " + str(i) + " / " + str(len(li))
words = [x.strip() for x in li[i][wcol].split(' ')]
nums = []
for ac in addcols:
if ac >= 0:
num = str(li[i][ac]).replace(',','').replace(' ','')
if num == '': num = 0
elif num[-1] == '%': num = float(num[:-1]) * 0.01
else: num = float(num)
else: num = 1
nums.append(num)
for i in range(len(words)):
if words[i] in wlist:
for j in range(i+1,len(words)):
if words[j] in wlist:
for k in range(j+1,len(words)):
if words[k] in wlist:
for l in range(k+1,len(words)):
if words[l] in wlist:
comb = compose([words[i],words[j],words[k],words[l]])
if comb in result:
result[comb] = [pair[0] + pair[1] for pair in zip(result[comb],nums)]
else: result[comb] = nums
array = []
for words in result.keys():
array.append([words] + result[words])
return sorted(array,key=sortkey,reverse=True)
# Filters array, returning only the rows where column wcol of that row contains the query keywords (keywords can appear in any order)
# This and the other filters are useful for taking a list of entries and creating a list of only valid entries according to some validity characteristic
# eg:
# dog house, 15
# cat, 18
# dog, 33
# filter(li,0,'dog'):
# dog house, 15
# dog, 33
def filter(li,wcol,query):
result = []
for i in range(len(li)):
words = [x.strip() for x in li[i][wcol].split(' ')]
inlist = True
queryarray = query.split(' ')
if queryarray == ['']: queryarray = []
for w in queryarray:
if w not in words: inlist = False
if queryarray == ['*']: inlist = len(li[i][wcol]) > 0
if inlist: result.append(li[i])
return result
# Filters array, requiring column wcol to exactly match query
def phrasefilter(li,wcol,query):
result = []
for i in range(len(li)):
if li[i][wcol] == query: result.append(li[i])
return result
# Filters array, requiring function func taken of the row to return True (or 1)
def funcfilter(li,func):
result = []
for i in range(len(li)):
if func(li[i]): result.append(li[i])
return result
# Adds up columns in addcols for a query matching keyfilter(li,wcol,query); can also be thought of as doing a single n-keyword match
# eg:
# dog, 25
# cat, 15
# dog, 75
# dog, 10
# horse, 55
# cat, 7
# search(li,0,[1],'dog') gives ['dog',110]
def search(li,wcol,addcols,query):
result = [0] * len(addcols)
for i in range(len(li)):
words = [x.strip() for x in li[i][wcol].split(' ')]
nums = []
for ac in addcols:
if ac >= 0:
num = str(li[i][ac]).replace(',','').replace(' ','')
if num == '': num = 0
elif num[-1] == '%': num = float(num[:-1]) * 0.01
else: num = float(num)
else: num = 1
nums.append(num)
inlist = True
queryarray = query.split(' ')
if queryarray == ['']: queryarray = []
for w in queryarray:
if w not in words: inlist = False
if queryarray == ['*']: inlist = len(li[i][wcol]) > 0
if inlist:
result = [pair[0] + pair[1] for pair in zip(result,nums)]
return [query] + result
# Print a CSV from an array to stdout
def tochars(array,sp=','):
string = ""
for line in array: string += sp.join([str(x) for x in line]) + '\n'
return string[:-1]
# Save an array to CSV
def save(f,array,sp=','):
writeto = open(f,'w')
writeto.write(tochars(array,sp))
writeto.close()
# Compares keywords by two different parameters from two different lists. For example, li1 can be a list of how much money is spent (on addcol1) on a particular combination of keywords (on keycol1) and li2 can be a list of upgraded accounts with the search query they came from on keycol2, and addcol 2 can be left blank to default to -1 (each row is worth one point). Fourth column is statistical significance.
# Remember that you may have to filter the list yourself first
# Arguments:
# grams = 1 for single keywords, 2 for pairs, 3 for triplets and 4 for quadruplets
# li1, li2 = your two lists
# keycol1, keycol2 = where the keywords are located in those two lists
# addcol1, addcol2 = the columns of what you want to add up, eg. cost (set to -1 or leave blank to make it add 1 for each row)
# sortkey = function to sort results by (highest first)
# usestem = stem keywords
# sigtable = add ratio and significance to table
# invertratio = set ratio column to col1/col2 instead of col2/col1
# preformatted = li1 and li2 are already properly formatted
# justpreformat = convert li1 and li2 into twocolumns for comparison but don't go all the way
# wordlimit = limit search to some more common keywords for speedup purposes
# Example: list of customers, some upgraded, with originating keywords, and a list of how much you're paying for each search phrase
#
# customers.csv:
# Name, Keyword, Status
# <NAME>, spreadsheet csv software, upgraded
# <NAME>, csv python utils, free
# <NAME>, free spreadsheet, free
# <NAME>, csv software, upgraded
# <NAME>, python utils software, upgraded
# <NAME>, python spreadsheet program, upgraded
#
# costs.csv:
# csv software, useless, and, irrelevant, data, 5.00, blah, blah
# python spreadsheet, useless, and, irrelevant, data, 2.50, blah, blah
# spreadsheet utils, useless, and, irrelevant, data, 10.00, blah, blah
# csv utils, useless, and, irrelevant, data, 1.50, blah, blah
#
# Steps:
# 1. import spread (if not imported already)
# 2. upgrades = spread.filter(spread.load('customers.csv'),2,'upgraded')
# 3. costs = spread.load('costs.csv')
# 4. res = compare(1,costs,upgrades,0,1,5,invertratio=True)
# 5. spread.save('saved.csv',res)
#
# Res should look like:
#
# Keyword, Column 1, Column 2, Ratio, Significance
# spreadsheet, 12.50, 2, 6.25, -0.389
# utils, 11.50, 1, 11.50, -0.913
# csv, 7.50, 2, 3.75, 0.335
# python, 2.50, 2, 1.25, 2.031
#
# Or, if desired, you can:
# i1,i2 = compare(1,costs,upgrades,0,1,5,justpreformat=True)
# res1 = compare(1,i1,i2,0,1,5,invertratio=True,preformatted=True)
# res2 = compare(2,i1,i2,0,1,5,invertratio=True,preformatted=True)
# res3 = compare(3,i1,i2,0,1,5,invertratio=True,preformatted=True)
# res4 = compare(4,i1,i2,0,1,5,invertratio=True,preformatted=True)
#
# Note that significance is calculated based on col2/col1 regardless of invertratio, since getting 0 upgrades when you should have gotten 2 is not that unlikely, but calculating significance based on col1/col2 would give you infinity as infinity is infinitely far away from 0.5.
def compare(grams,li1,li2,keycol1,keycol2,addcol1=-1,addcol2=-1,sortkey=lambda x:x[1],usestem=True,sigtable=True,invertratio=False,preformatted=False,justpreformat=False,wordlimit=0):
gramfuncs = [0,onegrams,twograms,threegrams,fourgrams]
if preformatted == False:
s1 = declutter(cols(li1,[keycol1,addcol1]),[1])
print "Done decluttering/stemming: 1/4"
s2 = declutter(cols(li2,[keycol2,addcol2]),[1])
print "Done decluttering/stemming: 2/4"
s1 = stem(s1,[0]) if usestem else declutter(s1,[0])
print "Done decluttering/stemming: 3/4"
s2 = stem(s2,[0]) if usestem else declutter(s2,[0])
print "Done decluttering/stemming: 4/4"
else: s1,s2 = li1,li2
print "Printing sample of list 1"
print s1[:10]
print "Printing sample of list 2"
print s2[:10]
if justpreformat: return s1,s2
while type(s1[0][1]) is str: s1.pop(0)
while type(s2[0][1]) is str: s2.pop(0)
print "Cleaned invalid rows"
wl = sorted_wordlist(s1,0,[1])
if wl.count('') > 0: blank = wl.pop(wl.index(''))
print "Base wordlist length: " + str(len(wl)) + " ; Top ten: " + str(wl[:10])
if wordlimit > 0 and wordlimit < len(wl):
print "Shortening to " + str(wordlimit)
wl = wl[:wordlimit]
res1 = gramfuncs[grams](s1,0,[1],wl)
print "Done search: 1/2"
res2 = gramfuncs[grams](s2,0,[1],wl)
print "Done search: 2/2"
comb = sorted(splice(res1,res2,0,0),key=sortkey,reverse=True)
if sigtable:
tot1 = search(s1,0,[1],'')
tot2 = search(s2,0,[1],'')
ev = tot2[1]*1.0/tot1[1]
print "Totals: " + str(tot1[1]) + ", " + str(tot2[1])
for i in range(len(comb)):
comb[i].append(comb[i][2 - invertratio]*1.0/(comb[i][1 + invertratio] + 0.000001))
comb[i].append((comb[i][2] - ev * comb[i][1])*1.0/(ev * comb[i][1] + 0.000001) ** 0.5)
comb = [['Keyword','Column 1','Column 2','Ratio','Significance']] + comb
else: comb = [['Keyword','Column 1','Column 2']] + comb
print "Done"
return comb
|
428090
|
from build import models, reader
from build import labels as categories
from sklearn.model_selection import train_test_split as tts
from sklearn.metrics import classification_report
docs = reader.fileids(categories=categories)
labels = [reader.categories(fileids=[fid])[0] for fid in docs]
train_docs, test_docs, train_labels, test_labels = tts(docs, labels, test_size=0.2)
def get_docs(fids):
for fid in fids:
yield list(reader.docs(fileids=[fid]))
sgd = models[3]
nby = models[4]
sgd.fit(get_docs(train_docs), train_labels)
y_pred = sgd.predict(get_docs(test_docs))
print(classification_report(test_labels, y_pred, labels=categories))
import nltk
def preprocess(text):
return [
[
list(nltk.pos_tag(nltk.word_tokenize(sent)))
for sent in nltk.sent_tokenize(para)
] for para in text.split("\n\n")
]
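# preprocess returns a [paragraph][sentence] structure of (token, tag) pairs,
# e.g. (tags illustrative): preprocess("Time flies.")[0][0]
# -> [('Time', 'NNP'), ('flies', 'VBZ'), ('.', '.')]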
doc = preprocess("""
Last summer, two defensemen from opposing conferences with distinct styles of play and contrasting personalities were forever placed in the same breath, their destinies intertwined by a trade.
The Nashville Predators sent Shea Weber, their cornerstone, to the Montreal Canadiens for <NAME>, who had become tremendously popular in Montreal and throughout the league. Subban, 27, won a Norris Trophy as the league’s top defenseman in 2013. Weber, 31, had been a three-time finalist for the award.
“Sometimes you forget that superstars get traded,” Anaheim Ducks defenseman <NAME> said. “Obviously, what <NAME>. meant to Montreal and the impact that he had on that city, it was hard for them to let him go. The same with Shea, who was their captain for years.”
Weber and Subban were together again at last weekend’s All-Star three-on-three tournament. Weber’s 31 points in 50 games for the first-place Canadiens, and his plus-18 rating, made him an obvious selection. Subban was voted in as a team captain by the fans despite a mixed first half of the season. He posted only 18 points and missed 16 games for the Predators, who are in third place in the Central Division.
""")
# print(doc[0][0])
print(sgd.predict(doc[0][0]))
|
428111
|
import pyqtgraph as pg
class BlankAxis(pg.AxisItem):
#TODO: override tickValues() as well; the tick positions inherited from
#AxisItem land in awkward places for a blank axis.
def __init__(self, orientation, pen=None, linkView=None, parent=None, maxTickLength=-5, showValues=True):
pg.AxisItem.__init__(self, orientation=orientation, pen=pen, linkView=linkView, parent=parent, maxTickLength=maxTickLength, showValues=showValues)
def tickStrings(self, values, scale, spacing):
# Return an empty label for every tick value, hiding the axis text.
return ['' for _ in values]
|
428173
|
from fuzzconfig import FuzzConfig
import nonrouting
import fuzzloops
import nets
import pytrellis
import re
cfg = FuzzConfig(job="PLC2WRE", family="ECP5", device="LFE5U-25F", ncl="empty.ncl", tiles=["R19C33:PLC2"])
def main():
pytrellis.load_database("../../../database")
cfg.setup()
empty_bitfile = cfg.build_design(cfg.ncl, {})
cfg.ncl = "wremux.ncl"
def per_slice(slicen):
def get_substs(wremux):
if wremux == "INV":
wremux = "WRE:::WRE=#INV"
if wremux == "0":
wremux = "1:::1=0"
return dict(slice=slicen, wremux=wremux)
nonrouting.fuzz_enum_setting(cfg, "SLICE{}.WREMUX".format(slicen), ["0", "1", "WRE", "INV"],
lambda x: get_substs(wremux=x),
empty_bitfile, False)
fuzzloops.parallel_foreach(["A"], per_slice)
if __name__ == "__main__":
main()
|
428181
|
from flask import Blueprint
auth = Blueprint(
'auth', __name__, template_folder='templates', static_folder='static')
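# Views are imported at the bottom so they can import `auth` from this module
# without a circular import.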
from . import views
|
428186
|
import os
from koursaros.utils.database.psql import Conn
from koursaros.utils.misc import gb_free_space
from koursaros.utils.bucket import bucket_contains, download_and_unzip
from kctl.logger import set_logger
from .data import *
logger = set_logger('MODELS')
class Model(object):
def __init__(self, config, training):
if gb_free_space() < 3:
logger.error("There is not enough space on your disk, please allocate more!")
raise SystemError
self.config = config
self.version = config.hash
self.dir = '.model-data'
if not os.path.exists(self.dir):
os.makedirs(self.dir)
self.ckpt_dir = f'{self.dir}/{self.version}/'
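# Checkpoint resolution order: (1) no 'training' section -> configured default
# checkpoint; (2) a trained model is already cached locally -> load it;
# (3) an archive exists in the bucket -> download and extract it;
# (4) otherwise -> initialize from the training checkpoint (training only).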
logger.info("Local model cache dir %s" %self.ckpt_dir)
if not 'training' in self.config: # use a default model
logger.info('Loading model from default checkpoint')
self.checkpoint = self.config.checkpoint
self.trained = True
elif os.path.exists(self.ckpt_dir + 'config.json') and not training: # model already trained
logger.info('Loading trained model')
self.checkpoint = self.ckpt_dir
self.trained = True
elif bucket_contains(f'{self.version}.tar.gz'):
logger.info(f'Downloading and extracting from bucket {self.config.repo}')
download_and_unzip(self.config.repo.split('//')[-1],
f'{self.version}.tar.gz', self.dir)
self.checkpoint = self.ckpt_dir
assert os.path.exists(self.ckpt_dir + 'config.json')
self.trained = True
else: # init model for training
logger.info('Initializing model for training')
if not training:
logger.error('Please train model before deploying')
raise SystemError
self.data_dir = os.path.join(self.dir, self.version)
if not os.path.exists(self.data_dir):
os.makedirs(self.data_dir)
if not os.path.exists(self.ckpt_dir):
os.makedirs(self.ckpt_dir)
self.checkpoint = config.training.checkpoint
self.trained = False
def get_data(self):
"""
Get training data based on yaml config and connection
:return:
"""
data = self.config.training.data
if data.source == 'postgres':
p = Conn()
query_fn = p.query
return query_fn(select_all(data.schema, data.train)), \
query_fn(select_all(data.schema, data.test))
else:
return get_rows_from_tsv(data.train), get_rows_from_tsv(data.test)
def train(self):
"""
Runs training as defined in the model yaml. Saves model to directory
.cache/<md5 hash of yaml>
:return: evaluation metric
"""
raise NotImplementedError()
def run(self, *args):
"""
Runs inference on arbitrary args
:param args: sent_a, sent_b for classification / regression task.
:return:
"""
raise NotImplementedError()
def save_model(self):
# append hash of yaml to model checkpoint
raise NotImplementedError()
@staticmethod
def architectures():
raise NotImplementedError()
def getInputProto(self):
raise NotImplementedError()
def getOutputProto(self):
raise NotImplementedError()
|
428190
|
import sys
import mgp
try:
import networkx as nx
import numpy # noqa: F401
import scipy # noqa: F401
except ImportError as import_error:
sys.stderr.write(
(
f"NOTE: Please install networkx, numpy, scipy to be able to "
f"use proxied NetworkX algorithms. E.g., CALL nxalg.pagerank(...).\n"
f"Using Python:\n{sys.version}\n"
)
)
raise import_error
# Imported last because it also depends on networkx.
from mgp_networkx import (
MemgraphMultiDiGraph,
MemgraphDiGraph, # noqa: E402
MemgraphMultiGraph,
MemgraphGraph,
PropertiesDictionary,
)
# networkx.algorithms.approximation.connectivity.node_connectivity
@mgp.read_proc
def node_connectivity(
ctx: mgp.ProcCtx,
source: mgp.Nullable[mgp.Vertex] = None,
target: mgp.Nullable[mgp.Vertex] = None,
) -> mgp.Record(connectivity=int):
return mgp.Record(
connectivity=nx.node_connectivity(MemgraphMultiDiGraph(ctx=ctx), source, target)
)
# networkx.algorithms.assortativity.degree_assortativity_coefficient
@mgp.read_proc
def degree_assortativity_coefficient(
ctx: mgp.ProcCtx,
x: str = "out",
y: str = "in",
weight: mgp.Nullable[str] = None,
nodes: mgp.Nullable[mgp.List[mgp.Vertex]] = None,
) -> mgp.Record(assortativity=float):
return mgp.Record(
assortativity=nx.degree_assortativity_coefficient(
MemgraphMultiDiGraph(ctx=ctx), x, y, weight, nodes
)
)
# networkx.algorithms.asteroidal.is_at_free
@mgp.read_proc
def is_at_free(ctx: mgp.ProcCtx) -> mgp.Record(is_at_free=bool):
return mgp.Record(is_at_free=nx.is_at_free(MemgraphGraph(ctx=ctx)))
# networkx.algorithms.bipartite.basic.is_bipartite
@mgp.read_proc
def is_bipartite(ctx: mgp.ProcCtx) -> mgp.Record(is_bipartite=bool):
return mgp.Record(is_bipartite=nx.is_bipartite(MemgraphMultiDiGraph(ctx=ctx)))
# networkx.algorithms.boundary.node_boundary
@mgp.read_proc
def node_boundary(
ctx: mgp.ProcCtx,
nbunch1: mgp.List[mgp.Vertex],
nbunch2: mgp.Nullable[mgp.List[mgp.Vertex]] = None,
) -> mgp.Record(boundary=mgp.List[mgp.Vertex]):
return mgp.Record(
boundary=list(nx.node_boundary(MemgraphMultiDiGraph(ctx=ctx), nbunch1, nbunch2))
)
# networkx.algorithms.bridges.bridges
@mgp.read_proc
def bridges(
ctx: mgp.ProcCtx, root: mgp.Nullable[mgp.Vertex] = None
) -> mgp.Record(bridges=mgp.List[mgp.Edge]):
g = MemgraphMultiGraph(ctx=ctx)
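# nx.bridges yields (u, v) vertex pairs on a simple-graph view; map each pair
# back to one concrete mgp.Edge via the multigraph adjacency g[u][v].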
return mgp.Record(
bridges=[
next(iter(g[u][v]))
for u, v in nx.bridges(MemgraphGraph(ctx=ctx), root=root)
]
)
# networkx.algorithms.centrality.betweenness_centrality
@mgp.read_proc
def betweenness_centrality(
ctx: mgp.ProcCtx,
k: mgp.Nullable[int] = None,
normalized: bool = True,
weight: mgp.Nullable[str] = None,
endpoints: bool = False,
seed: mgp.Nullable[int] = None,
) -> mgp.Record(node=mgp.Vertex, betweenness=mgp.Number):
return [
mgp.Record(node=n, betweenness=b)
for n, b in nx.betweenness_centrality(
MemgraphDiGraph(ctx=ctx),
k=k,
normalized=normalized,
weight=weight,
endpoints=endpoints,
seed=seed,
).items()
]
# networkx.algorithms.chains.chain_decomposition
@mgp.read_proc
def chain_decomposition(
ctx: mgp.ProcCtx, root: mgp.Nullable[mgp.Vertex] = None
) -> mgp.Record(chains=mgp.List[mgp.List[mgp.Edge]]):
g = MemgraphMultiGraph(ctx=ctx)
return mgp.Record(
chains=[
[next(iter(g[u][v])) for u, v in d]
for d in nx.chain_decomposition(MemgraphGraph(ctx=ctx), root=root)
]
)
# networkx.algorithms.chordal.is_chordal
@mgp.read_proc
def is_chordal(ctx: mgp.ProcCtx) -> mgp.Record(is_chordal=bool):
return mgp.Record(is_chordal=nx.is_chordal(MemgraphGraph(ctx=ctx)))
# networkx.algorithms.clique.find_cliques
@mgp.read_proc
def find_cliques(
ctx: mgp.ProcCtx,
) -> mgp.Record(cliques=mgp.List[mgp.List[mgp.Vertex]]):
return mgp.Record(cliques=list(nx.find_cliques(MemgraphMultiGraph(ctx=ctx))))
# networkx.algorithms.cluster.clustering
@mgp.read_proc
def clustering(
ctx: mgp.ProcCtx,
nodes: mgp.Nullable[mgp.List[mgp.Vertex]] = None,
weight: mgp.Nullable[str] = None,
) -> mgp.Record(node=mgp.Vertex, clustering=mgp.Number):
return [
mgp.Record(node=n, clustering=c)
for n, c in nx.clustering(
MemgraphDiGraph(ctx=ctx), nodes=nodes, weight=weight
).items()
]
# networkx.algorithms.coloring.greedy_color
@mgp.read_proc
def greedy_color(
ctx: mgp.ProcCtx, strategy: str = "largest_first", interchange: bool = False
) -> mgp.Record(node=mgp.Vertex, color=int):
return [
mgp.Record(node=n, color=c)
for n, c in nx.greedy_color(
MemgraphMultiDiGraph(ctx=ctx), strategy, interchange
).items()
]
# networkx.algorithms.communicability_alg.communicability
@mgp.read_proc
def communicability(
ctx: mgp.ProcCtx,
) -> mgp.Record(node1=mgp.Vertex, node2=mgp.Vertex, communicability=mgp.Number):
return [
mgp.Record(node1=n1, node2=n2, communicability=v)
for n1, d in nx.communicability(MemgraphGraph(ctx=ctx)).items()
for n2, v in d.items()
]
# networkx.algorithms.community.kclique.k_clique_communities
@mgp.read_proc
def k_clique_communities(
ctx: mgp.ProcCtx,
k: int,
cliques: mgp.Nullable[mgp.List[mgp.List[mgp.Vertex]]] = None,
) -> mgp.Record(communities=mgp.List[mgp.List[mgp.Vertex]]):
return mgp.Record(
communities=[
list(s)
for s in nx.community.k_clique_communities(
MemgraphMultiGraph(ctx=ctx), k, cliques
)
]
)
# networkx.algorithms.approximation.kcomponents.k_components
@mgp.read_proc
def k_components(
ctx: mgp.ProcCtx, density: mgp.Number = 0.95
) -> mgp.Record(k=int, components=mgp.List[mgp.List[mgp.Vertex]]):
kcomps = nx.k_components(MemgraphMultiGraph(ctx=ctx), density)
return [
mgp.Record(k=k, components=[list(s) for s in comps])
for k, comps in kcomps.items()
]
# networkx.algorithms.components.biconnected_components
@mgp.read_proc
def biconnected_components(
ctx: mgp.ProcCtx,
) -> mgp.Record(components=mgp.List[mgp.List[mgp.Vertex]]):
comps = nx.biconnected_components(MemgraphMultiGraph(ctx=ctx))
return mgp.Record(components=[list(s) for s in comps])
# networkx.algorithms.components.strongly_connected_components
@mgp.read_proc
def strongly_connected_components(
ctx: mgp.ProcCtx,
) -> mgp.Record(components=mgp.List[mgp.List[mgp.Vertex]]):
comps = nx.strongly_connected_components(MemgraphMultiDiGraph(ctx=ctx))
return mgp.Record(components=[list(s) for s in comps])
# networkx.algorithms.connectivity.edge_kcomponents.k_edge_components
#
# NOTE: NetworkX 2.4, algorithms/connectivity/edge_kcomponents.py:367. We create
# a *copy* of the graph because the algorithm copies the graph using
# __class__() and tries to modify it.
@mgp.read_proc
def k_edge_components(
ctx: mgp.ProcCtx, k: int
) -> mgp.Record(components=mgp.List[mgp.List[mgp.Vertex]]):
return mgp.Record(
components=[
list(s)
for s in nx.k_edge_components(nx.DiGraph(MemgraphDiGraph(ctx=ctx)), k)
]
)
# networkx.algorithms.core.core_number
@mgp.read_proc
def core_number(ctx: mgp.ProcCtx) -> mgp.Record(node=mgp.Vertex, core=mgp.Number):
return [
mgp.Record(node=n, core=c)
for n, c in nx.core_number(MemgraphDiGraph(ctx=ctx)).items()
]
# networkx.algorithms.covering.is_edge_cover
@mgp.read_proc
def is_edge_cover(
ctx: mgp.ProcCtx, cover: mgp.List[mgp.Edge]
) -> mgp.Record(is_edge_cover=bool):
cover = set([(e.from_vertex, e.to_vertex) for e in cover])
return mgp.Record(
is_edge_cover=nx.is_edge_cover(MemgraphMultiGraph(ctx=ctx), cover)
)
# networkx.algorithms.cycles.find_cycle
@mgp.read_proc
def find_cycle(
ctx: mgp.ProcCtx,
source: mgp.Nullable[mgp.List[mgp.Vertex]] = None,
orientation: mgp.Nullable[str] = None,
) -> mgp.Record(cycle=mgp.Nullable[mgp.List[mgp.Edge]]):
try:
return mgp.Record(
cycle=[
e
for _, _, e in nx.find_cycle(
MemgraphMultiDiGraph(ctx=ctx), source, orientation
)
]
)
except nx.NetworkXNoCycle:
return mgp.Record(cycle=None)
# networkx.algorithms.cycles.simple_cycles
#
# NOTE: NetworkX 2.4, algorithms/cycles.py:183. We create a *copy* of the graph
# because the algorithm copies the graph using type() and tries to pass initial
# data.
@mgp.read_proc
def simple_cycles(
ctx: mgp.ProcCtx,
) -> mgp.Record(cycles=mgp.List[mgp.List[mgp.Vertex]]):
return mgp.Record(
cycles=list(
nx.simple_cycles(nx.MultiDiGraph(MemgraphMultiDiGraph(ctx=ctx)).copy())
)
)
# networkx.algorithms.cuts.node_expansion
@mgp.read_proc
def node_expansion(
ctx: mgp.ProcCtx, s: mgp.List[mgp.Vertex]
) -> mgp.Record(node_expansion=mgp.Number):
return mgp.Record(
node_expansion=nx.node_expansion(MemgraphMultiDiGraph(ctx=ctx), set(s))
)
# networkx.algorithms.dag.topological_sort
@mgp.read_proc
def topological_sort(
ctx: mgp.ProcCtx,
) -> mgp.Record(nodes=mgp.Nullable[mgp.List[mgp.Vertex]]):
return mgp.Record(nodes=list(nx.topological_sort(MemgraphMultiDiGraph(ctx=ctx))))
# networkx.algorithms.dag.ancestors
@mgp.read_proc
def ancestors(
ctx: mgp.ProcCtx, source: mgp.Vertex
) -> mgp.Record(ancestors=mgp.List[mgp.Vertex]):
return mgp.Record(
ancestors=list(nx.ancestors(MemgraphMultiDiGraph(ctx=ctx), source))
)
# networkx.algorithms.dag.descendants
@mgp.read_proc
def descendants(
ctx: mgp.ProcCtx, source: mgp.Vertex
) -> mgp.Record(descendants=mgp.List[mgp.Vertex]):
return mgp.Record(
descendants=list(nx.descendants(MemgraphMultiDiGraph(ctx=ctx), source))
)
# networkx.algorithms.distance_measures.center
#
# NOTE: Takes more parameters.
@mgp.read_proc
def center(ctx: mgp.ProcCtx) -> mgp.Record(center=mgp.List[mgp.Vertex]):
return mgp.Record(center=list(nx.center(MemgraphMultiDiGraph(ctx=ctx))))
# networkx.algorithms.distance_measures.diameter
#
# NOTE: Takes more parameters.
@mgp.read_proc
def diameter(ctx: mgp.ProcCtx) -> mgp.Record(diameter=int):
return mgp.Record(diameter=nx.diameter(MemgraphMultiDiGraph(ctx=ctx)))
# networkx.algorithms.distance_regular.is_distance_regular
@mgp.read_proc
def is_distance_regular(ctx: mgp.ProcCtx) -> mgp.Record(is_distance_regular=bool):
return mgp.Record(
is_distance_regular=nx.is_distance_regular(MemgraphMultiGraph(ctx=ctx))
)
# networkx.algorithms.strongly_regular.is_strongly_regular
@mgp.read_proc
def is_strongly_regular(ctx: mgp.ProcCtx) -> mgp.Record(is_strongly_regular=bool):
return mgp.Record(
is_strongly_regular=nx.is_strongly_regular(MemgraphMultiGraph(ctx=ctx))
)
# networkx.algorithms.dominance.dominance_frontiers
@mgp.read_proc
def dominance_frontiers(
ctx: mgp.ProcCtx,
start: mgp.Vertex,
) -> mgp.Record(node=mgp.Vertex, frontier=mgp.List[mgp.Vertex]):
return [
mgp.Record(node=n, frontier=list(f))
for n, f in nx.dominance_frontiers(MemgraphMultiDiGraph(ctx=ctx), start).items()
]
# networkx.algorithms.dominance.immediate_dominators
@mgp.read_proc
def immediate_dominators(
ctx: mgp.ProcCtx,
start: mgp.Vertex,
) -> mgp.Record(node=mgp.Vertex, dominator=mgp.Vertex):
return [
mgp.Record(node=n, dominator=d)
for n, d in nx.immediate_dominators(
MemgraphMultiDiGraph(ctx=ctx), start
).items()
]
# networkx.algorithms.dominating.dominating_set
@mgp.read_proc
def dominating_set(
ctx: mgp.ProcCtx,
start: mgp.Vertex,
) -> mgp.Record(dominating_set=mgp.List[mgp.Vertex]):
return mgp.Record(
dominating_set=list(nx.dominating_set(MemgraphMultiDiGraph(ctx=ctx), start))
)
# networkx.algorithms.efficiency_measures.local_efficiency
@mgp.read_proc
def local_efficiency(ctx: mgp.ProcCtx) -> mgp.Record(local_efficiency=float):
return mgp.Record(local_efficiency=nx.local_efficiency(MemgraphMultiGraph(ctx=ctx)))
# networkx.algorithms.efficiency_measures.global_efficiency
@mgp.read_proc
def global_efficiency(ctx: mgp.ProcCtx) -> mgp.Record(global_efficiency=float):
return mgp.Record(
global_efficiency=nx.global_efficiency(MemgraphMultiGraph(ctx=ctx))
)
# networkx.algorithms.euler.is_eulerian
@mgp.read_proc
def is_eulerian(ctx: mgp.ProcCtx) -> mgp.Record(is_eulerian=bool):
return mgp.Record(is_eulerian=nx.is_eulerian(MemgraphMultiDiGraph(ctx=ctx)))
# networkx.algorithms.euler.is_semieulerian
@mgp.read_proc
def is_semieulerian(ctx: mgp.ProcCtx) -> mgp.Record(is_semieulerian=bool):
return mgp.Record(is_semieulerian=nx.is_semieulerian(MemgraphMultiDiGraph(ctx=ctx)))
# networkx.algorithms.euler.has_eulerian_path
@mgp.read_proc
def has_eulerian_path(ctx: mgp.ProcCtx) -> mgp.Record(has_eulerian_path=bool):
return mgp.Record(
has_eulerian_path=nx.has_eulerian_path(MemgraphMultiDiGraph(ctx=ctx))
)
# networkx.algorithms.hierarchy.flow_hierarchy
@mgp.read_proc
def flow_hierarchy(
ctx: mgp.ProcCtx, weight: mgp.Nullable[str] = None
) -> mgp.Record(flow_hierarchy=float):
return mgp.Record(
flow_hierarchy=nx.flow_hierarchy(MemgraphMultiDiGraph(ctx=ctx), weight=weight)
)
# networkx.algorithms.isolate.isolates
@mgp.read_proc
def isolates(ctx: mgp.ProcCtx) -> mgp.Record(isolates=mgp.List[mgp.Vertex]):
return mgp.Record(isolates=list(nx.isolates(MemgraphMultiDiGraph(ctx=ctx))))
# networkx.algorithms.isolate.is_isolate
@mgp.read_proc
def is_isolate(ctx: mgp.ProcCtx, n: mgp.Vertex) -> mgp.Record(is_isolate=bool):
return mgp.Record(is_isolate=nx.is_isolate(MemgraphMultiDiGraph(ctx=ctx), n))
# networkx.algorithms.isomorphism.is_isomorphic
@mgp.read_proc
def is_isomorphic(
ctx: mgp.ProcCtx,
nodes1: mgp.List[mgp.Vertex],
edges1: mgp.List[mgp.Edge],
nodes2: mgp.List[mgp.Vertex],
edges2: mgp.List[mgp.Edge],
) -> mgp.Record(is_isomorphic=bool):
nodes1, edges1, nodes2, edges2 = map(set, [nodes1, edges1, nodes2, edges2])
g = MemgraphMultiDiGraph(ctx=ctx)
g1 = nx.subgraph_view(g, lambda n: n in nodes1, lambda n1, n2, e: e in edges1)
g2 = nx.subgraph_view(g, lambda n: n in nodes2, lambda n1, n2, e: e in edges2)
return mgp.Record(is_isomorphic=nx.is_isomorphic(g1, g2))
# networkx.algorithms.link_analysis.pagerank_alg.pagerank
@mgp.read_proc
def pagerank(
ctx: mgp.ProcCtx,
alpha: mgp.Number = 0.85,
personalization: mgp.Nullable[str] = None,
max_iter: int = 100,
tol: mgp.Number = 1e-06,
nstart: mgp.Nullable[str] = None,
weight: mgp.Nullable[str] = "weight",
dangling: mgp.Nullable[str] = None,
) -> mgp.Record(node=mgp.Vertex, rank=float):
def to_properties_dictionary(prop):
return None if prop is None else PropertiesDictionary(ctx, prop)
pg = nx.pagerank(
MemgraphDiGraph(ctx=ctx),
alpha=alpha,
personalization=to_properties_dictionary(personalization),
max_iter=max_iter,
tol=tol,
nstart=to_properties_dictionary(nstart),
weight=weight,
dangling=to_properties_dictionary(dangling),
)
return [mgp.Record(node=k, rank=v) for k, v in pg.items()]
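# Hypothetical invocation from openCypher, assuming this module is loaded as
# `nxalg` (as in the import-error hint above):
#   CALL nxalg.pagerank(0.85) YIELD node, rank
#   RETURN node, rank ORDER BY rank DESC LIMIT 10;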
# networkx.algorithms.link_prediction.jaccard_coefficient
@mgp.read_proc
def jaccard_coefficient(
ctx: mgp.ProcCtx, ebunch: mgp.Nullable[mgp.List[mgp.List[mgp.Vertex]]] = None
) -> mgp.Record(u=mgp.Vertex, v=mgp.Vertex, coef=float):
return [
mgp.Record(u=u, v=v, coef=c)
for u, v, c in nx.jaccard_coefficient(MemgraphGraph(ctx=ctx), ebunch)
]
# networkx.algorithms.lowest_common_ancestors.lowest_common_ancestor
@mgp.read_proc
def lowest_common_ancestor(
ctx: mgp.ProcCtx, node1: mgp.Vertex, node2: mgp.Vertex
) -> mgp.Record(ancestor=mgp.Nullable[mgp.Vertex]):
return mgp.Record(
ancestor=nx.lowest_common_ancestor(MemgraphDiGraph(ctx=ctx), node1, node2)
)
# networkx.algorithms.matching.maximal_matching
@mgp.read_proc
def maximal_matching(ctx: mgp.ProcCtx) -> mgp.Record(edges=mgp.List[mgp.Edge]):
g = MemgraphMultiDiGraph(ctx=ctx)
return mgp.Record(
edges=list(next(iter(g[u][v])) for u, v in nx.maximal_matching(g))
)
# networkx.algorithms.planarity.check_planarity
#
# NOTE: Returns a graph.
@mgp.read_proc
def check_planarity(ctx: mgp.ProcCtx) -> mgp.Record(is_planar=bool):
return mgp.Record(is_planar=nx.check_planarity(MemgraphMultiDiGraph(ctx=ctx))[0])
# networkx.algorithms.non_randomness.non_randomness
@mgp.read_proc
def non_randomness(
ctx: mgp.ProcCtx, k: mgp.Nullable[int] = None
) -> mgp.Record(non_randomness=float, relative_non_randomness=float):
nn, rnn = nx.non_randomness(MemgraphGraph(ctx=ctx), k=k)
return mgp.Record(non_randomness=nn, relative_non_randomness=rnn)
# networkx.algorithms.reciprocity.reciprocity
@mgp.read_proc
def reciprocity(
ctx: mgp.ProcCtx, nodes: mgp.Nullable[mgp.List[mgp.Vertex]] = None
) -> mgp.Record(node=mgp.Nullable[mgp.Vertex], reciprocity=mgp.Nullable[float]):
rp = nx.reciprocity(MemgraphMultiDiGraph(ctx=ctx), nodes=nodes)
if nodes is None:
return mgp.Record(node=None, reciprocity=rp)
else:
return [mgp.Record(node=n, reciprocity=r) for n, r in rp.items()]
# networkx.algorithms.shortest_paths.generic.shortest_path
@mgp.read_proc
def shortest_path(
ctx: mgp.ProcCtx,
source: mgp.Nullable[mgp.Vertex] = None,
target: mgp.Nullable[mgp.Vertex] = None,
weight: mgp.Nullable[str] = None,
method: str = "dijkstra",
) -> mgp.Record(source=mgp.Vertex, target=mgp.Vertex, path=mgp.List[mgp.Vertex]):
sp = nx.shortest_path(
MemgraphMultiDiGraph(ctx=ctx),
source=source,
target=target,
weight=weight,
method=method,
)
if source and target:
sp = {source: {target: sp}}
elif source and not target:
sp = {source: sp}
elif not source and target:
sp = {source: {target: p} for source, p in sp.items()}
return [
mgp.Record(source=s, target=t, path=p)
for s, d in sp.items()
for t, p in d.items()
]
# networkx.algorithms.shortest_paths.generic.shortest_path_length
@mgp.read_proc
def shortest_path_length(
ctx: mgp.ProcCtx,
source: mgp.Nullable[mgp.Vertex] = None,
target: mgp.Nullable[mgp.Vertex] = None,
weight: mgp.Nullable[str] = None,
method: str = "dijkstra",
) -> mgp.Record(source=mgp.Vertex, target=mgp.Vertex, length=mgp.Number):
sp = nx.shortest_path_length(
MemgraphMultiDiGraph(ctx=ctx),
source=source,
target=target,
weight=weight,
method=method,
)
if source and target:
sp = {source: {target: sp}}
elif source and not target:
sp = {source: sp}
elif not source and target:
sp = {source: {target: l} for source, l in sp.items()}
else:
sp = dict(sp)
return [
mgp.Record(source=s, target=t, length=l)
for s, d in sp.items()
for t, l in d.items()
]
# networkx.algorithms.shortest_paths.generic.all_shortest_paths
@mgp.read_proc
def all_shortest_paths(
ctx: mgp.ProcCtx,
source: mgp.Vertex,
target: mgp.Vertex,
weight: mgp.Nullable[str] = None,
method: str = "dijkstra",
) -> mgp.Record(paths=mgp.List[mgp.List[mgp.Vertex]]):
return mgp.Record(
paths=list(
nx.all_shortest_paths(
MemgraphMultiDiGraph(ctx=ctx),
source=source,
target=target,
weight=weight,
method=method,
)
)
)
# networkx.algorithms.shortest_paths.generic.has_path
@mgp.read_proc
def has_path(
ctx: mgp.ProcCtx, source: mgp.Vertex, target: mgp.Vertex
) -> mgp.Record(has_path=bool):
return mgp.Record(
has_path=nx.has_path(MemgraphMultiDiGraph(ctx=ctx), source, target)
)
# networkx.algorithms.shortest_paths.weighted.multi_source_dijkstra_path
@mgp.read_proc
def multi_source_dijkstra_path(
ctx: mgp.ProcCtx,
sources: mgp.List[mgp.Vertex],
cutoff: mgp.Nullable[int] = None,
weight: str = "weight",
) -> mgp.Record(target=mgp.Vertex, path=mgp.List[mgp.Vertex]):
return [
mgp.Record(target=t, path=p)
for t, p in nx.multi_source_dijkstra_path(
MemgraphMultiDiGraph(ctx=ctx), sources, cutoff=cutoff, weight=weight
).items()
]
# networkx.algorithms.shortest_paths.weighted.multi_source_dijkstra_path_length
@mgp.read_proc
def multi_source_dijkstra_path_length(
ctx: mgp.ProcCtx,
sources: mgp.List[mgp.Vertex],
cutoff: mgp.Nullable[int] = None,
weight: str = "weight",
) -> mgp.Record(target=mgp.Vertex, length=mgp.Number):
return [
mgp.Record(target=t, length=l)
for t, l in nx.multi_source_dijkstra_path_length(
MemgraphMultiDiGraph(ctx=ctx), sources, cutoff=cutoff, weight=weight
).items()
]
# networkx.algorithms.simple_paths.is_simple_path
@mgp.read_proc
def is_simple_path(
ctx: mgp.ProcCtx, nodes: mgp.List[mgp.Vertex]
) -> mgp.Record(is_simple_path=bool):
return mgp.Record(
is_simple_path=nx.is_simple_path(MemgraphMultiDiGraph(ctx=ctx), nodes)
)
# networkx.algorithms.simple_paths.all_simple_paths
@mgp.read_proc
def all_simple_paths(
ctx: mgp.ProcCtx,
source: mgp.Vertex,
target: mgp.Vertex,
cutoff: mgp.Nullable[int] = None,
) -> mgp.Record(paths=mgp.List[mgp.List[mgp.Vertex]]):
return mgp.Record(
paths=list(
nx.all_simple_paths(
MemgraphMultiDiGraph(ctx=ctx), source, target, cutoff=cutoff
)
)
)
# networkx.algorithms.tournament.is_tournament
@mgp.read_proc
def is_tournament(ctx: mgp.ProcCtx) -> mgp.Record(is_tournament=bool):
return mgp.Record(
is_tournament=nx.tournament.is_tournament(MemgraphDiGraph(ctx=ctx))
)
# networkx.algorithms.traversal.breadth_first_search.bfs_edges
@mgp.read_proc
def bfs_edges(
ctx: mgp.ProcCtx,
source: mgp.Vertex,
reverse: bool = False,
depth_limit: mgp.Nullable[int] = None,
) -> mgp.Record(edges=mgp.List[mgp.Edge]):
return mgp.Record(
edges=list(
nx.bfs_edges(
MemgraphMultiDiGraph(ctx=ctx),
source,
reverse=reverse,
depth_limit=depth_limit,
)
)
)
# networkx.algorithms.traversal.breadth_first_search.bfs_tree
@mgp.read_proc
def bfs_tree(
ctx: mgp.ProcCtx,
source: mgp.Vertex,
reverse: bool = False,
depth_limit: mgp.Nullable[int] = None,
) -> mgp.Record(tree=mgp.List[mgp.Vertex]):
return mgp.Record(
tree=list(
nx.bfs_tree(
MemgraphMultiDiGraph(ctx=ctx),
source,
reverse=reverse,
depth_limit=depth_limit,
)
)
)
# networkx.algorithms.traversal.breadth_first_search.bfs_predecessors
@mgp.read_proc
def bfs_predecessors(
ctx: mgp.ProcCtx, source: mgp.Vertex, depth_limit: mgp.Nullable[int] = None
) -> mgp.Record(node=mgp.Vertex, predecessor=mgp.Vertex):
return [
mgp.Record(node=n, predecessor=p)
for n, p in nx.bfs_predecessors(
MemgraphMultiDiGraph(ctx=ctx), source, depth_limit=depth_limit
)
]
# networkx.algorithms.traversal.breadth_first_search.bfs_successors
@mgp.read_proc
def bfs_successors(
ctx: mgp.ProcCtx, source: mgp.Vertex, depth_limit: mgp.Nullable[int] = None
) -> mgp.Record(node=mgp.Vertex, successors=mgp.List[mgp.Vertex]):
return [
mgp.Record(node=n, successors=s)
for n, s in nx.bfs_successors(
MemgraphMultiDiGraph(ctx=ctx), source, depth_limit=depth_limit
)
]
# networkx.algorithms.traversal.depth_first_search.dfs_tree
@mgp.read_proc
def dfs_tree(
ctx: mgp.ProcCtx, source: mgp.Vertex, depth_limit: mgp.Nullable[int] = None
) -> mgp.Record(tree=mgp.List[mgp.Vertex]):
return mgp.Record(
tree=list(
nx.dfs_tree(MemgraphMultiDiGraph(ctx=ctx), source, depth_limit=depth_limit)
)
)
# networkx.algorithms.traversal.depth_first_search.dfs_predecessors
@mgp.read_proc
def dfs_predecessors(
ctx: mgp.ProcCtx, source: mgp.Vertex, depth_limit: mgp.Nullable[int] = None
) -> mgp.Record(node=mgp.Vertex, predecessor=mgp.Vertex):
return [
mgp.Record(node=n, predecessor=p)
for n, p in nx.dfs_predecessors(
MemgraphMultiDiGraph(ctx=ctx), source, depth_limit=depth_limit
).items()
]
# networkx.algorithms.traversal.depth_first_search.dfs_successors
@mgp.read_proc
def dfs_successors(
ctx: mgp.ProcCtx, source: mgp.Vertex, depth_limit: mgp.Nullable[int] = None
) -> mgp.Record(node=mgp.Vertex, successors=mgp.List[mgp.Vertex]):
return [
mgp.Record(node=n, successors=s)
for n, s in nx.dfs_successors(
MemgraphMultiDiGraph(ctx=ctx), source, depth_limit=depth_limit
).items()
]
# networkx.algorithms.traversal.depth_first_search.dfs_preorder_nodes
@mgp.read_proc
def dfs_preorder_nodes(
ctx: mgp.ProcCtx, source: mgp.Vertex, depth_limit: mgp.Nullable[int] = None
) -> mgp.Record(nodes=mgp.List[mgp.Vertex]):
return mgp.Record(
nodes=list(
nx.dfs_preorder_nodes(
MemgraphMultiDiGraph(ctx=ctx), source, depth_limit=depth_limit
)
)
)
# networkx.algorithms.traversal.depth_first_search.dfs_postorder_nodes
@mgp.read_proc
def dfs_postorder_nodes(
ctx: mgp.ProcCtx, source: mgp.Vertex, depth_limit: mgp.Nullable[int] = None
) -> mgp.Record(nodes=mgp.List[mgp.Vertex]):
return mgp.Record(
nodes=list(
nx.dfs_postorder_nodes(
MemgraphMultiDiGraph(ctx=ctx), source, depth_limit=depth_limit
)
)
)
# networkx.algorithms.traversal.edgebfs.edge_bfs
@mgp.read_proc
def edge_bfs(
ctx: mgp.ProcCtx,
source: mgp.Nullable[mgp.Vertex] = None,
orientation: mgp.Nullable[str] = None,
) -> mgp.Record(edges=mgp.List[mgp.Edge]):
return mgp.Record(
edges=list(
e
for _, _, e in nx.edge_bfs(
MemgraphMultiDiGraph(ctx=ctx), source=source, orientation=orientation
)
)
)
# networkx.algorithms.traversal.edgedfs.edge_dfs
@mgp.read_proc
def edge_dfs(
ctx: mgp.ProcCtx,
source: mgp.Nullable[mgp.Vertex] = None,
orientation: mgp.Nullable[str] = None,
) -> mgp.Record(edges=mgp.List[mgp.Edge]):
return mgp.Record(
edges=list(
e
for _, _, e in nx.edge_dfs(
MemgraphMultiDiGraph(ctx=ctx), source=source, orientation=orientation
)
)
)
# networkx.algorithms.tree.recognition.is_tree
@mgp.read_proc
def is_tree(ctx: mgp.ProcCtx) -> mgp.Record(is_tree=bool):
return mgp.Record(is_tree=nx.is_tree(MemgraphDiGraph(ctx=ctx)))
# networkx.algorithms.tree.recognition.is_forest
@mgp.read_proc
def is_forest(ctx: mgp.ProcCtx) -> mgp.Record(is_forest=bool):
return mgp.Record(is_forest=nx.is_forest(MemgraphDiGraph(ctx=ctx)))
# networkx.algorithms.tree.recognition.is_arborescence
@mgp.read_proc
def is_arborescence(ctx: mgp.ProcCtx) -> mgp.Record(is_arborescence=bool):
return mgp.Record(is_arborescence=nx.is_arborescence(MemgraphDiGraph(ctx=ctx)))
# networkx.algorithms.tree.recognition.is_branching
@mgp.read_proc
def is_branching(ctx: mgp.ProcCtx) -> mgp.Record(is_branching=bool):
return mgp.Record(is_branching=nx.is_branching(MemgraphDiGraph(ctx=ctx)))
# networkx.algorithms.tree.mst.minimum_spanning_tree
@mgp.read_proc
def minimum_spanning_tree(
ctx: mgp.ProcCtx,
weight: str = "weight",
algorithm: str = "kruskal",
ignore_nan: bool = False,
) -> mgp.Record(nodes=mgp.List[mgp.Vertex], edges=mgp.List[mgp.Edge]):
gres = nx.minimum_spanning_tree(
MemgraphMultiGraph(ctx=ctx), weight, algorithm, ignore_nan
)
return mgp.Record(
nodes=list(gres.nodes()), edges=[e for _, _, e in gres.edges(keys=True)]
)
# networkx.algorithms.triads.triadic_census
@mgp.read_proc
def triadic_census(ctx: mgp.ProcCtx) -> mgp.Record(triad=str, count=int):
return [
mgp.Record(triad=t, count=c)
for t, c in nx.triadic_census(MemgraphDiGraph(ctx=ctx)).items()
]
# networkx.algorithms.voronoi.voronoi_cells
@mgp.read_proc
def voronoi_cells(
ctx: mgp.ProcCtx, center_nodes: mgp.List[mgp.Vertex], weight: str = "weight"
) -> mgp.Record(center=mgp.Vertex, cell=mgp.List[mgp.Vertex]):
return [
mgp.Record(center=c1, cell=list(c2))
for c1, c2 in nx.voronoi_cells(
MemgraphMultiDiGraph(ctx=ctx), center_nodes, weight=weight
).items()
]
# networkx.algorithms.wiener.wiener_index
@mgp.read_proc
def wiener_index(
ctx: mgp.ProcCtx, weight: mgp.Nullable[str] = None
) -> mgp.Record(wiener_index=mgp.Number):
return mgp.Record(
wiener_index=nx.wiener_index(MemgraphMultiDiGraph(ctx=ctx), weight=weight)
)
@mgp.read_proc
def weakly_connected_components_subgraph(
vertices: mgp.List[mgp.Vertex], edges: mgp.List[mgp.Edge]
) -> mgp.Record(n_components=int, components=mgp.List[mgp.List[mgp.Vertex]]):
"""
This procedure finds weakly connected components of a given subgraph of a
directed graph.
    The subgraph is defined by a list of vertices and a list of edges, both
    passed as arguments of the procedure. More precisely, the vertex set of
    the subgraph contains all vertices provided in the list along with all
    vertices that are endpoints of the provided edges. Similarly, the edge
    set of the subgraph contains all edges from the provided list.
The procedure returns 2 fields:
* `n_components` is the number of weakly connected components of the
subgraph.
* `components` is a list of weakly connected components. Each component
is given as a list of `mgp.Vertex` objects from that component.
For example, weakly connected components in a subgraph formed from all
vertices labeled `Person` and edges between such vertices can be obtained
using the following openCypher query:
MATCH (n:Person)-[e]->(m:Person)
WITH collect(n) AS nodes, collect(e) AS edges
    CALL wcc.weakly_connected_components_subgraph(nodes, edges) YIELD *
RETURN n_components, components;
"""
g = nx.DiGraph()
g.add_nodes_from(vertices)
g.add_edges_from([(edge.from_vertex, edge.to_vertex) for edge in edges])
components = [list(wcc) for wcc in nx.weakly_connected_components(g)]
return mgp.Record(n_components=len(components), components=components)
|
428240
|
from collections import namedtuple
from unittest.mock import call, patch, MagicMock
from kubernetes.client.rest import ApiException
import pytest
from nephos.helpers.executer import Executer
class TestExecuter:
def test_executer_init(self):
executer = Executer("a-pod", "a-namespace")
assert executer.pod == "a-pod"
assert executer.prefix_exec == "kubectl exec a-pod -n a-namespace -- "
def test_executer_init_container(self):
executer = Executer("a-pod", "a-namespace", container="a_container")
assert executer.pod == "a-pod"
assert (
executer.prefix_exec
== "kubectl exec a-pod -n a-namespace --container a_container -- "
)
@patch("nephos.helpers.executer.execute")
def test_executer_execute(self, mock_execute):
mock_execute.side_effect = [("result", None)]
executer = Executer("a_pod", "a-namespace")
executer.execute("a_command")
mock_execute.assert_called_once_with(
"kubectl exec a_pod -n a-namespace -- a_command"
)
@patch("nephos.helpers.executer.execute")
def test_executer_logs(self, mock_execute):
mock_execute.side_effect = [("result", None)]
executer = Executer("a_pod", "a-namespace")
executer.logs()
mock_execute.assert_called_once_with(
"kubectl logs a_pod -n a-namespace --tail=-1"
)
@patch("nephos.helpers.executer.execute")
def test_executer_logs_tail(self, mock_execute):
mock_execute.side_effect = [("result", None)]
executer = Executer("a_pod", "a-namespace", container="a_container")
executer.logs(tail=10)
mock_execute.assert_called_once_with(
"kubectl logs a_pod -n a-namespace --container a_container --tail=10"
)
@patch("nephos.helpers.executer.execute")
def test_executer_logs_sincetime(self, mock_execute):
mock_execute.side_effect = [("result", None)]
executer = Executer("a_pod", "a-namespace")
executer.logs(since_time="1970-01-01T00:00:00Z")
mock_execute.assert_called_once_with(
"kubectl logs a_pod -n a-namespace --tail=-1 --since-time='1970-01-01T00:00:00Z'"
)
|
428258
|
import os, sys
from mapslicer import version
# py2exe - needs OSGeo4W with GDAL 1.6
if sys.platform in ['win32','win64']:
from distutils.core import setup
import glob
import py2exe
sys.path.insert(0, 'C:\\OSGeo4W\\apps\\gdal-16\\pymod' )
os.environ['PATH'] += ';C:\\OSGeo4W\\bin'
setup(name='MapSlicer',
version=version.replace(' ','.'),
description = "MapSlicer - Map Tile Generator for Mashups",
          long_description= "MapSlicer is a powerful tool for online map publishing and for generating map overlay mashups. Your geodata are transformed into tiles compatible with Google Maps and Earth - ready for uploading to your web server.",
url='https://github.com/kalxas/mapslicer',
author='<NAME>',
author_email='<EMAIL>',
packages=['mapslicer'],
scripts=['mapslicer.py'],
windows=[ {'script':'mapslicer.py', "icon_resources": [(1, os.path.join('resources', 'mapslicer.ico'))] } ],
data_files=[
('proj', glob.glob('C:\\OSGeo4W\\share\\proj\\*')),
('gdal', glob.glob('C:\\OSGeo4W\\apps\\gdal-16\\share\\gdal\\*')),
('gdalplugins', glob.glob('C:\\OSGeo4W\\apps\\gdal-16\\bin\\gdalplugins\\*.*')),
('', glob.glob('C:\\OSGeo4W\\bin\\*.dll')+glob.glob('C:\\OSGeo4W\\bin\\*.manifest')),
],
options={'py2exe':{'packages':['mapslicer'],
'includes':['encodings','osgeo'],
'excludes':['PIL','numpy','wx.BitmapFromImage','wx.EmptyIcon']
},
},
)
# py2app - creates 'fat' standalone Universal binary - with size around 160MB :-(
# Use 'Build Applet.app' for small Leopard-only bundle with dependency on the Kyngchaos GDAL 1.6 Framework
if sys.platform == 'darwin':
from setuptools import setup
import py2app
# Build the .app file
setup(
options=dict(
py2app=dict(
iconfile='resources/mapslicer.icns',
packages='wx',
excludes='osgeo,PIL,numpy',
resources=['resources/license/LICENSE.txt','mapslicer'],
plist=dict(
CFBundleName = "MapSlicer",
CFBundleShortVersionString = version.replace(' ','.'),
CFBundleGetInfoString = "MapSlicer %s" % version,
CFBundleExecutable = "MapSlicer",
CFBundleIdentifier = "cz.klokan.mapslicer",
),
frameworks=['PROJ.framework','GEOS.framework','SQLite3.framework','UnixImageIO.framework','GDAL.framework'],
),
),
app=[ 'mapslicer.py' ]
)
|
428286
|
from .enums import BasePermissionEnum
def is_app(context):
return bool(context.app)
def is_user(context):
return context.user.is_active and context.user.is_authenticated
def is_staff_user(context):
return is_user(context) and context.user.is_staff
class AuthorizationFilters(BasePermissionEnum):
# Grants access to any authenticated app.
AUTHENTICATED_APP = "authorization_filters.authenticated_app"
# Grants access to any authenticated staff user.
AUTHENTICATED_STAFF_USER = "authorization_filters.authenticated_staff_user"
# Grants access to any authenticated user.
AUTHENTICATED_USER = "authorization_filters.authenticated_user"
# Grants access to the owner of the related object. This rule doesn't come with any
# permission function, as the ownership needs to be defined individually in each
# case.
OWNER = "authorization_filters.owner"
AUTHORIZATION_FILTER_MAP = {
AuthorizationFilters.AUTHENTICATED_APP: is_app,
AuthorizationFilters.AUTHENTICATED_USER: is_user,
AuthorizationFilters.AUTHENTICATED_STAFF_USER: is_staff_user,
}
def resolve_authorization_filter_fn(perm):
return AUTHORIZATION_FILTER_MAP.get(perm)
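# Usage sketch (illustrative): resolving and applying a permission check.
# `info.context` stands in for the request context a GraphQL resolver would
# receive; it is an assumption, not something defined in this module.
#
#     fn = resolve_authorization_filter_fn(AuthorizationFilters.AUTHENTICATED_USER)
#     if fn is not None and fn(info.context):
#         ...  # caller is an active, authenticated user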
|
428292
|
import tempfile
import contextlib
import pytest
import random
import pydeduplines
@pytest.mark.parametrize(
'number_of_threads',
[
0,
1,
2,
]
)
@pytest.mark.parametrize(
'number_of_splits',
[
1,
2,
]
)
def test_compute_unique_lines_one_file(
number_of_threads,
number_of_splits,
):
with contextlib.ExitStack() as stack:
test_input_file_one = stack.enter_context(
tempfile.NamedTemporaryFile('wb')
)
test_output_file = stack.enter_context(
tempfile.NamedTemporaryFile('rb')
)
lines = [
f'line{i}'.encode()
for i in range(11000)
]
random.shuffle(lines)
test_input_file_one.file.write(b'\n'.join(lines * 2))
test_input_file_one.file.flush()
tempdir = tempfile.mkdtemp()
pydeduplines.compute_unique_lines(
working_directory=tempdir,
file_paths=[
test_input_file_one.name,
],
output_file_path=test_output_file.name,
number_of_splits=number_of_splits,
number_of_threads=number_of_threads,
)
unique_file_data = test_output_file.read()
assert sorted(unique_file_data.split(b'\n')[:-1]) == sorted(lines)
@pytest.mark.parametrize(
'number_of_threads',
[
0,
1,
2,
]
)
@pytest.mark.parametrize(
'number_of_splits',
[
1,
2,
]
)
def test_compute_unique_lines_two_files(
number_of_threads,
number_of_splits,
):
with contextlib.ExitStack() as stack:
test_input_file_one = stack.enter_context(
tempfile.NamedTemporaryFile('wb')
)
test_input_file_two = stack.enter_context(
tempfile.NamedTemporaryFile('wb')
)
test_output_file = stack.enter_context(
tempfile.NamedTemporaryFile('rb')
)
lines = [
f'line{i}'.encode()
for i in range(11000)
]
random.shuffle(lines)
test_input_file_one.file.write(b'\n'.join(lines[:10000]))
test_input_file_one.file.flush()
test_input_file_two.file.write(b'\n'.join(lines[:11000]))
test_input_file_two.file.flush()
tempdir = tempfile.mkdtemp()
pydeduplines.compute_unique_lines(
working_directory=tempdir,
file_paths=[
test_input_file_one.name,
test_input_file_two.name,
],
output_file_path=test_output_file.name,
number_of_splits=number_of_splits,
number_of_threads=number_of_threads,
)
unique_file_data = test_output_file.read()
assert sorted(unique_file_data.split(b'\n')[:-1]) == sorted(lines)
@pytest.mark.parametrize(
'number_of_threads',
[
0,
1,
2,
]
)
@pytest.mark.parametrize(
'number_of_splits',
[
1,
2,
]
)
def test_compute_added_lines(
number_of_threads,
number_of_splits,
):
with contextlib.ExitStack() as stack:
test_input_file_one = stack.enter_context(
tempfile.NamedTemporaryFile('wb')
)
test_input_file_two = stack.enter_context(
tempfile.NamedTemporaryFile('wb')
)
test_output_file = stack.enter_context(
tempfile.NamedTemporaryFile('rb')
)
lines = [
f'line{i}'.encode()
for i in range(11000)
]
random.shuffle(lines)
test_input_file_one.file.write(b'\n'.join(lines[:10000]))
test_input_file_one.file.flush()
test_input_file_two.file.write(b'\n'.join(lines[:11000]))
test_input_file_two.file.flush()
tempdir = tempfile.mkdtemp()
pydeduplines.compute_added_lines(
working_directory=tempdir,
first_file_path=test_input_file_one.name,
second_file_path=test_input_file_two.name,
output_file_path=test_output_file.name,
number_of_splits=number_of_splits,
number_of_threads=number_of_threads,
)
added_lines_file_data = test_output_file.read()
assert sorted(added_lines_file_data.split(b'\n')[:-1]) == sorted(lines[10000:])
|
428306
|
from orphics.theory.limber import XCorrIntegrator
cosmo = {}
cosmo['omch2'] = 0.1194
cosmo['ombh2'] = 0.022
cosmo['H0'] = 67.0
cosmo['ns'] = 0.96
cosmo['As'] = 2.2e-9
#mnu = 0.06,0.02
#w0 = -1.0,0.3
x = XCorrIntegrator(cosmo)
x.addDeltaNz("jiagalaxy",zsource=2.0)
import numpy as np
print((-np.trapz(x.kernels['jiagalaxy']['W'],x.zs)))
|
428308
|
from setuptools import setup
setup(
name='awsume-console-plugin',
version='1.2.2',
entry_points={
'awsume': [
'console = console'
]
},
author='Trek10, Inc',
author_email='<EMAIL>',
py_modules=['console'],
)
|
428318
|
import json
from ..AdKeyword.sub.ManagedKeywordInfoObject import ManagedKeywordInfoObject
class ManagedKeywordObject:
def __init__(self, json_def):
if type(json_def) is str:
json_def = json.loads(json_def)
s = json_def
        self.keyword = s.get('keyword')
        self.managedKeyword = ManagedKeywordInfoObject(s['managedKeyword']) if 'managedKeyword' in s else None
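# Usage sketch (hypothetical payload): the constructor accepts either a dict
# or a JSON string, and leaves missing fields as None.
#
#     obj = ManagedKeywordObject('{"keyword": "shoes"}')
#     assert obj.keyword == "shoes" and obj.managedKeyword is None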
|
428327
|
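# Note: this intentionally subclasses a ctypes structure of the same name,
# assumed to be in scope from a generated module (e.g. via a star import),
# in order to attach a readable __repr__.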
class _EVENT_DESCRIPTOR(_EVENT_DESCRIPTOR):
def __repr__(self):
return "<{0} Id={self.Id} Opcode={self.Opcode} Version={self.Version} Level={self.Level}>".format(type(self).__name__, self=self)
|
428332
|
import pytest
def test_bad_subclass():
from dataclasses import dataclass
from jobflow import Maker
@dataclass
class BadMaker(Maker):
name: str = "BadMaker"
with pytest.raises(NotImplementedError):
BadMaker().make()
@dataclass
class BadMaker(Maker):
a = 1
with pytest.raises(NotImplementedError):
BadMaker().name()
def test_required_arguments_works():
from dataclasses import dataclass
@dataclass
class MyMaker:
a: int
name = "123"
m = MyMaker(1)
assert m.a == 1
with pytest.raises(TypeError):
MyMaker()
def test_job_maker():
from dataclasses import dataclass
from jobflow.core.job import job
from jobflow.core.maker import Maker
@dataclass
class AddMaker(Maker):
name: str = "add"
@job
def make(self, a, b):
return a + b
maker = AddMaker()
add_job = maker.make(1, 2)
assert add_job.name == "add"
assert add_job.function_args == (1, 2)
assert add_job.maker == maker # maker should match as all kwargs are the same
assert str(add_job.function) == str(maker.make)
# test updating maker class does not impact the job
maker.name = "sum"
assert add_job.maker != maker # now makers should not match
assert add_job.maker.name != "sum"
def test_flow_maker():
from dataclasses import dataclass
from jobflow import Flow, Maker, job
@job
def add(a, b):
return a + b
@dataclass
class AddMaker(Maker):
name: str = "add"
def make(self, a, b):
first = add(a, b)
second = add(first.output, b)
return Flow([first, second], second.output, name=self.name)
maker = AddMaker()
add_job = maker.make(1, 2)
assert add_job.name == "add"
assert len(add_job.jobs) == 2
@dataclass
class DoubleAddMaker(Maker):
name: str = "add_add"
add1: AddMaker = AddMaker()
add2: AddMaker = AddMaker()
def make(self, a, b):
first = self.add1.make(a, b)
second = self.add2.make(first.output, b)
return Flow([first, second], second.output, name=self.name)
maker = DoubleAddMaker()
double_add_flow = maker.make(1, 2)
assert double_add_flow.name == "add_add"
assert len(double_add_flow.jobs) == 2
assert isinstance(double_add_flow.jobs[0], Flow)
assert isinstance(double_add_flow.jobs[1], Flow)
def test_update_kwargs():
from dataclasses import dataclass
from monty.json import MSONable
from jobflow.core.job import Response, job
from jobflow.core.maker import Maker
# this is needed to get monty to deserialize them correctly
global AddMaker
global DetourMaker
@dataclass
class AddMaker(Maker):
name: str = "add"
c: int = 5
@job
def make(self, a, b):
return a + b + self.c
@dataclass
class DetourMaker(Maker):
name: str = "add"
add_maker: Maker = AddMaker()
def make(self, a, b):
detour = self.add_maker.make(a, b)
return Response(detour=detour)
# test no filter
maker = AddMaker()
maker = maker.update_kwargs({"c": 10})
assert maker.c == 10
# test bad kwarg
maker = AddMaker()
with pytest.raises(TypeError):
maker.update_kwargs({"d": 10})
# test name filter
maker = AddMaker()
maker = maker.update_kwargs({"c": 10}, name_filter="add")
assert maker.c == 10
maker = AddMaker()
maker = maker.update_kwargs({"c": 10}, name_filter="div")
assert maker.c == 5
# test class filter
maker = AddMaker()
maker = maker.update_kwargs({"c": 10}, class_filter=AddMaker)
assert maker.c == 10
maker = AddMaker()
maker = maker.update_kwargs({"c": 10}, class_filter=maker)
assert maker.c == 10
maker = AddMaker()
maker = maker.update_kwargs({"c": 10}, class_filter=list)
assert maker.c == 5
# test dict mod
maker = AddMaker()
maker = maker.update_kwargs({"_inc": {"c": 10}}, dict_mod=True)
assert maker.c == 15
# test nesting
maker = DetourMaker()
maker = maker.update_kwargs({"c": 10}, class_filter=AddMaker, nested=True)
assert maker.add_maker.c == 10
maker = DetourMaker()
maker = maker.update_kwargs({"c": 10}, class_filter=AddMaker, nested=False)
assert maker.add_maker.c == 5
global NotAMaker
global FakeDetourMaker
@dataclass
class NotAMaker(MSONable):
name: str = "add"
c: int = 5
@job
def make(self, a, b):
return a + b + self.c
@dataclass
class FakeDetourMaker(Maker):
name: str = "add"
add_maker: MSONable = NotAMaker()
def make(self, a, b):
detour = self.add_maker.make(a, b)
return Response(detour=detour)
# test non maker dataclasses not updated
maker = FakeDetourMaker()
maker = maker.update_kwargs({"c": 10}, class_filter=NotAMaker, nested=True)
assert maker.add_maker.c == 5
|
428342
|
import math
INFINITY = float('inf')
NAN = float('nan')
def sqrt_nothrow(x):
return math.sqrt(x) if x >= 0 else NAN
def cg(opfunc, x, config, state=None):
"""
    This cg implementation is a rewrite of minimize.m written by
    <NAME>. It is supposed to produce exactly the same results
    (give or take numerical accuracy due to a changed order of operations).
    You can compare the result on rosenbrock with minimize.m.
    http://www.gatsby.ucl.ac.uk/~edward/code/minimize/example.html
    [x fx c] = minimize([0 0]', 'rosenbrock', -25)
    Note that we limit only the number of function evaluations, as that seems
    much more important in practical use.
    ARGS:
    - `opfunc` : a function that takes a single input, the point of evaluation.
    - `x` : the initial point
    - `config` : a table of algorithm parameters (rho, sig, int, ext, maxIter, ratio, maxEval)
    - `state` : a table of parameters and temporary allocations.
- `state['maxEval']` : max number of function evaluations
- `state['maxIter']` : max number of iterations
- `state['df0']` : if you pass torch.Tensor they will be used for temp storage
- `state['df1']` : if you pass torch.Tensor they will be used for temp storage
- `state['df2']` : if you pass torch.Tensor they will be used for temp storage
- `state['df3']` : if you pass torch.Tensor they will be used for temp storage
- `state['s']` : if you pass torch.Tensor they will be used for temp storage
- `state['x0']` : if you pass torch.Tensor they will be used for temp storage
RETURN:
- `x*` : the new x vector, at the optimal point
- `f` : a table of all function values where
       `f[0]` is the value of the function before any optimization and
       `f[-1]` is the final fully optimized value, at x*
(<NAME>, 2012)
"""
# parameters
if config is None and state is None:
raise ValueError("cg requires a dictionary to retain state between iterations")
state = state if state is not None else config
rho = config.get('rho', 0.01)
sig = config.get('sig', 0.5)
_int = config.get('int', 0.1)
ext = config.get('ext', 3.0)
maxIter = config.get('maxIter', 20)
ratio = config.get('ratio', 100)
maxEval = config.get('maxEval', maxIter * 1.25)
red = 1
i = 0
ls_failed = 0
fx = []
# we need three points for the interpolation/extrapolation stuff
z1, z2, z3 = 0, 0, 0
d1, d2, d3 = 0, 0, 0
f1, f2, f3 = 0, 0, 0
df1 = state.get('df1', x.new())
df2 = state.get('df2', x.new())
df3 = state.get('df3', x.new())
df1.resize_as_(x)
df2.resize_as_(x)
df3.resize_as_(x)
# search direction
s = state.get('s', x.new())
s.resize_as_(x)
# we need a temp storage for X
x0 = state.get('x0', x.new())
f0 = 0
df0 = state.get('df0', x.new())
x0.resize_as_(x)
df0.resize_as_(x)
# evaluate at initial point
f1, tdf = opfunc(x)
fx.append(f1)
df1.copy_(tdf)
i = i + 1
# initial search direction
s.copy_(df1).mul_(-1)
d1 = -s.dot(s) # slope
z1 = red / (1 - d1) # initial step
while i < abs(maxEval):
x0.copy_(x)
f0 = f1
df0.copy_(df1)
x.add_(z1, s)
f2, tdf = opfunc(x)
df2.copy_(tdf)
i = i + 1
d2 = df2.dot(s)
f3, d3, z3 = f1, d1, -z1 # init point 3 equal to point 1
m = min(maxIter, maxEval - i)
success = 0
limit = -1
while True:
while (f2 > f1 + z1 * rho * d1 or d2 > -sig * d1) and m > 0:
limit = z1
if f2 > f1:
z2 = z3 - (0.5 * d3 * z3 * z3) / (d3 * z3 + f2 - f3)
else:
A = 6 * (f2 - f3) / z3 + 3 * (d2 + d3)
B = 3 * (f3 - f2) - z3 * (d3 + 2 * d2)
z2 = (sqrt_nothrow(B * B - A * d2 * z3 * z3) - B) / A
if z2 != z2 or z2 == INFINITY or z2 == -INFINITY:
z2 = z3 / 2
z2 = max(min(z2, _int * z3), (1 - _int) * z3)
z1 = z1 + z2
x.add_(z2, s)
f2, tdf = opfunc(x)
df2.copy_(tdf)
i = i + 1
m = m - 1
d2 = df2.dot(s)
z3 = z3 - z2
if f2 > f1 + z1 * rho * d1 or d2 > -sig * d1:
break
elif d2 > sig * d1:
success = 1
break
elif m == 0:
break
A = 6 * (f2 - f3) / z3 + 3 * (d2 + d3)
B = 3 * (f3 - f2) - z3 * (d3 + 2 * d2)
_denom = (B + sqrt_nothrow(B * B - A * d2 * z3 * z3))
z2 = -d2 * z3 * z3 / _denom if _denom != 0 else NAN
if z2 != z2 or z2 == INFINITY or z2 == -INFINITY or z2 < 0:
if limit < -0.5:
z2 = z1 * (ext - 1)
else:
z2 = (limit - z1) / 2
elif (limit > -0.5) and (z2 + z1) > limit:
z2 = (limit - z1) / 2
elif limit < -0.5 and (z2 + z1) > z1 * ext:
z2 = z1 * (ext - 1)
elif z2 < -z3 * _int:
z2 = -z3 * _int
elif limit > -0.5 and z2 < (limit - z1) * (1 - _int):
z2 = (limit - z1) * (1 - _int)
f3 = f2
d3 = d2
z3 = -z2
z1 = z1 + z2
x.add_(z2, s)
f2, tdf = opfunc(x)
df2.copy_(tdf)
i = i + 1
m = m - 1
d2 = df2.dot(s)
if success == 1:
f1 = f2
fx.append(f1)
ss = (df2.dot(df2) - df2.dot(df1)) / df1.dot(df1)
s.mul_(ss)
s.add_(-1, df2)
tmp = df1.clone()
df1.copy_(df2)
df2.copy_(tmp)
d2 = df1.dot(s)
if d2 > 0:
s.copy_(df1)
s.mul_(-1)
d2 = -s.dot(s)
z1 = z1 * min(ratio, d1 / (d2 - 1e-320))
d1 = d2
ls_failed = 0
else:
x.copy_(x0)
f1 = f0
df1.copy_(df0)
if ls_failed or i > maxEval:
break
tmp = df1.clone()
df1.copy_(df2)
df2.copy_(tmp)
s.copy_(df1)
s.mul_(-1)
d1 = -s.dot(s)
z1 = 1 / (1 - d1)
ls_failed = 1
state['df0'] = df0
state['df1'] = df1
state['df2'] = df2
state['df3'] = df3
state['x0'] = x0
state['s'] = s
return x, fx, i
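if __name__ == "__main__":
    # Minimal usage sketch, assuming a PyTorch version that still supports
    # the legacy `tensor.add_(alpha, other)` overload used by cg above.
    import torch

    def opfunc(x):
        # Quadratic bowl: f(x) = x.x with gradient 2x.
        return x.dot(x).item(), 2.0 * x

    x0 = torch.tensor([1.0, -2.0])
    x_star, fx, n_evals = cg(opfunc, x0, config={'maxEval': 50})
    print(x_star, fx[-1], n_evals)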
|
428349
|
from requests import Session
import json
import re
from typing import Pattern, Dict, Union
class LoggedInException(Exception):
def __init__(self, *args, **kwargs):
super(LoggedInException, self).__init__(*args, **kwargs)
class API(object):
"""
Unifi API for the Unifi Controller.
"""
_login_data = {}
_current_status_code = None
def __init__(self, username: str="ubnt", password: str="<PASSWORD>", site: str="default", baseurl: str="https://unifi:8443", verify_ssl: bool=True):
"""
        Initializes the API with default settings if no others are given.
:param username: username for the controller user
:param password: <PASSWORD>
        :param site: which site to connect to (not the name you've given the site, but the url-defined name)
:param baseurl: where the controller is located
        :param verify_ssl: check whether the certificate is valid; throws a warning if set to False
"""
self._login_data['username'] = username
self._login_data['password'] = password
self._site = site
self._verify_ssl = verify_ssl
self._baseurl = baseurl
self._session = Session()
def __enter__(self):
"""
Contextmanager entry handle
        :return: instance object of the class
"""
self.login()
return self
def __exit__(self, *args):
"""
Contextmanager exit handle
:return: None
"""
self.logout()
def login(self):
"""
Log the user in
:return: None
"""
self._current_status_code = self._session.post("{}/api/login".format(self._baseurl), data=json.dumps(self._login_data), verify=self._verify_ssl).status_code
if self._current_status_code == 400:
raise LoggedInException("Failed to log in to api with provided credentials")
def logout(self):
"""
Log the user out
:return: None
"""
self._session.get("{}/logout".format(self._baseurl))
self._session.close()
def list_clients(self, filters: Dict[str, Union[str, Pattern]]=None, order_by: str=None) -> list:
"""
List all available clients from the api
:param filters: dict with valid key, value pairs, string supplied is compiled to a regular expression
:param order_by: order by a valid client key, defaults to '_id' if key is not found
:return: A list of clients on the format of a dict
"""
        r = self._session.get("{}/api/s/{}/stat/sta".format(self._baseurl, self._site), verify=self._verify_ssl, data="json={}")
self._current_status_code = r.status_code
if self._current_status_code == 401:
raise LoggedInException("Invalid login, or login has expired")
data = r.json()['data']
if filters:
for term, value in filters.items():
value_re = value if isinstance(value, Pattern) else re.compile(value)
data = [x for x in data if term in x.keys() and re.fullmatch(value_re, x[term])]
if order_by:
data = sorted(data, key=lambda x: x[order_by] if order_by in x.keys() else x['_id'])
return data
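if __name__ == "__main__":
    # Usage sketch: the host, credentials and ordering key below are
    # placeholders (assumptions), not values shipped with this module.
    with API(username="admin", password="secret",
             baseurl="https://unifi.example:8443", verify_ssl=False) as api:
        for client in api.list_clients(order_by="_id"):
            print(client.get("_id"))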
|
428411
|
from os.path import dirname, join
import sys
from tempfile import TemporaryDirectory
import numpy as np
import pytest
from typhon.collocations import collapse, Collocator, Collocations, expand
from typhon.files import FileSet, MHS_HDF
from typhon.files.utils import get_testfiles_directory
import xarray as xr
class TestCollocations:
"""Testing the collocation functions."""
refdir = get_testfiles_directory("collocations")
@pytest.mark.skipif(refdir is None, reason="typhon-testfiles not found.")
def test_search(self):
"""Collocate fake MHS filesets"""
fake_mhs1 = FileSet(
path=join(self.refdir,
"{satname}_mhs_{year}", "{month}", "{day}",
"*NSS.MHSX.*.S{hour}{minute}.E{end_hour}{end_minute}.*.h5"),
handler=MHS_HDF(),
)
fake_mhs2 = fake_mhs1.copy()
with TemporaryDirectory() as outdir:
collocations = Collocations(
path=join(outdir, "{year}-{month}-{day}",
"{hour}{minute}{second}-{end_hour}{end_minute}{end_second}"),
)
collocations.search(
[fake_mhs1, fake_mhs2],
start="2007",
end="2008",
max_interval="1h",
max_distance="10km"
)
def test_flat_to_main_coord(self):
"""Tests Collocator._flat_to_main_coord
        This method is crucial since it stacks the input datasets for the
        collocating routine and makes them collocatable.
"""
collocator = Collocator()
test = xr.Dataset({
"time": ("time", np.arange(10)),
"lat": ("time", np.arange(10)),
"lon": ("time", np.arange(10)),
})
check = xr.Dataset({
"time": ("collocation", np.arange(10)),
"lat": ("collocation", np.arange(10)),
"lon": ("collocation", np.arange(10)),
})
results = collocator._flat_to_main_coord(test)
assert check.equals(results)
test = xr.Dataset({
"time": ("main", np.arange(10)),
"lat": ("main", np.arange(10)),
"lon": ("main", np.arange(10)),
})
check = xr.Dataset({
"time": ("collocation", np.arange(10)),
"lat": ("collocation", np.arange(10)),
"lon": ("collocation", np.arange(10)),
})
results = collocator._flat_to_main_coord(test)
assert check.equals(results)
test = xr.Dataset({
"time": ("scnline", np.arange(5)),
"lat": (("scnline", "scnpos"), np.arange(10).reshape(5, 2)),
"lon": (("scnline", "scnpos"), np.arange(10).reshape(5, 2)),
})
check = test.stack(collocation=("scnline", "scnpos"))
results = collocator._flat_to_main_coord(test)
assert check.equals(results)
def test_collocate_collapse_expand(self):
"""Test whether collocating, collapsing and expanding work"""
collocator = Collocator()
test = xr.Dataset({
"time": ("time", np.arange("2000", "2010", dtype="M8[Y]")),
"lat": ("time", np.arange(10)),
"lon": ("time", np.arange(10)),
})
collocations = collocator.collocate(
test, test, max_interval="30 days",
max_distance="150 miles"
)
collapsed = collapse(collocations)
expanded = expand(collocations)
|
428421
|
from __future__ import absolute_import
from . import driver
from . import objective
from . import plugin
from . import technique
|
428468
|
import numpy as np
from gym_collision_avoidance.envs.policies.LearningPolicy import LearningPolicy
from gym_collision_avoidance.envs.policies.GA3C_CADRL import network
class LearningPolicyGA3C(LearningPolicy):
""" The GA3C-CADRL policy while it's still being trained (an external process provides a discrete action input)
"""
def __init__(self):
LearningPolicy.__init__(self)
self.possible_actions = network.Actions()
def external_action_to_action(self, agent, external_action):
""" Convert the discrete external_action into an action for this environment using properties about the agent.
Args:
agent (:class:`~gym_collision_avoidance.envs.agent.Agent`): the agent who has this policy
external_action (int): discrete action between 0-11 directly from the network output
Returns:
[speed, heading_change] command
"""
raw_action = self.possible_actions.actions[int(external_action)]
action = np.array([agent.pref_speed*raw_action[0], raw_action[1]])
return action
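# Usage sketch (illustrative): `agent` comes from the collision-avoidance
# environment and only its `pref_speed` attribute is used here; `7` is an
# arbitrary index into the discrete action set.
#
#     policy = LearningPolicyGA3C()
#     speed, heading_change = policy.external_action_to_action(agent, 7)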
|
428478
|
from typing import List
from thompson_sampling.base import BasePrior
from pandas import isna
# TODO build out functionality to add priors
class BetaPrior(BasePrior):
def __init__(self):
"""
Initializes a prior distribution object
"""
super().__init__()
def _param_calculator(self, mean: float, variance: float, effective_size: int):
"""
Hidden method that creates the beta prior given specifications
"""
if mean >= 1 or mean <= 0:
raise ValueError(f"mean:{mean} must be in (0,1)")
if variance <= 0 or variance >= 0.5 ** 2 or variance >= (mean * (1 - mean)):
raise ValueError(
f"variance: {variance} must be in (0,{round(min([0.25, mean*(1-mean)]), 3)})"
)
if effective_size <= 0:
raise ValueError(f"effective_size: {effective_size} must be greater then 0")
alpha = round((((1 - mean) / variance) - (1 / mean)) * (mean ** 2), 3)
beta = round(alpha * (1 / mean - 1), 3)
        ratio = effective_size / (alpha + beta)  # rescale so that alpha + beta equals the effective size
return {"a": round(alpha * ratio), "b": round(beta * ratio)}
class GammaPrior(BasePrior):
def __init__(self):
super().__init__()
    def _param_calculator(self, mean: float, variance: float, effective_size: float):
if not isna(variance):
if any([mean <= 0, variance <= 0]):
raise ValueError("Parameters must be positive")
rate = mean / variance
shape = mean ** 2 / variance
scale = 1 / rate
if not isna(effective_size) and (isna(variance)):
if any([mean <= 0, effective_size <= 0]):
raise ValueError("Parameters must be positive")
rate = effective_size
shape = mean * effective_size
scale = 1 / rate
elif all([isna(variance), isna(effective_size)]):
raise ValueError("Must specify either variance or effective size")
return {"shape": round(shape, 3), "scale": round(scale, 3)}
|
428488
|
import itertools as it
import numpy, h5py, os
from pyglib.gutz.atoms import Atoms
from pyglib.basic.units import Ryd_eV
try:
    import spglib
except ImportError:
    import pyspglib.spglib as spglib
def cleanup_imap_list(imap_list):
'''Clean up imap list in case correlated atoms are not well ordered
in the struct file.
'''
imax = numpy.max(imap_list)+1
    index_map = numpy.zeros(imax, dtype=int)-1
for i in range(imax):
if i in imap_list:
index_map[i] = imap_list.index(i)
_imap_list = []
for imap in imap_list:
_imap_list.append(index_map[imap])
return _imap_list
class gAtoms(Atoms):
'''
    A class to describe the calculated system with all the relevant
    information, such as coordinates, correlated atom indices,
    symmetry information, etc.
'''
def __init__(self, symbols=None, positions=None, numbers=None,
masses=None, magmoms=None, scaled_positions=None,
cell=None, pbc=None, wiencase=None, locrot_list=None):
Atoms.__init__(self, symbols=symbols, positions=positions,
numbers=numbers, masses=masses, magmoms=magmoms,
scaled_positions=scaled_positions,
cell=cell, pbc=pbc)
self.wiencase = wiencase
self.locrot_list = locrot_list
def h5set(self):
from pyglib.io.h5io import h5auto_read
with h5py.File('ginit.h5', 'r') as f:
dist_cut = h5auto_read(f, \
'/usrqa/dist_cut', default=-1.0)
self.set_sym_dist_cut(dist_cut)
self.unit = h5auto_read(f, \
'/usrqa/unit', default='rydberg')
spin_polarization = h5auto_read(f, \
'/usrqa/spin_polarization', default='n')
self.set_ispin(spin_polarization)
orbital_polarization = h5auto_read(f, \
'/usrqa/full_orbital_polarization', default='n')
self.set_orbital_polarization(orbital_polarization)
spin_orbit_coup = h5auto_read(f, \
'/usrqa/spin_orbit_coup', default='n')
self.set_iso(spin_orbit_coup)
crystal_field = h5auto_read(f, \
'/usrqa/crystal_field', default='y')
self.set_crystal_field(crystal_field)
lhub = h5auto_read(f, \
'/usrqa/u_matrix_type', default=1)
self.set_lhub(lhub)
ldc = h5auto_read(f, \
'/usrqa/ldc', default=1)
self.set_ldc(ldc)
idx_equivalent_atoms = h5auto_read(f, \
'/usrqa/idx_equivalent_atoms')
idx_equivalent_atoms = idx_equivalent_atoms.tolist()
self.set_idx_equivalent_atoms(idx_equivalent_atoms)
unique_corr_symbol_list = h5auto_read(f, \
'/usrqa/unique_corr_symbol_list')
self.unique_corr_symbol_list = unique_corr_symbol_list.tolist()
self.unique_df_list = h5auto_read(f, \
'/usrqa/unique_df_list')
self.unique_nf_list = h5auto_read(f, '/usrqa/unique_nf_list')
if self.unique_nf_list is not None:
self.unique_nf_list = numpy.asfarray(self.unique_nf_list)
if self.lhub in [1, 2]:
self.unique_u_list = numpy.asfarray(h5auto_read(f, \
'/usrqa/unique_u_list_ev'))
self.unique_j_list = numpy.asfarray(h5auto_read(f, \
'/usrqa/unique_j_list_ev'))
if 'ryd' in self.unit:
self.unique_u_list /= Ryd_eV
self.unique_j_list /= Ryd_eV
elif self.lhub == 3:
self.unique_f_list = numpy.asfarray(h5auto_read(f, \
'/usrqa/unique_f_list_ev'))
self.set_nval_range_list()
self.set_CorrAtoms()
self.set_na2_list()
lnewton = h5auto_read(f, \
'/usrqa/lnewton', default=0)
self.set_gimix(lnewton)
iembeddiag = h5auto_read(f, \
'/usrqa/iembeddiag', default=-1)
self.set_giembeddiag(iembeddiag)
ferromagnetism = h5auto_read(f, \
"/usrqa/ferromagnetism", default="n")
if "y" == ferromagnetism:
self.fm_direction = \
f["/usrqa/unique_magmom_direction_list"][0]
else:
self.fm_direction = None
    # corr_list = list of indices of correlated atoms listed in self.symbols
def set_CorrAtoms(self):
corr_list = []
ityp_list = []
imap_list = []
df_list = []
nf_list = []
u_list = []
j_list = []
f_list = []
for i, s in enumerate(self.symbols):
if s in self.unique_corr_symbol_list:
corr_list.append(i)
ityp_list.append(self.unique_corr_symbol_list.index(s))
df_list.append(self.unique_df_list[ityp_list[-1]])
if self.unique_nf_list is not None:
nf_list.append(self.unique_nf_list[ityp_list[-1]])
if self.lhub in [1, 2]:
u_list.append(self.unique_u_list[ityp_list[-1]])
j_list.append(self.unique_j_list[ityp_list[-1]])
elif self.lhub == 3:
f_list.append(self.unique_f_list[ityp_list[-1]])
idx_equ = self.idx_equivalent_atoms[i]
imap = self.idx_equivalent_atoms.index(idx_equ)
imap_list.append(imap)
self.corr_list = corr_list
self.ityp_list = ityp_list
self.imap_list = cleanup_imap_list(imap_list)
self.df_list = df_list
self.u_list = u_list
self.j_list = j_list
self.f_list = f_list
if len(nf_list) == 0:
nf_list = None
if self.modify_mode:
with h5py.File('GPARAM.h5', 'r') as f:
if '/dc_nelf_list' in f:
self.nelf_list = f['/dc_nelf_list'][()]
else:
self.nelf_list = nf_list
else:
self.nelf_list = nf_list
def set_lhub(self, lhub):
self.lhub = lhub
def set_ldc(self, ldc):
self.ldc= ldc
def set_modify_mode(self):
self.modify_mode = os.path.exists('GPARAM.h5')
def set_orbital_polarization(self, orbital_polarization):
self.orbital_polarization = orbital_polarization
def set_crystal_field(self, crystal_field):
self.crystal_field = crystal_field
def set_ispin(self, spin_polarization):
self.ispin = 2 if 'y' in spin_polarization else 1
def set_iso(self, spin_orbit_coup):
self.iso = 2 if 'y' in spin_orbit_coup else 1
def set_imap_list(self, imap_list):
self.imap_list = imap_list
def set_gimix(self, gimix):
self.gimix = gimix
def set_giembeddiag(self, giembeddiag):
self.giembeddiag = int(giembeddiag)
def set_sym_dist_cut(self, dist_cut):
self.sym_dist_cut = dist_cut
def get_Rotations(self, iat, Nmax=10):
symbols = self.symbols
cell = self.cell
scaled_positions = self.scaled_positions
if self.locrot_list is not None:
locrot = self.locrot_list[iat]
else:
locrot = None
equivalent_indices = self.idx_equivalent_atoms
from pyglib.gutz.molecule import xtal_get_local_rot
return xtal_get_local_rot(symbols, scaled_positions, cell, iat,
self.sym_dist_cut, equivalent_indices=equivalent_indices,
locrot=locrot, fm_direction=self.fm_direction, Nmax=Nmax)
def get_EquivalentAtoms(self):
# Getting general information about symmetry of the lattice
if self.wiencase is not None:
from pyglib.dft.wien import get_equivalent_atom_indices
idx_equivalent_atoms = get_equivalent_atom_indices(
self.wiencase)
else:
dataset = spglib.get_symmetry_dataset(self, symprec=1e-5)
idx_equivalent_atoms = dataset['equivalent_atoms'].tolist()
return idx_equivalent_atoms
def set_idx_equivalent_atoms(self, idx_equivalent_atoms):
self.idx_equivalent_atoms = idx_equivalent_atoms
def get_llist(self):
if not hasattr(self, 'l_list'):
from pyglib.symm.angular_momentum_1p import get_l_list_from_string
l_list = []
AM_list = self.df_list
for i, s in enumerate(self.corr_list):
l_sublist = get_l_list_from_string(AM_list[i])
l_list.append(l_sublist)
return l_list
def set_SL_vector_list(self):
'''
Set S(L)_vector_list in the working symmetry-adapted basis.
'''
l_list = self.get_llist()
sx_list = []; sy_list = []; sz_list = []
lx_list = []; ly_list = []; lz_list = []
from pyglib.symm.angular_momentum_1p import get_S_vector, \
get_L_vector
for i, _ls in enumerate(l_list):
S_vector = get_S_vector(_ls)
L_vector = get_L_vector(_ls, iso=2)
utrans = self.utrans_list[i]
            for j, _S, _L in zip(it.count(), S_vector, L_vector):
S_vector[j] = utrans.T.conj().dot(_S).dot(utrans)
L_vector[j] = utrans.T.conj().dot(_L).dot(utrans)
sx_list.append(S_vector[0])
sy_list.append(S_vector[1])
sz_list.append(S_vector[2])
lx_list.append(L_vector[0])
ly_list.append(L_vector[1])
lz_list.append(L_vector[2])
self.sx_list = sx_list
self.sy_list = sy_list
self.sz_list = sz_list
self.lx_list = lx_list
self.ly_list = ly_list
self.lz_list = lz_list
def set_SelfEnergy(self):
'''
Get the self-energy structure and the corresponding unitary
transformation of the local basis.
'''
from pyglib.gutz.self_energy import get_self_energy
utrans_list = []
sigma_list = []
l_list = self.get_llist()
rotations_list = []
jgenerator_list = []
idx_equivalent_atoms = self.idx_equivalent_atoms
if self.modify_mode:
f = h5py.File('GPARAM.h5', 'r')
for i, l in enumerate(self.corr_list):
if self.modify_mode:
utrans_list.append(f['/IMPURITY_'+str(i+1)+ \
'/DB_TO_SAB'][()].T)
sigma_list.append(f['/IMPURITY_'+str(i+1)+ \
'/SIGMA_STRUCT'][()].T)
if 'y' in self.crystal_field and 'n' in \
self.orbital_polarization:
rotations_list.append(f['/IMPURITY_'+str(i+1)+ \
'/rotations'][()])
jgenerator_list.append(f['/IMPURITY_'+str(i+1)+ \
'/JGENERATOR'][()])
jgenerator_list[-1] = numpy.swapaxes(jgenerator_list[-1],1,2)
else:
if i > 0:
idx_equ = idx_equivalent_atoms[i]
if idx_equ in idx_equivalent_atoms[:i]:
imap = idx_equivalent_atoms.index(idx_equ)
utrans_list.append(utrans_list[imap])
sigma_list.append(sigma_list[imap])
if len(rotations_list) > 0:
rotations_list.append(rotations_list[imap])
jgenerator_list.append(jgenerator_list[imap])
continue
if 'y' in self.crystal_field and \
'n' in self.orbital_polarization:
rotations = self.get_Rotations(l)
rotations_list.append(rotations)
else:
rotations = None
jgen, utrans, sigma = get_self_energy(l_list[i], self.ispin,
self.orbital_polarization,
self.crystal_field, self.iso,
rotations=rotations)
utrans_list.append(utrans)
sigma_list.append(sigma)
if jgen is not None:
jgenerator_list.append(jgen)
self.utrans_list = utrans_list
self.sigma_list = sigma_list
if len(rotations_list) > 0:
self.rotations_list = rotations_list
self.jgenerator_list = jgenerator_list
else:
self.rotations_list = None
self.jgenerator_list = None
def set_LieParameters(self):
'''
Set the Lie parameters of rotation operators for odd and even J.
'''
import pyglib.symm.atom_symm as atsym
Lie_Jeven_list = []
Lie_Jodd_list = []
if 'y' in self.crystal_field and 'n' in self.orbital_polarization:
for i, l in enumerate(self.corr_list):
rotations = self.get_Rotations(l)
Lie = atsym.get_Lie_parameters(rotations, plus2pi=False)
Lie_Jeven_list.append(Lie)
Lie = atsym.get_Lie_parameters(rotations, plus2pi=True)
Lie_Jodd_list.append(Lie)
self.Lie_Jeven_list = Lie_Jeven_list
self.Lie_Jodd_list = Lie_Jodd_list
else:
self.Lie_Jeven_list = None
self.Lie_Jodd_list = None
def set_one_particle_rotation_list(self):
'''Setup the rotation matrix in the single-particle basis
'''
import pyglib.symm.atom_symm as atsym
if self.Lie_Jodd_list is not None:
sp_rotations_list = []
            for J, lies in zip(self.jgenerator_list, self.Lie_Jodd_list):
if self.iso == 1:
J_ = [J1[::2, ::2] for J1 in J]
else:
J_ = J
sp_rotations_ = atsym.get_representation(J_, lies)
if self.iso == 1:
sp_rotations = []
for sp_r_ in sp_rotations_:
sp_r = numpy.zeros_like(J[0])
sp_r[::2, ::2] = sp_r_
                        sp_r[1::2, 1::2] = sp_r[::2, ::2]
sp_rotations.append(sp_r)
else:
sp_rotations = sp_rotations_
sp_rotations_list.append(sp_rotations)
self.sp_rotations_list = sp_rotations_list
else:
self.sp_rotations_list = None
def set_na2_list(self):
l_list = self.get_llist()
na2_list = [numpy.sum(2 * (2 * numpy.array(ls) + 1)) for ls in l_list]
self.na2_list = na2_list
def set_nval_range_list(self):
self.nval_bot_list=[]
self.nval_top_list=[]
for s in self.unique_corr_symbol_list:
self.nval_bot_list.append(nval_range_list[s][0])
self.nval_top_list.append(nval_range_list[s][1])
def set_v2e_list(self):
from pyglib.mbody.coulomb_matrix import get_v2e_list
l_list = self.get_llist()
self.v2e_list, self.u_avg_list, self.j_avg_list = \
get_v2e_list(self.lhub, l_list, \
self.imap_list, self.utrans_list, \
u_list=self.u_list, j_list=self.j_list, \
f_list=self.f_list)
def h5calc_save_lrot(gatm):
    '''Calculate the 3d rotation matrix representations
    in Hilbert space for each impurity.
    For equivalent impurities, a link is set up instead of
    redundant calculations.
    The results are saved in 'GLROT.h5'.
    '''
if gatm.giembeddiag != -31:
return
from pyglib.mbody.local_operator_factory import get_lrot_op
if os.path.isfile('GLROT.h5'):
return
with h5py.File('GLROT.h5', 'w') as f:
for i, imap in enumerate(gatm.imap_list):
prefix = '/IMPURITY_{}'.format(i+1)
if i > imap:
f[prefix] = h5py.SoftLink(
'/IMPURITY_{}'.format(imap+1))
continue
elif i < imap:
raise ValueError('(i={}) > (imap={})'.format(i, imap))
else:
orb = gatm.df_list[i]
if orb == 'd':
nmax = 10
elif orb == 'f':
nmax = 14
else:
raise ValueError('not supported df={}!'.format(orb))
# empty or full occupation excluded.
lvec = [gatm.lx_list[i], gatm.ly_list[i], gatm.lz_list[i]]
for ival in range(1, nmax):
lrots = get_lrot_op(lvec, gatm.Lie_Jeven_list[i],
l=orb, ival=ival)
for ir, lrot in enumerate(lrots):
f[prefix+'/valence_block_{}/ROT_{}'.format( \
ival, ir+1)] = lrot.T
f[prefix+'/dim_rot'] = [len(gatm.Lie_Jeven_list[i])]
nval_range_list = {
"H":[0,2],
"Li":[0,2],
"Na":[0,2],
"Sc":[0,10],
"Ti":[0,10],
"V":[0,10],
"Cr":[0,10],
"Mn":[0,10],
"Fe":[0,10],
"Co":[0,10],
"Ni":[0,10],
"Cu":[0,10],
"Y":[0,10],
"Zr":[0,10],
"Nb":[0,10],
"Mo":[0,10],
"Tc":[0,10],
"Ru":[0,10],
"Rh":[0,10],
"Pd":[0,10],
"Ag":[0,10],
"Ce":[0,3],
"Pr":[0,4],
"Nd":[0,5],
"Pm":[0,8],
"Sm":[0,9],
"Eu":[0,10],
"Gd":[0,11],
"Tb":[0,12],
"Dy":[0,13],
"Ho":[0,14],
"Er":[1,14],
"Tm":[2,14],
"Yb":[3,14],
"Hf":[0,10],
"Ta":[0,10],
"W":[0,10],
"Re":[0,10],
"Os":[0,10],
"Ir":[0,10],
"Pt":[0,10],
"U":[0,4],
"Np":[0,14],
"Pu":[0,14],
"Am":[0,14],
"Cm":[0,14],
"Bk":[0,14],
"Cf":[0,14],
"Es":[0,14],
"Fm":[0,14],
"Md":[0,14]
}
|
428522
|
import argparse
import datetime
import sys
from io import StringIO
from . import StreamReporter, context_name, format_exception, make_readable
class DotsReporter(StreamReporter):
@classmethod
def locate(cls):
return (None, VerboseReporter)
def initialise(self, args, env):
return args.verbosity == 'normal'
def assertion_passed(self, *args, **kwargs):
self.dot()
def assertion_failed(self, *args, **kwargs):
self.F()
def assertion_errored(self, *args, **kwargs):
self.E()
def context_errored(self, *args, **kwargs):
self.E()
def test_class_errored(self, *args, **kwargs):
self.E()
def unexpected_error(self, *args, **kwargs):
self.E()
def test_run_ended(self):
self._print('')
def dot(self):
self._print('.', end='')
def F(self):
self._print('F', end='')
def E(self):
self._print('E', end='')
class VerboseReporter(StreamReporter):
dashes = '-' * 70
def setup_parser(self, parser):
group = parser.add_mutually_exclusive_group(required=False)
group.add_argument('-v', '--verbose',
action='store_const',
dest='verbosity',
const='verbose',
default='normal',
help="Enable verbose progress reporting.")
group.add_argument('-q', '--quiet',
action='store_const',
dest='verbosity',
const='quiet',
default='normal',
help="Disable progress reporting.")
def initialise(self, args, env):
return args.verbosity != "quiet"
def context_started(self, cls, example):
self._print(context_name(cls.__name__, example))
def context_errored(self, name, example, exception):
for line in format_exception(exception):
self._print(' ' + line)
def test_class_errored(self, cls, exception):
for line in format_exception(exception):
self._print(line)
def assertion_passed(self, func):
self._print(' PASS: ' + make_readable(func.__name__))
def assertion_failed(self, func, exception):
self._print(' FAIL: ' + make_readable(func.__name__))
for line in format_exception(exception):
self._print(' ' + line)
def assertion_errored(self, func, exception):
self._print(' ERROR: ' + make_readable(func.__name__))
for line in format_exception(exception):
self._print(' ' + line)
def unexpected_error(self, exception):
for line in format_exception(exception):
self._print(line)
class FinalCountsReporter(StreamReporter):
dashes = '-' * 70
@classmethod
def locate(cls):
return (VerboseReporter, None)
def initialise(self, args, env):
return args.verbosity != 'quiet'
def __init__(self, stream=sys.stdout):
super().__init__(stream)
self.context_count = 0
self.assertion_count = 0
self.failure_count = 0
self.error_count = 0
self.failed = False
def context_started(self, name, example):
self.context_count += 1
def context_errored(self, name, example, exception):
self.error_count += 1
self.failed = True
def assertion_started(self, func):
self.assertion_count += 1
def assertion_failed(self, func, exception):
self.failure_count += 1
self.failed = True
def assertion_errored(self, func, exception):
self.error_count += 1
self.failed = True
def unexpected_error(self, exception):
self.error_count += 1
self.failed = True
def test_class_errored(self, cls, exception):
self.error_count += 1
self.failed = True
def test_run_ended(self):
self.summarise()
def summarise(self):
self._print(self.dashes)
if self.failure_count or self.error_count:
self._print('FAILED!')
self._print(self.failure_numbers())
else:
self._print('PASSED!')
self._print(self.success_numbers())
def success_numbers(self):
return "{}, {}".format(
pluralise("context", self.context_count),
pluralise("assertion", self.assertion_count))
def failure_numbers(self):
return "{}, {}: {} failed, {}".format(
pluralise("context", self.context_count),
pluralise("assertion", self.assertion_count),
self.failure_count,
pluralise("error", self.error_count))
class StdOutCapturingReporter(StreamReporter):
@classmethod
def locate(cls):
return (VerboseReporter, FinalCountsReporter)
def setup_parser(self, parser):
parser.add_argument('-s', '--no-capture',
action='store_false',
dest='capture',
default=True,
help="Disable capturing of stdout during tests.")
def initialise(self, args, env):
self.quiet = args.verbosity == "quiet"
return args.capture and not (args.teamcity or 'TEAMCITY_VERSION' in env)
def centred_dashes(self, string, indentation):
num = str(70 - indentation)
return ("{:-^" + num + "}").format(string)
def context_started(self, name, example):
self.real_stdout = sys.stdout
self.buffer = StringIO()
sys.stdout = self.buffer
def context_ended(self, name, example):
sys.stdout = self.real_stdout
def context_errored(self, name, example, exception):
sys.stdout = self.real_stdout
self.output_buffer(2)
def assertion_failed(self, func, exception):
self.output_buffer(4)
def assertion_errored(self, func, exception):
self.output_buffer(4)
def output_buffer(self, indentation):
if self.buffer.getvalue() and not self.quiet:
lines = [self.centred_dashes(" >> begin captured stdout << ", indentation)]
lines.extend(self.buffer.getvalue().strip().split('\n'))
lines.append(self.centred_dashes(" >> end captured stdout << ", indentation))
for line in lines:
self._print((' ' * indentation) + line)
class TimedReporter(StreamReporter):
@classmethod
    def locate(cls):
return (FinalCountsReporter, None)
def initialise(self, args, env):
        return args.verbosity != 'quiet'
def test_run_started(self):
self.start_time = datetime.datetime.now()
def test_run_ended(self):
self.end_time = datetime.datetime.now()
self.print_time()
def print_time(self):
total_secs = (self.end_time - self.start_time).total_seconds()
rounded = round(total_secs, 1)
self._print("({} seconds)".format(rounded))
class Colouriser(StreamReporter):
@classmethod
def locate(cls):
return (DotsReporter, VerboseReporter)
def setup_parser(self, parser):
try:
parser.add_argument('--no-colour',
action='store_false',
dest='colour',
default=True,
help='Disable coloured output.')
except argparse.ArgumentError:
# just means the other one already did it
pass
def initialise(self, args, env):
if not sys.stdout.isatty():
return False
if args.colour:
global colorama
try:
import colorama # noqa
except ImportError:
return False
return True
def context_errored(self, name, example, exception):
self.stream.write(colorama.Fore.RED) # noqa
def assertion_passed(self, func):
self.stream.write(colorama.Fore.GREEN) # noqa
def assertion_failed(self, func, exception):
self.stream.write(colorama.Fore.RED) # noqa
def assertion_errored(self, func, exception):
self.stream.write(colorama.Fore.RED) # noqa
def test_class_errored(self, cls, exception):
self.stream.write(colorama.Fore.RED) # noqa
def unexpected_error(self, exception):
self.stream.write(colorama.Fore.RED) # noqa
class UnColouriser(StreamReporter):
@classmethod
def locate(cls):
return (StdOutCapturingReporter, None)
def setup_parser(self, parser):
try:
parser.add_argument('--no-colour',
action='store_false',
dest='colour',
default=True,
help='Disable coloured output.')
except argparse.ArgumentError:
# just means the other one already did it
pass
def initialise(self, args, env):
if not sys.stdout.isatty():
return False
if args.colour:
global colorama
try:
import colorama # noqa
except ImportError:
return False
return True
def context_errored(self, name, example, exception):
self.stream.write(colorama.Fore.RESET) # noqa
def assertion_passed(self, func):
self.stream.write(colorama.Fore.RESET) # noqa
def assertion_failed(self, func, exception):
self.stream.write(colorama.Fore.RESET) # noqa
def assertion_errored(self, func, exception):
self.stream.write(colorama.Fore.RESET) # noqa
def test_class_errored(self, cls, exception):
self.stream.write(colorama.Fore.RESET) # noqa
def unexpected_error(self, exception):
self.stream.write(colorama.Fore.RESET) # noqa
# these three are kinda hideous
class FailuresOnlyMaster(StreamReporter):
def __init__(self, stream):
super().__init__(stream)
self.plugins = []
self.final_report = StringIO()
self.fake_stream = StringIO()
@classmethod
def locate(cls):
return (None, FinalCountsReporter)
def initialise(self, args, env):
return args.verbosity == 'normal'
def request_plugins(self):
returned_plugins = yield [Colouriser, VerboseReporter, StdOutCapturingReporter, UnColouriser]
self.plugins = returned_plugins.values()
def set_streams(self, stream):
self.fake_stream = stream
for plugin in self.plugins:
plugin.stream = stream
class FailuresOnlyBefore(object):
dashes = '-' * 70
@classmethod
def locate(cls):
return (DotsReporter, Colouriser)
def initialise(self, args, env):
return args.verbosity == 'normal'
def request_plugins(self):
returned_plugins = yield [FailuresOnlyMaster]
self.master = returned_plugins[FailuresOnlyMaster]
def context_started(self, name, example):
self.master.set_streams(StringIO())
self.master.current_context_failed = False
def assertion_passed(self, func):
return True
def assertion_failed(self, func, exception):
self.master.current_context_failed = True
def assertion_errored(self, func, exception):
self.master.current_context_failed = True
def test_class_errored(self, cls, exception):
self.unexpected_error(exception)
def unexpected_error(self, exception):
# write the error directly to the report
self.master.orig_stream = self.master.fake_stream
self.master.set_streams(self.master.final_report)
def test_run_ended(self):
output = self.master.final_report.getvalue()
if output:
self.master.stream.write(self.dashes + '\n')
self.master.stream.write(output.strip() + '\n')
def __eq__(self, other):
return type(self) == type(other)
class FailuresOnlyAfter(object):
@classmethod
def locate(cls):
return (UnColouriser, None)
def initialise(self, args, env):
return args.verbosity == 'normal'
def request_plugins(self):
returned_plugins = yield [FailuresOnlyMaster]
self.master = returned_plugins[FailuresOnlyMaster]
def context_ended(self, name, example):
if self.master.current_context_failed:
self.master.final_report.write(self.master.fake_stream.getvalue())
self.master.set_streams(StringIO())
def context_errored(self, name, example, exception):
self.master.final_report.write(self.master.fake_stream.getvalue())
self.master.set_streams(StringIO())
def unexpected_error(self, exception):
self.master.set_streams(self.master.orig_stream)
def __eq__(self, other):
return type(self) == type(other)
def pluralise(noun, num):
string = str(num) + ' ' + noun
if num != 1:
string += 's'
return string
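# Quick sanity check of the helper above (illustrative):
#     pluralise("context", 1)   -> "1 context"
#     pluralise("assertion", 3) -> "3 assertions"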
|
428540
|
import csv
from convertextract.parsers.utils import BaseParser
class Parser(BaseParser):
"""Extract text from comma separated values files (.csv).
"""
delimiter = ','
def extract(self, filename, **kwargs):
if 'mapping' in kwargs and kwargs['mapping']:
transducer = self.create_transducer(kwargs['mapping'])
else:
transducer = self.get_transducer(kwargs.get('input_language', ''), kwargs.get('output_language', ''))
# quick 'n dirty solution for the time being
with open(filename) as stream:
reader = csv.reader(stream, delimiter=self.delimiter)
data = [row for row in reader]
return '\n'.join([self.delimiter.join([transducer(col).output_string for col in row]) for row in data])
|
428549
|
from datetime import date
from dotenv import load_dotenv
from pathlib import Path
from sqlalchemy import create_engine
import bs4 as bs
import ftplib
import gzip
import os
import pandas as pd
import psycopg2
import re
import sys
import time
import urllib.request
import wget
import zipfile
#%%
def getEnv(env):
return os.getenv(env)
load_dotenv()
dados_rf = 'http://172.16.58.3/CNPJ/'
output_files = Path(getEnv('OUTPUT_FILES_PATH'))
extracted_files = Path(getEnv('EXTRACTED_FILES_PATH'))
raw_html = urllib.request.urlopen(dados_rf)
raw_html = raw_html.read()
# Parse the page and convert it to a string
page_items = bs.BeautifulSoup(raw_html, 'lxml')
html_str = str(page_items)
# Collect the .zip file names
Files = []
text = '.zip'
for m in re.finditer(text, html_str):
i_start = m.start()-40
i_end = m.end()
i_loc = html_str[i_start:i_end].find('href=')+6
print(html_str[i_start+i_loc:i_end])
Files.append(html_str[i_start+i_loc:i_end])
print('Arquivos que serão baixados:')
i_f = 0
for f in Files:
i_f += 1
print(str(i_f) + ' - ' + f)
#%%
########################################################################################################################
## DOWNLOAD ############################################################################################################
########################################################################################################################
# Download files
# Progress-bar callback invoked automatically by wget:
def bar_progress(current, total, width=80):
progress_message = "Downloading: %d%% [%d / %d] bytes - " % (current / total * 100, current, total)
# Don't use print() as it will print in new line every time.
sys.stdout.write("\r" + progress_message)
sys.stdout.flush()
#%%
# Download the files ##############################################################################################################################
i_l = 0
for l in Files:
    # Download each file
i_l += 1
print('Baixando arquivo:')
print(str(i_l) + ' - ' + l)
url = dados_rf+l
wget.download(url, out=output_files, bar=bar_progress)
#%%
# Download layout:
Layout = 'https://www.gov.br/receitafederal/pt-br/assuntos/orientacao-tributaria/cadastros/consultas/arquivos/NOVOLAYOUTDOSDADOSABERTOSDOCNPJ.pdf'
print('Baixando layout:')
wget.download(Layout, out=output_files, bar=bar_progress)
####################################################################################################################################################
#%%
# Creating directory to store the extracted files:
if not os.path.exists(extracted_files):
os.mkdir(extracted_files)
# Extracting files:
i_l = 0
for l in Files:
try:
i_l += 1
print('Descompactando arquivo:')
print(str(i_l) + ' - ' + l)
with zipfile.ZipFile(output_files / l, 'r') as zip_ref:
zip_ref.extractall(extracted_files)
    except Exception:
        # skip entries that could not be unzipped
        pass
#%%
########################################################################################################################
## READ AND INSERT DATA ################################################################################
########################################################################################################################
insert_start = time.time()
# Files:
Items = [name for name in os.listdir(extracted_files)]  # endswith('') matched everything, so keep all files
# Sort the files by type:
arquivos_empresa = []
arquivos_estabelecimento = []
arquivos_socios = []
arquivos_simples = []
arquivos_cnae = []
arquivos_moti = []
arquivos_munic = []
arquivos_natju = []
arquivos_pais = []
arquivos_quals = []
for i in range(len(Items)):
if Items[i].find('EMPRE') > -1:
arquivos_empresa.append(Items[i])
elif Items[i].find('ESTABELE') > -1:
arquivos_estabelecimento.append(Items[i])
elif Items[i].find('SOCIO') > -1:
arquivos_socios.append(Items[i])
elif Items[i].find('SIMPLES') > -1:
arquivos_simples.append(Items[i])
elif Items[i].find('CNAE') > -1:
arquivos_cnae.append(Items[i])
elif Items[i].find('MOTI') > -1:
arquivos_moti.append(Items[i])
elif Items[i].find('MUNIC') > -1:
arquivos_munic.append(Items[i])
elif Items[i].find('NATJU') > -1:
arquivos_natju.append(Items[i])
elif Items[i].find('PAIS') > -1:
arquivos_pais.append(Items[i])
elif Items[i].find('QUALS') > -1:
arquivos_quals.append(Items[i])
else:
pass
#%%
# Connect to the database:
# Database connection settings
user=getEnv('DB_USER')
passw=getEnv('DB_PASSWORD')
host=getEnv('DB_HOST')
port=getEnv('DB_PORT')
database=getEnv('DB_NAME')
# Connect:
engine = create_engine('postgresql://'+user+':'+passw+'@'+host+':'+port+'/'+database)
conn = psycopg2.connect('dbname='+database+' '+'user='+user+' '+'host='+host+' '+'password='+passw)
cur = conn.cursor()
#%%
# Company (empresa) files:
empresa_insert_start = time.time()
print("""
#######################
## Arquivos de EMPRESA:
#######################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "empresa";')
conn.commit()
for e in range(0, len(arquivos_empresa)):
print('Trabalhando no arquivo: '+arquivos_empresa[e]+' [...]')
try:
del empresa
except:
pass
empresa = pd.DataFrame(columns=[0, 1, 2, 3, 4, 5, 6])
empresa_dtypes = {0: 'object', 1: 'object', 2: 'object', 3: 'object', 4: 'object', 5: 'object', 6: 'object'}
extracted_file_path = Path(f'{extracted_files}/{arquivos_empresa[e]}')
empresa = pd.read_csv(filepath_or_buffer=extracted_file_path,
sep=';',
#nrows=100,
skiprows=0,
header=None,
dtype=empresa_dtypes)
    # Clean the file up before inserting into the database:
    empresa = empresa.reset_index()
    del empresa['index']
    # Rename columns
    empresa.columns = ['cnpj_basico', 'razao_social', 'natureza_juridica', 'qualificacao_responsavel', 'capital_social', 'porte_empresa', 'ente_federativo_responsavel']
    # Replace "," with "."
    empresa['capital_social'] = empresa['capital_social'].apply(lambda x: x.replace(',','.'))
    empresa['capital_social'] = empresa['capital_social'].astype(float)
    # Write the data to the database:
    # Empresa
empresa.to_sql(name='empresa', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_empresa[e] + ' inserido com sucesso no banco de dados!')
try:
del empresa
except:
pass
print('Arquivos de empresa finalizados!')
empresa_insert_end = time.time()
empresa_Tempo_insert = round((empresa_insert_end - empresa_insert_start))
print('Tempo de execução do processo de empresa (em segundos): ' + str(empresa_Tempo_insert))
#%%
# Establishment (estabelecimento) files:
estabelecimento_insert_start = time.time()
print("""
###############################
## Arquivos de ESTABELECIMENTO:
###############################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "estabelecimento";')
conn.commit()
for e in range(0, len(arquivos_estabelecimento)):
print('Trabalhando no arquivo: '+arquivos_estabelecimento[e]+' [...]')
try:
del estabelecimento
except:
pass
estabelecimento = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10,11,12,13,14,15,16,17,18,19,20,21,22,23,24,25,26,27,28])
extracted_file_path = Path(f'{extracted_files}/{arquivos_estabelecimento[e]}')
estabelecimento = pd.read_csv(filepath_or_buffer=extracted_file_path,
sep=';',
#nrows=100,
skiprows=0,
header=None,
dtype='object')
    # Clean the file up before inserting into the database:
    estabelecimento = estabelecimento.reset_index()
    del estabelecimento['index']
    # Rename columns
estabelecimento.columns = ['cnpj_basico',
'cnpj_ordem',
'cnpj_dv',
'identificador_matriz_filial',
'nome_fantasia',
'situacao_cadastral',
'data_situacao_cadastral',
'motivo_situacao_cadastral',
'nome_cidade_exterior',
'pais',
'data_inicio_atividade',
'cnae_fiscal_principal',
'cnae_fiscal_secundaria',
'tipo_logradouro',
'logradouro',
'numero',
'complemento',
'bairro',
'cep',
'uf',
'municipio',
'ddd_1',
'telefone_1',
'ddd_2',
'telefone_2',
'ddd_fax',
'fax',
'correio_eletronico',
'situacao_especial',
'data_situacao_especial']
    # Write the data to the database:
    # estabelecimento
estabelecimento.to_sql(name='estabelecimento', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_estabelecimento[e] + ' inserido com sucesso no banco de dados!')
try:
del estabelecimento
except:
pass
print('Arquivos de estabelecimento finalizados!')
estabelecimento_insert_end = time.time()
estabelecimento_Tempo_insert = round((estabelecimento_insert_end - estabelecimento_insert_start))
print('Tempo de execução do processo de estabelecimento (em segundos): ' + str(estabelecimento_Tempo_insert))
#%%
# Partner (socios) files:
socios_insert_start = time.time()
print("""
######################
## Arquivos de SOCIOS:
######################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "socios";')
conn.commit()
for e in range(0, len(arquivos_socios)):
print('Trabalhando no arquivo: '+arquivos_socios[e]+' [...]')
try:
del socios
except:
pass
extracted_file_path = Path(f'{extracted_files}/{arquivos_socios[e]}')
socios = pd.DataFrame(columns=[1,2,3,4,5,6,7,8,9,10,11])
socios = pd.read_csv(filepath_or_buffer=extracted_file_path,
sep=';',
#nrows=100,
skiprows=0,
header=None,
dtype='object')
    # Clean the file up before inserting into the database:
    socios = socios.reset_index()
    del socios['index']
    # Rename columns
socios.columns = ['cnpj_basico',
'identificador_socio',
'nome_socio_razao_social',
'cpf_cnpj_socio',
'qualificacao_socio',
'data_entrada_sociedade',
'pais',
'representante_legal',
'nome_do_representante',
'qualificacao_representante_legal',
'faixa_etaria']
    # Write the data to the database:
    # socios
socios.to_sql(name='socios', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_socios[e] + ' inserido com sucesso no banco de dados!')
try:
del socios
except:
pass
print('Arquivos de socios finalizados!')
socios_insert_end = time.time()
socios_Tempo_insert = round((socios_insert_end - socios_insert_start))
print('Tempo de execução do processo de sócios (em segundos): ' + str(socios_Tempo_insert))
#%%
# Simples Nacional files:
simples_insert_start = time.time()
print("""
################################
## Arquivos do SIMPLES NACIONAL:
################################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "simples";')
conn.commit()
for e in range(0, len(arquivos_simples)):
print('Trabalhando no arquivo: '+arquivos_simples[e]+' [...]')
try:
del simples
except:
pass
    # Check the file size:
    print('Lendo o arquivo ' + arquivos_simples[e]+' [...]')
    extracted_file_path = Path(f'{extracted_files}/{arquivos_simples[e]}')
    with open(extracted_file_path, "r") as simples_file:
        simples_length = sum(1 for line in simples_file)
    print('Linhas no arquivo do Simples '+ arquivos_simples[e] +': '+str(simples_length))
    tamanho_das_partes = 1000000 # Records per batch
    partes = -(-simples_length // tamanho_das_partes)  # ceiling division, so the last partial chunk is not dropped
nrows = tamanho_das_partes
skiprows = 0
print('Este arquivo será dividido em ' + str(partes) + ' partes para inserção no banco de dados')
for i in range(0, partes):
print('Iniciando a parte ' + str(i+1) + ' [...]')
simples = pd.DataFrame(columns=[1,2,3,4,5,6])
simples = pd.read_csv(filepath_or_buffer=extracted_file_path,
sep=';',
nrows=nrows,
skiprows=skiprows,
header=None,
dtype='object')
        # Clean the file up before inserting into the database:
        simples = simples.reset_index()
        del simples['index']
        # Rename columns
simples.columns = ['cnpj_basico',
'opcao_pelo_simples',
'data_opcao_simples',
'data_exclusao_simples',
'opcao_mei',
'data_opcao_mei',
'data_exclusao_mei']
skiprows = skiprows+nrows
        # Write the data to the database:
        # simples
simples.to_sql(name='simples', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_simples[e] + ' inserido com sucesso no banco de dados! - Parte '+ str(i+1))
try:
del simples
except:
pass
try:
del simples
except:
pass
print('Arquivos do simples finalizados!')
simples_insert_end = time.time()
simples_Tempo_insert = round((simples_insert_end - simples_insert_start))
print('Tempo de execução do processo do Simples Nacional (em segundos): ' + str(simples_Tempo_insert))
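# A sketch of an alternative chunked load (not used above): pandas' `chunksize`
# streams the file in one pass instead of re-reading it with a growing
# `skiprows` offset on every part.
#
# for chunk in pd.read_csv(extracted_file_path, sep=';', header=None, dtype='object',
#                          chunksize=tamanho_das_partes):
#     chunk.columns = ['cnpj_basico', 'opcao_pelo_simples', 'data_opcao_simples',
#                      'data_exclusao_simples', 'opcao_mei', 'data_opcao_mei',
#                      'data_exclusao_mei']
#     chunk.to_sql(name='simples', con=engine, if_exists='append', index=False)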
#%%
# CNAE files:
cnae_insert_start = time.time()
print("""
######################
## Arquivos de cnae:
######################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "cnae";')
conn.commit()
for e in range(0, len(arquivos_cnae)):
print('Trabalhando no arquivo: '+arquivos_cnae[e]+' [...]')
try:
del cnae
except:
pass
extracted_file_path = Path(f'{extracted_files}/{arquivos_cnae[e]}')
cnae = pd.DataFrame(columns=[1,2])
cnae = pd.read_csv(filepath_or_buffer=extracted_file_path, sep=';', skiprows=0, header=None, dtype='object', encoding='ANSI')
    # Clean the file up before inserting into the database:
    cnae = cnae.reset_index()
    del cnae['index']
    # Rename columns
    cnae.columns = ['codigo', 'descricao']
    # Write the data to the database:
    # cnae
cnae.to_sql(name='cnae', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_cnae[e] + ' inserido com sucesso no banco de dados!')
try:
del cnae
except:
pass
print('Arquivos de cnae finalizados!')
cnae_insert_end = time.time()
cnae_Tempo_insert = round((cnae_insert_end - cnae_insert_start))
print('Tempo de execução do processo de cnae (em segundos): ' + str(cnae_Tempo_insert))
#%%
# Registration-status reason (moti) files:
moti_insert_start = time.time()
print("""
#########################################
## Arquivos de motivos da situação atual:
#########################################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "moti";')
conn.commit()
for e in range(0, len(arquivos_moti)):
print('Trabalhando no arquivo: '+arquivos_moti[e]+' [...]')
try:
del moti
except:
pass
extracted_file_path = Path(f'{extracted_files}/{arquivos_moti[e]}')
moti = pd.DataFrame(columns=[1,2])
moti = pd.read_csv(filepath_or_buffer=extracted_file_path, sep=';', skiprows=0, header=None, dtype='object', encoding='ANSI')
    # Clean the file up before inserting into the database:
    moti = moti.reset_index()
    del moti['index']
    # Rename columns
    moti.columns = ['codigo', 'descricao']
    # Write the data to the database:
    # moti
moti.to_sql(name='moti', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_moti[e] + ' inserido com sucesso no banco de dados!')
try:
del moti
except:
pass
print('Arquivos de moti finalizados!')
moti_insert_end = time.time()
moti_Tempo_insert = round((moti_insert_end - moti_insert_start))
print('Tempo de execução do processo de motivos da situação atual (em segundos): ' + str(moti_Tempo_insert))
#%%
# Municipality (munic) files:
munic_insert_start = time.time()
print("""
##########################
## Arquivos de municípios:
##########################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "munic";')
conn.commit()
for e in range(0, len(arquivos_munic)):
print('Trabalhando no arquivo: '+arquivos_munic[e]+' [...]')
try:
del munic
except:
pass
extracted_file_path = Path(f'{extracted_files}/{arquivos_munic[e]}')
munic = pd.DataFrame(columns=[1,2])
munic = pd.read_csv(filepath_or_buffer=extracted_file_path, sep=';', skiprows=0, header=None, dtype='object', encoding='ANSI')
    # Clean the file up before inserting into the database:
    munic = munic.reset_index()
    del munic['index']
    # Rename columns
    munic.columns = ['codigo', 'descricao']
    # Write the data to the database:
    # munic
munic.to_sql(name='munic', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_munic[e] + ' inserido com sucesso no banco de dados!')
try:
del munic
except:
pass
print('Arquivos de munic finalizados!')
munic_insert_end = time.time()
munic_Tempo_insert = round((munic_insert_end - munic_insert_start))
print('Tempo de execução do processo de municípios (em segundos): ' + str(munic_Tempo_insert))
#%%
# Legal nature (natju) files:
natju_insert_start = time.time()
print("""
#################################
## Arquivos de natureza jurídica:
#################################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "natju";')
conn.commit()
for e in range(0, len(arquivos_natju)):
print('Trabalhando no arquivo: '+arquivos_natju[e]+' [...]')
try:
del natju
except:
pass
extracted_file_path = Path(f'{extracted_files}/{arquivos_natju[e]}')
natju = pd.DataFrame(columns=[1,2])
natju = pd.read_csv(filepath_or_buffer=extracted_file_path, sep=';', skiprows=0, header=None, dtype='object', encoding='ANSI')
    # Clean the file up before inserting into the database:
    natju = natju.reset_index()
    del natju['index']
    # Rename columns
    natju.columns = ['codigo', 'descricao']
    # Write the data to the database:
    # natju
natju.to_sql(name='natju', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_natju[e] + ' inserido com sucesso no banco de dados!')
try:
del natju
except:
pass
print('Arquivos de natju finalizados!')
natju_insert_end = time.time()
natju_Tempo_insert = round((natju_insert_end - natju_insert_start))
print('Tempo de execução do processo de natureza jurídica (em segundos): ' + str(natju_Tempo_insert))
#%%
# Country (pais) files:
pais_insert_start = time.time()
print("""
######################
## Arquivos de país:
######################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "pais";')
conn.commit()
for e in range(0, len(arquivos_pais)):
print('Trabalhando no arquivo: '+arquivos_pais[e]+' [...]')
try:
del pais
except:
pass
extracted_file_path = Path(f'{extracted_files}/{arquivos_pais[e]}')
pais = pd.DataFrame(columns=[1,2])
pais = pd.read_csv(filepath_or_buffer=extracted_file_path, sep=';', skiprows=0, header=None, dtype='object', encoding='ANSI')
    # Clean the file up before inserting into the database:
    pais = pais.reset_index()
    del pais['index']
    # Rename columns
    pais.columns = ['codigo', 'descricao']
    # Write the data to the database:
    # pais
pais.to_sql(name='pais', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_pais[e] + ' inserido com sucesso no banco de dados!')
try:
del pais
except:
pass
print('Arquivos de pais finalizados!')
pais_insert_end = time.time()
pais_Tempo_insert = round((pais_insert_end - pais_insert_start))
print('Tempo de execução do processo de país (em segundos): ' + str(pais_Tempo_insert))
#%%
# Partner qualification (quals) files:
quals_insert_start = time.time()
print("""
######################################
## Arquivos de qualificação de sócios:
######################################
""")
# Drop the table before inserting
cur.execute('DROP TABLE IF EXISTS "quals";')
conn.commit()
for e in range(0, len(arquivos_quals)):
print('Trabalhando no arquivo: '+arquivos_quals[e]+' [...]')
try:
del quals
except:
pass
extracted_file_path = Path(f'{extracted_files}/{arquivos_quals[e]}')
quals = pd.DataFrame(columns=[1,2])
quals = pd.read_csv(filepath_or_buffer=extracted_file_path, sep=';', skiprows=0, header=None, dtype='object', encoding='ANSI')
    # Clean the file up before inserting into the database:
    quals = quals.reset_index()
    del quals['index']
    # Rename columns
    quals.columns = ['codigo', 'descricao']
    # Write the data to the database:
    # quals
quals.to_sql(name='quals', con=engine, if_exists='append', index=False)
print('Arquivo ' + arquivos_quals[e] + ' inserido com sucesso no banco de dados!')
try:
del quals
except:
pass
print('Arquivos de quals finalizados!')
quals_insert_end = time.time()
quals_Tempo_insert = round((quals_insert_end - quals_insert_start))
print('Tempo de execução do processo de qualificação de sócios (em segundos): ' + str(quals_Tempo_insert))
#%%
insert_end = time.time()
Tempo_insert = round((insert_end - insert_start))
print("""
#############################################
## Processo de carga dos arquivos finalizado!
#############################################
""")
print('Tempo total de execução do processo de carga (em segundos): ' + str(Tempo_insert)) # Observed run time (in seconds): 17.770 (~4 h 57 min)
# ###############################
# File sizes (row counts):
# empresa = 45.811.638
# estabelecimento = 48.421.619
# socios = 20.426.417
# simples = 27.893.923
# ###############################
#%%
# Create indexes in the database:
index_start = time.time()
print("""
#######################################
## Criar índices na base de dados [...]
#######################################
""")
cur.execute("""
create index empresa_cnpj on empresa(cnpj_basico);
commit;
create index estabelecimento_cnpj on estabelecimento(cnpj_basico);
commit;
create index socios_cnpj on socios(cnpj_basico);
commit;
create index simples_cnpj on simples(cnpj_basico);
commit;
""")
conn.commit()
print("""
############################################################
## Índices criados nas tabelas, para a coluna `cnpj_basico`:
- empresa
- estabelecimento
- socios
- simples
############################################################
""")
index_end = time.time()
index_time = round(index_end - index_start)
print('Tempo para criar os índices (em segundos): ' + str(index_time))
#%%
print("""Processo 100% finalizado! Você já pode usar seus dados no BD!
- Desenvolvido por: <NAME>
- Contribua com esse projeto aqui: https://github.com/aphonsoar/Receita_Federal_do_Brasil_-_Dados_Publicos_CNPJ
""")
|
428557
|
from ._title import Title
from plotly.graph_objs.parcats.line.colorbar import title
from ._tickformatstop import Tickformatstop
from ._tickfont import Tickfont
|
428617
|
import numpy as np
print(np.__version__)
# 1.19.0
a_1d = np.array([1, 2, 3, 4, 5, 6])
print(a_1d)
# [1 2 3 4 5 6]
print(np.cumsum(a_1d))
# [ 1 3 6 10 15 21]
print(np.cumsum(a_1d, dtype=float))
# [ 1. 3. 6. 10. 15. 21.]
print(a_1d.cumsum())
# [ 1 3 6 10 15 21]
print(a_1d.cumsum(dtype=float))
# [ 1. 3. 6. 10. 15. 21.]
l = [1, 2, 3, 4, 5, 6]
print(np.cumsum(l))
# [ 1 3 6 10 15 21]
print(type(np.cumsum(l)))
# <class 'numpy.ndarray'>
a_2d = a_1d.reshape(2, 3)
print(a_2d)
# [[1 2 3]
# [4 5 6]]
print(np.cumsum(a_2d))
# [ 1 3 6 10 15 21]
print(np.cumsum(a_2d, axis=0))
# [[1 2 3]
# [5 7 9]]
print(np.cumsum(a_2d, axis=1))
# [[ 1 3 6]
# [ 4 9 15]]
print(a_2d.cumsum())
# [ 1 3 6 10 15 21]
print(a_2d.cumsum(axis=0))
# [[1 2 3]
# [5 7 9]]
print(a_2d.cumsum(axis=1))
# [[ 1 3 6]
# [ 4 9 15]]
l_2d = [[1, 2, 3], [4, 5, 6]]
print(np.cumsum(l_2d))
# [ 1 3 6 10 15 21]
print(np.cumsum(l_2d, axis=0))
# [[1 2 3]
# [5 7 9]]
print(np.cumsum(l_2d, axis=1))
# [[ 1 3 6]
# [ 4 9 15]]
l_2d_error = [[1, 2, 3], [4, 5]]
print(np.cumsum(l_2d_error))
# [list([1, 2, 3]) list([1, 2, 3, 4, 5])]
#
# /usr/local/lib/python3.8/site-packages/numpy/core/_asarray.py:83: VisibleDeprecationWarning: Creating an ndarray from ragged nested sequences (which is a list-or-tuple of lists-or-tuples-or ndarrays with different lengths or shapes) is deprecated. If you meant to do this, you must specify 'dtype=object' when creating the ndarray
# return array(a, dtype, copy=False, order=order)
print(np.cumprod(a_1d))
# [ 1 2 6 24 120 720]
print(np.cumprod(a_1d, dtype=float))
# [ 1. 2. 6. 24. 120. 720.]
print(a_1d.cumprod())
# [ 1 2 6 24 120 720]
print(a_1d.cumprod(dtype=float))
# [ 1. 2. 6. 24. 120. 720.]
print(np.cumprod(a_2d))
# [ 1 2 6 24 120 720]
print(np.cumprod(a_2d, axis=0))
# [[ 1 2 3]
# [ 4 10 18]]
print(np.cumprod(a_2d, axis=1))
# [[ 1 2 6]
# [ 4 20 120]]
print(a_2d.cumprod())
# [ 1 2 6 24 120 720]
print(a_2d.cumprod(axis=0))
# [[ 1 2 3]
# [ 4 10 18]]
print(a_2d.cumprod(axis=1))
# [[ 1 2 6]
# [ 4 20 120]]
print(np.cumprod(l))
# [ 1 2 6 24 120 720]
print(np.cumprod(l_2d))
# [ 1 2 6 24 120 720]
print(np.cumprod(l_2d, axis=0))
# [[ 1 2 3]
# [ 4 10 18]]
print(np.cumprod(l_2d, axis=1))
# [[ 1 2 6]
# [ 4 20 120]]
|
428622
|
from __future__ import (
absolute_import,
print_function,
unicode_literals,
)
import posixpath
from collections import defaultdict
class OpenXmlPartContainer(object):
'''
Represents a container for other OpenXmlParts that are associated with a
OpenXmlPackage.
See also: http://msdn.microsoft.com/en-us/library/documentformat.openxml.packaging.openxmlpartcontainer%28v=office.14%29.aspx # noqa
'''
child_part_types = []
def __init__(self):
self.external_relationships = {}
self.hyperlink_relationships = {}
self._parts = None
self.parts_of_type = defaultdict(list)
@property
def parts(self):
if self._parts is None:
self._parts = {}
self._load_parts()
return self._parts
def get_relationship_lookup(self):
raise NotImplementedError
def _load_parts(self):
from pydocx.openxml.packaging.open_xml_package import OpenXmlPackage
relationship_lookup = self.get_relationship_lookup()
# TODO I don't like this -Kyle
if isinstance(self, OpenXmlPackage):
open_xml_package = self
else:
open_xml_package = self.open_xml_package
for child_part_type in self.child_part_types:
relationships = relationship_lookup.get_relationships_by_type(
child_part_type.relationship_type,
)
if not relationships:
continue
for relationship in relationships:
if relationship.is_external():
part_uri = relationship.target_uri
else:
base, _ = posixpath.split(relationship.source_uri)
part_uri = posixpath.join(
base,
relationship.target_uri,
)
if not open_xml_package.package.part_exists(part_uri):
continue
part = child_part_type(
open_xml_package=open_xml_package,
uri=part_uri,
)
self.add_part(
part=part,
relationship_id=relationship.relationship_id,
)
def _ensure_parts_are_loaded(self):
return self.parts
def get_parts_of_type(self, relationship_type):
self._ensure_parts_are_loaded()
return list(self.parts_of_type[relationship_type])
def get_part_by_id(self, relationship_id):
return self.parts[relationship_id]
def get_part_of_class_type(self, part_class):
self._ensure_parts_are_loaded()
parts = self.get_parts_of_type(
part_class.relationship_type,
)
if parts:
return parts[0]
def add_part(self, part, relationship_id=None):
self._ensure_parts_are_loaded()
if relationship_id is not None:
self.parts[relationship_id] = part
self.parts_of_type[part.relationship_type].append(part)
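# A minimal subclassing sketch (part class names are hypothetical): containers
# declare their possible children in `child_part_types`, and `parts` is then
# populated lazily from the package relationships on first access.
#
# class MyContainerPart(OpenXmlPartContainer):
#     child_part_types = [MyChildPart]
#
# container.parts                              # triggers _load_parts()
# container.get_part_of_class_type(MyChildPart)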
|
428662
|
import warnings
from .exceptions import ChannelNotDefined, NoDefaultChannel
from .models import Channel
DEPRECATION_WARNING_MESSAGE = (
"Default channel used in a query. Please make sure that channel is explicitly "
"provided. This behavior works only when a one channel exists and will be removed "
"after 2020-12-31."
)
def get_default_channel() -> Channel:
"""Return a default channel.
Returns a channel only when exactly one channel exists in the system. If there are
more channels, you need to ensure that the channel is explicitly specified. This
function is intended to use throughout the full migration to the multi-channel
approach in Saleor and will be removed after 2020-12-31. Since then, the API and
all functions will require specifying the channel.
:raises ChannelNotDefined: When there is more than one channel.
:raises NoDefaultChannel: When there are no channels.
"""
try:
channel = Channel.objects.get()
except Channel.MultipleObjectsReturned:
channels = list(Channel.objects.filter(is_active=True))
if len(channels) == 1:
warnings.warn(DEPRECATION_WARNING_MESSAGE)
return channels[0]
raise ChannelNotDefined()
except Channel.DoesNotExist:
raise NoDefaultChannel()
else:
warnings.warn(DEPRECATION_WARNING_MESSAGE)
return channel
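# A minimal usage sketch: callers that cannot resolve a channel from the
# request can fall back to the default one and handle the errors otherwise.
#
# try:
#     channel = get_default_channel()
# except (ChannelNotDefined, NoDefaultChannel):
#     ...  # require the caller to pass a channel explicitly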
|
428695
|
from pathlib import Path # pylint: disable=unused-import
from os.path import expanduser # pylint: disable=unused-import
import pytest
from .context import load_telco_sentiment_train, load_telco_sentiment_test, load_telco_sentiment_test_label
from .context import TELCO_CLASS_VALUES
@pytest.mark.skipif('not Path(expanduser("~/.sadedegel_data/telco_sentiment")).exists()')
def test_data_load():
data = load_telco_sentiment_train()
for i, row in enumerate(data):
assert any(key in row.keys() for key in ['text_uuid', 'tweet', 'sentiment_class'])
assert isinstance(row['id'], str)
assert isinstance(row['tweet'], str)
assert TELCO_CLASS_VALUES[row['sentiment_class']] in ['notr', 'olumlu', 'olumsuz']
assert i + 1 == 13832
@pytest.mark.skipif('not Path(expanduser("~/.sadedegel_data/telco_sentiment")).exists()')
def test_data_load_test():
data = load_telco_sentiment_test()
for i, row in enumerate(data):
assert any(key in row.keys() for key in ['text_uuid', 'tweet'])
assert isinstance(row['id'], str)
assert isinstance(row['tweet'], str)
assert i + 1 == 3457
@pytest.mark.skipif('not Path(expanduser("~/.sadedegel_data/telco_sentiment")).exists()')
def test_data_load_target():
data = load_telco_sentiment_test_label()
for i, row in enumerate(data):
assert any(key in row.keys() for key in ['id', 'sentiment_class'])
assert isinstance(row['id'], str)
assert TELCO_CLASS_VALUES[row['sentiment_class']] in ['notr', 'olumlu', 'olumsuz']
assert i + 1 == 3457
|
428752
|
x = input("Type a number: ")
y = input("Type another number: ")
total = int(x) + int(y)  # avoid shadowing the built-in sum()
print("The sum is: ", total)
|
428790
|
from PIL import Image
from random import randrange, randint
import glob
import os
import shutil
width, height = 16, 16
# Find the latest generation file; sort numerically because glob order is arbitrary
txts = sorted(glob.glob("gen*.txt"), key=lambda name: int(name[3:-4]))
print(txts[-1][3:-4])
gen = int(txts[-1][3:-4])
with open('select5.txt','r') as file:
numbers = [ int(_) for _ in file.read().split('\n')[:-1] ]
# Generate the representative image
with open('gen{}.txt'.format(gen),'r') as fff:
gens = fff.read().replace('[','').replace(']','').split('\n')
number = numbers[0]
r = [ int(_) for _ in gens[number*3].split(', ')]
g = [ int(_) for _ in gens[number*3+1].split(', ')]
b = [ int(_) for _ in gens[number*3+2].split(', ')]
img = Image.new('RGB', (width, height))
cnt = 0
for y in range(height):
for x in range(width):
img.putpixel((x, y), (r[cnt], g[cnt], b[cnt]))
cnt += 1
img.resize((256, 256), Image.BOX).save('{}.png'.format(str(gen)))
with open('gen{}.txt'.format(gen+1),'w') as file1:
with open('gen{}.txt'.format(gen),'r') as file:
head = file.read()
data = head.replace('[','').replace(']','').split('\n')
for i in range(128):
print(i)
            # Carry the selected parents over into the next generation
if i == 32:
r = data[numbers[0]*3]
g = data[numbers[0]*3+1]
b = data[numbers[0]*3+2]
elif i == 64:
r = data[numbers[1]*3]
g = data[numbers[1]*3+1]
b = data[numbers[1]*3+2]
elif i == 96:
r = data[numbers[2]*3]
g = data[numbers[2]*3+1]
b = data[numbers[2]*3+2]
elif i == 120:
r = data[numbers[3]*3]
g = data[numbers[3]*3+1]
b = data[numbers[3]*3+2]
else:
r, g, b = [], [], []
cnt = 0
for y in range(16):
for x in range(16):
mutation = randint(0,99)
if mutation < 5:
                        # Mutate with probability 5/100
r.append((randrange(0, 255, 20)))
g.append((randrange(0, 255, 20)))
b.append((randrange(0, 255, 20)))
else:
                        # Crossover: inherit this pixel from one of the four selected parents
rand = randint(0,3)
r.append(int(data[numbers[rand]*3].split(', ')[cnt]))
g.append(int(data[numbers[rand]*3+1].split(', ')[cnt]))
b.append(int(data[numbers[rand]*3+2].split(', ')[cnt]))
cnt += 1
file1.write('{}\n{}\n{}\n'.format(r,g,b))
# The previous generation's representative image was generated above
# Move the previous generation's files
os.mkdir(str(gen))
shutil.move('gen{}.txt'.format(gen), '{}/'.format(gen))
shutil.move('select1.txt', '{}/'.format(gen))
shutil.move('select2.txt', '{}/'.format(gen))
shutil.move('select3.txt', '{}/'.format(gen))
shutil.move('select4.txt', '{}/'.format(gen))
shutil.move('select5.txt', '{}/'.format(gen))
|
428800
|
import unittest
from libpysal.examples import load_example
import geopandas as gpd
import numpy as np
from segregation.singlegroup import DistanceDecayInteraction
class Distance_Decay_Interaction_Tester(unittest.TestCase):
def test_Distance_Decay_Interaction(self):
s_map = gpd.read_file(load_example("Sacramento1").get_path("sacramentot2.shp"))
s_map = s_map.to_crs(s_map.estimate_utm_crs())
df = s_map[['geometry', 'HISP', 'TOT_POP']]
index = DistanceDecayInteraction(df, 'HISP', 'TOT_POP')
np.testing.assert_almost_equal(index.statistic, 0.8620769567792631)
if __name__ == '__main__':
unittest.main()
|
428806
|
def to_bytes(bytes_or_str):
"""
Converts supplied data into bytes if the data is of type str.
:param bytes_or_str: Data to be converted.
:return: UTF-8 encoded bytes if the data was of type str. Otherwise it returns the supplied data as is.
"""
if isinstance(bytes_or_str, str):
return bytes_or_str.encode()
return bytes_or_str
def to_str(bytes_or_str):
"""
    Converts supplied data into a str (decoded as UTF-8) if the data is of type bytes.
    :param bytes_or_str: Data to be converted.
    :return: UTF-8 decoded string if the data was of type bytes. Otherwise it returns the supplied data as is.
"""
if isinstance(bytes_or_str, bytes):
return bytes_or_str.decode()
return bytes_or_str
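# A minimal usage sketch; both helpers pass already-converted input through
# unchanged:
if __name__ == "__main__":
    assert to_bytes("abc") == b"abc"
    assert to_bytes(b"abc") == b"abc"
    assert to_str(b"abc") == "abc"
    assert to_str("abc") == "abc"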
|
428815
|
from Crypto.Util.number import long_to_bytes
from gmpy2 import *
class ContinuedFraction():
def __init__(self, numerator, denumerator):
self.numberlist = [] # number in continued fraction
self.fractionlist = [] # the near fraction list
self.GenerateNumberList(numerator, denumerator)
self.GenerateFractionList()
    def GenerateNumberList(self, numerator, denumerator):
        # Continued-fraction expansion via the Euclidean algorithm; assumes
        # gcd(numerator, denumerator) == 1 so the loop terminates at 1
        while numerator != 1:
quotient = numerator // denumerator
remainder = numerator % denumerator
self.numberlist.append(quotient)
numerator = denumerator
denumerator = remainder
def GenerateFractionList(self):
self.fractionlist.append([self.numberlist[0], 1])
for i in range(1, len(self.numberlist)):
numerator = self.numberlist[i]
denumerator = 1
for j in range(i):
temp = numerator
numerator = denumerator + numerator * self.numberlist[i - j - 1]
denumerator = temp
self.fractionlist.append([numerator, denumerator])
def Solve(a, b, c):
"""solve ax^2+bx+c=0 , return x1 , x2"""
delta = b ** 2 - 4 * a * c
if delta < 0:
return 0
if is_square(delta):
sqr_delta = isqrt(delta)
temp1 = -b + sqr_delta
temp2 = -b - sqr_delta
if temp1 % (2 * a) != 0 or temp2 % (2 * a) != 0:
return 0
else:
return [temp1 // (2 * a), temp2 // (2 * a)]
else:
return 0
def attack(n, e):
a = ContinuedFraction(e, n)
for i in a.fractionlist:
k = i[0]
d = i[1]
if k == 0:
continue
phi = (d * e - 1) // k
b = phi - n - 1
temp = Solve(1, b, n)
if isinstance(temp, list):
p, q = temp
return d, p, q
return None, None, None
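# A usage sketch (key material is hypothetical): Wiener's attack recovers the
# private exponent when d < n**0.25 / 3, e.g. for keys generated with a
# deliberately small d.
#
# d, p, q = attack(n, e)
# if d is not None:
#     m = pow(c, d, n)              # decrypt a ciphertext c
#     print(long_to_bytes(m))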
|
428865
|
import faulthandler
import os
import sys
import traceback
import urllib3
from PyQt5.QtCore import QCoreApplication, Qt
from bauh import __app_name__, app_args
from bauh.view.core.config import CoreConfigManager
from bauh.view.util import logs
def main(tray: bool = False):
if not os.getenv('PYTHONUNBUFFERED'):
os.environ['PYTHONUNBUFFERED'] = '1'
faulthandler.enable()
urllib3.disable_warnings(urllib3.exceptions.InsecureRequestWarning)
args = app_args.read()
logger = logs.new_logger(__app_name__, bool(args.logs))
if args.offline:
logger.warning("offline mode activated")
app_config = CoreConfigManager().get_config()
if bool(app_config['ui']['auto_scale']):
os.environ['QT_AUTO_SCREEN_SCALE_FACTOR'] = '1'
logger.info("Auto screen scale factor activated")
try:
scale_factor = float(app_config['ui']['scale_factor'])
os.environ['QT_SCALE_FACTOR'] = str(scale_factor)
logger.info("Scale factor set to {}".format(scale_factor))
except:
traceback.print_exc()
if bool(app_config['ui']['hdpi']):
logger.info("HDPI settings activated")
QCoreApplication.setAttribute(Qt.AA_UseHighDpiPixmaps)
QCoreApplication.setAttribute(Qt.AA_EnableHighDpiScaling)
if tray or bool(args.tray):
from bauh.tray import new_tray_icon
app, widget = new_tray_icon(app_config, logger)
else:
from bauh.manage import new_manage_panel
app, widget = new_manage_panel(args, app_config, logger)
widget.show()
sys.exit(app.exec_())
def tray():
main(tray=True)
if __name__ == '__main__':
main()
|
428879
|
import json
import unittest
from coba.registry import CobaRegistry
from coba.exceptions import CobaException
from coba.environments.definitions import EnvironmentDefinitionFileV1
from coba.environments.primitives import SimulatedEnvironment
from coba.environments.openml import OpenmlSimulation
from coba.environments.filters import Take
class EnvironmentFileFmtV1_Tests(unittest.TestCase):
def setUp(self) -> None:
CobaRegistry.register("OpenmlSimulation", OpenmlSimulation)
CobaRegistry.register("Take", Take)
def test_one_environment(self):
json_txt = """{
"environments" : [
{ "OpenmlSimulation": 150 }
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertDictEqual({'openml':150, **environments[0].params}, environments[0].params)
def test_raw_environment(self):
json_txt = """{
"environments" : { "OpenmlSimulation": 150 }
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertDictEqual({'openml':150, **environments[0].params}, environments[0].params)
def test_one_environment_one_filter(self):
json_txt = """{
"environments" : [
[{ "OpenmlSimulation": 150 }, {"Take":10} ]
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertDictEqual({"openml":150, "take":10, **environments[0].params}, environments[0].params)
def test_one_environment_two_filters(self):
json_txt = """{
"environments" : [
[{ "OpenmlSimulation": 150 }, {"Take":[10,20], "method":"foreach"} ]
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertEqual(2, len(environments))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertIsInstance(environments[1], SimulatedEnvironment)
self.assertDictEqual({"openml":150, "take":10, **environments[0].params}, environments[0].params)
self.assertDictEqual({"openml":150, "take":20, **environments[1].params}, environments[1].params)
def test_two_environments_two_filters(self):
json_txt = """{
"environments" : [
[{ "OpenmlSimulation": [150,151], "method":"foreach" }, { "Take":[10,20], "method":"foreach" }]
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertEqual(4, len(environments))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertIsInstance(environments[1], SimulatedEnvironment)
self.assertIsInstance(environments[2], SimulatedEnvironment)
self.assertIsInstance(environments[3], SimulatedEnvironment)
self.assertDictEqual({"openml":150, "take":10, **environments[0].params}, environments[0].params)
self.assertDictEqual({"openml":150, "take":20, **environments[1].params}, environments[1].params)
self.assertDictEqual({"openml":151, "take":10, **environments[2].params}, environments[2].params)
self.assertDictEqual({"openml":151, "take":20, **environments[3].params}, environments[3].params)
def test_two_singular_environments(self):
json_txt = """{
"environments" : [
{"OpenmlSimulation": 150},
{"OpenmlSimulation": 151}
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertIsInstance(environments[1], SimulatedEnvironment)
self.assertDictEqual({"openml":150, **environments[0].params}, environments[0].params)
self.assertDictEqual({"openml":151, **environments[1].params}, environments[1].params)
def test_one_foreach_environment(self):
json_txt = """{
"environments" : [
{"OpenmlSimulation": [150,151], "method":"foreach"}
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertIsInstance(environments[1], SimulatedEnvironment)
self.assertDictEqual({"openml":150, **environments[0].params}, environments[0].params)
self.assertDictEqual({"openml":151, **environments[1].params}, environments[1].params)
def test_one_variable(self):
json_txt = """{
"variables" : {"$openml_sims": {"OpenmlSimulation": [150,151], "method":"foreach"} },
"environments" : [ "$openml_sims" ]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertIsInstance(environments[1], SimulatedEnvironment)
self.assertDictEqual({"openml":150, **environments[0].params}, environments[0].params)
self.assertDictEqual({"openml":151, **environments[1].params}, environments[1].params)
def test_two_variables(self):
json_txt = """{
"variables": {
"$openmls": {"OpenmlSimulation": [150,151], "method":"foreach"},
"$takes" : {"Take":[10,20], "method":"foreach"}
},
"environments": [
["$openmls", "$takes"],
"$openmls"
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertEqual(6, len(environments))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertIsInstance(environments[1], SimulatedEnvironment)
self.assertIsInstance(environments[2], SimulatedEnvironment)
self.assertIsInstance(environments[3], SimulatedEnvironment)
self.assertIsInstance(environments[4], SimulatedEnvironment)
self.assertIsInstance(environments[5], SimulatedEnvironment)
self.assertDictEqual({"openml":150, "take":10, **environments[0].params}, environments[0].params)
self.assertDictEqual({"openml":150, "take":20, **environments[1].params}, environments[1].params)
self.assertDictEqual({"openml":151, "take":10, **environments[2].params}, environments[2].params)
self.assertDictEqual({"openml":151, "take":20, **environments[3].params}, environments[3].params)
self.assertDictEqual({"openml":150 , **environments[4].params}, environments[4].params)
self.assertDictEqual({"openml":151 , **environments[5].params}, environments[5].params)
def test_pipe_list(self):
json_txt = """{
"environments" : [
[ {"OpenmlSimulation":150}, [ {"Take":10}, {"Take":20} ] ]
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertEqual(2, len(environments))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertIsInstance(environments[1], SimulatedEnvironment)
self.assertDictEqual({"openml":150, "take":10, **environments[0].params}, environments[0].params)
self.assertDictEqual({"openml":150, "take":20, **environments[1].params}, environments[1].params)
def test_pipe_str(self):
json_txt = """{
"environments" : [
[ {"OpenmlSimulation":150}, "Identity" ]
]
}"""
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertEqual(1, len(environments))
self.assertIsInstance(environments[0], SimulatedEnvironment)
self.assertDictEqual({"openml":150, **environments[0].params}, environments[0].params)
def test_bad_pipe_exception(self):
json_txt = """{
"environments" : [
[ {"OpenmlSimulation":150}, null ]
]
}"""
with self.assertRaises(CobaException) as e:
environments = EnvironmentDefinitionFileV1().filter(json.loads(json_txt))
self.assertIn("We were unable to construct",str(e.exception))
if __name__ == '__main__':
unittest.main()
|
428880
|
import numpy as np
import tidynamics
def test_length_one_msd():
N = 1
pos = np.zeros(N)
reference_msd = np.zeros(N)
computed_msd = tidynamics.msd(pos)
assert np.allclose(reference_msd, computed_msd)
def test_length_zero_msd():
N = 0
pos = np.zeros(N)
reference_msd = np.zeros(N)
computed_msd = tidynamics.msd(pos)
assert np.allclose(reference_msd, computed_msd)
def test_length_one_msd_nd():
N = 1
ND = 4
data = np.ones((N, ND))
reference_msd = np.zeros_like(data[:,0])
computed_msd = tidynamics.msd(data)
assert np.allclose(reference_msd, computed_msd)
|
428902
|
import torch
import numpy as np
import cv2
import os
from utils.decode import decode_seg_map_sequence
def output_visualize(image, cam, label, gt_map, pred_map):
image = np.transpose(image.clone().cpu().detach().numpy(), (1,2,0)) # H, W, C
cam = np.transpose(cam, (1,2,0)) # H, W, C
""" image denormalize """
image *= [0.229, 0.224, 0.225]
image += [0.485, 0.456, 0.406]
image *= 255
image = np.clip(image.transpose(2,0,1), 0, 255).astype(np.uint8) # C, H, W
size = image.shape[1]
""" visualize selected CAM outputs """
label = label.clone().cpu().detach().numpy()
label = np.nonzero(label)[0]
selected_cam_image = np.zeros((len(label)+3, 3, size, size), dtype=np.uint8)
selected_cam_image[0] = image
for n, i in enumerate(label):
cam_img = cam[:, :, i] # H, W
cam_img *= 255
cam_img = np.clip(cam_img, 0, 255)
cam_img = cv2.applyColorMap(cam_img.astype(np.uint8), cv2.COLORMAP_JET) # H, W, 3
cam_img = cam_img[:, :, ::-1]
selected_cam_image[n+1] = cam_img.transpose(2, 0, 1)
""" visualize semantic segmentaiton map """
selected_cam_image[-1] = decode_seg_map_sequence(gt_map) * 255
selected_cam_image[-2] = decode_seg_map_sequence(pred_map) * 255
selected_cam_image = selected_cam_image.astype(np.float32) / 255.
return selected_cam_image
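# Shapes assumed by the function, for reference: `image` is a normalized
# (C, H, W) tensor, `cam` a (num_classes, H, W) array, `label` a multi-hot
# class vector, and the maps are segmentation index maps. The result stacks
# the denormalized image, one JET heatmap per positive class, and the
# predicted and ground-truth maps into a (K+3, 3, H, W) float array in [0, 1],
# where K is the number of positive classes.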
|
428927
|
from setuptools import find_packages
from setuptools import setup
REQUIRED_PACKAGES = [
'tensorflow==1.14.0',
'tensorflow-data-validation==0.11.0'
]
setup(
name='tfdv-data-extraction',
version='0.1',
install_requires=REQUIRED_PACKAGES,
packages=find_packages(),
include_package_data=True
)
|
428954
|
import os
try:
from StringIO import StringIO
except ImportError:
from io import StringIO
import json
try:
import secure_config.secrets
    HAS_SECURE_CONFIG = True
except ImportError:
HAS_SECURE_CONFIG = False
import postgraas_server.configuration as cf
import pytest
class TestConfiguration:
module_path = os.path.abspath(os.path.dirname(__file__))
def test_default_config_filename(self):
actual = cf.get_default_config_filename()
expected = os.path.join(os.getcwd(), 'application.cfg')
assert actual == expected
def test_get_config(self):
test_config = os.path.join(self.module_path, 'application.cfg')
actual = cf.get_config(test_config)
expected = 'postgraas'
assert actual['metadb']['db_name'] == expected
def test_get_user(self):
config_string = '''
{
"metadb":
{
"db_username": "postgraas_user"
}
}
'''
config = json.loads(config_string)
username = cf.get_user(config)
expected = 'postgraas_user'
assert username == expected
config_string = '''
{
"metadb":
{
"server": "testserver1",
"db_username": "postgraas_user"
}
}
'''
config = json.loads(config_string)
username = cf.get_user(config)
expected = 'postgraas_user@testserver1'
assert username == expected
@pytest.mark.skipif(not HAS_SECURE_CONFIG,
reason="secure_config not installed")
def test_secrets(self, tmpdir):
expected_secret = secure_config.secrets.EncryptedSecret("v3rys3cur3", "<PASSWORD>_db_password")
print(expected_secret)
test_config = os.path.join(self.module_path, 'application_secure.cfg')
secret_file = os.path.join(self.module_path, 'secret_file.json')
config_undecrypted = cf.get_config(test_config)
assert config_undecrypted['metadb']["db_password"] == expected_secret.dumps()
config_decrypted = cf.get_config(test_config, secrets_file=secret_file)
assert config_decrypted['metadb']["db_password"].decrypt() == "correct_db_password"
@pytest.mark.skipif(not HAS_SECURE_CONFIG,
reason="secure_config not installed")
def test_get_meta_db_config_path(self, tmpdir):
config_dict = {
"metadb": {
"host": "thisserver.host",
"db_pwd": "$<PASSWORD>;0.1;AES256|613839656430373831386237333266306163376563343632663138346163323162333830333861666263326330663238346361666165313266373363316236370a613135396239326632663739376364313466616535333733626165333738303166303761366132633033346433376263393734643132336432393764623465330a65353264343035353236643533303464333561393637643966663165663739656130613435366564383065303834303066613338353631663430613061623833",
"port": "5432",
"db_name": "postgres",
"db_username": "postgraas_user",
"server": "thisserver"
}
}
config = secure_config.secrets.load_secret_dict(password="<PASSWORD>", config_dict=config_dict)
metadb_string = cf.get_meta_db_config_path(config)
print(metadb_string)
assert metadb_string == "postgresql://postgraas_user@thisserver:correct_db_password@thisserver.host:5432/postgres"
@pytest.mark.skipif(not HAS_SECURE_CONFIG,
reason="secure_config not installed")
def test_get_secure_password(self, tmpdir):
config_dict = {
"metadb": {
"db_pwd": "$<PASSWORD>;0.1;AES256|613839656430373831386237333266306163376563343632663138346163323162333830333861666263326330663238346361666165313266373363316236370a613135396239326632663739376364313466616535333733626165333738303166303761366132633033346433376263393734643132336432393764623465330a65353264343035353236643533303464333561393637643966663165663739656130613435366564383065303834303066613338353631663430613061623833",
}
}
config = secure_config.secrets.load_secret_dict(password="<PASSWORD>", config_dict=config_dict)
password_string = cf.get_password(config)
print(password_string)
assert password_string == "<PASSWORD>"
def test_get_plain_password(self, tmpdir):
config_dict = {
"metadb": {
"db_pwd": "<PASSWORD>",
}
}
password_string = cf.get_password(config_dict)
print(password_string)
assert password_string == "<PASSWORD>"
|
428987
|
from django.db import migrations
# This can be deleted when doing next squash of migrations because it's a one time update
def update_well_ground_elevation_method_codes(apps, schema_editor):
ground_method_code = apps.get_model('wells', 'GroundElevationMethodCode')
well = apps.get_model('wells', 'well')
unknown_method = ground_method_code.objects.get_or_create(
pk='UNKNOWN',
create_user='ETL_USER',
create_date='2017-07-01T08:00:00Z',
update_user='ETL_USER',
update_date='2017-07-01T08:00:00Z',
description='Unknown',
display_order=90,
effective_date='2018-05-25T07:00:00Z',
expiry_date='9999-12-31T23:59:59Z'
)
    # Set all ground elevation methods that are null to UNKNOWN;
    # get_or_create returns an (instance, created) tuple
    well.objects.filter(ground_elevation_method__isnull=True).update(
        ground_elevation_method=unknown_method[0])
def reverse(apps, schema_editor):
pass
class Migration(migrations.Migration):
dependencies = [
('wells', '0081_update_well_disinfect_values'),
]
operations = [
migrations.RunPython(update_well_ground_elevation_method_codes, reverse),
]
|
429003
|
import threading
from time import sleep
import nb_log
def test():
while 1:
print(123123)
sleep(1)
if __name__ == '__main__':
threading.Thread(target=test).start()
# threading._start_new_thread(test, ())
print(111111111)
|
429048
|
import iso8601
import nose
from series_tiempo_ar_api.apps.api.tests.endpoint_tests.endpoint_test_case import EndpointTestCase
class TestDateFilters(EndpointTestCase):
def setUp(self):
self.start_date = '2010-01-01'
self.end_date = '2015-01-01'
def test_filters(self):
resp = self.run_query({'ids': self.increasing_month_series_id,
'start_date': '2010-01-01',
'end_date': '2015-01-01'})
for row in resp['data']:
            # iso8601.parse_date handles both plain dates and full timestamps,
            # so no branching on 'T' is needed
            date = iso8601.parse_date(row[0])
            start_date = iso8601.parse_date(self.start_date)
            end_date = iso8601.parse_date(self.end_date)
nose.tools.assert_greater_equal(date, start_date)
nose.tools.assert_less_equal(date, end_date)
|
429068
|
import click
import sys
import os
from functools import reduce
from asciimatics.screen import Screen
from click_didyoumean import DYMMixin
from distutils.version import LooseVersion as LV
from calm.dsl.log import get_logging_handle
from calm.dsl.tools import get_module_from_file
from calm.dsl.store import Version
LOG = get_logging_handle(__name__)
def get_states_filter(STATES_CLASS=None, state_key="state", states=None):
    # Avoid a mutable default argument: a shared default list would
    # accumulate states across calls
    if states is None:
        states = []
    if not states:
        for field in vars(STATES_CLASS):
            if not field.startswith("__"):
                states.append(getattr(STATES_CLASS, field))
    state_prefix = ",{}==".format(state_key)
    return ";({}=={})".format(state_key, state_prefix.join(states))
def get_name_query(names):
if names:
search_strings = [
"name==.*"
+ reduce(
lambda acc, c: "{}[{}|{}]".format(acc, c.lower(), c.upper()), name, ""
)
+ ".*"
for name in names
]
return "({})".format(",".join(search_strings))
return ""
def highlight_text(text, **kwargs):
"""Highlight text in our standard format"""
return click.style("{}".format(text), fg="blue", bold=False, **kwargs)
def import_var_from_file(file, var, default_value=None):
try:
module = get_module_from_file(var, file)
return getattr(module, var)
except: # NoQA
return default_value
class Display:
@classmethod
def wrapper(cls, func, watch=False):
if watch and os.isatty(sys.stdout.fileno()):
Screen.wrapper(func, height=1000)
else:
func(display)
def clear(self):
pass
def refresh(self):
pass
def wait_for_input(self, *args):
pass
def print_at(self, text, x, *args, **kwargs):
click.echo("{}{}".format((" " * x), text))
display = Display()
class FeatureFlagMixin:
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.feature_version_map = dict()
self.experimental_cmd_map = dict()
def command(self, *args, **kwargs):
"""Behaves the same as `click.Group.command()` except added an
`feature_min_version` flag which can be used to warn users if command
is not supported setup calm version.
"""
feature_min_version = kwargs.pop("feature_min_version", None)
if feature_min_version and args:
self.feature_version_map[args[0]] = feature_min_version
is_experimental = kwargs.pop("experimental", False)
if args:
self.experimental_cmd_map[args[0]] = is_experimental
return super().command(*args, **kwargs)
def invoke(self, ctx):
if not ctx.protected_args:
return super(FeatureFlagMixin, self).invoke(ctx)
cmd_name = ctx.protected_args[0]
feature_min_version = self.feature_version_map.get(cmd_name, "")
if feature_min_version:
calm_version = Version.get_version("Calm")
if not calm_version:
LOG.error("Calm version not found. Please update cache")
sys.exit(-1)
if LV(calm_version) >= LV(feature_min_version):
return super().invoke(ctx)
else:
LOG.warning(
"Please update Calm (v{} -> >=v{}) to use this command.".format(
calm_version, feature_min_version
)
)
return None
else:
return super().invoke(ctx)
class FeatureFlagGroup(FeatureFlagMixin, DYMMixin, click.Group):
"""click Group that have *did-you-mean* functionality and adds *feature_min_version* paramter to each subcommand
which can be used to set minimum calm version for command"""
pass
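# A minimal usage sketch (command name and version are hypothetical): groups
# created with cls=FeatureFlagGroup gate their subcommands on the cached Calm
# version.
#
# @click.group(cls=FeatureFlagGroup)
# def calm():
#     pass
#
# @calm.command("watch", feature_min_version="3.0.0", experimental=True)
# def watch():
#     pass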
|
429114
|
if sm.hasQuest(2566):
if sm.hasItem(4032985):
sm.chatScript("You already have the Ignition Device.")
else:
sm.giveItem(4032985)
sm.chatScript("Ignition Device. Bring ")
|
429138
|
import os
import re
from typing import List, Any
from .base import Base
from deoplete.util import error, load_external_module
from deoplete.util import Nvim, UserContext, Candidates
load_external_module(__file__, "")
from exegesis.proto.pdf.x86 import intel_sdm_pb2 as intel_sdm # noqa: E261
class Source(Base):
def __init__(self, vim: Nvim) -> None:
super(Source, self).__init__(vim)
self.name = "asm"
self.mark = "[asm]"
self.rank = 500
self.filetypes = [
"goasm",
"gas",
"asm",
"anm68k",
"asmh8300",
"ia64",
"masm",
"nasm",
"tasm",
]
self.input_pattern = "[^.'\" \t0-9]\\.\\w*"
self.instructions: Any = intel_sdm.SdmDocument()
self.result: List = [] # for cache
def on_init(self, context: UserContext) -> None:
vars = context["vars"]
self.go_mode = vars.get("deoplete#sources#asm#go_mode", False)
pb = "{}/pb/instructions.sdm.pb".format(os.path.dirname(__file__))
try:
f = open(pb, "rb") # binary mode for predump protobuf data
self.instructions.ParseFromString(f.read())
f.close()
except IOError:
error(self.vim, "could not open {}".format(pb))
def get_complete_position(self, context: UserContext) -> int:
m = re.search(r"\w*$", context["input"])
return m.start() if m else -1
def gather_candidates(self, context: UserContext) -> Candidates:
if not self.result:
if self.go_mode:
from sources.opcode import go
self.result += go.symbols
for section in self.instructions.instruction_sections:
for instructions in section.instruction_table.instructions:
kind = instructions.description
vendor_syntax = instructions.vendor_syntax
mnemonic = str(vendor_syntax.mnemonic).lower()
if self.go_mode:
try:
kind = "({}) {}".format(mnemonic, kind)
mnemonic = go.mnemonics[mnemonic]
except Exception:
continue
operand = ""
for i, op in enumerate(vendor_syntax.operands):
operand += op.name
if i < len(vendor_syntax.operands) - 1:
operand += ", "
abbr = "{} {}".format(mnemonic, operand)
if instructions.feature_name:
abbr += " ({})".format(instructions.feature_name)
self.result.append(
dict(word=mnemonic, abbr=abbr, kind=kind, info=abbr, dup=1)
)
return self.result
|
429139
|
from __future__ import division, print_function
import argparse
import yaml
from visual_dynamics import envs
from visual_dynamics import policies
from visual_dynamics.envs import ServoingEnv
from visual_dynamics.utils.config import from_config
from visual_dynamics.utils.rl_util import do_rollouts, FeaturePredictorServoingImageVisualizer
from visual_dynamics.utils.transformer import transfer_image_transformer
def main():
parser = argparse.ArgumentParser()
parser.add_argument('predictor_fname', type=str)
parser.add_argument('--output_dir', '-o', type=str, default=None)
parser.add_argument('--num_trajs', '-n', type=int, default=10, metavar='N', help='total number of data points is N*T')
parser.add_argument('--num_steps', '-t', type=int, default=10, metavar='T', help='number of time steps per trajectory')
parser.add_argument('--gamma', type=float, default=0.9)
parser.add_argument('--visualize', '-v', type=int, default=None)
parser.add_argument('--record_file', '-r', type=str, default=None)
parser.add_argument('--target_distance', '-d', type=int, default=0)
parser.add_argument('--feature_inds', '-i', type=str, help='inds of subset of features to use')
parser.add_argument('--w_init', type=float, default=1.0)
parser.add_argument('--lambda_init', type=float, default=1.0)
args = parser.parse_args()
with open(args.predictor_fname) as predictor_file:
        predictor_config = yaml.load(predictor_file, Loader=yaml.Loader)  # explicit Loader keeps the legacy yaml.load behavior
if issubclass(predictor_config['environment_config']['class'], envs.Panda3dEnv):
transfer_image_transformer(predictor_config)
predictor = from_config(predictor_config)
if args.feature_inds:
args.feature_inds = [int(ind) for ind in args.feature_inds]
predictor.feature_name = [predictor.feature_name[ind] for ind in args.feature_inds]
predictor.next_feature_name = [predictor.next_feature_name[ind] for ind in args.feature_inds]
if issubclass(predictor.environment_config['class'], envs.RosEnv):
import rospy
rospy.init_node("visual_servoing")
env = from_config(predictor.environment_config)
if not isinstance(env, ServoingEnv):
env = ServoingEnv(env, max_time_steps=args.num_steps)
pol = policies.ServoingPolicy(predictor, alpha=1.0, lambda_=args.lambda_init, w=args.w_init)
if args.record_file and not args.visualize:
args.visualize = 1
    if args.visualize:
        image_visualizer = FeaturePredictorServoingImageVisualizer(predictor, visualize=args.visualize)
    else:
        image_visualizer = None  # do_rollouts would otherwise hit a NameError
do_rollouts(env, pol, args.num_trajs, args.num_steps,
target_distance=args.target_distance,
output_dir=args.output_dir,
image_visualizer=image_visualizer,
record_file=args.record_file,
gamma=args.gamma,
verbose=True)
if __name__ == "__main__":
main()
|
429199
|
import musicbrainzngs
from artist import artist
from disk import disk
class apiconnection():
    def __init__(self):
        pass
    def searach_Artist_in_Area(self, area, country, tag, limit):  # tag=['rock','metal']
        result = musicbrainzngs.search_artists(area=area, country=country, tag=tag, limit=limit)
        return result
    def resultToArtistObj(self, result):
        artists = []
        for x in result['artist-list']:
            area = x['area']['name']
            name = x['name']
            score = x['ext:score']
            try:
                date = x['life-span']['begin']
                gender = x['gender']
                type = x['type']
            except KeyError:
                # optional fields may be missing from the MusicBrainz response
                date = "EMPTY"
                gender = "EMPTY"
                type = "EMPTY"
            artists.append(artist(area, date, name, gender, type, score))
        return artists
    def searach_Disc_in_Area_by_Artist(self, artist, country, limit):
        result = musicbrainzngs.search_releases(artist=artist, country=country, limit=limit)
        return result
    def resultToDiscObj(self, result):
        disks = []
        for y in result:
            for x in y['release-list']:
                artist_name = x['artist-credit'][0]['name']
                title = x['title']
                try:
                    co = x['country']
                except KeyError:
                    co = "EMPTY"
                disks.append(disk(artist_name, title, co))
        return disks
|
429217
|
from . import http, dict, errors
noresponse = "Couldn't contact the API right now..."
def _fetch(path):
    """Return the 'url' field of an API response, raising NothingFound on any failure."""
    try:
        return http.get(path)["url"]
    except Exception:
        raise errors.NothingFound(noresponse)
def _r34(tag):
    """Shorthand for /r34/ lookups with a fixed tag."""
    return _fetch("/r34/?tags=" + tag)
def gif():
    return _fetch("/gif/")
def jav():
    return _fetch("/jav/")
def twitter():
    return _fetch("/twitter/")
def real():
    return _fetch("/rb/")
def fgo():
    return _r34("fate/grand_order")
def armpits():
    return _r34("armpits")
def armpit_hair():
    return _r34("armpit_hair")
def armpit_licking():
    return _r34("armpit_licking")
def smell():
    return _r34("smell")
def pantyhose():
    return _r34("pantyhose")
def large_breasts():
    return _r34("large_breasts")
def blush():
    return _r34("blush")
def blowjob():
    return _r34("blowjob")
def masturbation():
    return _r34("masturbation")
def pervert():
    return _r34("pervert")
def pubic_hair():
    return _r34("pubic_hair")
def sweat():
    return _r34("sweat")
def footjob():
    return _r34("footjob")
def foot_licking():
    return _r34("foot_licking")
def housewife():
    return _r34("housewife")
def milf():
    return _r34("milf")
def school_uniform():
    return _r34("school_uniform")
def bikini():
    return _r34("bikini")
def cowgirl_position():
    return _r34("cowgirl_position")
def doggystyle():
    return _r34("doggystyle")
def dark_skin():
    return _r34("dark_skin")
def animated():
    return _r34("animated")
def gangbang():
    return _r34("gangbang")
def clothed_sex():
    return _r34("clothed_sex")
def uncensored():
    return _r34("uncensored")
def twintails():
    return _r34("twintails")
def pussy_juice():
    return _r34("pussy_juice")
def rape():
    return _r34("rape")
def double_penetration():
    return _r34("double_penetration")
def futanari():
    return _r34("futanari")
def yaoi():
    return _r34("yaoi")
def yuri():
    return _r34("yuri")
def ass():
    return _r34("ass")
def pregnant():
    return _r34("pregnant")
def netorare():
    return _r34("netorare")
def ugly_man():
    return _r34("ugly_man")
def pout():
    return _r34("pout")
def cum():
    return _r34("cum")
def thick():
    return _r34("thick")
def scathach():
    return _r34("scathach_(fate/grand_order)")
def raikou():
    return _r34("minamoto_no_raikou_(fate/grand_order)")
def jeanne():
    return _r34("jeanne_d'arc_(fate)_(all)")
def ereshkigal():
    return _r34("ereshkigal_(fate/grand_order)")
def artoria():
    return _r34("artoria_pendragon_(all)")
def okita():
    return _r34("okita_souji_(fate)_(all)")
def kama():
    return _r34("kama_(fate/grand_order)")
def davinci():
    return _r34("leonardo_da_vinci_(fate/grand_order)")
def anastasia():
    return _r34("anastasia_(fate/grand_order)")
def saint_martha():
    return _r34("saint_martha")
def gudako():
    return _r34("fujimaru_ritsuka_(female)")
def mashu():
    return _r34("mash_kyrielight")
def abigail():
    return _r34("abigail_williams_(fate/grand_order)")
def ushiwakamaru():
    return _r34("ushiwakamaru_(fate/grand_order)")
def consort_yu():
    return _r34("consort_yu_(fate)")
def mordred():
    # the original body fetched the himiko tag, apparently a copy-paste slip;
    # "mordred_(fate)_(all)" is the assumed intended tag
    return _r34("mordred_(fate)_(all)")
def himiko():
    # the original body fetched the "thick" tag; restored to the himiko tag
    # that had slipped into mordred() above
    return _r34("himiko_(fate)")
def kiara():
    return _r34("sesshouin_kiara")
def xuanzang():
    return _r34("xuanzang_(fate/grand_order)")
def bb():
    return _r34("bb_(fate)_(all)")
def nero():
    return _r34("nero_claudius_(fate)_(all)")
|
429246
|
from ixnetwork_restpy.base import Base
from ixnetwork_restpy.files import Files
class FipDiscoveryAdvertisementFcf(Base):
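    """SDM packet template for the FIP Discovery Advertisement (FCF) header;
    each property below exposes one header field as an editable Multivalue."""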
__slots__ = ()
_SDM_NAME = 'fipDiscoveryAdvertisementFcf'
_SDM_ATT_MAP = {
'HeaderFipVersion': 'fipDiscoveryAdvertisementFcf.header.fipVersion-1',
'HeaderFipReserved': 'fipDiscoveryAdvertisementFcf.header.fipReserved-2',
'FipOperationCodeFipDiscovery': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipOperationCode.fipDiscovery-3',
'FipOperationFipOperationReserved1': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipOperationReserved1-4',
'FipSubcodeFipSubcode02h': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipSubcode.fipSubcode02h-5',
'FipOperationFipDescriptorListLength': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipDescriptorListLength-6',
'FipOperationFipFp': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipFp-7',
'FipOperationFipSp': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipSp-8',
'FipOperationFipReserved2': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipReserved2-9',
'FipOperationFipABit': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipABit-10',
'FipOperationFipSBit': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipSBit-11',
'FipOperationFipFBit': 'fipDiscoveryAdvertisementFcf.header.fipOperation.fipFBit-12',
'FipPriorityDescriptorFipPriorityDescriptorType': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipPriorityDescriptor.fipPriorityDescriptorType-13',
'FipPriorityDescriptorFipPriorityDescriptorLength': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipPriorityDescriptor.fipPriorityDescriptorLength-14',
'FipPriorityDescriptorFipPriorityDescriptorReserved': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipPriorityDescriptor.fipPriorityDescriptorReserved-15',
'FipPriorityDescriptorFipPriorityDescriptorValue': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipPriorityDescriptor.fipPriorityDescriptorValue-16',
'FipMacAddressDescriptorFipMacAddressDescriptorType': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipMacAddressDescriptor.fipMacAddressDescriptorType-17',
'FipMacAddressDescriptorFipMacAddressDescriptorLength': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipMacAddressDescriptor.fipMacAddressDescriptorLength-18',
'FipMacAddressDescriptorFipMacAddressDescriptorValue': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipMacAddressDescriptor.fipMacAddressDescriptorValue-19',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorType': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorType-20',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorLength': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorLength-21',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorReserved': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorReserved-22',
'FipNameIdentifierDescriptorFipNameIdentifierDescriptorValue': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipNameIdentifierDescriptor.fipNameIdentifierDescriptorValue-23',
'FipFabricDescriptorFipFabricDescriptorType': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFabricDescriptor.fipFabricDescriptorType-24',
'FipFabricDescriptorFipFabricDescriptorLength': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFabricDescriptor.fipFabricDescriptorLength-25',
'FipFabricDescriptorFipFabricDescriptorVfId': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFabricDescriptor.fipFabricDescriptorVfId-26',
'FipFabricDescriptorFipFabricDescriptorReserved': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFabricDescriptor.fipFabricDescriptorReserved-27',
'FipFabricDescriptorFipFabricDescriptorFc-map': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFabricDescriptor.fipFabricDescriptorFc-map-28',
'FipFabricDescriptorFipFabricDescriptorValue': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFabricDescriptor.fipFabricDescriptorValue-29',
'FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorType': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFkaAdvPeriodDescriptor.fipFkaAdvPeriodDescriptorType-30',
'FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorLength': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFkaAdvPeriodDescriptor.fipFkaAdvPeriodDescriptorLength-31',
'FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorReserved': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFkaAdvPeriodDescriptor.fipFkaAdvPeriodDescriptorReserved-32',
'FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorDBit': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFkaAdvPeriodDescriptor.fipFkaAdvPeriodDescriptorDBit-33',
'FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorValue': 'fipDiscoveryAdvertisementFcf.header.fipDescriptors.fipSelectFipDescriptor.fipFkaAdvPeriodDescriptor.fipFkaAdvPeriodDescriptorValue-34',
}
def __init__(self, parent, list_op=False):
super(FipDiscoveryAdvertisementFcf, self).__init__(parent, list_op)
@property
def HeaderFipVersion(self):
"""
Display Name: Version
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFipVersion']))
@property
def HeaderFipReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['HeaderFipReserved']))
@property
def FipOperationCodeFipDiscovery(self):
"""
Display Name: Discovery
Default Value: 0x0001
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationCodeFipDiscovery']))
@property
def FipOperationFipOperationReserved1(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipOperationReserved1']))
@property
def FipSubcodeFipSubcode02h(self):
"""
Display Name: Subcode 02h
Default Value: 0x02
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipSubcodeFipSubcode02h']))
@property
def FipOperationFipDescriptorListLength(self):
"""
Display Name: FIP Descriptor List Length
Default Value: 12
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipDescriptorListLength']))
@property
def FipOperationFipFp(self):
"""
Display Name: FP
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipFp']))
@property
def FipOperationFipSp(self):
"""
Display Name: SP
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipSp']))
@property
def FipOperationFipReserved2(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipReserved2']))
@property
def FipOperationFipABit(self):
"""
Display Name: A bit
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipABit']))
@property
def FipOperationFipSBit(self):
"""
Display Name: S bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipSBit']))
@property
def FipOperationFipFBit(self):
"""
Display Name: F bit
Default Value: 1
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipOperationFipFBit']))
@property
def FipPriorityDescriptorFipPriorityDescriptorType(self):
"""
Display Name: Priority Descriptor Type
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipPriorityDescriptorFipPriorityDescriptorType']))
@property
def FipPriorityDescriptorFipPriorityDescriptorLength(self):
"""
Display Name: Priority Descriptor Length
Default Value: 1
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipPriorityDescriptorFipPriorityDescriptorLength']))
@property
def FipPriorityDescriptorFipPriorityDescriptorReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipPriorityDescriptorFipPriorityDescriptorReserved']))
@property
def FipPriorityDescriptorFipPriorityDescriptorValue(self):
"""
Display Name: Priority Descriptor Value
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipPriorityDescriptorFipPriorityDescriptorValue']))
@property
def FipMacAddressDescriptorFipMacAddressDescriptorType(self):
"""
Display Name: MAC Address Descriptor Type
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipMacAddressDescriptorFipMacAddressDescriptorType']))
@property
def FipMacAddressDescriptorFipMacAddressDescriptorLength(self):
"""
Display Name: MAC Address Descriptor Length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipMacAddressDescriptorFipMacAddressDescriptorLength']))
@property
def FipMacAddressDescriptorFipMacAddressDescriptorValue(self):
"""
Display Name: MAC Address Descriptor Value
Default Value: 00:EE:00:00:00:00
Value Format: mAC
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipMacAddressDescriptorFipMacAddressDescriptorValue']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorType(self):
"""
Display Name: Name_Identifier Descriptor Type
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorType']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorLength(self):
"""
Display Name: Name_Identifier Descriptor Length
Default Value: 3
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorLength']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorReserved']))
@property
def FipNameIdentifierDescriptorFipNameIdentifierDescriptorValue(self):
"""
Display Name: Name_Identifier Descriptor Value
Default Value: 0x0000000000000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipNameIdentifierDescriptorFipNameIdentifierDescriptorValue']))
@property
def FipFabricDescriptorFipFabricDescriptorType(self):
"""
Display Name: Fabric Descriptor Type
Default Value: 5
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFabricDescriptorFipFabricDescriptorType']))
@property
def FipFabricDescriptorFipFabricDescriptorLength(self):
"""
Display Name: Fabric Descriptor Length
Default Value: 4
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFabricDescriptorFipFabricDescriptorLength']))
@property
def FipFabricDescriptorFipFabricDescriptorVfId(self):
"""
Display Name: Fabric Descriptor VF_ID
Default Value: 256
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFabricDescriptorFipFabricDescriptorVfId']))
@property
def FipFabricDescriptorFipFabricDescriptorReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFabricDescriptorFipFabricDescriptorReserved']))
@property
def FipFabricDescriptorFipFabricDescriptorFcmap(self):
"""
Display Name: Fabric Descriptor FC-MAP
Default Value: 0x000EFC
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFabricDescriptorFipFabricDescriptorFc-map']))
@property
def FipFabricDescriptorFipFabricDescriptorValue(self):
"""
Display Name: Fabric Descriptor Value
Default Value: 0x0000000000000000
Value Format: hex
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFabricDescriptorFipFabricDescriptorValue']))
@property
def FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorType(self):
"""
Display Name: FKA_ADV_Period Descriptor Type
Default Value: 12
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorType']))
@property
def FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorLength(self):
"""
Display Name: FKA_ADV_Period Descriptor Length
Default Value: 2
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorLength']))
@property
def FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorReserved(self):
"""
Display Name: Reserved
Default Value: 0
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorReserved']))
@property
def FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorDBit(self):
"""
Display Name: D bit
Default Value: 0
Value Format: decimal
Available enum values: False, 0, True, 1
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorDBit']))
@property
def FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorValue(self):
"""
Display Name: FKA_ADV_Period Descriptor Value
Default Value: 8000
Value Format: decimal
"""
from ixnetwork_restpy.multivalue import Multivalue
return Multivalue(self, self._get_attribute(self._SDM_ATT_MAP['FipFkaAdvPeriodDescriptorFipFkaAdvPeriodDescriptorValue']))
def add(self):
return self._create(self._map_locals(self._SDM_ATT_MAP, locals()))
|
429252
|
import operator
from abc import ABCMeta, abstractmethod
from typing import Callable, List, TypeVar
T = TypeVar("T")
class SortedList(List[T]):
"""List class that keeps itself but adding key and reversed methods
__setitem__, append, extend, and insert functions are unsupported
"""
class Comparable(metaclass=ABCMeta):
@abstractmethod
def __lt__(self, other: T) -> bool: ...
def __init__(self, key: Callable[[T], Comparable],
reversed_: bool,
seq=()):
super().__init__(seq)
self.key = key
self.reversed = reversed_
def __setitem__(self, *args, **kwargs):
raise NotImplementedError
def append(self, *args, **kwargs):
raise NotImplementedError
def extend(self, *args, **kwargs):
raise NotImplementedError
def insert(self, *args, **kwargs):
raise NotImplementedError
def add(self, item: T) -> int:
"""Add an item to the list and return the index it was added at"""
index = self.future_index(item)
super().insert(index, item)
return index
def future_index(self, item: T) -> int:
"""Similar to bisect.bisect_right/left, but now we use a key function
to get values to compare and can switch the direction using
self.reversed"""
index = 0
hi = len(self)
while index < hi:
mid = (index + hi) // 2
op = operator.gt if self.reversed else operator.lt
if op(self.key(item), self.key(self[mid])):
hi = mid
else:
index = mid + 1
return index
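# A minimal usage sketch (not part of the original module): items stay ordered
# by the key function, and add() returns the insertion index.
if __name__ == "__main__":
    sl = SortedList(key=lambda pair: pair[1], reversed_=False)
    assert sl.add(("b", 2)) == 0
    assert sl.add(("a", 1)) == 0
    assert [name for name, _ in sl] == ["a", "b"]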
|
429259
|
from typing import Sequence
import IPython.display
import ipywidgets as widgets
import traitlets
from ..base import LabellingWidgetMixin
from .. import generic
from ..generic.generic_mixin import GenericWidgetMixin
def _text_display_function(item: str):
IPython.display.display(IPython.display.Markdown(item))
class ClassLabeller(generic.ClassLabeller):
"""A text classification widget.
This widget lets you assign a single class to text.
"""
def __init__(
self,
options: Sequence[str] = (),
max_buttons: int = 12,
allow_freetext: bool = True,
*args,
**kwargs,
):
"""Create a widget for classifying text.
Parameters
----------
options : Sequence[str], optional
The classes, by default ()
max_buttons : int, optional
The number of buttons to allow, before switching to a dropdown
menu, by default 12
allow_freetext : bool, optional
If a text box should be available for new classes, by default True
"""
super().__init__(
options=options,
max_buttons=max_buttons,
allow_freetext=allow_freetext,
display_function=_text_display_function,
*args,
**kwargs,
) # type: ignore
class MulticlassLabeller(generic.MulticlassLabeller):
def __init__(
self,
options: Sequence[str] = (),
max_buttons: int = 12,
allow_freetext: bool = True,
*args,
**kwargs,
):
"""Create a widget for multi-class assignment.
Parameters
----------
options : Sequence[str], optional
The options for classes, by default ()
max_buttons : int, optional
How many buttons to display before switching to a multi-select
tool, by default 12
allow_freetext : bool, optional
Whether to allow free-text submission in a text box,
by default True
"""
super().__init__(
options=options,
max_buttons=max_buttons,
allow_freetext=allow_freetext,
display_function=_text_display_function,
*args,
**kwargs,
) # type: ignore
class SentimentLabeller(
GenericWidgetMixin, LabellingWidgetMixin, widgets.VBox
):
"""A sentiment classification widget.
This widget presents three label options, for classifying text into
one of negative, neutral, or positive sentiment.
"""
data: str = traitlets.Unicode()
def __init__(self, *args, **kwargs):
"""Create a sentiment classification widget."""
super().__init__(
display_function=_text_display_function, *args, **kwargs
)
self.buttons = [
widgets.Button(
description="negative",
icon="thumbs-down",
button_style="danger",
# layout=button_layout,
),
widgets.Button(
description="neutral",
icon="equals",
# layout=button_layout,
),
widgets.Button(
description="positive",
icon="thumbs-up",
button_style="success",
# layout=button_layout,
),
]
for button in self.buttons:
button.on_click(self.submit)
self.display_widget = widgets.Output(
layout=widgets.Layout(margin="auto")
)
self.children = [
widgets.Box(
(self.display_widget,),
layout=widgets.Layout(
justify_content="center",
padding="2.5% 0",
display="flex",
width="100%",
),
),
widgets.HBox(
[
widgets.HBox(),
widgets.HBox(self.buttons),
widgets.HBox([self.skip_button, self.undo_button]),
],
layout=widgets.Layout(justify_content="space-between"),
),
]
def submit(self, sender: widgets.Button): # type: ignore
"""Submit the label.
Parameters
----------
sender : widgets.Button
One of the three interface buttons.
"""
value = sender.description
self.data = value
super().submit()
def _handle_keystroke(self, event):
# the default enter shouldn't apply
if event.get("key") == "Enter":
return
super()._handle_keystroke(event)
keys = [str(i) for i in range(1, 10)] + ["0"]
for key, btn in zip(keys, self.buttons):
if event.get("key") == key:
self.submit(btn)
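# A minimal usage sketch (assumed notebook context, not part of the original
# module); the two class labels below are purely illustrative.
# widget = ClassLabeller(options=["positive", "negative"])
# widget  # displaying the widget in a cell renders the labelling interface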
|
429262
|
import keras
from keras.models import Sequential
from keras.layers import Dense
import numpy as np
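# Synthetic 1-D data: y = 3x + Gaussian noise, so training should drive the
# fitted weight toward 3 and the bias toward 0.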
trX = np.linspace(-1, 1, 101)
trY = 3 * trX + np.random.randn(*trX.shape) * 0.33
model = Sequential()
#model.add(Dense(input_dim=1, output_dim=1, init='uniform', activation='linear'))
model.add(Dense(input_dim=1, units=1, kernel_initializer='uniform', activation='linear'))
model.compile(optimizer='sgd', loss='mse')
weights = model.layers[0].get_weights()
w_init = weights[0][0][0]
b_init = weights[1][0]
print('Linear regression model is initialized with weight w: %.2f, b: %.2f' % (w_init, b_init))
model.fit(trX, trY, epochs=200, verbose=1)
weights = model.layers[0].get_weights()
w_final = weights[0][0][0]
b_final = weights[1][0]
print('Linear regression model is trained to have final weight w: %.2f, b: %.2f' % (w_final, b_final))
|
429292
|
import subprocess
from pathlib import Path
import pytest
from scripts.generate_utils import PROJECT_DIRECTORY, clone_and_generate
from scripts.stubgen import (
DEFAULT_BRANCH,
K8S_CLIENT_MODULE_DIRECTORY,
K8S_SOURCE_DIRECTORY,
STUBS_CLIENT_MODULE_DIRECTORY,
STUBS_TMP_CLIENT_MODULE_DIRECTORY,
STUBS_TMP_DIRECTORY,
)
from scripts.typeddictgen import DICT_CLIENT_DIRECTORY, generate_dicts
DICT_TMP_CLIENT_DIRECTORY = PROJECT_DIRECTORY / "tmp" / "client"
DICT_TMP_CLIENT_MODELS_DIRECTORY = DICT_TMP_CLIENT_DIRECTORY / "models"
def diff(src: Path, dst: Path) -> str:
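    # `git diff --no-index` compares two paths outside of any repository;
    # an empty result means the generated tree matches the committed one.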
result = subprocess.run(
[
"git",
"diff",
"--no-index",
src,
dst,
],
stdout=subprocess.PIPE,
)
return result.stdout.decode("utf-8")
@pytest.mark.stubtest
def test_generated_stubs():
clone_and_generate(
K8S_SOURCE_DIRECTORY,
K8S_CLIENT_MODULE_DIRECTORY,
STUBS_TMP_DIRECTORY,
DEFAULT_BRANCH,
)
assert diff(STUBS_TMP_CLIENT_MODULE_DIRECTORY, STUBS_CLIENT_MODULE_DIRECTORY) == ""
@pytest.mark.stubtest
def test_generated_dicts():
generate_dicts(DICT_TMP_CLIENT_DIRECTORY, DICT_TMP_CLIENT_MODELS_DIRECTORY)
assert diff(DICT_TMP_CLIENT_DIRECTORY, DICT_CLIENT_DIRECTORY) == ""
|
429333
|
from kipoi.model import BaseModel
from mmsplice import MMSplice
from mmsplice.utils import predict_splicing_efficiency
mmsplice = MMSplice()
class MMSpliceModel(BaseModel):
    '''Model to predict splicing efficiency from reference and mutated sequences'''
def predict_on_batch(self, inputs):
X_ref = mmsplice.predict_on_batch(inputs['seq'])
X_alt = mmsplice.predict_on_batch(inputs['mut_seq'])
return predict_splicing_efficiency(X_ref, X_alt)
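# Note: the batch is expected to provide the reference sequence under 'seq'
# and the mutated sequence under 'mut_seq', matching the keys used above.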
|
429363
|
import numpy as np
import tensorflow as tf
from tensorflow.python.framework import constant_op
import json
import os
import time
import random
random.seed(time.time())
from model import Model, _START_VOCAB
tf.app.flags.DEFINE_boolean("is_train", True, "Set to False to inference.")
tf.app.flags.DEFINE_integer("symbols", 30000, "vocabulary size.")
tf.app.flags.DEFINE_integer("num_entities", 21471, "entitiy vocabulary size.")
tf.app.flags.DEFINE_integer("num_relations", 44, "relation size.")
tf.app.flags.DEFINE_integer("embed_units", 300, "Size of word embedding.")
tf.app.flags.DEFINE_integer("trans_units", 100, "Size of trans embedding.")
tf.app.flags.DEFINE_integer("units", 512, "Size of each model layer.")
tf.app.flags.DEFINE_integer("layers", 2, "Number of layers in the model.")
tf.app.flags.DEFINE_boolean("copy_use", True, "use copy mechanism or not.")
tf.app.flags.DEFINE_integer("batch_size", 100, "Batch size to use during training.")
tf.app.flags.DEFINE_string("data_dir", "./data", "Data directory")
tf.app.flags.DEFINE_string("train_dir", "./train", "Training directory.")
tf.app.flags.DEFINE_integer("per_checkpoint", 1000, "How many steps to do per checkpoint.")
tf.app.flags.DEFINE_integer("inference_version", 0, "The version for inferencing.")
tf.app.flags.DEFINE_boolean("log_parameters", True, "Set to True to show the parameters")
tf.app.flags.DEFINE_string("inference_path", "test", "Set filename of inference, default isscreen")
FLAGS = tf.app.flags.FLAGS
if FLAGS.train_dir[-1] == '/': FLAGS.train_dir = FLAGS.train_dir[:-1]
csk_triples, csk_entities, kb_dict = [], [], []
def prepare_data(path, is_train=True):
global csk_entities, csk_triples, kb_dict
with open('%s/resource.txt' % path) as f:
d = json.loads(f.readline())
csk_triples = d['csk_triples']
csk_entities = d['csk_entities']
raw_vocab = d['vocab_dict']
kb_dict = d['dict_csk']
data_train, data_dev, data_test = [], [], []
if is_train:
with open('%s/trainset.txt' % path) as f:
for idx, line in enumerate(f):
#if idx == 100000: break
if idx % 100000 == 0: print('read train file line %d' % idx)
data_train.append(json.loads(line))
with open('%s/validset.txt' % path) as f:
for line in f:
data_dev.append(json.loads(line))
with open('%s/testset.txt' % path) as f:
for line in f:
data_test.append(json.loads(line))
return raw_vocab, data_train, data_dev, data_test
def build_vocab(path, raw_vocab, trans='transE'):
print("Creating word vocabulary...")
vocab_list = _START_VOCAB + sorted(raw_vocab, key=raw_vocab.get, reverse=True)
if len(vocab_list) > FLAGS.symbols:
vocab_list = vocab_list[:FLAGS.symbols]
print("Creating entity vocabulary...")
entity_list = ['_NONE', '_PAD_H', '_PAD_R', '_PAD_T', '_NAF_H', '_NAF_R', '_NAF_T']
with open('%s/entity.txt' % path) as f:
for i, line in enumerate(f):
e = line.strip()
entity_list.append(e)
print("Creating relation vocabulary...")
relation_list = []
with open('%s/relation.txt' % path) as f:
for i, line in enumerate(f):
r = line.strip()
relation_list.append(r)
print("Loading word vectors...")
vectors = {}
with open('%s/glove.840B.300d.txt' % path) as f:
for i, line in enumerate(f):
if i % 100000 == 0:
print(" processing line %d" % i)
s = line.strip()
word = s[:s.find(' ')]
vector = s[s.find(' ')+1:]
vectors[word] = vector
embed = []
    for word in vocab_list:
        if word in vectors:
            # list() so the values survive into np.array under Python 3
            vector = list(map(float, vectors[word].split()))
        else:
            vector = np.zeros((FLAGS.embed_units,), dtype=np.float32)
        embed.append(vector)
embed = np.array(embed, dtype=np.float32)
print("Loading entity vectors...")
    entity_embed = []
    with open('%s/entity_%s.txt' % (path, trans)) as f:
        for i, line in enumerate(f):
            s = line.strip().split('\t')
            entity_embed.append(list(map(float, s)))
    print("Loading relation vectors...")
    relation_embed = []
    with open('%s/relation_%s.txt' % (path, trans)) as f:
        for i, line in enumerate(f):
            s = line.strip().split('\t')
            relation_embed.append(list(map(float, s)))
entity_relation_embed = np.array(entity_embed+relation_embed, dtype=np.float32)
entity_embed = np.array(entity_embed, dtype=np.float32)
relation_embed = np.array(relation_embed, dtype=np.float32)
return vocab_list, embed, entity_list, entity_embed, relation_list, relation_embed, entity_relation_embed
def gen_batched_data(data):
global csk_entities, csk_triples, kb_dict
encoder_len = max([len(item['post']) for item in data])+1
decoder_len = max([len(item['response']) for item in data])+1
triple_len = max([sum([len(tri) for tri in item['all_triples']]) for item in data ])+1
max_length = 20
posts, responses, posts_length, responses_length = [], [], [], []
entities, triples, matches, post_triples, response_triples = [], [], [], [], []
match_entities, all_entities = [], []
match_triples, all_triples = [], []
NAF = ['_NAF_H', '_NAF_R', '_NAF_T']
PAD = ['_PAD_H', '_PAD_R', '_PAD_T']
def padding(sent, l):
return sent + ['_EOS'] + ['_PAD'] * (l-len(sent)-1)
def padding_triple(triple, l):
return [NAF] + triple + [PAD] * (l - len(triple) - 1)
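    # Triples are prefixed with the NAF ("not a fact") sentinel and padded with
    # PAD triples; sentences get an _EOS terminator followed by _PAD padding.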
for item in data:
posts.append(padding(item['post'], encoder_len))
responses.append(padding(item['response'], decoder_len))
posts_length.append(len(item['post'])+1)
responses_length.append(len(item['response'])+1)
all_triples.append(padding_triple([csk_triples[x].split(', ') for triple in item['all_triples'] for x in triple], triple_len))
match_index = []
for x in item['match_index']:
_index = [-1] * triple_len
if x[0] == -1 and x[1] == -1:
match_index.append(-1)
else:
match_index.append(sum([len(m) for m in item['all_triples'][:(x[0]-1)]]) + 1 + x[1])
match_triples.append(match_index + [-1]*(decoder_len-len(match_index)))
if not FLAGS.is_train:
entity = ['_NONE']
entity += [csk_entities[x] for ent in item['all_entities'] for x in ent]
entities.append(entity+['_NONE']*(triple_len-len(entity)))
batched_data = {'posts': np.array(posts),
'responses': np.array(responses),
'posts_length': posts_length,
'responses_length': responses_length,
'triples': np.array(all_triples),
'entities': np.array(entities),
'match_triples': np.array(match_triples)}
return batched_data
def train(model, sess, data_train):
batched_data = gen_batched_data(data_train)
outputs = model.step_decoder(sess, batched_data, kb_use=True)
return np.sum(outputs[0])
def generate_summary(model, sess, data_train):
selected_data = [random.choice(data_train) for i in range(FLAGS.batch_size)]
batched_data = gen_batched_data(selected_data)
summary = model.step_decoder(sess, batched_data, kb_use=True, forward_only=True, summary=True)[-1]
return summary
def evaluate(model, sess, data_dev, summary_writer):
loss = np.zeros((1, ))
st, ed, times = 0, FLAGS.batch_size, 0
while st < len(data_dev):
selected_data = data_dev[st:ed]
batched_data = gen_batched_data(selected_data)
outputs = model.step_decoder(sess, batched_data, kb_use=True, forward_only=True)
loss += np.sum(outputs[0])
st, ed = ed, ed+FLAGS.batch_size
times += 1
loss /= len(data_dev)
    summary = tf.Summary()
    summary.value.add(tag='decoder_loss/dev', simple_value=float(loss))
    summary.value.add(tag='perplexity/dev', simple_value=float(np.exp(loss)))
summary_writer.add_summary(summary, model.global_step.eval())
print(' perplexity on dev set: %.2f' % np.exp(loss))
def get_steps(train_dir):
a = os.walk(train_dir)
for root, dirs, files in a:
if root == train_dir:
filenames = files
steps, metafiles, datafiles, indexfiles = [], [], [], []
for filename in filenames:
if 'meta' in filename:
metafiles.append(filename)
if 'data' in filename:
datafiles.append(filename)
if 'index' in filename:
indexfiles.append(filename)
metafiles.sort()
datafiles.sort()
indexfiles.sort(reverse=True)
    for f in indexfiles:
        # filenames look like 'checkpoint-XXXXXXXX.index'; slice out the step number
        steps.append(int(f[11:-6]))
return steps
def test(sess, saver, data_dev, setnum=5000):
with open('%s/stopwords' % FLAGS.data_dir) as f:
stopwords = json.loads(f.readline())
steps = get_steps(FLAGS.train_dir)
    low_step = 0
    high_step = 800000
with open('%s.res' % FLAGS.inference_path, 'w') as resfile, open('%s.log' % FLAGS.inference_path, 'w') as outfile:
for step in [step for step in steps if step > low_step and step < high_step]:
outfile.write('test for model-%d\n' % step)
model_path = '%s/checkpoint-%08d' % (FLAGS.train_dir, step)
print('restore from %s' % model_path)
            try:
                saver.restore(sess, model_path)
            except Exception:
                # skip checkpoints that cannot be restored
                continue
st, ed = 0, FLAGS.batch_size
results = []
loss = []
while st < len(data_dev):
selected_data = data_dev[st:ed]
batched_data = gen_batched_data(selected_data)
responses, ppx_loss = sess.run(['decoder_1/generation:0', 'decoder/ppx_loss:0'], {'enc_inps:0': batched_data['posts'], 'enc_lens:0': batched_data['posts_length'], 'dec_inps:0': batched_data['responses'], 'dec_lens:0': batched_data['responses_length'], 'entities:0': batched_data['entities'], 'triples:0': batched_data['triples'], 'match_triples:0': batched_data['match_triples']})
loss += [x for x in ppx_loss]
for response in responses:
result = []
for token in response:
if token != '_EOS':
result.append(token)
else:
break
results.append(result)
st, ed = ed, ed+FLAGS.batch_size
match_entity_sum = [.0] * 4
cnt = 0
for post, response, result, match_triples, triples, entities in zip([data['post'] for data in data_dev], [data['response'] for data in data_dev], results, [data['match_triples'] for data in data_dev], [data['all_triples'] for data in data_dev], [data['all_entities'] for data in data_dev]):
            setidx = cnt // setnum  # integer division: used as a list index below
result_matched_entities = []
triples = [csk_triples[tri] for triple in triples for tri in triple]
match_triples = [csk_triples[triple] for triple in match_triples]
entities = [csk_entities[x] for entity in entities for x in entity]
matches = [x for triple in match_triples for x in [triple.split(', ')[0], triple.split(', ')[2]] if x in response]
for word in result:
if word not in stopwords and word in entities:
result_matched_entities.append(word)
outfile.write('post: %s\nresponse: %s\nresult: %s\nmatch_entity: %s\n\n' % (' '.join(post), ' '.join(response), ' '.join(result), ' '.join(result_matched_entities)))
match_entity_sum[setidx] += len(set(result_matched_entities))
cnt += 1
match_entity_sum = [m / setnum for m in match_entity_sum] + [sum(match_entity_sum) / len(data_dev)]
losses = [np.sum(loss[x:x+setnum]) / float(setnum) for x in range(0, setnum*4, setnum)] + [np.sum(loss) / float(setnum*4)]
losses = [np.exp(x) for x in losses]
def show(x):
return ', '.join([str(v) for v in x])
outfile.write('model: %d\n\tperplexity: %s\n\tmatch_entity_rate: %s\n%s\n\n' % (step, show(losses), show(match_entity_sum), '='*50))
resfile.write('model: %d\n\tperplexity: %s\n\tmatch_entity_rate: %s\n\n' % (step, show(losses), show(match_entity_sum)))
outfile.flush()
resfile.flush()
return results
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
with tf.Session(config=config) as sess:
if FLAGS.is_train:
raw_vocab, data_train, data_dev, data_test = prepare_data(FLAGS.data_dir)
vocab, embed, entity_vocab, entity_embed, relation_vocab, relation_embed, entity_relation_embed = build_vocab(FLAGS.data_dir, raw_vocab)
FLAGS.num_entities = len(entity_vocab)
print(FLAGS.__flags)
model = Model(
FLAGS.symbols,
FLAGS.embed_units,
FLAGS.units,
FLAGS.layers,
embed,
entity_relation_embed,
num_entities=len(entity_vocab)+len(relation_vocab),
num_trans_units=FLAGS.trans_units,
output_alignments=FLAGS.copy_use)
if tf.train.get_checkpoint_state(FLAGS.train_dir):
print("Reading model parameters from %s" % FLAGS.train_dir)
model.saver.restore(sess, tf.train.latest_checkpoint(FLAGS.train_dir))
else:
print("Created model with fresh parameters.")
tf.global_variables_initializer().run()
op_in = model.symbol2index.insert(constant_op.constant(vocab),
constant_op.constant(range(FLAGS.symbols), dtype=tf.int64))
sess.run(op_in)
op_out = model.index2symbol.insert(constant_op.constant(
range(FLAGS.symbols), dtype=tf.int64), constant_op.constant(vocab))
sess.run(op_out)
op_in = model.entity2index.insert(constant_op.constant(entity_vocab+relation_vocab),
constant_op.constant(range(len(entity_vocab)+len(relation_vocab)), dtype=tf.int64))
sess.run(op_in)
op_out = model.index2entity.insert(constant_op.constant(
range(len(entity_vocab)+len(relation_vocab)), dtype=tf.int64), constant_op.constant(entity_vocab+relation_vocab))
sess.run(op_out)
if FLAGS.log_parameters:
model.print_parameters()
summary_writer = tf.summary.FileWriter('%s/log' % FLAGS.train_dir, sess.graph)
loss_step, time_step = np.zeros((1, )), .0
previous_losses = [1e18]*3
train_len = len(data_train)
while True:
st, ed = 0, FLAGS.batch_size * FLAGS.per_checkpoint
random.shuffle(data_train)
while st < train_len:
start_time = time.time()
for batch in range(st, ed, FLAGS.batch_size):
loss_step += train(model, sess, data_train[batch:batch+FLAGS.batch_size]) / (ed - st)
show = lambda a: '[%s]' % (' '.join(['%.2f' % x for x in a]))
print("global step %d learning rate %.4f step-time %.2f loss %f perplexity %s"
% (model.global_step.eval(), model.lr,
(time.time() - start_time) / (ed - st) / FLAGS.batch_size, loss_step, show(np.exp(loss_step))))
model.saver.save(sess, '%s/checkpoint' % FLAGS.train_dir,
global_step=model.global_step)
                summary = tf.Summary()
                summary.value.add(tag='decoder_loss/train', simple_value=float(loss_step))
                summary.value.add(tag='perplexity/train', simple_value=float(np.exp(loss_step)))
summary_writer.add_summary(summary, model.global_step.eval())
summary_model = generate_summary(model, sess, data_train)
summary_writer.add_summary(summary_model, model.global_step.eval())
evaluate(model, sess, data_dev, summary_writer)
previous_losses = previous_losses[1:]+[np.sum(loss_step)]
loss_step, time_step = np.zeros((1, )), .0
st, ed = ed, min(train_len, ed + FLAGS.batch_size * FLAGS.per_checkpoint)
model.saver_epoch.save(sess, '%s/epoch/checkpoint' % FLAGS.train_dir, global_step=model.global_step)
else:
model = Model(
FLAGS.symbols,
FLAGS.embed_units,
FLAGS.units,
FLAGS.layers,
embed=None,
num_entities=FLAGS.num_entities+FLAGS.num_relations,
num_trans_units=FLAGS.trans_units,
output_alignments=FLAGS.copy_use)
if FLAGS.inference_version == 0:
model_path = tf.train.latest_checkpoint(FLAGS.train_dir)
else:
model_path = '%s/checkpoint-%08d' % (FLAGS.train_dir, FLAGS.inference_version)
print('restore from %s' % model_path)
model.saver.restore(sess, model_path)
saver = model.saver
raw_vocab, data_train, data_dev, data_test = prepare_data(FLAGS.data_dir, is_train=False)
test(sess, saver, data_test, setnum=5000)
|
429371
|
import os
def print_head():
    print("=======================================================================")
    print("Grade management - \"<NAME> - Development / Training\"")
    print("=======================================================================")
    print("\n\t\t\tGENERAL INFORMATION")
    print("=======================================================================")
def start_program():
print_head()
    # Part 1 - Read the number of students and the course name
number_students, course_name = input_students_course_name()
print_start_input_info(number_students, course_name)
students_data, calification_acumulator = add_students_info(number_students)
avg = calculate_course_average(calification_acumulator, number_students)
above_avg, below_avg = classified_students_respect_avg(students_data, avg)
save_data_in_files(course_name, avg, above_avg, below_avg, students_data)
def input_students_course_name():
    while True:
        try:
            number_students = int(input("Enter the number of students: "))
        except ValueError:
            number_students = 0
        except Exception as message:
            number_students = 0
            print("Error: " + str(message))
        if (number_students > 0):
            break
        else:
            print("You must enter a number greater than 0")
    while True:
        course_name = input("Enter the course name: ")
        if course_name:
            break
    return (number_students, course_name)
def print_start_input_info(number_students, course_name):
print("-----------------------------------------------------------------------")
    print(
        f"Number of students: {number_students} / Course name: {course_name}")
def add_students_info(number_students):
    # Part 2 - Read the students' information
    print("=======================================================================")
    print("\n\t\t\tStudent information")
    print("=======================================================================")
students_data = []
calification_acumulator = 0
for i in range(number_students):
        # Read each student's information
        print(f"Student data - {i + 1}")
        # Loop until something non-empty is entered
        while True:
            name_lastname = input("Enter first and last name: ")
            if name_lastname:
                break
        while True:
            email = input("Enter email: ")
            if email:
                break
        while True:
            try:
                calification = float(
                    input("Enter a grade (0 to 10 inclusive): "))
            except ValueError:
                calification = -1
            except Exception as message:
                calification = -1
                print("Error: " + str(message))
            if (calification >= 0 and calification <= 10):
                calification_acumulator += calification
                break
            else:
                print("You must enter a valid grade from 0 to 10 (inclusive)")
students_data.append(dict(name_lastname=name_lastname,
email=email, calification=calification))
print("-----------------------------------------------------------------------")
return students_data, calification_acumulator
def calculate_course_average(califications, students):
    # Part 3 - <sum of all students' grades> / <number of students> = AVERAGE GRADE
    avg_calification = califications / students
    print(f"Class average grade: {avg_calification:.2f}")
return avg_calification
def classified_students_respect_avg(students, avg):
    # Part 4 - Split students into those at/above the average and those below
above_avg = []
below_avg = []
for value in students:
calification = value['calification']
if (calification >= avg):
above_avg.append(value)
else:
below_avg.append(value)
return above_avg, below_avg
def save_data_in_files(course, avg, above_avg, below_avg, students):
    # Part 5 - Store the information
    try:
        # Absolute path of the directory this script lives in
        script_directory = os.path.dirname(__file__)
        # 5.1 - students.txt
        save_students_principal(script_directory, course, students, avg)
        # 5.2 - Students above and below the average
        save_avg_classfied(script_directory, course, avg, above_avg, below_avg)
    except FileExistsError:
        print("File already exists")
except Exception as except_message:
print(except_message)
def save_students_principal(directory, course, students, avg):
    file_path = f"{directory}/students.txt"
    with open(file_path, "w") as txt:
        txt.write("===========================\n")
        txt.write(f"Grades for {course}\n")
        txt.write("===========================\n")
        txt.write("Name / Surname,Email,Grade\n")
        for item in students:
            text_data = f"{item['name_lastname']}," + \
                f"{item['email']}," + \
                f"{item['calification']}"
            txt.write(f"{text_data}\n")
        txt.write("===========================\n")
        txt.write(f"CLASS AVERAGE: {avg:.2f}\n")
        txt.write("===========================\n")
def save_avg_classfied(directory, course, avg, above_avg, below_avg):
    file_path = f"{directory}/report-students-avg-data.txt"
    with open(file_path, "w") as txt:
        txt.write("===========================\n")
        txt.write(f"Grades for {course} at or above the average: {avg:.2f}\n")
        txt.write("===========================\n")
        txt.write("Name / Surname,Email,Grade\n")
        for item in above_avg:
            text_data = f"{item['name_lastname']}," + \
                f"{item['email']}," + \
                f"{item['calification']}"
            txt.write(f"{text_data}\n")
        txt.write("===========================\n")
        txt.write(f"Grades for {course} below the average: {avg:.2f}\n")
        txt.write("===========================\n")
        txt.write("Name / Surname,Email,Grade\n")
        for item in below_avg:
            text_data = f"{item['name_lastname']}," + \
                f"{item['email']}," + \
                f"{item['calification']}"
            txt.write(f"{text_data}\n")
if __name__ == "__main__":
    start_program()
|
429384
|
import numpy as np
import picos
from cvxopt import matrix, spmatrix
from . import linear_projection
from . import utils
from . import _kabsch
def to_spmatrix(sp):
coo = sp.astype(np.double).tocoo()
return spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=sp.shape)
def solve_sdp(w, k):
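    """Solve an SDP relaxation of the correspondence problem encoded by the
    pairwise weight matrix w, then round the relaxed solution to a 0/1
    assignment keeping the k highest-scoring matches."""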
nsquare = w.shape[0]
n = int(np.sqrt(nsquare))
ww = np.c_[w, np.zeros((nsquare, 1))]
ww = np.r_[ww, np.zeros((1, nsquare + 1))]
maps, mapeq, maps_b, mapeq_b = utils.generate_sdp_constraint_map(ww, n, k)
sdp = picos.Problem()
y = sdp.add_variable('y', (nsquare + 1, nsquare + 1), 'symmetric')
sdp.add_constraint(y >> 0)
sdp.add_constraint(y[nsquare, nsquare] == 1.0)
sdp.add_constraint(y[nsquare, :] == y[:, nsquare].T)
for i, m in enumerate(maps):
sdp.add_constraint(to_spmatrix(m) | y <= maps_b.astype(np.double)[i, 0])
sdp.add_constraint(to_spmatrix(mapeq) | y == matrix(mapeq_b))
sdp.add_constraint(picos.trace(y) == k + 1)
sdp.set_objective('max', matrix(ww) | y)
sdp.set_option('tol', 0.1)
sdp.set_option('verbose', 0)
# print(sdp)
sdp.solve()
y = y.value
t = np.array(y[-1, :-1])
x1 = t.reshape((n, n))
    x = linear_projection.linear_projection(x1.ravel(order='F'))
    x_flat = x.flatten(order='F')
    idx, = np.where(x_flat == 1.0)
    score = x1.ravel(order='F')[idx]
    sidx = np.argsort(score)
    idx = np.delete(idx, sidx[(n - k):])
    # writing through ravel(order='F') can silently modify a copy, so edit an
    # explicit flattened copy and reshape it back instead
    x_flat[idx] = 0.0
    return x_flat.reshape(x.shape, order='F')
def count_correspondence(m, tree, eps):
dist, idx = tree.query(m.T)
in_idx, = np.where(dist <= eps)
return len(in_idx)
def get_correspondences(x):
idxs = np.argmax(x, axis=1)
y = x[np.arange(x.shape[0]), idxs]
corr_m = np.where(y > 0)[0]
corr_b = idxs[corr_m]
return corr_m, corr_b
def sdp_reg(m, b, k, d_diff_thresh, pair_dist_thresh, n_pair_thres):
w, n_accepted_pairs = utils.generate_weight_matrix(m, b,
d_diff_thresh,
pair_dist_thresh)
if n_accepted_pairs <= n_pair_thres:
return None, None, None, None
x = solve_sdp(w, k)
corr_m, corr_b = get_correspondences(x)
sm = m[:, corr_m]
sb = b[:, corr_b]
rot, t = _kabsch.kabsch(sm, sb)
return rot, t, corr_m, corr_b
|
429417
|
from __future__ import print_function, absolute_import, division
from future.builtins import *
from future import standard_library
standard_library.install_aliases()
# Copyright 2017 Autodesk Inc.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from . import Alias
class ResizableArray(object):
""" Behaves like a numpy array, but with fast extends and appends for the first index.
Similar to python lists, the underlying array is reallocated in large chunks whenever the
number of elements grows beyond the current memory allocation.
"""
#@args_from(np.array)
def __init__(self, *args, **kwargs):
self._array = np.array(*args, **kwargs)
self._len = len(self._array)
self._subarray = self._array[:self._len]
self._size = self._len
def __getattr__(self, item):
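        # look up '_subarray' explicitly so a missing attribute (e.g. mid-unpickling)
        # raises AttributeError instead of recursing through __getattr__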
if item == '_subarray':
return self.__getattribute__('_subarray')
return getattr(self._subarray, item)
def __repr__(self):
return '%s(%s)' % (self.__class__.__name__, repr(self._subarray))
def append(self, item):
self.extend([item])
def extend(self, its):
try:
ll = len(its)
except TypeError:
its = list(its)
ll = len(its)
newlen = self._len + ll
if newlen > self._size:
self._resize(newlen)
for item in its:
self._array[self._len] = item
self._len += 1
self._subarray = self._array[:self._len]
def _resize(self, minsize):
newsize = round_up_to_power_of_two(minsize)
if newsize <= self._size:
return
newshape = (newsize,) + self._array.shape[1:]
newarray = np.empty(newshape, dtype=self._array.dtype)
newarray[:self._len] = self._array
self._array = newarray
self._size = newsize
# delegate magic methods as well - this is a list of all math-related array methods
_ARRAYMAGIC = ('__abs__', '__add__', '__and__', '__array__', '__contains__', '__copy__',
'__deepcopy__', '__delitem__', '__delslice__', '__div__', '__divmod__', '__eq__',
'__float__', '__floordiv__', '__ge__', '__getitem__', '__getslice__', '__gt__',
'__hex__', '__iadd__', '__iand__', '__idiv__', '__ifloordiv__', '__ilshift__',
'__imod__', '__imul__', '__index__', '__int__', '__invert__', '__ior__', '__ipow__',
'__irshift__', '__isub__', '__itruediv__', '__ixor__', '__le__', '__len__',
'__long__', '__lshift__', '__lt__', '__mod__', '__mul__', '__ne__', '__neg__',
'__nonzero__', '__oct__', '__or__', '__pos__', '__pow__', '__radd__', '__rand__',
'__rdiv__', '__rdivmod__', '__rfloordiv__', '__rlshift__', '__rmod__', '__rmul__',
'__ror__', '__rpow__', '__rrshift__', '__rshift__', '__rsub__', '__rtruediv__',
'__rxor__', '__setitem__', '__setslice__', '__str__', '__sub__',
'__truediv__', '__xor__')
for _methname in _ARRAYMAGIC:
setattr(ResizableArray, _methname, Alias('_subarray.%s' % _methname))
def round_up_to_power_of_two(n):
"""
From http://stackoverflow.com/a/14267825/1958900
"""
if n < 0:
raise TypeError("Nonzero positive integers only")
elif n == 0:
return 1
else:
return 1 << (n-1).bit_length()
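# A minimal usage sketch (not part of the original module): appends amortize
# reallocation cost by growing the backing buffer to the next power of two.
#   ra = ResizableArray([1.0, 2.0])
#   ra.extend([3.0, 4.0, 5.0])
#   len(ra)    # -> 5 visible elements
#   ra._size   # -> 8, the allocated capacity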
|
429429
|
from jnpr.healthbot.swagger.models.inline_response2002 import InlineResponse2002
def _mock_user_login():
return InlineResponse2002(access_token='xxxx', refresh_token='yyyy',
refresh_token_expires=1, token_expires=1)
|
429467
|
from .enzyme import Enzyme, Ribosome, RNAPolymerase
from .thermomemodel import ThermoMEModel
from .memodel import MEModel
|
429521
|
import re
import os
print("Generating new layers.py from the following files\n")
files = list(filter(lambda x: x[-10:] == '_layers.py', os.listdir("onnx_caffe")))
print(files)
layers_file = open(os.path.join('onnx_caffe', 'layers.py'), 'w')
layers_file.write("# This file is generated by generate_layers.py\n")
doc_file = open("Operators.md", 'w')
doc_file.write("# Supported Layer List\n\n")
doc_file.write("**This file is generated by generate_layers.py. Please do not edit manually.**\n\n")
doc_file.write("## List\n\n")
count = 0
for fname in files:
    with open(os.path.join('onnx_caffe', fname), 'r') as f:
        content = f.read()
    layers = re.findall(r'class \S+\(Layer\)', content)
    layers = [name[6:-7] for name in layers]  # strip leading "class " and trailing "(Layer)"
count += len(layers)
#print(layers)
for layer in layers:
layers_file.write('from {} import {}\n'.format('.' + fname[:-3], layer))
doc_file.write('* {}\n'.format(layer))
layers_file.close()
doc_file.close()
print("Totally {} kinds of layers generated.".format(count))
|
429526
|
import unittest
from .. import auto_transforms as at
import pandas as pd
import numpy as np
from sklearn.pipeline import Pipeline
import joblib  # sklearn.externals.joblib was removed in newer scikit-learn releases
import logging
class Test_AutoTransforms(unittest.TestCase):
def test_sklearn_pipeline(self):
t = at.AutoTransform(ignore_vals=["NA",""])
transformers = [("auto",t)]
p = Pipeline(transformers)
df = pd.DataFrame([True,False])
df2 = p.fit_transform(df)
self.assertTrue(df2[0][0] == 1)
self.assertTrue(df2[0][1] == 0)
joblib.dump(p,"/tmp/auto_pl")
p2 = joblib.load("/tmp/auto_pl")
df3 = p2.transform(df)
self.assertTrue(df3[0][0] == 1)
self.assertTrue(df3[0][1] == 0)
def test_bool_col(self):
df = pd.DataFrame([True,False])
t = at.AutoTransform(ignore_vals=["NA",""])
t.fit(df)
df2 = t.transform(df)
self.assertTrue(df2[0][0] == 1)
self.assertTrue(df2[0][1] == 0)
def test_boolean_col_with_missing(self):
df = pd.DataFrame([{"a":"true"},{"a":"false"},{"a":""}])
t = at.AutoTransform(ignore_vals=["NA"])
t.fit(df)
df2 = t.transform(df)
        print(df2)
self.assertTrue(df2["a"][0] == "true")
self.assertTrue(df2["a"][1] == "false")
self.assertTrue(df2["a"][2] == "UKN")
def test_boolean_col_with_missing2(self):
df = pd.DataFrame([{"a":"true"},{"a":"false"},{"a":""}])
t = at.AutoTransform(ignore_vals=["NA"],cat_missing_val="?")
t.fit(df)
df2 = t.transform(df)
        print(df2)
self.assertTrue(df2["a"][0] == "true")
self.assertTrue(df2["a"][1] == "false")
self.assertTrue(df2["a"][2] == "?")
def test_boolean_col2(self):
df = pd.DataFrame([{"a":1},{"a":0},{"a":"false"}])
t = at.AutoTransform(ignore_vals=["NA"])
t.fit(df)
df2 = t.transform(df)
self.assertTrue(df2["a"][0] == 1)
self.assertTrue(df2["a"][1] == 0)
def test_change_type_when_ignored_removed(self):
df = pd.DataFrame([{"a":"NA"},{"a":10},{"a":12},{"a":8}])
t = at.AutoTransform(ignore_vals=["NA"])
t.fit(df)
df2 = t.transform(df)
self.assertTrue(df2["a"][0] == 0.0)
self.assertTrue(df2["a"][1] == 0.0)
self.assertAlmostEqual(df2["a"][2],1.224745,places=4)
def test_categorical(self):
df = pd.DataFrame([{"a":""},{"a":"v1"},{"a":"v2"},{"a":"v3"}])
t = at.AutoTransform(ignore_vals=["NA"])
t.fit(df)
df2 = t.transform(df)
self.assertTrue(df2["a"][0] == "UKN")
self.assertTrue(df2["a"][1] == "v1")
self.assertTrue(df2["a"][2] == "v2")
"""
Test that categorical values can be limited. Those appearing less than some value are removed.
"""
def test_categorical_values_limit(self):
df = pd.DataFrame([{"a":10,"b":1},{"a":5,"b":2},{"a":10,"b":3}])
t = at.AutoTransform(max_values_numeric_categorical=2)
t.fit(df)
df2 = t.transform(df)
self.assertEqual(df2["a"][0],"a_10")
def test_ignored_values(self):
df = pd.DataFrame([{"a":10},{"a":99},{"a":12},{"a":8}])
t = at.AutoTransform(ignore_vals=[99])
t.fit(df)
df2 = t.transform(df)
self.assertTrue(df2["a"][0] == 0.0)
self.assertTrue(df2["a"][1] == 0.0)
self.assertAlmostEqual(df2["a"][2],1.224745,places=4)
def test_dates(self):
df = pd.DataFrame([{"a":"30JAN14:15:11:00","b":"20 Jan 2015"},{"a":"31JAN14:10:11:00","b":"20 Jan 2015"}])
t = at.AutoTransform(custom_date_formats=["%d%b%y:%H:%M:%S"],date_cols=["a"])
t.fit(df)
df2 = t.transform(df)
self.assertAlmostEqual(df2["a_h1"][0],-0.707,places=2)
def test_dates2(self):
df = pd.DataFrame([{"a":"28-09-15"},{"a":"22-03-15"}])
t = at.AutoTransform(custom_date_formats=["%d-%m-%y"],date_cols=["a"],date_transforms=[False,True,True,True])
t.fit(df)
df2 = t.transform(df)
        print(df2)
#self.assertAlmostEqual(df2["a_h1"][0],-0.707,places=2)
def test_drop_constant_cols(self):
df = pd.DataFrame([{"a":10,"b":11},{"a":10,"b":12}])
t = at.AutoTransform()
t.fit(df)
df2 = t.transform(df)
self.assertTrue(len(df2.columns) == 1)
def test_drop_duplicate_cols(self):
df = pd.DataFrame([{"a":12,"b":12},{"a":10,"b":10}])
t = at.AutoTransform()
t.fit(df)
df2 = t.transform(df)
self.assertTrue(len(df2.columns) == 1)
def test_min_max_limit(self):
df = pd.DataFrame([{"a":9,"b":12},{"a":12,"b":10}])
df2 = pd.DataFrame([{"a":1,"b":12},{"a":15,"b":10}])
t = at.AutoTransform(min_max_limit=True)
t.fit(df)
df3 = t.transform(df2)
self.assertTrue(df3["a"][0] == -1)
self.assertTrue(df3["a"][1] == 1)
if __name__ == '__main__':
logging.basicConfig(format='%(asctime)s : %(levelname)s : %(message)s', level=logging.INFO)
unittest.main()
|
429599
|
import random
import collections
from nltk import NaiveBayesClassifier as nbc
from nltk.classify.maxent import MaxentClassifier as mec
import nltk.classify
import pickle
import math
from nltk.metrics import precision
from nltk.metrics import recall
from nltk.metrics import f_measure
from sklearn.svm import LinearSVC
from nltk.classify.scikitlearn import SklearnClassifier
class PerformanceFinder:
def __init__(self):
self.train_set_size = 0
self.test_set_size = 0
self.total_size = 0
self.trainSet = []
self.testSet = []
self.featureList = []
def findsets(self,classifier):
refsets = collections.defaultdict(set)
self.testSets = collections.defaultdict(set)
for i, (feats, label) in enumerate(self.testSet):
refsets[label].add(i)
observed = classifier.classify(feats)
self.testSets[observed].add(i)
return refsets,self.testSets
def findAccuracy(self,classifier):
return nltk.classify.accuracy(classifier,self.testSet)
def findPrecision(self,classifier):
refsets,self.testSets = self.findsets(classifier)
return precision(refsets['bullish'], self.testSets['bullish']),precision(refsets['bearish'], self.testSets['bearish']),precision(refsets['neutral'], self.testSets['neutral'])
def findRecall(self,classifier):
refsets,self.testSets = self.findsets(classifier)
return recall(refsets['bullish'], self.testSets['bullish']),recall(refsets['bearish'], self.testSets['bearish']),recall(refsets['neutral'], self.testSets['neutral'])
def findFMetric(self,classifier):
refsets,self.testSets = self.findsets(classifier)
return f_measure(refsets['bullish'], self.testSets['bullish']),f_measure(refsets['bearish'], self.testSets['bearish']),f_measure(refsets['neutral'], self.testSets['neutral'])
def findNBPerformance(self):
self.train_set_size, self.test_set_size, self.trainSet, self.testSet = self.findSet()
classifier = nbc.train(self.trainSet)
bull_precision, bear_precision, neutral_precision = self.findPrecision(classifier)
bull_recall, bear_recall, neutral_recall = self.findRecall(classifier)
bull_fmetric, bear_fmetric, neutral_fmetric = self.findFMetric(classifier)
accuracy = self.findAccuracy(classifier)
return self.train_set_size, self.test_set_size, accuracy, bull_precision, bear_precision, neutral_precision, bull_recall, bear_recall, neutral_recall, bull_fmetric, bear_fmetric, neutral_fmetric
def findMEPerformance(self):
self.train_set_size, self.test_set_size, self.trainSet, self.testSet = self.findSet()
classifier = mec.train(self.trainSet,algorithm ='iis' ,max_iter=50)
bull_precision, bear_precision, neutral_precision = self.findPrecision(classifier)
bull_recall, bear_recall, neutral_recall = self.findRecall(classifier)
bull_fmetric, bear_fmetric, neutral_fmetric = self.findFMetric(classifier)
accuracy = self.findAccuracy(classifier)
return self.train_set_size, self.test_set_size, accuracy, bull_precision, bear_precision, neutral_precision, bull_recall, bear_recall, neutral_recall, bull_fmetric, bear_fmetric, neutral_fmetric
def findSVMPerformance(self):
self.train_set_size, self.test_set_size, self.trainSet, self.testSet = self.findSet()
classifier = SklearnClassifier(LinearSVC())
classifier.train(self.trainSet)
bull_precision, bear_precision, neutral_precision = self.findPrecision(classifier)
bull_recall, bear_recall, neutral_recall = self.findRecall(classifier)
bull_fmetric, bear_fmetric, neutral_fmetric = self.findFMetric(classifier)
accuracy = self.findAccuracy(classifier)
return self.train_set_size, self.test_set_size, accuracy, bull_precision, bear_precision, neutral_precision, bull_recall, bear_recall, neutral_recall, bull_fmetric, bear_fmetric, neutral_fmetric
def findSet(self):
featuresets = pickle.load(open('data/featureSets.pickle','rb'))
random.shuffle(featuresets)
self.total_size = len(featuresets)
self.train_set_size = math.floor((3/4)*self.total_size)
self.test_set_size = self.total_size - self.train_set_size
        train_set, test_set = featuresets[:self.train_set_size], featuresets[self.train_set_size:]
self.featureList = []
self.trainSet = []
for line in train_set:
featureVector = line[0]
sentiment = line[1]
self.trainSet.append((dict([(word, True) for word in featureVector]), sentiment))
self.featureList = self.featureList + featureVector
self.testSet = []
for line in test_set:
featureVector = line[0]
sentiment = line[1]
self.testSet.append((dict([(word, (word in self.featureList)) for word in featureVector]), sentiment))
return self.train_set_size, self.test_set_size, self.trainSet, self.testSet
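# A minimal usage sketch (assumes data/featureSets.pickle exists and holds
# (featureVector, sentiment) pairs):
#   pf = PerformanceFinder()
#   results = pf.findNBPerformance()   # or findMEPerformance() / findSVMPerformance()
#   print(results)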
|
429601
|
import sys, requests
from PyQt5 import QtGui, uic, QtWidgets
(form_class, qtbase_class) = uic.loadUiType('Demo3.ui')
class MainWindow(qtbase_class, form_class):
def __init__(self):
super(MainWindow, self).__init__()
self.setupUi(self)
self.pushButton.clicked.connect(self._showpic)
def _showpic(self):
        url = 'http://i.meizitu.net/thumbs/2017/04/90448_18b47_236.jpg'  # image URL
        pic = requests.get(url).content  # download the raw image bytes
        pixmap = QtGui.QPixmap()  # create a new QPixmap
        pixmap.loadFromData(pic)  # load the image data into the pixmap
        self.label.setPixmap(pixmap)  # finally display it on the label
if __name__ == "__main__":
app = QtWidgets.QApplication(sys.argv)
ui = MainWindow()
ui.show()
sys.exit(app.exec_())
|
429616
|
import os
import tensorflow as tf
import model
from simulation_data import data_handler
import numpy as np
class trainer(object):
def __init__(self, epochs = 10, batch_size = 128, validation_split = 0.2, tune_model = True, L2NormConst = 0.001, left_and_right_images = False, left_right_offset = 0.2, root_path = '', test_root_path ='',stop_gradient_at_conv = False, test_left_and_right_images = False):
self.handler = data_handler(validation_split = validation_split, batch_size = batch_size, root_path = root_path, left_and_right_images = left_and_right_images, left_right_offset = left_right_offset, test_root_path = test_root_path, test_left_and_right_images = False)
self.validation_split = validation_split
self.LOGDIR = './save'
self.sess = tf.InteractiveSession()
self.L2NormConst = L2NormConst
self.train_vars = tf.trainable_variables()
        self.loss = tf.reduce_mean(tf.square(tf.subtract(model.y_, model.y))) + tf.add_n([tf.nn.l2_loss(v) for v in self.train_vars]) * L2NormConst
self.train_step = tf.train.AdamOptimizer(1e-4).minimize(self.loss)
self.epochs = epochs
self.batch_size = batch_size
        self.sess.run(tf.global_variables_initializer())
self.saver = tf.train.Saver()
if(tune_model):
self.saver.restore(self.sess, "save/model_trained_on_game.ckpt")
if(stop_gradient_at_conv):
model.h_fc1 = tf.stop_gradient(model.h_fc1)
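            # NOTE: reassigning model.h_fc1 here cannot affect self.loss or
            # self.train_step, which were already built above; to actually freeze
            # the conv layers, stop_gradient must be applied before the loss is constructed.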
def train(self):
# train over the dataset
for epoch in range(self.epochs):
batches_handled = 0
for X_train, y_train in self.handler.generate_train_batch():
self.train_step.run(feed_dict={model.x: X_train, model.y_: y_train, model.keep_prob: 0.8})
if not os.path.exists(self.LOGDIR):
os.makedirs(self.LOGDIR)
checkpoint_path = os.path.join(self.LOGDIR, "model_trained_on_game.ckpt")
filename = self.saver.save(self.sess, checkpoint_path)
batches_handled = batches_handled + 1
if(batches_handled>self.handler.num_train_batches()):
break
avg_train_loss = 0
batches_handled = 0
for X_train, y_train in self.handler.generate_train_batch():
avg_train_loss = (avg_train_loss*batches_handled + self.loss.eval(feed_dict={model.x: X_train, model.y_: y_train, model.keep_prob: 1.0}))/(batches_handled+1)
batches_handled = batches_handled + 1
if(batches_handled>self.handler.num_train_batches()):
break
avg_val_loss = 0
batches_handled = 0
for X_val, y_val in self.handler.generate_validation_batch():
avg_val_loss = (avg_val_loss*batches_handled + self.loss.eval(feed_dict={model.x: X_val, model.y_: y_val, model.keep_prob: 1.0}))/(batches_handled+1)
batches_handled = batches_handled + 1
if(batches_handled>self.handler.num_val_batches()):
break
print("Model saved in %s. Metrics::: Epoch: %d, Loss: %g, Validation_loss: %g" % (filename, epoch, avg_train_loss, avg_val_loss))
print("Run the command line:\n" \
"--> tensorboard --logdir=./logs " \
"\nThen open http://0.0.0.0:6006/ into your web browser")
def test(self):
avg_test_loss = 0
batches_tested = 0
for X_test, y_test, num_batches in self.handler.generate_test_batch():
avg_test_loss = (avg_test_loss*batches_tested + self.loss.eval(feed_dict={model.x: X_test, model.y_: y_test, model.keep_prob: 1.0}))/(batches_tested+1)
batches_tested = batches_tested + 1
if(batches_tested>num_batches):
break
print("Test_loss: %g" % (avg_test_loss))
def set_root_image_path(self,path):
self.handler.set_root_image_path(path)
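# A minimal usage sketch (paths are hypothetical; the directory layout must match
# what simulation_data.data_handler expects):
#   t = trainer(epochs=5, batch_size=64, tune_model=False,
#               root_path='data/train/', test_root_path='data/test/')
#   t.train()
#   t.test()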
|
429674
|
from pythonforandroid.recipe import CythonRecipe
class SpineCython(CythonRecipe):
version = '0.5.1'
url = 'https://github.com/tileworks/spine-cython/archive/{version}.zip'
name = 'spine'
depends = ['setuptools']
site_packages_name = 'spine'
call_hostpython_via_targetpython = False
recipe = SpineCython()
|
429705
|
import builtins
import sys
from unittest.mock import patch
import numpy as np
import pytest
builtin_import = builtins.__import__
def import_except_torch(name, *args, **kwargs):
if name == "torch":
raise ImportError
else:
return builtin_import(name, *args, **kwargs)
def test_torch_importerror():
with patch("builtins.__import__", import_except_torch):
# one can use fdiff and Fracdiff since torch is optional
import fracdiff
from fracdiff import fdiff
from fracdiff.sklearn import Fracdiff
a = np.random.randn(10, 100)
_ = fdiff(a, 0.5)
a = np.random.randn(10, 100)
_ = Fracdiff(0.5).fit(a).transform(a)
|
429732
|
import logging
from util.callbacks import callsback
from util.Events import event
import msn
from msn import Message, MSNTextMessage
from msn.p import Switchboard as Super
log = logging.getLogger('msn.p8.sb')
defcb = dict(trid=True, callback=sentinel)
class MSNP8Switchboard(Super):
events = Super.events | set((
'recv_text_msg',
'send_text_msg',
'typing_info',
))
def connect(self):
if self.socket is None:
log.info('Creating %s', self._socktype)
self.socket = self._socktype()
self.socket.bind('on_connect', self.on_conn_success)
self.socket.bind('on_close', self.leave)
self.socket.bind('on_conn_error', self.conn_failed)
self.socket.bind('on_message', self.on_message)
log.info('Bound events to %s', self.socket)
conn_args = self.socket.connect_args_for('SB', self._server)
self.socket._connect(*conn_args)
else:
log.critical('Connect called on a switchboard that already has a socket')
@callsback
def invite(self, bname, callback=None):
self._invite(bname, callback)
def conn_failed(self, sck, e = None):
log.error('Failed to connect!: %r, %r', sck, e)
self.event('transport_error', e or sck)
def on_conn_success(self, s):
log.info('Connection success. Calling authenticate.')
self.event('on_conn_success', self)
self.event('needs_auth')
#self.authenticate()
def authenticate(self, username):
if self._session is None:
log.info('Authenticating for new session.')
self.socket.send(Message('USR', username, self._cookie), **defcb)
else:
log.info('Authenticating for session in progress.')
self.socket.send(Message('ANS', username, self._cookie, self._session), **defcb)
def leave(self, sck=None):
# unbind events
log.debug('leaving %r', self)
if self.socket is not None:
self.socket._disconnect()
self.on_disconnect()
def on_disconnect(self):
del self.principals[:]
self.event('disconnect')
self.socket = None
self._session = None
#self._cookie = None
def connected(self):
return (self.socket is not None)
def recv_usr(self, msg):
ok, name, nick = msg.args
nick = nick.decode('url').decode('utf-8') or None
self.event('contact_alias', name, nick)
assert ok == 'OK'
self._session = self._cookie.split('.',1)[0]
self.on_authenticate()
@event
def on_authenticate(self):
log.info('Authenticated.')
def _invite(self, bname, callback):
self.socket.send(Message('CAL', bname), trid=True, callback=callback)
def recv_ack(self, msg):
log.debug('Got ack: %s', str(msg).strip())
def recv_ans(self, msg):
'''
ANS (ANSwer)
This command is sent by the server after we send our ANS to login.
@param socket: the socket the command came from
@param trid: trid assocated with this command
@param status: the status of our ANS....has to be OK or we would
have been disconnected already!
'''
status, = msg.args
assert status == 'OK'
self.on_authenticate()
def recv_msg(self, msg):
try:
getattr(self, 'recv_msg_%s' % msg.type, self.recv_msg_unknown)(msg)
        except Exception as e:
import traceback
traceback.print_exc()
log.error('Exception handling MSG! error = %r, msg = %r', e, msg)
def recv_msg_plain(self, msg):
'''
msg_plain(msg, src_account, src_display)
this is called when a msg comes in with type='text/plain'
@param socket: the socket it arrived from (better be self.socket!)
@param msg: the rfc822 object representing the MIME headers and such
@param src_account: the email address/passport this comes from
@param src_display: the display name of the buddy who sent it
@param *params: more stuff!
'''
name, nick = msg.args[:2]
nick = msn.util.url_decode(nick).decode('utf-8') or None
msg = MSNTextMessage.from_net(msg.payload)
self.event('contact_alias', name, nick)
self.event('recv_text_msg', name, msg)
def recv_msg_control(self, msg):
'''
msg_control(msg, src_account, src_display)
This is called when a message comes in with type='text/x-msmsgscontrol'
Generally, these are typing indicators.
@param msg: msnmessage
'''
name, nick = msg.args[:2]
nick = msn.util.url_decode(nick).decode('utf-8') or None
self.event('contact_alias', name, nick)
self.event('typing_info', name, bool(msg.payload.get('TypingUser', False)))
def recv_msg_notification(self, msg):
'''
msg_notification(msg, src_account, src_display)
No idea what these are. So, raise for now...
@param msg: msnmessage
'''
#TODO: find out what this is for.
name, nick = msg.args[:2]
self.event('contact_alias', name, nick)
@event
def on_buddy_join(self, name):
# if name in self._to_invite:
# self._to_invite.remove(name)
self.principals.append(name)
return name
def recv_cal(self, msg):
ringing, session = msg.args
# event: on_invite
def recv_iro (self, msg):
'''
IRO (In ROom)
This is sent to us when we connect to notify us of who is in the room.
@param socket: the socket the message arrived from
@param trid: the trid associated with this command
@param rooster: the 'rooster number' for this buddy
@param roostercount: the total number of 'roosters' to expect
@param passport: the passport name of the buddy (email address)
@param buddyname: the friendly name of the buddy
@param *args: more stuff!
'''
rooster, roostercount, name, nick= msg.args[:4]
rooster = int(rooster)
roostercount = int(roostercount)
nick = msn.util.url_decode(nick).decode('utf-8') or None
self.event('contact_alias', name,nick)
self.on_buddy_join(name)
def recv_joi (self, msg):
'''
JOI (Buddy JOIn)
A buddy is joining the conversation
@param socket: the socket the command came from
@param trid: trid assocated with this command
@param acct: the email addresss of the joining buddy
@param fn: the friendly name (display name) of the buddy
@param sessid: the session ID of this session
'''
name, nick = msg.args[:2]
nick = msn.util.url_decode(nick).decode('utf-8') or None
self.event('contact_alias', name, nick)
self.on_buddy_join(name)
def recv_bye (self, msg):
'''
BYE (BYE)
A buddy is leaving the conversation (saying bye!)
@param socket: the socket the command came from
@param trid: trid assocated with this command
@param buddy: the buddyname leaving
'''
notify = True
try:
name, = msg.args
except:
name, reason = msg.args
if int(reason) == 1:
self.on_buddy_timeout(name)
notify = False
self.on_buddy_leave(name, notify)
@event
def on_buddy_leave(self, name, notify=True):
if name in self.principals:
self.principals.remove(name)
return name, notify
@event
def on_buddy_timeout(self, name):
#self._to_invite.append(name)
return name
def recv_msg_unknown(self, msg):
log.error('Unknown message type! %r', msg)
def recv_msg_invite(self, msg):
'''
---
Exception handling MSG!
MSG <EMAIL> digsby%20oh%20threesus 502
MIME-Version: 1.0
Content-Type: text/x-msmsgsinvite; charset=UTF-8
Application-Name: an audio conversation
Application-GUID: {02D3C01F-BF30-4825-A83A-DE7AF41648AA}
Session-Protocol: SM1
Context-Data: Requested:SIP_A,;Capabilities:SIP_A,;
Invitation-Command: INVITE
Avm-Support: 7
Avm-Request: 2
Invitation-Cookie: 24443504
Session-ID: {EE086C37-D672-44C2-9AA9-57151CEB0BEF}
Conn-Type: IP-Restrict-NAT
Sip-Capability: 1
Public-IP: 172.16.31.10
Private-IP: 192.168.1.102
UPnP: FALSE
---
Exception handling MSG!
MSG <EMAIL> digsby%20oh%20threesus 317
MIME-Version: 1.0
Content-Type: text/x-msmsgsinvite; charset=UTF-8
Invitation-Command: CANCEL
Cancel-Code: TIMEOUT
Invitation-Cookie: 24443504
Session-ID: {EE086C37-D672-44C2-9AA9-57151CEB0BEF}
Conn-Type: IP-Restrict-NAT
Sip-Capability: 1
Public-IP: 172.16.31.10
Private-IP: 192.168.1.102
UPnP: FALSE
'''
return log.info('msg invite %s', msg)
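        # NOTE: the early return above disables invite handling; the code below is
        # unreachable (and references an undefined `name`).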
m = Message(msg.body())
c = m['Invitation-Cookie']
try:
a = self.activities[c]
except KeyError:
a = self.activities.setdefault(c, msn.slp.SLP(self, name, c))
a.incoming(m)
def recv_msg_datacast(self, msg):
log.info('Received datacast')
try:
name, nick = msg.args
nick = nick.decode('url').decode('utf-8')
body = msg.payload
id = int(msg.id)
if id == 1:
action_type = 'nudge'
action_text = None
elif id == 2:
action_type = 'wink'
action_text = None
elif id == 4:
action_type = 'custom'
action_text = msg.data
else:
return
#action_text = 'sent an unknown datacat'
self.event('recv_action', name, action_type, action_text)
#self.system_message('%s %s' % (b.alias, action_text))
        except Exception as e:
import traceback; traceback.print_exc()
raise e
def recv_msg_caps(self, msg):
log.info(msg.name + ' is using gaim/pidgin')
# def msg_p2p(self, msg, name, nick, len):
# msg = msg.body()
#
# h, b, f = msg[:48], \
# msg[48:-4],\
# msg[-4:]
#
# sess_id, base_id = struct.unpack('>II', h[:8])
#
# if not sess_id and base_id not in self.activities:
# self.activities[base_id] = msn.slp.SLP(self, name, base_id)
# self.activities[base_id].incoming(h,b,f)
#
@callsback
def send_text_message(self, body, callback):
        if not self.connected():
callback.error('connection lost')
raise Exception('connection lost')
if isinstance(body, unicode):
_body, body = body, body.encode('utf8')
cmd = msn.MSNCommands.MSG('N', payload = str(body))
def check_nak(sck, msg):
if msg.cmd == 'NAK':
msg.source_message = body
callback.error(msg)
else:
callback.success()
self.socket.send(cmd, trid=True, success=check_nak, error=lambda sck, emsg: callback.error(emsg))
#self.event('send_text_msg', body)
def send_typing_status(self, name, status):
        if not self.connected():
return
if status:
body = "MIME-Version: 1.0\r\n" \
"Content-Type: text/x-msmsgscontrol\r\n" \
"TypingUser: %s\r\n\r\n\r\n" % name
self.socket.send(Message('MSG', 'U', payload=body), **defcb)
|
429773
|
from typing import Callable, Dict, List, Set, Optional
from ctypes import c_int8 as i8, c_int16 as i16, c_int32 as i32, c_int64 as i64
from ctypes import c_uint8 as u8, c_uint16 as u16, c_uint32 as u32, c_uint64 as u64
import sys
if __name__ == "__main__":
print("Hello world!")
print("Hello", "world!")
|
429791
|
from apiclient.discovery import build
from apiclient.errors import HttpError
from oauth2client.tools import argparser
DEVELOPER_KEY = "<PASSWORD>KEYHERE!"
YOUTUBE_API_SERVICE_NAME = "youtube"
YOUTUBE_API_VERSION = "v3"
def youtube_search(q, max_results=50,order="relevance", token=None, location=None, location_radius=None):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
search_response = youtube.search().list(
q=q,
type="video",
pageToken=token,
order = order,
part="id,snippet",
maxResults=max_results,
location=location,
locationRadius=location_radius
).execute()
videos = []
for search_result in search_response.get("items", []):
if search_result["id"]["kind"] == "youtube#video":
videos.append(search_result)
    try:
        nexttok = search_response["nextPageToken"]
    except KeyError:
        nexttok = "last_page"
    return (nexttok, videos)
def geo_query(video_id):
youtube = build(YOUTUBE_API_SERVICE_NAME, YOUTUBE_API_VERSION,
developerKey=DEVELOPER_KEY)
video_response = youtube.videos().list(
id=video_id,
part='snippet, recordingDetails, statistics'
).execute()
return video_response
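# A minimal usage sketch (assumes a valid DEVELOPER_KEY; paginates with the
# returned token until the sentinel "last_page" comes back):
#   token, videos = youtube_search("surfing", max_results=25)
#   while token != "last_page":
#       token, more = youtube_search("surfing", max_results=25, token=token)
#       videos.extend(more)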
|
429800
|
import librosa
import numpy as np
import sys
# sys.path.append('../')
from tifresi.transforms import log_spectrogram, inv_log_spectrogram, log_mel_spectrogram, mel_spectrogram
__author__ = 'Andres'
def test_log_spectrogram():
x = np.random.rand(1024 * 1024).reshape([1024, 1024])
log_x = log_spectrogram(x, dynamic_range_dB=80)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x) < 1e-7)
def test_log_spectrogram_small_range():
x = np.random.rand(1024 * 1024).reshape([1024, 1024])
log_x = log_spectrogram(x, dynamic_range_dB=30)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x) < 0.08)
def test_log_mel_spectrogram():
x = np.random.rand(1024 * 513).reshape([513, 1024])
x_mel = mel_spectrogram(x)
log_x = log_mel_spectrogram(x, dynamic_range_dB=80)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x_mel) < 1e-7)
def test_log_mel_spectrogram_small_range():
x = np.random.rand(1024 * 513).reshape([513, 1024])
x_mel = mel_spectrogram(x)
log_x = log_mel_spectrogram(x, dynamic_range_dB=30)
inv_log_x = inv_log_spectrogram(log_x)
assert (np.linalg.norm(inv_log_x - x_mel) < 0.08)
def test_mel_spectrogram():
x = np.random.rand(256 * 1025).reshape([1025, 256])
sr = 28000
stft_channels = 2048
n_mels = 40
fmin = 40
fmax = 12000
mel_basis = librosa.filters.mel(sr=sr, n_fft=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax)
x_mel = mel_spectrogram(x, stft_channels=stft_channels, n_mels=n_mels, fmin=fmin, fmax=fmax, sr=sr)
x_test_mel = np.matmul(mel_basis, x)
assert (np.linalg.norm(x_test_mel - x_mel) < 1e-20)
if __name__ == "__main__":
test_log_spectrogram()
test_log_spectrogram_small_range()
test_log_mel_spectrogram()
test_log_mel_spectrogram_small_range()
test_mel_spectrogram()
|
429804
|
from cortex.plugins import ModelPlugin
from cortex.built_ins.models.utils import update_encoder_args, update_decoder_args, ms_ssim
import torch.nn.functional as F
class ImageEncoder(ModelPlugin):
def build(self,
dim_out=None,
encoder_type: str = 'convnet',
encoder_args=dict(fully_connected_layers=1028),
Encoder=None):
x_shape = self.get_dims('x', 'y', 'c')
Encoder_, encoder_args = update_encoder_args(
x_shape, model_type=encoder_type, encoder_args=encoder_args)
Encoder = Encoder or Encoder_
encoder = Encoder(x_shape, dim_out=dim_out, **encoder_args)
self.nets.encoder = encoder
def encode(self, inputs, **kwargs):
return self.nets.encoder(inputs, **kwargs)
def visualize(self, inputs, targets):
Z = self.encode(inputs)
if targets is not None:
targets = targets.data
self.add_scatter(Z.data, labels=targets, name='latent values')
class ImageDecoder(ModelPlugin):
def build(self,
dim_in=None,
decoder_type: str = 'convnet',
decoder_args=dict(output_nonlinearity='tanh'),
Decoder=None):
x_shape = self.get_dims('x', 'y', 'c')
Decoder_, decoder_args = update_decoder_args(
x_shape, model_type=decoder_type, decoder_args=decoder_args)
Decoder = Decoder or Decoder_
decoder = Decoder(x_shape, dim_in=dim_in, **decoder_args)
self.nets.decoder = decoder
def routine(self, inputs, Z, decoder_crit=F.mse_loss):
X = self.decode(Z)
self.losses.decoder = decoder_crit(X, inputs) / inputs.size(0)
msssim = ms_ssim(inputs, X)
self.results.ms_ssim = msssim.item()
def decode(self, Z):
return self.nets.decoder(Z)
def visualize(self, Z):
gen = self.decode(Z)
self.add_image(gen, name='generated')
|
429808
|
import matplotlib as mpl
mpl.use("Agg")
import matplotlib.pyplot as plt
import numpy as np
from fidimag.micro import Sim
from fidimag.common import CuboidMesh
from fidimag.micro import UniformExchange, Demag
from fidimag.micro import Zeeman, TimeZeeman
from fidimag.common.fileio import DataReader
mu0 = 4 * np.pi * 1e-7
def init_m(pos):
x = pos[0]
if x <= 2:
return (1, 0, 0)
elif x >= 4:
return (0, 0, 1)
else:
return (0, 1, 0)
def relax_system(mesh):
sim = Sim(mesh, name='relax')
sim.driver.set_tols(rtol=1e-10, atol=1e-10)
sim.driver.alpha = 0.5
sim.driver.gamma = 2.211e5
sim.Ms = 8.0e5
sim.do_precession = False
sim.set_m((1, 0.25, 0.1))
# sim.set_m(np.load('m0.npy'))
A = 1.3e-11
exch = UniformExchange(A=A)
sim.add(exch)
demag = Demag()
sim.add(demag)
sim.relax(dt=1e-13, stopping_dmdt=0.01, max_steps=5000,
save_m_steps=100, save_vtk_steps=50)
np.save('m0.npy', sim.spin)
def apply_field1(mesh):
sim = Sim(mesh, name='dyn')
sim.driver.set_tols(rtol=1e-10, atol=1e-10)
sim.driver.alpha = 0.02
sim.driver.gamma = 2.211e5
sim.Ms = 8.0e5
sim.set_m(np.load('m0.npy'))
A = 1.3e-11
exch = UniformExchange(A=A)
sim.add(exch)
demag = Demag()
sim.add(demag)
mT = 0.001 / mu0
print("Applied field = {}".format(mT))
zeeman = Zeeman([-24.6 * mT, 4.3 * mT, 0], name='H')
sim.add(zeeman, save_field=True)
ts = np.linspace(0, 1e-9, 201)
for t in ts:
sim.driver.run_until(t)
print('sim t=%g' % t)
def deal_plot():
data = DataReader('dyn.txt')
ts = data['time'] * 1e9
mx = data['m_x']
my = data['m_y']
mz = data['m_z']
data2 = np.loadtxt('oommf.txt')
ts2 = data2[:, 0] * 1e9
mx2 = data2[:, 1]
my2 = data2[:, 2]
mz2 = data2[:, 3]
plt.plot(ts, mx, '--', label='m_fidimag', dashes=(2, 2))
plt.plot(ts, my, '--', label='', dashes=(2, 2))
plt.plot(ts, mz, '--', label='', dashes=(2, 2))
plt.plot(ts2, mx2, '--', label='m_oommf')
plt.plot(ts2, my2, '--', label='')
plt.plot(ts2, mz2, '--', label='')
plt.title('std4')
plt.legend()
#plt.xlim([0, 0.012])
#plt.ylim([-5, 100])
plt.xlabel(r'Ts (ns)')
# plt.ylabel('Susceptibility')
plt.savefig('cmp.pdf')
# compute deviation of very last point against reference solution
# from the OOMMF simulation tool
dev_mx = abs(mx[-1] - mx2[-1])
dev_my = abs(my[-1] - my2[-1])
dev_mz = abs(mz[-1] - mz2[-1])
print("Deviations are {}, {} and {} in m_x, m_y and m_z.".format(
dev_mx, dev_my, dev_mz))
expected_devs = 1.8811609559e-06, 1.88438380672e-05, 1.05292548867e-06
print("Expected devs are {}, {} and {} in m_x, m_y and m_z.".format(
*expected_devs))
# and use as simple system test
assert dev_mx < 1.89e-06
assert dev_my < 1.89e-05
assert dev_mz < 1.06e-06
if __name__ == '__main__':
mesh = CuboidMesh(nx=200, ny=50, nz=1, dx=2.5, dy=2.5, dz=3, unit_length=1e-9)
relax_system(mesh)
apply_field1(mesh)
deal_plot()
|
429811
|
import json
from nose.tools import eq_,raises
from unittest.mock import patch
from schematics.exceptions import ConversionError, DataError
from moncli import *
from moncli import entities as en
from moncli.enums import ColumnType
from moncli.models import MondayModel
from moncli import types as t
def test_should_succeed_when_to_native_returns_a_phone_when_passed_a_phonevalue_value_with_api_data_to_phone_type():
# Arrange
id = "phone"
title = 'phone 1'
column_type = ColumnType.phone
value = json.dumps({'phone': '+15083658469', 'countryShortName': 'US'})
column_value = cv.create_column_value(column_type,id=id,title=title,value=value)
# Act
phone_type = t.PhoneType(title=title)
value = phone_type.to_native(column_value)
# Assert
eq_(value.phone,'+15083658469')
eq_(value.code, 'US')
def test_should_succeed_when_to_native_returns_a_phone_when_passed_a_valid_import_dict_value_to_phone_type():
# Arrange
phone_type = t.PhoneType(title='phone 1')
# Act
value = phone_type.to_native({'phone': '+15083658469', 'code': 'US'})
# Assert
eq_(value.phone,'+15083658469')
eq_(value.code, 'US')
def test_should_succeed_when_to_native_returns_a_none_when_passed_a_none_to_phone_type():
# Arrange
phone_type = t.PhoneType(title='phone 1')
# Act
value = phone_type.to_native(None)
# Assert
eq_(value,None)
@raises(ConversionError)
def test_should_succeed_when_to_native_raises_a_conversionerror_when_passed_an_invalid_import_dict_value_to_phone_type():
# Arrange
phone_type = t.PhoneType(title='phone 1')
# Act
phone_type.to_native({'invalid': 'phone'})
def test_should_return_phone_value_when_passed_simple_string_for_to_native_to_phone_type():
# Arrange
phone_type = t.PhoneType(title='phone 1')
# Act
value = phone_type.to_native('+15083658469 US')
# Assert
eq_(value.phone,'+15083658469')
eq_(value.code, 'US')
def test_should_succeed_when_to_primitive_returns_empty_dict_when_passed_a_none_to_phone_type():
# Arrange
phone_type = t.PhoneType(title='phone 1')
# Act
value = phone_type.to_primitive(None)
# Assert
eq_(value,{})
def test_should_succeed_when_to_primitive_returns_export_dict_when_passed_a_phone_value__to_phone_type():
# Arrange
phone_type = t.PhoneType(title='phone 1')
# Act
value = phone_type.to_primitive(cv.Phone(phone = '+15083658469', code= 'US'))
# Assert
eq_(value['phone'],'+15083658469')
eq_(value['countryShortName'], 'US')
def test_should_succeed_when_to_primitive_returns_empty_dict_when_passed_a_phone_with_phone_or_code_as_none_to_phone_type():
# Arrange
phone_type = t.PhoneType(title='phone 1')
# Act
value = phone_type.to_primitive(cv.Phone(phone = '+15083658469', code= None))
# Assert
eq_(value,{})
@raises(DataError)
def test_should_succeed_when_validate_country_code_raises_a_validation_error_when_passed_an_invalid_phonecode_value_to_phone_type():
# Arrange
class TestModel(MondayModel):
value = t.PhoneType(id='phone')
test = TestModel(id='item_id', name='Item Name')
# Act
test.value = '1234 zz'
test.validate()
|
429816
|
from typing import Optional, List, Callable, Tuple
import torch
import random
import sys
import torch.nn.functional as F
import torch.nn.parallel as parallel
import torch.multiprocessing as mp
import torch.distributed as dist
import unittest
import torchshard as ts
from testing import IdentityLayer, IdentityLayer2D, IdentityLayer3D
from testing import CausalSelfAttention, ParallelCausalSelfAttention
from testing import MLP, ParallelMLP
from testing import dist_worker, assertEqual, set_seed
from testing import loss_reduction_type, threshold
class TestLayers(unittest.TestCase):
@staticmethod
def run_test_parallel_self_attention(local_rank: int) -> None:
seed = 1235
batch_size = 10
sequence_length = 12
vocab_size = 12
hidden_size = 8
num_att_heads_per_partition = 6
hidden_size_per_att_head = 8
dropout_prob = 0.0 # has to be zero
tensor_model_parallel_size = ts.distributed.get_group_size()
world_size = ts.distributed.get_world_size()
set_seed(seed + local_rank)
num_att_heads = num_att_heads_per_partition * world_size
hidden_size = hidden_size_per_att_head * num_att_heads
attention_mask = torch.randn(batch_size, 1, 1, sequence_length).cuda(local_rank)
x = torch.randn(batch_size, sequence_length, hidden_size).cuda(local_rank)
y = torch.randint(10, (batch_size,)).cuda(local_rank)
raw_model = ParallelCausalSelfAttention(hidden_size, num_att_heads, dropout_prob).cuda(local_rank)
raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
ts.register_ddp_parameters_to_ignore(raw_model)
ddp_model = parallel.DistributedDataParallel(
CausalSelfAttention(hidden_size, num_att_heads, dropout_prob).cuda(local_rank),
device_ids=[local_rank]
)
raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
# align weight & bias
for (on, op), (pn, pp) in zip(ddp_model.named_parameters(), raw_model.named_parameters()):
parallel_dim = ts.get_parallel_dim(pp)
            if parallel_dim is None:
pp.data.copy_(op.data)
elif parallel_dim == 0:
if len(pp.shape) == 2:
pp.data.copy_(ts.distributed.scatter(op.data, dim=-1))
else:
pp.data.copy_(op.data)
elif parallel_dim in [1, -1]:
pp.data.copy_(ts.distributed.scatter(op.data, dim=0))
# switch mode
raw_model.train()
ddp_model.train()
attention_mask = ts.distributed.gather(attention_mask, dim=0)
x = ts.distributed.gather(x, dim=0)
y = ts.distributed.gather(y, dim=0)
y1 = raw_model(x, attention_mask)
y2 = ddp_model(x, attention_mask)
# 1st assert: forward outputs
assertEqual(y1, y2, threshold=threshold)
raw_loss = raw_criterion(y1.view(batch_size*tensor_model_parallel_size, -1), y)
ddp_loss = ddp_criterion(y2.view(batch_size*tensor_model_parallel_size, -1), y)
if loss_reduction_type == 'none':
raw_loss = raw_loss.sum()
ddp_loss = ddp_loss.sum()
# 2nd assert: forward losses
assertEqual(raw_loss, ddp_loss, threshold=threshold)
# 3rd assert: backward gradients
raw_loss.backward()
ddp_loss.backward()
for (on, op), (pn, pp) in zip(ddp_model.named_parameters(), raw_model.named_parameters()):
parallel_dim = ts.get_parallel_dim(pp)
            if parallel_dim is None:
assertEqual(pp.grad, op.grad, threshold=threshold)
elif parallel_dim == 0:
if len(pp.shape) == 2:
pp_grad = ts.distributed.reduce(pp.grad)
op_grad = ts.distributed.reduce(ts.distributed.scatter(op.grad, dim=1))
assertEqual(pp_grad, op_grad, threshold=threshold)
else:
assertEqual(pp.grad, op.grad, threshold=threshold)
elif parallel_dim in [1, -1]:
pp_grad = ts.distributed.reduce(pp.grad)
op_grad = ts.distributed.reduce(ts.distributed.scatter(op.grad, dim=0))
assertEqual(pp_grad, op_grad, threshold=threshold)
@staticmethod
def run_test_parallel_mlp(local_rank: int) -> None:
# settings
seed = 12345
batch_size = 10
sequence_length = 12
vocab_size = 12
hidden_size = 8
dropout_prob = 0.0 # has to be zero
tensor_model_parallel_size = ts.distributed.get_group_size()
world_size = ts.distributed.get_world_size()
# test parallel_dim = None
set_seed(seed + local_rank)
loss_weight = torch.randn(batch_size, sequence_length, hidden_size).cuda(local_rank)
attention_mask = torch.randn(batch_size, 1, 1, sequence_length).cuda(local_rank)
input_data = torch.randn(batch_size, sequence_length, hidden_size).cuda(local_rank)
dist.broadcast(loss_weight, src=0)
dist.broadcast(attention_mask, src=0)
dist.broadcast(input_data, src=0)
# build attention module
original_model = MLP(hidden_size, dropout_prob).cuda(local_rank)
parallel_model = MLP(hidden_size, dropout_prob).cuda(local_rank)
# we convert nn.Linear() to ts.nn.ParallelLinear()
cnt = 0
for n, m in parallel_model.named_modules():
if isinstance(m, torch.nn.Linear) and cnt == 0: # first linear layer
parallel_model.mlp[0] = ts.nn.ParallelLinear.convert_parallel_linear(m, dim=1)
cnt += 1
continue
if isinstance(m, torch.nn.Linear) and cnt == 1: # second linear layer
parallel_model.mlp[2] = ts.nn.RegisterParallelDim(dim=-1)
parallel_model.mlp[3] = ts.nn.ParallelLinear.convert_parallel_linear(m, dim=0)
original_model = parallel.DistributedDataParallel(original_model, device_ids=[local_rank])
parallel_model = parallel.DistributedDataParallel(parallel_model, device_ids=[local_rank])
ts.register_ddp_parameters_to_ignore(parallel_model)
# align weight & bias
for (on, op), (pn, pp) in zip(original_model.named_parameters(), parallel_model.named_parameters()):
parallel_dim = ts.get_parallel_dim(pp)
            if parallel_dim is None:
pp.data.copy_(op.data)
elif parallel_dim == 0:
if len(pp.shape) == 2:
pp.data.copy_(ts.distributed.scatter(op.data, dim=-1))
else:
pp.data.copy_(op.data)
elif parallel_dim in [1, -1]:
pp.data.copy_(ts.distributed.scatter(op.data, dim=0))
# switch mode
original_model.train()
parallel_model.train()
# assert: weight and bias
for (on, op), (pn, pp) in zip(original_model.named_parameters(), parallel_model.named_parameters()):
parallel_dim = ts.get_parallel_dim(pp)
            if parallel_dim is None:
assertEqual(op, pp, threshold=threshold)
elif parallel_dim == 0:
if len(pp.shape) == 2:
assertEqual(pp, ts.distributed.scatter(op.data, dim=-1), threshold=threshold)
else:
assertEqual(pp, op, threshold=threshold)
elif parallel_dim in [1, -1]:
assertEqual(pp, ts.distributed.scatter(op.data, dim=0), threshold=threshold)
# 1st assert: forward outputs
parallel_output = parallel_model(input_data)
original_output = original_model(input_data)
assertEqual(original_output, parallel_output, threshold=threshold)
# 2nd assert: forward losses
original_loss = torch.mul(original_output, loss_weight)
parallel_loss = torch.mul(parallel_output, loss_weight)
original_loss.sum().backward()
parallel_loss.sum().backward()
assertEqual(original_loss, parallel_loss, threshold=threshold)
# 3rd assert: backward gradients
for (on, op), (pn, pp) in zip(original_model.named_parameters(), parallel_model.named_parameters()):
parallel_dim = ts.get_parallel_dim(pp)
            if parallel_dim is None:
assertEqual(pp.grad, op.grad, threshold=threshold)
elif parallel_dim == 0:
if len(pp.shape) == 2:
pp_grad = ts.distributed.reduce(pp.grad)
op_grad = ts.distributed.reduce(ts.distributed.scatter(op.grad, dim=1))
assertEqual(pp_grad, op_grad, threshold=threshold)
else:
assertEqual(pp.grad, op.grad, threshold=threshold)
elif parallel_dim in [1, -1]:
pp_grad = ts.distributed.reduce(pp.grad)
op_grad = ts.distributed.reduce(ts.distributed.scatter(op.grad, dim=0))
assertEqual(pp_grad, op_grad, threshold=threshold)
@staticmethod
def run_test_parallel_transformer_block(local_rank: int) -> None:
seed = 123
batch_size = 10
sequence_length = 12
vocab_size = 12
hidden_size = 8
num_att_heads_per_partition = 6
hidden_size_per_att_head = 8
dropout_prob = 0.0 # has to be zero
tensor_model_parallel_size = ts.distributed.get_group_size()
world_size = ts.distributed.get_world_size()
set_seed(seed + local_rank)
num_att_heads = num_att_heads_per_partition * world_size
hidden_size = hidden_size_per_att_head * num_att_heads
attention_mask = torch.randn(batch_size, 1, 1, sequence_length).cuda(local_rank)
x = torch.randn(batch_size, sequence_length, hidden_size).cuda(local_rank)
y = torch.randint(10, (batch_size,)).cuda(local_rank)
raw_model = torch.nn.Sequential(
ParallelCausalSelfAttention(hidden_size, num_att_heads, dropout_prob),
ParallelMLP(hidden_size, dropout_prob)
).cuda(local_rank)
raw_model = parallel.DistributedDataParallel(raw_model, device_ids=[local_rank])
ts.register_ddp_parameters_to_ignore(raw_model)
ddp_model = parallel.DistributedDataParallel(
torch.nn.Sequential(
CausalSelfAttention(hidden_size, num_att_heads, dropout_prob),
MLP(hidden_size, dropout_prob)
).cuda(local_rank),
device_ids=[local_rank]
)
raw_criterion = ts.nn.ParallelCrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
ddp_criterion = torch.nn.CrossEntropyLoss(reduction=loss_reduction_type).cuda(local_rank)
# align weight & bias
for (on, op), (pn, pp) in zip(ddp_model.named_parameters(), raw_model.named_parameters()):
parallel_dim = ts.get_parallel_dim(pp)
            if parallel_dim is None:
pp.data.copy_(op.data)
elif parallel_dim == 0:
if len(pp.shape) == 2:
pp.data.copy_(ts.distributed.scatter(op.data, dim=-1))
else:
pp.data.copy_(op.data)
elif parallel_dim in [1, -1]:
pp.data.copy_(ts.distributed.scatter(op.data, dim=0))
# switch mode
raw_model.train()
ddp_model.train()
attention_mask = ts.distributed.gather(attention_mask, dim=0)
x = ts.distributed.gather(x, dim=0)
y = ts.distributed.gather(y, dim=0)
y1 = raw_model.module[0](x, attention_mask)
y1 = raw_model.module[1](y1)
y2 = ddp_model.module[0](x, attention_mask)
y2 = ddp_model.module[1](y2)
# 1st assert: forward outputs
assertEqual(y1, y2, threshold=threshold)
raw_loss = raw_criterion(y1.view(batch_size*tensor_model_parallel_size, -1), y)
ddp_loss = ddp_criterion(y2.view(batch_size*tensor_model_parallel_size, -1), y)
if loss_reduction_type == 'none':
raw_loss = raw_loss.sum()
ddp_loss = ddp_loss.sum()
# 2nd assert: forward losses
assertEqual(raw_loss, ddp_loss, threshold=threshold)
        # run the backward passes
raw_loss.backward()
ddp_loss.backward()
# 3rd assert: backward gradients
for (on, op), (pn, pp) in zip(ddp_model.named_parameters(), raw_model.named_parameters()):
parallel_dim = ts.get_parallel_dim(pp)
            if parallel_dim is None:
assertEqual(pp.grad, op.grad, threshold=threshold)
elif parallel_dim == 0:
if len(pp.shape) == 2:
pp_grad = ts.distributed.reduce(pp.grad)
op_grad = ts.distributed.reduce(ts.distributed.scatter(op.grad, dim=-1))
assertEqual(pp_grad, op_grad, threshold=threshold)
else:
assertEqual(pp.grad, op.grad, threshold=threshold)
elif parallel_dim in [1, -1]:
                assertEqual(pp.grad, ts.distributed.scatter(op.grad, dim=0), threshold=threshold)
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_self_attention(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_parallel_self_attention, ngpus),
nprocs=ngpus
)
ts.distributed.destroy_process_group()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_mlp(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_parallel_mlp, ngpus),
nprocs=ngpus
)
ts.distributed.destroy_process_group()
@unittest.skipIf(not torch.cuda.is_available(), 'CUDA is not available')
def test_parallel_transformer_block(self):
ngpus = torch.cuda.device_count()
mp.spawn(
dist_worker,
args=(self.run_test_parallel_transformer_block, ngpus),
nprocs=ngpus
)
ts.distributed.destroy_process_group()
if __name__ == '__main__':
torch.backends.cudnn.deterministic = True
torch.backends.cudnn.benchmark = False
unittest.main()
|
429822
|
from .pokemon.species import Species
national = [
Species.MissingNo_00,
Species.Bulbasaur,
Species.Ivysaur,
Species.Venusaur,
Species.Charmander,
Species.Charmeleon,
Species.Charizard,
Species.Squirtle,
Species.Wartortle,
Species.Blastoise,
Species.Caterpie,
Species.Metapod,
Species.Butterfree,
Species.Weedle,
Species.Kakuna,
Species.Beedrill,
Species.Pidgey,
Species.Pidgeotto,
Species.Pidgeot,
Species.Rattata,
Species.Raticate,
Species.Spearow,
Species.Fearow,
Species.Ekans,
Species.Arbok,
Species.Pikachu,
Species.Raichu,
Species.Sandshrew,
Species.Sandslash,
Species.Nidoran_f,
Species.Nidorina,
Species.Nidoqueen,
Species.Nidoran_m,
Species.Nidorino,
Species.Nidoking,
Species.Clefairy,
Species.Clefable,
Species.Vulpix,
Species.Ninetales,
Species.Jigglypuff,
Species.Wigglytuff,
Species.Zubat,
Species.Golbat,
Species.Oddish,
Species.Gloom,
Species.Vileplume,
Species.Paras,
Species.Parasect,
Species.Venonat,
Species.Venomoth,
Species.Diglett,
Species.Dugtrio,
Species.Meowth,
Species.Persian,
Species.Psyduck,
Species.Golduck,
Species.Mankey,
Species.Primeape,
Species.Growlithe,
Species.Arcanine,
Species.Poliwag,
Species.Poliwhirl,
Species.Poliwrath,
Species.Abra,
Species.Kadabra,
Species.Alakazam,
Species.Machop,
Species.Machoke,
Species.Machamp,
Species.Bellsprout,
Species.Weepinbell,
Species.Victreebel,
Species.Tentacool,
Species.Tentacruel,
Species.Geodude,
Species.Graveler,
Species.Golem,
Species.Ponyta,
Species.Rapidash,
Species.Slowpoke,
Species.Slowbro,
Species.Magnemite,
Species.Magneton,
Species.Farfetchd,
Species.Doduo,
Species.Dodrio,
Species.Seel,
Species.Dewgong,
Species.Grimer,
Species.Muk,
Species.Shellder,
Species.Cloyster,
Species.Gastly,
Species.Haunter,
Species.Gengar,
Species.Onix,
Species.Drowzee,
Species.Hypno,
Species.Krabby,
Species.Kingler,
Species.Voltorb,
Species.Electrode,
Species.Exeggcute,
Species.Exeggutor,
Species.Cubone,
Species.Marowak,
Species.Hitmonlee,
Species.Hitmonchan,
Species.Lickitung,
Species.Koffing,
Species.Weezing,
Species.Rhyhorn,
Species.Rhydon,
Species.Chansey,
Species.Tangela,
Species.Kangaskhan,
Species.Horsea,
Species.Seadra,
Species.Goldeen,
Species.Seaking,
Species.Staryu,
Species.Starmie,
Species.Mr_Mime,
Species.Scyther,
Species.Jynx,
Species.Electabuzz,
Species.Magmar,
Species.Pinsir,
Species.Tauros,
Species.Magikarp,
Species.Gyarados,
Species.Lapras,
Species.Ditto,
Species.Eevee,
Species.Vaporeon,
Species.Jolteon,
Species.Flareon,
Species.Porygon,
Species.Omanyte,
Species.Omastar,
Species.Kabuto,
Species.Kabutops,
Species.Aerodactyl,
Species.Snorlax,
Species.Articuno,
Species.Zapdos,
Species.Moltres,
Species.Dratini,
Species.Dragonair,
Species.Dragonite,
Species.Mewtwo,
Species.Mew
]
|
429824
|
import torch.nn as nn
import torch
import math
import torch.nn.functional as F
def get_activation_fn(name):
"""Returns a callable activation function from torch."""
if name in (None, 'linear'):
return lambda x: x
elif name in ('sigmoid', 'tanh'):
return getattr(torch, name)
else:
return getattr(F, name)
class FF(nn.Module):
"""A smart feedforward layer with activation support.
Arguments:
in_features(int): Input dimensionality.
out_features(int): Output dimensionality.
bias(bool, optional): Enable/disable bias for the layer. (Default: True)
bias_zero(bool, optional): Start with a 0-vector bias. (Default: True)
activ(str, optional): A string like 'tanh' or 'relu' to define the
non-linearity type. `None` or `'linear'` is a linear layer (default).
"""
def __init__(self, in_features, out_features, bias=True,
bias_zero=True, activ=None):
super().__init__()
self.in_features = in_features
self.out_features = out_features
self.use_bias = bias
self.bias_zero = bias_zero
self.activ_type = activ
if self.activ_type in (None, 'linear'):
self.activ_type = 'linear'
self.weight = nn.Parameter(torch.Tensor(out_features, in_features))
self.activ = get_activation_fn(activ)
if self.use_bias:
self.bias = nn.Parameter(torch.Tensor(out_features))
else:
self.register_parameter('bias', None)
self.reset_parameters()
def reset_parameters(self):
stdv = 1. / math.sqrt(self.weight.size(1))
self.weight.data.uniform_(-stdv, stdv)
if self.use_bias:
if self.bias_zero:
self.bias.data.zero_()
else:
self.bias.data.uniform_(-stdv, stdv)
def forward(self, input):
return self.activ(F.linear(input, self.weight, self.bias))
def __repr__(self):
repr_ = self.__class__.__name__ + '(' \
+ 'in_features=' + str(self.in_features) \
+ ', out_features=' + str(self.out_features) \
+ ', activ=' + str(self.activ_type) \
+ ', bias=' + str(self.use_bias)
if self.use_bias:
repr_ += ', bias_zero=' + str(self.bias_zero)
return repr_ + ')'
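# A minimal usage sketch (shapes are illustrative):
#   layer = FF(128, 64, activ='tanh')
#   out = layer(torch.randn(8, 128))   # -> shape (8, 64), tanh applied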
|
429886
|
import argparse
import math
from datetime import datetime
import h5py
import numpy as np
import tensorflow as tf
import socket
import importlib
import os
import sys
import provider
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
ROOT_DIR = BASE_DIR
sys.path.append(BASE_DIR) # model
sys.path.append(os.path.join(ROOT_DIR, 'models'))
sys.path.append(os.path.join(ROOT_DIR, 'utils'))
import pickle
from sklearn.neighbors import NearestNeighbors
from sklearn.neighbors import KDTree
from pdb import set_trace
from sklearn.manifold import TSNE
from matplotlib import pyplot as plt
np.random.seed(0)
parser = argparse.ArgumentParser()
parser.add_argument('--gpu', type=int, default=0, help='GPU to use [default: GPU 0]')
parser.add_argument('--model', default='pointnet_autoencoder', help='Model name [default: model]')
parser.add_argument('--num_point', type=int, default=1024, help='Point Number [default: 1024]')
parser.add_argument('--batch_size', type=int, default=32, help='Batch Size during training [default: 32]')
parser.add_argument('--num_neighbors', type=int, default=3, help='Number of neighbors to retrieve')
parser.add_argument('--model_path', default='log_chair_autoencoder/model.ckpt', help='model checkpoint file path [default: log/model.ckpt]')
parser.add_argument('--dump_dir', default='dump_chair_autoencoder/', help='dump folder path [dump]')
parser.add_argument('--category', default='chair', help='Which class')
parser.add_argument('--output_dim', type=int, default=256, help='with or without autoencoder for triplet')
parser.add_argument('--testrank', action='store_true', help='if testing with a smaller database for each model')
FLAGS = parser.parse_args()
BATCH_SIZE = FLAGS.batch_size
NUM_POINT = FLAGS.num_point
GPU_INDEX = FLAGS.gpu
OBJ_CAT = FLAGS.category
MODEL_PATH = FLAGS.model_path
MODEL = importlib.import_module(FLAGS.model) # import network module
DUMP_DIR = FLAGS.dump_dir
if not os.path.exists(DUMP_DIR): os.mkdir(DUMP_DIR)
LOG_FOUT = open(os.path.join(DUMP_DIR, 'log_evaluate.txt'), 'w')
LOG_FOUT.write(str(FLAGS)+'\n')
HOSTNAME = socket.gethostname()
TRAIN_FILE = '../candidate_generation/train_'+OBJ_CAT+'.h5'
TEST_FILE = '../candidate_generation/test_'+OBJ_CAT+'.h5'
TRAIN_DATA = provider.load_h5(TRAIN_FILE)
TEST_DATA = provider.load_h5(TEST_FILE)
NUM_NEIGHBORS = FLAGS.num_neighbors
OUTPUT_DIM = FLAGS.output_dim
TEST_RANK = FLAGS.testrank
if (TEST_RANK):
pickle_in = open('../candidate_generation/candidates_test_'+OBJ_CAT+'_testrank.pickle',"rb")
database_candidate_idxs = pickle.load(pickle_in)
pickle_in.close()
NUM_CANDIDATES = len(database_candidate_idxs[0])
def log_string(out_str):
LOG_FOUT.write(out_str+'\n')
LOG_FOUT.flush()
print(out_str)
def evaluate():
with tf.Graph().as_default():
with tf.device('/gpu:'+str(GPU_INDEX)):
if (FLAGS.model == "pointnet_autoencoder"):
pointclouds_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
print ("--- Get model and loss")
# Get model and loss
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl)
embeddings = end_points['embedding']
elif FLAGS.model == "pointnet_autoencoder_dimreduc":
pointclouds_pl = MODEL.placeholder_inputs(BATCH_SIZE, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
pred, end_points = MODEL.get_model(pointclouds_pl, is_training_pl, output_dim = OUTPUT_DIM)
embeddings = end_points['embedding']
else:
pointclouds_pl= MODEL.placeholder_inputs(BATCH_SIZE, 1, NUM_POINT)
is_training_pl = tf.placeholder(tf.bool, shape=())
with tf.variable_scope("query_triplets") as scope:
if FLAGS.model == "pointnet_triplet":
out_vecs, end_points= MODEL.get_model(pointclouds_pl, is_training_pl, autoencoder=False, output_dim=OUTPUT_DIM)
else:
out_vecs, _, end_points= MODEL.get_model(pointclouds_pl, is_training_pl, autoencoder=True)
out_vecs = tf.squeeze(out_vecs)
embeddings = out_vecs
pred = None #dummy
# Add ops to save and restore all the variables.
saver = tf.train.Saver()
# Create a session
config = tf.ConfigProto()
config.gpu_options.allow_growth = True
config.allow_soft_placement = True
config.log_device_placement = False
sess = tf.Session(config=config)
# Restore variables from disk.
saver.restore(sess, MODEL_PATH)
log_string("Model restored.")
ops = {'pointclouds_pl': pointclouds_pl,
'is_training_pl': is_training_pl,
'pred': pred,
'embeddings': embeddings,
'end_points': end_points}
eval_one_epoch(sess, ops)
def eval_one_epoch(sess, ops):
""" ops: dict mapping from string to tf ops """
is_training = False
current_data = provider.get_current_data(TEST_DATA, NUM_POINT, shuffle=False)
num_batches = current_data.shape[0]//BATCH_SIZE
log_string(str(datetime.now()))
log_string(str(current_data.shape[0]))
loss_sum = 0
all_embeddings = []
for batch_idx in range(num_batches):
start_idx = batch_idx * BATCH_SIZE
end_idx = (batch_idx+1) * BATCH_SIZE
batch_data = current_data[start_idx:end_idx, :, :]
if (FLAGS.model != "pointnet_autoencoder" and FLAGS.model != "pointnet_autoencoder_dimreduc"):
batch_data= np.expand_dims(batch_data,axis=1)
feed_dict = {ops['pointclouds_pl']: batch_data,
ops['is_training_pl']: is_training}
embeddings = sess.run([ops['embeddings']], feed_dict=feed_dict)
all_embeddings.append(embeddings)
all_embeddings = np.array(all_embeddings)
all_embeddings = all_embeddings.reshape(-1,all_embeddings.shape[-1])
#leftovers
for i in range((current_data.shape[0]//BATCH_SIZE*BATCH_SIZE), current_data.shape[0]):
pc = current_data[i,:,:]
pc= np.expand_dims(pc, axis=0)
fake_pcs = np.zeros((BATCH_SIZE-1, NUM_POINT,3))
q=np.vstack((pc,fake_pcs))
if (FLAGS.model != "pointnet_autoencoder" and FLAGS.model != "pointnet_autoencoder_dimreduc"):
q= np.expand_dims(q,axis=1)
feed_dict = {ops['pointclouds_pl']: q,
ops['is_training_pl']: is_training}
embeddings = sess.run([ops['embeddings']], feed_dict=feed_dict)
embeddings = embeddings[0][0]
embeddings=np.squeeze(embeddings)
all_embeddings = np.vstack((all_embeddings, embeddings))
log_string(str(all_embeddings.shape[0]))
# tsne = TSNE(n_components=2, random_state=0)
# embeddings_2d = tsne.fit_transform(all_embeddings)
# for i in range(len(embeddings)):
# plt.scatter(embeddings_2d[i, 0], embeddings_2d[i, 1])
# plt.savefig(os.path.join(DUMP_DIR, 'tsne.png'))
####For Retrieval
if not TEST_RANK:
database_kdtree = KDTree(all_embeddings)
neighbor_list = []
        # query k+1 neighbors because each embedding is returned as its own nearest neighbor
        distances, nbr_idx = database_kdtree.query(all_embeddings, k=NUM_NEIGHBORS+1)
nbr_idx = np.squeeze(nbr_idx)
for i in range(all_embeddings.shape[0]):
i_nbr_idx = np.delete(nbr_idx[i], np.where(nbr_idx[i]==i))
i_nbr_idx = i_nbr_idx[:NUM_NEIGHBORS]
neighbor_list.append(i_nbr_idx)
###For smaller subset of the database
else:
neighbor_list = []
for i in range(all_embeddings.shape[0]):
database_candidates_idx_i = database_candidate_idxs[i]
database_candidates_embeddings = all_embeddings[np.array(database_candidates_idx_i)]
database_kdtree_i = KDTree(database_candidates_embeddings)
distances, nbr_idx = database_kdtree_i.query(np.array([all_embeddings[i]]), k=NUM_NEIGHBORS)
            # NOTE: nbr_idx indexes into database_candidates_idx_i; map through it
            # if global database indices are needed downstream.
            neighbor_list.append(nbr_idx[0])
pickle_out = open(os.path.join(DUMP_DIR, "neighbors.pickle"),"wb")
pickle.dump(neighbor_list, pickle_out)
pickle_out.close()
log_string("Done.")
return
if __name__ == "__main__":
log_string('pid: %s'%(str(os.getpid())))
evaluate()
LOG_FOUT.close()
|
429990
|
import torch
import torch.nn as nn
import torch.nn.functional as F
import math
def weight_init(layers):
for layer in layers:
torch.nn.init.kaiming_normal_(layer.weight, nonlinearity='relu')
class NoisyLinear(nn.Linear):
# Noisy Linear Layer for independent Gaussian Noise
def __init__(self, in_features, out_features, sigma_init=0.017, bias=True):
super(NoisyLinear, self).__init__(in_features, out_features, bias=bias)
# make the sigmas trainable:
self.sigma_weight = nn.Parameter(torch.full((out_features, in_features), sigma_init))
# not trainable tensor for the nn.Module
self.register_buffer("epsilon_weight", torch.zeros(out_features, in_features))
# extra parameter for the bias and register buffer for the bias parameter
if bias:
self.sigma_bias = nn.Parameter(torch.full((out_features,), sigma_init))
self.register_buffer("epsilon_bias", torch.zeros(out_features))
# reset parameter as initialization of the layer
self.reset_parameter()
    def reset_parameter(self):
        """
        initialize the weight (and bias, if present) of the layer
        """
        std = math.sqrt(3/self.in_features)
        self.weight.data.uniform_(-std, std)
        if self.bias is not None:
            self.bias.data.uniform_(-std, std)
def forward(self, input):
# sample random noise in sigma weight buffer and bias buffer
self.epsilon_weight.normal_()
bias = self.bias
if bias is not None:
self.epsilon_bias.normal_()
bias = bias + self.sigma_bias * self.epsilon_bias
return F.linear(input, self.weight + self.sigma_weight * self.epsilon_weight, bias)
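# Minimal usage sketch (illustrative, not part of the original module): NoisyLinear
# resamples its Gaussian noise buffers on every forward pass, so repeated calls on
# the same input give different outputs.
#
#   layer = NoisyLinear(4, 2)
#   x = torch.randn(8, 4)
#   y1, y2 = layer(x), layer(x)   # differ because epsilon_weight/epsilon_bias are redrawn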
class DDQN(nn.Module):
def __init__(self, state_size, action_size,layer_size, n_step, seed, layer_type="ff"):
super(DDQN, self).__init__()
self.seed = torch.manual_seed(seed)
self.input_shape = state_size
self.action_size = action_size
self.state_dim = len(state_size)
if self.state_dim == 3:
self.cnn_1 = nn.Conv2d(4, out_channels=32, kernel_size=8, stride=4)
self.cnn_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
self.cnn_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
weight_init([self.cnn_1, self.cnn_2, self.cnn_3])
if layer_type == "noisy":
self.ff_1 = NoisyLinear(self.calc_input_layer(), layer_size)
self.ff_2 = NoisyLinear(layer_size, action_size)
else:
self.ff_1 = nn.Linear(self.calc_input_layer(), layer_size)
self.ff_2 = nn.Linear(layer_size, action_size)
weight_init([self.ff_1])
elif self.state_dim == 1:
if layer_type == "noisy":
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1 = NoisyLinear(layer_size, layer_size)
self.ff_2 = NoisyLinear(layer_size, action_size)
else:
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1 = nn.Linear(layer_size, layer_size)
self.ff_2 = nn.Linear(layer_size, action_size)
weight_init([self.head_1, self.ff_1])
else:
print("Unknown input dimension!")
def calc_input_layer(self):
x = torch.zeros(self.input_shape).unsqueeze(0)
x = self.cnn_1(x)
x = self.cnn_2(x)
x = self.cnn_3(x)
return x.flatten().shape[0]
def forward(self, input):
"""
"""
if self.state_dim == 3:
x = torch.relu(self.cnn_1(input))
x = torch.relu(self.cnn_2(x))
x = torch.relu(self.cnn_3(x))
x = x.view(input.size(0), -1)
else:
x = torch.relu(self.head_1(input))
x = torch.relu(self.ff_1(x))
out = self.ff_2(x)
return out
class Dueling_QNetwork(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size,layer_size, n_step, seed, layer_type="ff"):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Dueling_QNetwork, self).__init__()
self.seed = torch.manual_seed(seed)
self.input_shape = state_size
self.state_dim = len(self.input_shape)
self.action_size = action_size
if self.state_dim == 3:
self.cnn_1 = nn.Conv2d(4, out_channels=32, kernel_size=8, stride=4)
self.cnn_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
self.cnn_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
weight_init([self.cnn_1, self.cnn_2, self.cnn_3])
if layer_type == "noisy":
self.ff_1_A = NoisyLinear(self.calc_input_layer(), layer_size)
self.ff_1_V = NoisyLinear(self.calc_input_layer(), layer_size)
self.advantage = NoisyLinear(layer_size,action_size)
self.value = NoisyLinear(layer_size,1)
weight_init([self.ff_1_A, self.ff_1_V])
else:
self.ff_1_A = nn.Linear(self.calc_input_layer(), layer_size)
self.ff_1_V = nn.Linear(self.calc_input_layer(), layer_size)
self.advantage = nn.Linear(layer_size,action_size)
self.value = nn.Linear(layer_size,1)
weight_init([self.ff_1_A, self.ff_1_V])
elif self.state_dim == 1:
if layer_type == "noisy":
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1_A = NoisyLinear(layer_size, layer_size)
self.ff_1_V = NoisyLinear(layer_size, layer_size)
self.advantage = NoisyLinear(layer_size,action_size)
self.value = NoisyLinear(layer_size,1)
weight_init([self.head_1,self.ff_1_A, self.ff_1_V])
else:
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1_A = nn.Linear(layer_size, layer_size)
self.ff_1_V = nn.Linear(layer_size, layer_size)
self.advantage = nn.Linear(layer_size,action_size)
self.value = nn.Linear(layer_size,1)
weight_init([self.head_1,self.ff_1_A, self.ff_1_V])
else:
print("Unknown input dimension!")
def calc_input_layer(self):
x = torch.zeros(self.input_shape).unsqueeze(0)
x = self.cnn_1(x)
x = self.cnn_2(x)
x = self.cnn_3(x)
return x.flatten().shape[0]
def forward(self, input):
"""
"""
if self.state_dim == 3:
x = torch.relu(self.cnn_1(input))
x = torch.relu(self.cnn_2(x))
x = torch.relu(self.cnn_3(x))
x = x.view(input.size(0), -1)
x_A = torch.relu(self.ff_1_A(x))
x_V = torch.relu(self.ff_1_V(x))
else:
x = torch.relu(self.head_1(input))
x_A = torch.relu(self.ff_1_A(x))
x_V = torch.relu(self.ff_1_V(x))
value = self.value(x_V)
value = value.expand(input.size(0), self.action_size)
advantage = self.advantage(x_A)
        Q = value + advantage - advantage.mean(dim=1, keepdim=True)
return Q
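# Hedged usage sketch (illustrative): with a 1-D state the dueling head splits into a
# scalar value stream and a per-action advantage stream, recombined as Q = V + A - mean(A).
#
#   net = Dueling_QNetwork(state_size=(8,), action_size=4, layer_size=64, n_step=1, seed=0)
#   q = net(torch.randn(32, 8))   # shape (32, 4)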
class Dueling_C51Network(nn.Module):
"""Actor (Policy) Model."""
def __init__(self, state_size, action_size,layer_size, n_step, seed, layer_type="ff", N_ATOMS=51, VMAX=10, VMIN=-10):
"""Initialize parameters and build model.
Params
======
state_size (int): Dimension of each state
action_size (int): Dimension of each action
seed (int): Random seed
fc1_units (int): Number of nodes in first hidden layer
fc2_units (int): Number of nodes in second hidden layer
"""
super(Dueling_C51Network, self).__init__()
self.seed = torch.manual_seed(seed)
self.input_shape = state_size
self.state_dim = len(self.input_shape)
self.action_size = action_size
self.N_ATOMS = N_ATOMS
self.VMAX = VMAX
self.VMIN = VMIN
self.DZ = (VMAX-VMIN) / (N_ATOMS - 1)
if self.state_dim == 3:
self.cnn_1 = nn.Conv2d(4, out_channels=32, kernel_size=8, stride=4)
self.cnn_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
self.cnn_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
weight_init([self.cnn_1, self.cnn_2, self.cnn_3])
if layer_type == "noisy":
self.ff_1_A = NoisyLinear(self.calc_input_layer(), layer_size)
self.ff_1_V = NoisyLinear(self.calc_input_layer(), layer_size)
self.advantage = NoisyLinear(layer_size,action_size*N_ATOMS)
self.value = NoisyLinear(layer_size,N_ATOMS)
weight_init([self.ff_1_A, self.ff_1_V])
else:
self.ff_1_A = nn.Linear(self.calc_input_layer(), layer_size)
self.ff_1_V = nn.Linear(self.calc_input_layer(), layer_size)
self.advantage = nn.Linear(layer_size,action_size*N_ATOMS)
self.value = nn.Linear(layer_size,N_ATOMS)
weight_init([self.ff_1_A, self.ff_1_V])
elif self.state_dim == 1:
if layer_type == "noisy":
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1_A = NoisyLinear(layer_size, layer_size)
self.ff_1_V = NoisyLinear(layer_size, layer_size)
self.advantage = NoisyLinear(layer_size,action_size*N_ATOMS)
self.value = NoisyLinear(layer_size,N_ATOMS)
weight_init([self.head_1,self.ff_1_A, self.ff_1_V])
else:
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1_A = nn.Linear(layer_size, layer_size)
self.ff_1_V = nn.Linear(layer_size, layer_size)
self.advantage = nn.Linear(layer_size,action_size*N_ATOMS)
self.value = nn.Linear(layer_size,N_ATOMS)
weight_init([self.head_1,self.ff_1_A, self.ff_1_V])
else:
print("Unknown input dimension!")
self.register_buffer("supports", torch.arange(VMIN, VMAX+self.DZ, self.DZ)) # basic value vector - shape n_atoms stepsize dz
self.softmax = nn.Softmax(dim = 1)
def calc_input_layer(self):
x = torch.zeros(self.input_shape).unsqueeze(0)
x = self.cnn_1(x)
x = self.cnn_2(x)
x = self.cnn_3(x)
return x.flatten().shape[0]
def forward(self, input):
batch_size = input.size()[0]
if self.state_dim == 3:
x = torch.relu(self.cnn_1(input))
x = torch.relu(self.cnn_2(x))
x = torch.relu(self.cnn_3(x))
x = x.view(input.size(0), -1)
x_A = torch.relu(self.ff_1_A(x))
x_V = torch.relu(self.ff_1_V(x))
else:
x = torch.relu(self.head_1(input))
x_A = torch.relu(self.ff_1_A(x))
x_V = torch.relu(self.ff_1_V(x))
value = self.value(x_V).view(batch_size,1,self.N_ATOMS)
advantage = self.advantage(x_A).view(batch_size,-1, self.N_ATOMS)
q_distr = value + advantage - advantage.mean(dim = 1, keepdim = True)
prob = self.softmax(q_distr.view(-1, self.N_ATOMS)).view(-1, self.action_size, self.N_ATOMS)
return prob
    def act(self,state):
        prob = self.forward(state).data.cpu()
        # weight each support atom by its probability - shape: (batch_size, action_size, N_ATOMS)
        expected_value = prob * self.supports.cpu()
        # sum over atoms to get expected Q-values - shape: (batch_size, action_size)
        actions = expected_value.sum(2)
        return actions
class DDQN_C51(nn.Module):
def __init__(self, state_size, action_size,layer_size, n_step, seed, layer_type="ff", N_ATOMS=51, VMAX=10, VMIN=-10):
super(DDQN_C51, self).__init__()
self.seed = torch.manual_seed(seed)
self.input_shape = state_size
self.action_size = action_size
self.state_dim = len(state_size)
self.N_ATOMS = N_ATOMS
self.VMAX = VMAX
self.VMIN = VMIN
self.DZ = (VMAX-VMIN) / (N_ATOMS - 1)
if self.state_dim == 3:
self.cnn_1 = nn.Conv2d(4, out_channels=32, kernel_size=8, stride=4)
self.cnn_2 = nn.Conv2d(in_channels=32, out_channels=64, kernel_size=4, stride=2)
self.cnn_3 = nn.Conv2d(in_channels=64, out_channels=64, kernel_size=3, stride=1)
weight_init([self.cnn_1, self.cnn_2, self.cnn_3])
if layer_type == "noisy":
self.ff_1 = NoisyLinear(self.calc_input_layer(), layer_size)
self.ff_2 = NoisyLinear(layer_size, action_size*N_ATOMS)
else:
self.ff_1 = nn.Linear(self.calc_input_layer(), layer_size)
self.ff_2 = nn.Linear(layer_size, action_size*N_ATOMS)
weight_init([self.ff_1])
elif self.state_dim == 1:
if layer_type == "noisy":
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1 = NoisyLinear(layer_size, layer_size)
self.ff_2 = NoisyLinear(layer_size, action_size*N_ATOMS)
else:
self.head_1 = nn.Linear(self.input_shape[0], layer_size)
self.ff_1 = nn.Linear(layer_size, layer_size)
self.ff_2 = nn.Linear(layer_size, action_size*N_ATOMS)
weight_init([self.head_1, self.ff_1])
else:
print("Unknown input dimension!")
self.register_buffer("supports", torch.arange(VMIN, VMAX+self.DZ, self.DZ)) # basic value vector - shape n_atoms stepsize dz
self.softmax = nn.Softmax(dim = 1)
def calc_input_layer(self):
x = torch.zeros(self.input_shape).unsqueeze(0)
x = self.cnn_1(x)
x = self.cnn_2(x)
x = self.cnn_3(x)
return x.flatten().shape[0]
def forward(self, input):
batch_size = input.size()[0]
if self.state_dim == 3:
x = torch.relu(self.cnn_1(input))
x = torch.relu(self.cnn_2(x))
x = torch.relu(self.cnn_3(x))
x = x.view(input.size(0), -1)
x = torch.relu(self.ff_1(x))
else:
x = torch.relu(self.head_1(input))
x = torch.relu(self.ff_1(x))
q_distr = self.ff_2(x)
prob = self.softmax(q_distr.view(-1, self.N_ATOMS)).view(-1, self.action_size, self.N_ATOMS)
return prob
def act(self,state):
prob = self.forward(state).data.cpu()
# create value distribution for each action - shape: (batch_size, action_space, 51)
        expected_value = prob * self.supports.cpu()
# sum up the prob*values for the action dimension - shape: (batch_size, action_space)
actions = expected_value.sum(2)
return actions
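# Hedged usage sketch (illustrative): the distributional head returns a probability
# mass over N_ATOMS support points per action; act() collapses it to expected Q-values.
#
#   net = DDQN_C51(state_size=(8,), action_size=4, layer_size=64, n_step=1, seed=0)
#   probs = net(torch.randn(32, 8))                 # (32, 4, 51), sums to 1 over atoms
#   greedy = net.act(torch.randn(32, 8)).argmax(1)  # (32,) greedy action indices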
|
430082
|
from collections import Counter
import pytest
from satosa.attribute_mapping import AttributeMapper
class TestAttributeMapper:
def test_nested_attribute_to_internal(self):
mapping = {
"attributes": {
"address": {
"openid": ["address.formatted"],
},
},
}
data = {
"address": {
"formatted": ["100 Universal City Plaza, Hollywood CA 91608, USA"]
}
}
internal_repr = AttributeMapper(mapping).to_internal("openid", data)
assert internal_repr["address"] == data["address"]["formatted"]
def test_deeply_nested_attribute_to_internal(self):
mapping = {
"attributes": {
"address": {
"openid": ["address.formatted.text.value"],
},
},
}
data = {
"address": {
"formatted": {
"text": {
"value": ["100 Universal City Plaza, Hollywood CA 91608, USA"]
}
}
}
}
internal_repr = AttributeMapper(mapping).to_internal("openid", data)
assert internal_repr["address"] == data["address"]["formatted"]["text"]["value"]
def test_mapping_from_nested_attribute(self):
mapping = {
"attributes": {
"address": {
"openid": ["address.formatted"],
"saml": ["postaladdress"]
},
},
}
data = {
"address": {
"formatted": ["100 Universal City Plaza, Hollywood CA 91608, USA"]
}
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("openid", data)
external_repr = converter.from_internal("saml", internal_repr)
assert external_repr["postaladdress"] == data["address"]["formatted"]
def test_mapping_from_deeply_nested_attribute(self):
mapping = {
"attributes": {
"address": {
"openid": ["address.formatted.text.value"],
"saml": ["postaladdress"]
},
},
}
data = {
"address": {
"formatted": {
"text": {
"value": ["100 Universal City Plaza, Hollywood CA 91608, USA"]
}
}
}
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("openid", data)
external_repr = converter.from_internal("saml", internal_repr)
assert external_repr["postaladdress"] == data["address"]["formatted"]["text"]["value"]
def test_mapping_to_nested_attribute(self):
mapping = {
"attributes": {
"address": {
"openid": ["address.formatted"],
"saml": ["postaladdress"]
},
},
}
data = {
"postaladdress": ["100 Universal City Plaza, Hollywood CA 91608, USA"]
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("saml", data)
external_repr = converter.from_internal("openid", internal_repr)
assert external_repr["address"]["formatted"] == data["postaladdress"]
def test_mapping_to_deeply_nested_attribute(self):
mapping = {
"attributes": {
"address": {
"openid": ["address.formatted.text.value"],
"saml": ["postaladdress"]
},
},
}
data = {
"postaladdress": ["100 Universal City Plaza, Hollywood CA 91608, USA"]
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("saml", data)
external_repr = converter.from_internal("openid", internal_repr)
assert external_repr["address"]["formatted"]["text"]["value"] == data["postaladdress"]
def test_multiple_source_attribute_values(self):
mapping = {
"attributes": {
"mail": {
"saml": ["mail", "emailAddress", "email"]
},
},
}
data = {
"mail": ["<EMAIL>"],
"email": ["<EMAIL>"],
"emailAddress": ["<EMAIL>"],
}
expected = Counter(["<EMAIL>", "<EMAIL>", "<EMAIL>"])
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("saml", data)
assert Counter(internal_repr["mail"]) == expected
external_repr = converter.from_internal("saml", internal_repr)
assert Counter(external_repr[mapping["attributes"]["mail"]["saml"][0]]) == expected
def test_to_internal_filter(self):
mapping = {
"attributes": {
"mail": {
"p1": ["email"],
},
"identifier": {
"p1": ["uid"],
},
},
}
converter = AttributeMapper(mapping)
filter = converter.to_internal_filter("p1", ["uid", "email"])
assert Counter(filter) == Counter(["mail", "identifier"])
def test_to_internal_with_missing_attribute_value(self):
mapping = {
"attributes": {
"mail": {
"p1": ["emailaddress"],
},
}
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("p1", {})
assert not internal_repr
def test_map_one_source_attribute_to_multiple_internal_attributes(self):
mapping = {
"attributes": {
"mail": {
"p1": ["email"],
},
"identifier": {
"p1": ["email"],
},
},
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("p1", {"email": ["<EMAIL>"]})
assert internal_repr == {"mail": ["<EMAIL>"], "identifier": ["<EMAIL>"]}
def test_to_internal_profile_missing_attribute_mapping(self):
mapping = {
"attributes": {
"mail": {
"foo": ["email"],
},
"id": {
"foo": ["id"],
"bar": ["uid"],
}
},
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("bar", {"email": ["<EMAIL>"], "uid": ["uid"]})
assert "mail" not in internal_repr # no mapping for the 'mail' attribute in the 'bar' profile
assert internal_repr["id"] == ["uid"]
def test_to_internal_filter_profile_missing_attribute_mapping(self):
mapping = {
"attributes": {
"mail": {
"foo": ["email"],
},
"id": {
"foo": ["id"],
"bar": ["uid"],
}
},
}
converter = AttributeMapper(mapping)
filter = converter.to_internal_filter("bar", ["email", "uid"])
assert filter == ["id"] # mail should not included since its missing in 'bar' profile
def test_to_internal_with_unknown_attribute_profile(self):
mapping = {
"attributes": {
"mail": {
"foo": ["email"],
},
}
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("bar", {"email": ["<EMAIL>"]})
assert internal_repr == {}
def test_to_internal_filter_with_unknown_profile(self):
mapping = {
"attributes": {
"mail": {
"foo": ["email"],
}
}
}
converter = AttributeMapper(mapping)
filter = converter.to_internal_filter("bar", ["email"])
assert filter == []
def test_from_internal_with_unknown_profile(self):
mapping = {
"attributes": {
"mail": {
"foo": ["email"],
},
},
}
converter = AttributeMapper(mapping)
external_repr = converter.from_internal("bar", {"mail": "bob"})
assert external_repr == {}
def test_simple_template_mapping(self):
mapping = {
"attributes": {
"last_name": {
"p1": ["sn"],
"p2": ["sn"]
},
"first_name": {
"p1": ["givenName"],
"p2": ["givenName"]
},
"name": {
"p2": ["cn"]
}
},
"template_attributes": {
"name": {
"p2": ["${first_name[0]} ${last_name[0]}"]
}
}
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("p2", {"givenName": ["Valfrid"], "sn": ["Lindeman"]})
assert "name" in internal_repr
assert len(internal_repr["name"]) == 1
assert internal_repr["name"][0] == "<NAME>"
external_repr = converter.from_internal("p2", internal_repr)
assert external_repr["cn"][0] == "<NAME>"
def test_scoped_template_mapping(self):
mapping = {
"attributes": {
"unscoped_affiliation": {
"p1": ["eduPersonAffiliation"]
},
"uid": {
"p1": ["eduPersonPrincipalName"],
},
"affiliation": {
"p1": ["eduPersonScopedAffiliation"]
}
},
"template_attributes": {
"affiliation": {
"p1": ["${unscoped_affiliation[0]}@${uid[0] | scope}"]
}
}
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("p1", {
"eduPersonAffiliation": ["student"],
"eduPersonPrincipalName": ["<EMAIL>"]})
assert "affiliation" in internal_repr
assert len(internal_repr["affiliation"]) == 1
assert internal_repr["affiliation"][0] == "<EMAIL>"
def test_template_attribute_overrides_existing_attribute(self):
mapping = {
"attributes": {
"last_name": {
"p1": ["sn"],
},
"first_name": {
"p1": ["givenName"],
},
"name": {
"p1": ["cn"]
}
},
"template_attributes": {
"name": {
"p1": ["${first_name[0]} ${last_name[0]}"]
}
}
}
converter = AttributeMapper(mapping)
data = {"sn": ["Surname"],
"givenName": ["Given"],
"cn": ["Common Name"]}
internal_repr = converter.to_internal("p1", data)
external_repr = converter.from_internal("p1", internal_repr)
assert len(internal_repr["name"]) == 1
assert internal_repr["name"][0] == "Given Surname"
assert external_repr["cn"][0] == "Given Surname"
def test_template_attribute_preserves_existing_attribute_if_template_cant_be_rendered(self):
mapping = {
"attributes": {
"last_name": {
"p1": ["sn"],
},
"first_name": {
"p1": ["givenName"],
},
"name": {
"p1": ["cn"]
}
},
"template_attributes": {
"name": {
"p1": ["${unknown[0]} ${last_name[0]}"]
}
}
}
converter = AttributeMapper(mapping)
data = {"sn": ["Surname"],
"givenName": ["Given"],
"cn": ["Common Name"]}
internal_repr = converter.to_internal("p1", data)
assert len(internal_repr["name"]) == 1
assert internal_repr["name"][0] == "Common Name"
    def test_template_attribute_with_multiple_templates_tries_them_all(self):
mapping = {
"attributes": {
"last_name": {
"p1": ["sn"],
},
"first_name": {
"p1": ["givenName"],
},
"name": {
"p1": ["cn"]
}
},
"template_attributes": {
"name": {
"p1": ["${first_name[0]} ${last_name[0]}", "${unknown[0]} ${unknown[1]}",
"${first_name[1]} ${last_name[1]}", "${foo} ${bar}"]
}
}
}
converter = AttributeMapper(mapping)
data = {"sn": ["Surname1", "Surname2"],
"givenName": ["Given1", "Given2"],
"cn": ["Common Name"]}
internal_repr = converter.to_internal("p1", data)
assert len(internal_repr["name"]) == 2
assert internal_repr["name"][0] == "Given1 Surname1"
assert internal_repr["name"][1] == "Given2 Surname2"
def test_template_attribute_fail_does_not_insert_None_attribute_value(self):
mapping = {
"attributes": {
"last_name": {
"p1": ["sn"],
},
"first_name": {
"p1": ["givenName"],
},
"name": {
"p1": ["cn"]
}
},
"template_attributes": {
"name": {
"p1": ["${first_name[0]} ${last_name[0]}"]
}
}
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("p1", {})
assert len(internal_repr) == 0
@pytest.mark.parametrize("attribute_value", [
{"email": "<EMAIL>"},
{"email": ["<EMAIL>"]}
])
def test_to_internal_same_attribute_value_from_list_and_single_value(self, attribute_value):
mapping = {
"attributes": {
"mail": {
"foo": ["email"],
},
},
}
converter = AttributeMapper(mapping)
internal_repr = converter.to_internal("foo", attribute_value)
assert internal_repr["mail"] == ["<EMAIL>"]
|
430083
|
from string import Template
t = Template('$name is the $job of $company')
s = t.substitute(name='<NAME>', job='CEO', company='Apple Inc.')
print(s)
# dictionary as substitute argument
d = {"name": "<NAME>", "job": "CEO", "company": "Apple Inc."}
s = t.substitute(**d)
print(s)
# TypeError: substitute() got multiple values for keyword argument 'name'
# s = t.substitute(**d, name='<NAME>')
s = t.safe_substitute(name='<NAME>', job='CEO')
print(s)
print('Template String =', t.template)
# escaping $ sign
t = Template('$$ is called $name')
s = t.substitute(name='Dollar')
print(s)
# ${identifier} braces delimit the placeholder name when it is followed by other identifier characters
t = Template('$noun adjective is ${noun}ing')
s = t.substitute(noun='Test')
print(s)
|
430084
|
from django.conf.urls import url
from django.urls import path
from django.conf import settings
from django.conf.urls.static import static
from main.views import FileView
from . import views
urlpatterns = [
url(r'^$', views.index),
    # anchor every regex with `$` so e.g. `^add_user` does not also match longer paths
    url(r'^add_user$', views.addUser),
    url(r'^save_user$', views.saveUser),
    url(r'^view_users$', views.viewUsers),
    url(r'^success$', views.success),
    url(r'^add_citizen$', views.addCitizen),
    url(r'^save_citizen$', views.saveCitizen),
    url(r'^view_citizens$', views.viewCitizens),
    path('wanted_citizen/<int:citizen_id>/',views.wantedCitizen,name='wanted_citizen'),
    path('free_citizen/<int:citizen_id>/',views.freeCitizen,name='free_citizen'),
    url(r'^login$', views.login),
    url(r'^logout$', views.logout_view),
    url(r'^detectImage$', views.detectImage),
    url(r'^detectWithWebcam$', views.detectWithWebcam),
    url(r'^upload$', FileView.as_view(), name='file-upload'),
    url(r'^spotted_criminals$', views.spottedCriminals),
    path('thief_location/<int:thief_id>/',views.viewThiefLocation,name='thief_location'),
    path('found_thief/<int:thief_id>/', views.foundThief, name='found_thief'),
    url(r'^reports$', views.viewReports),
]
if settings.DEBUG:
urlpatterns += static(settings.MEDIA_URL, document_root=settings.MEDIA_ROOT)
|
430118
|
from unittest import mock
from django.core import mail
from rest_framework import test
from waldur_core.structure.models import ProjectRole
from waldur_rancher import enums, models, tasks, utils
from waldur_rancher.tests import factories, fixtures
from waldur_rancher.tests.base import override_rancher_settings
class UserSyncTest(test.APITransactionTestCase):
def setUp(self):
super(UserSyncTest, self).setUp()
self.fixture = fixtures.RancherFixture()
self.fixture.admin
self.fixture.manager
self.fixture.owner
@mock.patch('waldur_rancher.utils.RancherBackend')
def test_create_user(self, mock_backend_class):
utils.SyncUser.run()
self.assertEqual(mock_backend_class().create_user.call_count, 3)
self.assertEqual(models.RancherUser.objects.all().count(), 3)
@mock.patch('waldur_rancher.utils.RancherBackend')
@override_rancher_settings(DISABLE_AUTOMANAGEMENT_OF_USERS=True)
def test_disable_users_automanagement(self, mock_backend_class):
utils.SyncUser.run()
self.assertEqual(mock_backend_class().create_user.call_count, 0)
self.assertEqual(models.RancherUser.objects.all().count(), 0)
@mock.patch('waldur_rancher.utils.RancherBackend')
def test_delete_user(self, mock_backend_class):
utils.SyncUser.run()
self.fixture.project.remove_user(self.fixture.admin)
utils.SyncUser.run()
self.assertEqual(mock_backend_class().block_user.call_count, 1)
@mock.patch('waldur_rancher.utils.RancherBackend')
def test_update_user(self, mock_backend_class):
utils.SyncUser.run()
self.fixture.project.add_user(self.fixture.admin, ProjectRole.MANAGER)
utils.SyncUser.run()
self.assertEqual(mock_backend_class().delete_cluster_role.call_count, 1)
self.assertEqual(mock_backend_class().create_cluster_user_role.call_count, 4)
@mock.patch('waldur_rancher.utils.RancherBackend.client')
@mock.patch('waldur_rancher.handlers.tasks')
    def test_notification(self, mock_tasks, mock_client):
mock_client.create_user.return_value = {'id': 'ID'}
mock_client.create_cluster_user_role.return_value = {'id': 'ID'}
utils.SyncUser.run()
self.assertEqual(models.RancherUser.objects.all().count(), 3)
        self.assertEqual(mock_tasks.notify_create_user.delay.call_count, 3)
def test_notification_message(self):
rancher_user = factories.RancherUserFactory()
password = 'password'
        url = 'http://example.com'
tasks.notify_create_user(rancher_user.id, password, url)
self.assertEqual(len(mail.outbox), 1)
self.assertEqual(mail.outbox[0].to, [rancher_user.user.email])
self.assertTrue(url in mail.outbox[0].body)
@mock.patch('waldur_rancher.utils.RancherBackend')
def test_create_project_role(self, mock_backend_class):
project = factories.ProjectFactory()
utils.SyncUser.run()
rancher_user = models.RancherUser.objects.first()
rancher_user.backend_id = 'backend_id'
rancher_user.save()
mock_backend_class().client.get_projects_roles.return_value = [
{
'projectId': project.backend_id,
'roleTemplateId': enums.ProjectRoleId.project_owner,
'id': 'project_role_id',
'userId': 'backend_id',
}
]
utils.SyncUser.run()
rancher_user.refresh_from_db()
self.assertEqual(rancher_user.rancheruserprojectlink_set.count(), 1)
self.assertEqual(
rancher_user.rancheruserprojectlink_set.first().backend_id,
'project_role_id',
)
|
430133
|
import socket, struct, datetime, os, time
BLOCK_SIZE = 256
SAMPLE_RATE = 44100.0
OSCS = 64
MAX_QUEUE = 400
[SINE, PULSE, SAW, TRIANGLE, NOISE, KS, PCM, ALGO, PARTIAL, PARTIALS, OFF] = range(11)
TARGET_AMP, TARGET_DUTY, TARGET_FREQ, TARGET_FILTER_FREQ, TARGET_RESONANCE, TARGET_FEEDBACK, TARGET_LINEAR = (1, 2, 4, 8, 16, 32, 64)
FILTER_NONE, FILTER_LPF, FILTER_BPF, FILTER_HPF = range(4)
ALLES_LATENCY_MS = 1000
UDP_PORT = 9294
sock = 0
_local_ip = None  # remembered by connect() so disconnect() can drop multicast membership
"""
A bunch of useful presets
"""
def preset(which,osc=0, **kwargs):
# Reset the osc first
reset(osc=osc)
if(which==0): # simple note
send(osc=osc, wave=SINE, bp0="10,1,250,0.7,500,0", bp0_target=TARGET_AMP, **kwargs)
if(which==1): # filter bass
send(osc=osc, filter_freq=2500, resonance=5, wave=SAW, filter_type=FILTER_LPF, bp0="100,0.5,25,0", bp0_target=TARGET_AMP+TARGET_FILTER_FREQ, **kwargs)
# TODO -- this is a good one to test the whistle on the bps...
if(which==2): # long sine pad to test ADSR
send(osc=osc, wave=SINE, bp0="0,0,500,1,1000,0.25,750,0", bp0_target=TARGET_AMP, **kwargs)
if(which==3): # amp LFO example
reset(osc=osc+1)
send(osc=osc+1, wave=SINE, vel=0.50, freq=1.5, **kwargs)
send(osc=osc, wave=PULSE, bp0="150,1,250,0.25,250,0", bp0_target=TARGET_AMP, mod_target=TARGET_AMP, mod_source=osc+1, **kwargs)
if(which==4): # pitch LFO going up
reset(osc=osc+1)
send(osc=osc+1, wave=SINE, vel=0.50, freq=0.25, **kwargs)
send(osc=osc, wave=PULSE, bp0="150,1,400,0,0,0", bp0_target=TARGET_AMP, mod_target=TARGET_FREQ, mod_source=osc+1, **kwargs)
if(which==5): # bass drum
# Uses a 0.25Hz sine wave at 0.5 phase (going down) to modify frequency of another sine wave
reset(osc=osc+1)
send(osc=osc+1, wave=SINE, vel=0.50, freq=0.25, phase=0.5, **kwargs)
send(osc=osc, wave=SINE, vel=0, bp0="500,0,0,0", bp0_target=TARGET_AMP, mod_target=TARGET_FREQ, mod_source=osc+1, **kwargs)
if(which==6): # noise snare
send(osc=osc, wave=NOISE, vel=0, bp0="250,0,0,0", bp0_target=TARGET_AMP, **kwargs)
if(which==7): # closed hat
send(osc=osc, wave=NOISE, vel=0, bp0="25,1,75,0,0,0", bp0_target=TARGET_AMP, **kwargs)
if(which==8): # closed hat from PCM
send(osc=osc, wave=PCM, vel=0, patch=0, freq=0, **kwargs)
if(which==9): # cowbell from PCM
send(osc=osc, wave=PCM, vel=0, patch=10, freq=0, **kwargs)
if(which==10): # high cowbell from PCM
send(osc=osc, wave=PCM, vel=0, patch=10, note=70, **kwargs)
if(which==11): # snare from PCM
send(osc=osc, wave=PCM, vel=0, patch=5, freq=0, **kwargs)
if(which==12): # FM bass
send(osc=osc, wave=ALGO, vel=0, patch=21, **kwargs)
if(which==13): # Pcm bass drum
send(osc=osc, wave=PCM, vel=0, patch=1, freq=0, **kwargs)
if(which==14): # filtered algo
send(wave=ALGO, vel=0, patch=62,filter_freq=2000,resonance=2.5,filter_type=FILTER_LPF, bp0_target=TARGET_FILTER_FREQ,bp0="1,1,500,0,0,0")
# Buffer messages sent to the synths if you call buffer().
# Calling buffer(0) turns off the buffering
# flush() sends whatever is in the buffer now, and is called after buffer(0) as well
send_buffer = ""
buffer_size = 0
def millis():
# Timestamp to send over to synths for global sync
# This is a suggestion. I use ms since today started
d = datetime.datetime.now()
return int((datetime.datetime.utcnow() - datetime.datetime(d.year, d.month, d.day)).total_seconds()*1000)
# Removes trailing 0s and x.0000s
def trunc(number):
return ('%.10f' % number).rstrip('0').rstrip('.')
# Construct an AMY message
def message(osc=0, wave=-1, patch=-1, note=-1, vel=-1, amp=-1, freq=-1, duty=-1, feedback=-1, timestamp=None, reset=-1, phase=-1, \
client=-1, retries=1, volume=-1, filter_freq = -1, resonance = -1, bp0="", bp1="", bp2="", bp0_target=-1, bp1_target=-1, bp2_target=-1, mod_target=-1, \
debug=-1, mod_source=-1, eq_l = -1, eq_m = -1, eq_h = -1, filter_type= -1, algorithm=-1, ratio = -1, detune = -1, algo_source=None):
m = ""
if(timestamp is None): timestamp = millis()
m = m + "t" + trunc(timestamp)
if(osc>=0): m = m + "v" + trunc(osc)
if(wave>=0): m = m + "w" + trunc(wave)
if(duty>=0): m = m + "d" + trunc(duty)
if(feedback>=0): m = m + "b" + trunc(feedback)
if(freq>=0): m = m + "f" + trunc(freq)
if(note>=0): m = m + "n" + trunc(note)
if(patch>=0): m = m + "p" + trunc(patch)
if(phase>=0): m = m + "P" + trunc(phase)
if(detune>=0): m = m + "u" + trunc(detune)
if(client>=0): m = m + "c" + trunc(client)
if(amp>=0): m = m + "a" + trunc(amp)
if(vel>=0): m = m + "l" + trunc(vel)
if(volume>=0): m = m + "V" + trunc(volume)
if(resonance>=0): m = m + "R" + trunc(resonance)
if(filter_freq>=0): m = m + "F" + trunc(filter_freq)
if(ratio>=0): m = m + "I" + trunc(ratio)
if(algorithm>=0): m = m + "o" + trunc(algorithm)
if(len(bp0)): m = m +"A%s" % (bp0)
if(len(bp1)): m = m +"B%s" % (bp1)
if(len(bp2)): m = m +"C%s" % (bp2)
if(algo_source is not None): m = m +"O%s" % (algo_source)
if(bp0_target>=0): m = m + "T" +trunc(bp0_target)
if(bp1_target>=0): m = m + "W" +trunc(bp1_target)
if(bp2_target>=0): m = m + "X" +trunc(bp2_target)
if(mod_target>=0): m = m + "g" + trunc(mod_target)
if(mod_source>=0): m = m + "L" + trunc(mod_source)
if(reset>=0): m = m + "S" + trunc(reset)
if(debug>=0): m = m + "D" + trunc(debug)
if(eq_l>=0): m = m + "x" + trunc(eq_l)
if(eq_m>=0): m = m + "y" + trunc(eq_m)
if(eq_h>=0): m = m + "z" + trunc(eq_h)
if(filter_type>=0): m = m + "G" + trunc(filter_type)
return m+'Z'
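# Example of the wire format produced, following the flag letters above:
#   message(osc=0, wave=SINE, note=60, vel=1, timestamp=0) == "t0v0w0n60l1Z"
# i.e. a timestamped, Z-terminated string of single-letter flags with truncated numbers.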
def transmit(message, retries=1):
for x in range(retries):
get_sock().sendto(message.encode('ascii'), get_multicast_group())
def buffer(size=508):
global buffer_size
buffer_size = size
if(buffer_size == 0):
flush()
def flush(retries=1):
    global send_buffer
    if send_buffer:
        transmit(send_buffer, retries=retries)
    send_buffer = ""
def send(retries=1, **kwargs):
global send_buffer
m = message(**kwargs)
if(buffer_size > 0):
if(len(send_buffer + m) > buffer_size):
transmit(send_buffer, retries=retries)
send_buffer = m
else:
send_buffer = send_buffer + m
else:
transmit(m,retries=retries)
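# Sketch of the buffering workflow described above (assumes connect() succeeded):
#   buffer()                      # start coalescing messages into <=508-byte packets
#   for n in range(60, 72):
#       send(osc=0, wave=SINE, note=n, vel=1)
#   buffer(0)                     # stop buffering; flush() sends whatever remains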
"""
Convenience functions
"""
def reset(osc=None):
if(osc is not None):
send(reset=osc)
else:
send(reset=100) # reset > ALLES_OSCS resets all oscs
def volume(volume, client = -1):
send(client=client, volume=volume)
"""
Run a scale through all the synth's sounds
"""
def test():
while True:
for wave in [SINE, SAW, PULSE, TRIANGLE, NOISE]:
for i in range(12):
send(osc=0, wave=wave, note=40+i, patch=i, vel=1)
time.sleep(0.5)
"""
Play all of the FM patches in order
"""
def play_patches(wait=0.500, patch_total = 100, **kwargs):
once = True
patch_count = 0
while True:
for i in range(24):
patch = patch_count % patch_total
patch_count = patch_count + 1
send(osc=i % OSCS, note=i+50, wave=ALGO, patch=patch, vel=1, **kwargs)
time.sleep(wait)
send(osc=i % OSCS, vel=0)
"""
Play up to ALLES_OSCS patches at once
"""
def polyphony(max_voices=OSCS,**kwargs):
note = 0
oscs = []
for i in range(int(max_voices/2)):
oscs.append(int(i))
oscs.append(int(i+(OSCS/2)))
print(str(oscs))
while(1):
osc = oscs[note % max_voices]
print("osc %d note %d filter %f " % (osc, 30+note, note*50))
send(osc=osc, **kwargs, patch=note, filter_type=FILTER_NONE, filter_freq=note*50, note=30+(note), client = -1, vel=1)
time.sleep(0.5)
note =(note + 1) % 64
def eq_test():
reset()
eqs = [ [0,0,0], [15,0,0], [0,0,15], [0,15,0],[-15,-15,15],[-15,-15,30],[-15,30,-15], [30,-15,-15] ]
for eq in eqs:
print("eq_l = %ddB eq_m = %ddB eq_h = %ddB" % (eq[0], eq[1], eq[2]))
send(eq_l=eq[0], eq_m=eq[1], eq_h=eq[2])
drums(loops=2)
time.sleep(1)
reset()
time.sleep(0.250)
"""
Sweep the filter
"""
def sweep(speed=0.100, res=0.5, loops = -1):
end = 2000
cur = 0
    while(loops != 0):
        loops = loops - 1
        for i in [0, 1, 4, 5, 1, 3, 4, 5]:
            cur = (cur + 100) % end
            send(osc=0,filter_type=FILTER_LPF, filter_freq=cur+250, resonance=res, wave=PULSE, note=50+i, duty=0.50, vel=1)
            send(osc=1,filter_type=FILTER_LPF, filter_freq=cur+500, resonance=res, wave=PULSE, note=50+12+i, duty=0.25, vel=1)
            send(osc=2,filter_type=FILTER_LPF, filter_freq=cur, resonance=res, wave=PULSE, note=50+6+i, duty=0.90, vel=1)
            time.sleep(speed)
"""
An example drum machine using osc+PCM presets
"""
def drums(bpm=120, loops=-1, **kwargs):
preset(13, osc=0, **kwargs) # sample bass drum
preset(8, osc=3, **kwargs) # sample hat
preset(9, osc=4, **kwargs) # sample cow
preset(10, osc=5, **kwargs) # sample hi cow
preset(11, osc=2, **kwargs) # sample snare
preset(1, osc=7, **kwargs) # filter bass
[bass, snare, hat, cow, hicow, silent] = [1, 2, 4, 8, 16, 32]
pattern = [bass+hat, hat+hicow, bass+hat+snare, hat+cow, hat, hat+bass, snare+hat, hat]
bassline = [50, 0, 0, 0, 50, 52, 51, 0]
while (loops != 0):
loops = loops - 1
for i,x in enumerate(pattern):
if(x & bass):
send(osc=0, vel=4, **kwargs)
if(x & snare):
send(osc=2, vel=1.5)
if(x & hat):
send(osc=3, vel=1)
if(x & cow):
send(osc=4, vel=1)
if(x & hicow):
send(osc=5, vel=1)
if(bassline[i]>0):
send(osc=7, vel=0.5, note=bassline[i]-12, **kwargs)
else:
send(vel=0, osc=7, **kwargs)
time.sleep(1.0/(bpm*2/60))
"""
C-major chord
"""
def c_major(octave=2,wave=SINE, **kwargs):
    # C (130.81 Hz), E (164.81 Hz) and G (196.00 Hz) scaled by `octave` give a C-major
    # triad; the earlier 220.5/138.5/164.5 values were closer to an A-major triad
    send(osc=0, freq=130.81*octave, wave=wave, vel=1, **kwargs)
    send(osc=1, freq=164.81*octave, wave=wave, vel=1, **kwargs)
    send(osc=2, freq=196.00*octave, wave=wave, vel=1, **kwargs)
"""
Connection stuff
"""
def get_sock():
global sock
return sock
def get_multicast_group():
return ('172.16.31.10', UDP_PORT)
def connect(local_ip=None):
# Set up the socket for multicast send & receive
    global sock, _local_ip
# If not given, find your source IP -- by default your main routable network interface.
if(local_ip is None):
s = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
try:
s.connect(('10.255.255.255', 1))
local_ip = s.getsockname()[0]
except Exception:
print("Trouble getting routable IP address, using localhost. If wrong, do alles.connect(local_ip='ip.address')")
local_ip = "127.0.0.1"
finally:
s.close()
    _local_ip = local_ip
    sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEADDR, 1)
try:
sock.setsockopt(socket.SOL_SOCKET, socket.SO_REUSEPORT, 1)
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
except AttributeError:
print("couldn't REUSEPORT")
sock.setsockopt(socket.SOL_SOCKET, socket.SO_BROADCAST, 1)
# TTL defines how many hops it can take
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_TTL, 255) # 1
# Keep loopback on if you're controlling Alles from your own desktop
sock.setsockopt(socket.IPPROTO_IP, socket.IP_MULTICAST_LOOP, 1) # 1
sock.bind(('', UDP_PORT))
# Set the local interface for multicast receive
sock.setsockopt(socket.SOL_IP, socket.IP_MULTICAST_IF, socket.inet_aton(local_ip))
# And the networks to be a member of (destination and host)
mreq = socket.inet_aton(get_multicast_group()[0]) + socket.inet_aton(local_ip)
sock.setsockopt(socket.SOL_IP, socket.IP_ADD_MEMBERSHIP, mreq)
# Don't block to receive -- not necessary and we sometimes drop packets we're waiting for
sock.setblocking(0)
print("Connected to %s as local IP for multicast IF" % (local_ip))
def disconnect():
    global sock
    # Remove ourselves from membership, using the local IP recorded by connect()
    mreq = socket.inet_aton(get_multicast_group()[0]) + socket.inet_aton(_local_ip)
    sock.setsockopt(socket.SOL_IP, socket.IP_DROP_MEMBERSHIP, mreq)
    sock.close()
def decode_battery_mask(mask):
state = "unknown"
level = 0
if (mask & 0x01): state = "charging"
if (mask & 0x02): state = "charged"
if (mask & 0x04): state = "discharging"
if (mask & 0x10): level = 4
if (mask & 0x20): level = 3
if (mask & 0x40): level = 2
if (mask & 0x80): level = 1
return(state, level)
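# For example, decode_battery_mask(0x21) == ("charging", 3):
# bit 0x01 selects the state and bit 0x20 selects the level.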
def sync(count=10, delay_ms=100):
global sock
import re
# Sends sync packets to all the listeners so they can correct / get the time
clients = {}
client_map = {}
battery_map = {}
start_time = millis()
last_sent = 0
time_sent = {}
rtt = {}
i = 0
while 1:
tic = millis() - start_time
if((tic - last_sent) > delay_ms):
time_sent[i] = millis()
#print ("sending %d at %d" % (i, time_sent[i]))
output = "s%di%dZ" % (time_sent[i], i)
sock.sendto(output.encode('ascii'), get_multicast_group())
i = i + 1
last_sent = tic
try:
data, address = sock.recvfrom(1024)
data = data.decode('ascii')
#print("received %s from %s" % (data, address))
if(data[0] == '_'):
data = data[:-1]
try:
[_, client_time, sync_index, client_id, ipv4, battery] = re.split(r'[sicry]',data)
                except ValueError:
                    print("Unexpected sync response: %s" % (data))
                    continue
if(int(sync_index) <= i): # skip old ones from a previous run
#print ("recvd at %d: %s %s %s %s" % (millis(), client_time, sync_index, client_id, ipv4))
# ping sets client index to -1, so make sure this is a sync response
if(int(sync_index) >= 0):
client_map[int(ipv4)] = int(client_id)
battery_map[int(ipv4)] = battery
rtt[int(ipv4)] = rtt.get(int(ipv4), {})
rtt[int(ipv4)][int(sync_index)] = millis()-time_sent[int(sync_index)]
except socket.error:
pass
# Wait for at least (client latency) to get any straggling UDP packets back
delay_period = 1 + (ALLES_LATENCY_MS / delay_ms)
if((i-delay_period) > count):
break
# Compute average rtt in ms and reliability (number of rt packets we got)
for ipv4 in rtt.keys():
hit = 0
total_rtt_ms = 0
for i in range(count):
ms = rtt[ipv4].get(i, None)
if ms is not None:
total_rtt_ms = total_rtt_ms + ms
hit = hit + 1
clients[client_map[ipv4]] = {}
clients[client_map[ipv4]]["reliability"] = float(hit)/float(count)
clients[client_map[ipv4]]["avg_rtt"] = float(total_rtt_ms) / float(hit) # todo compute std.dev
clients[client_map[ipv4]]["ipv4"] = ipv4
clients[client_map[ipv4]]["battery"] = decode_battery_mask(int(battery_map[ipv4]))
# Return this as a map for future use
return clients
def battery_test():
tic = time.time()
clients = 1
try:
while clients:
reset()
print("Been %d seconds" % (time.time()-tic))
clients = len(sync().keys())
drums(loops=1)
time.sleep(2)
reset()
time.sleep(60)
except KeyboardInterrupt:
pass
print("Took %d seconds to stop" %(time.time() - tic))
# Setup the sock on module import
# I have some convenience hardcoded IPs for machines I work on here
try:
if(os.uname().nodename.startswith('colossus')):
connect(local_ip="192.168.1.2")
elif(os.uname().nodename.startswith('convolve')):
connect(local_ip = '192.168.1.3')
elif(os.uname().nodename.startswith('cedar')):
connect(local_ip = '192.168.1.3')
else:
connect(local_ip=None)
except OSError:
try:
connect(local_ip=None)
except OSError:
print("Couldn't connect. Try manually with alles.connect('local_ip_address')")
|
430178
|
from __future__ import unicode_literals
from billy.db import tables
from billy.models.base import BaseTableModel
from billy.utils.generic import make_guid
class TransactionFailureModel(BaseTableModel):
TABLE = tables.TransactionFailure
def create(
self,
transaction,
error_message,
error_code=None,
error_number=None,
):
"""Create a failure for and return
"""
failure = self.TABLE(
guid='TF' + make_guid(),
transaction=transaction,
error_message=error_message,
error_code=error_code,
error_number=error_number,
)
self.session.add(failure)
self.session.flush()
return failure
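# Hypothetical usage sketch -- the BaseTableModel constructor is assumed here to take
# a database session, which this file does not show:
#
#   model = TransactionFailureModel(session)
#   failure = model.create(transaction=tx, error_message='card declined',
#                          error_code='card_declined', error_number=2001)
#   assert failure.guid.startswith('TF')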
|
430190
|
import numpy as np
import pandas as pd
from tqdm import tqdm
from numpy.matlib import repmat
def raised_cosine(duration, nbases, binfun):
    """Raised-cosine basis: `nbases` overlapping bumps tiling `binfun(duration)` bins."""
nbins = binfun(duration)
ttb = repmat(np.arange(1, nbins + 1).reshape(-1, 1), 1, nbases)
dbcenter = nbins / nbases
cwidth = 4 * dbcenter
bcenters = 0.5 * dbcenter + dbcenter * np.arange(0, nbases)
x = ttb - repmat(bcenters.reshape(1, -1), nbins, 1)
bases = (np.abs(x / cwidth) < 0.5) * (np.cos(x * np.pi * 2 / cwidth) * 0.5 + 0.5)
return bases
def full_rcos(duration, nbases, binfun, n_before=1):
    """Raised-cosine basis whose centers start `n_before` bins before time zero."""
    n_before = int(n_before)
nbins = binfun(duration)
ttb = repmat(np.arange(1, nbins + 1).reshape(-1, 1), 1, nbases)
dbcenter = nbins / (nbases - 2)
cwidth = 4 * dbcenter
bcenters = 0.5 * dbcenter + dbcenter * np.arange(-n_before, nbases - n_before)
x = ttb - repmat(bcenters.reshape(1, -1), nbins, 1)
bases = (np.abs(x / cwidth) < 0.5) * (np.cos(x * np.pi * 2 / cwidth) * 0.5 + 0.5)
return bases
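# Minimal sketch of building a basis (binfun maps a duration in seconds to a bin
# count; the 5 ms bin size is an assumption for illustration):
#
#   binfun = lambda t: int(np.ceil(t / 0.005))
#   B = full_rcos(duration=0.4, nbases=5, binfun=binfun)   # shape (80, 5)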
def neglog(weights, x, y):
    """Poisson negative log-likelihood with an exponential inverse link."""
xproj = x @ weights
f = np.exp(xproj)
nzidx = f != 0
if np.any(y[~nzidx] != 0):
return np.inf
return -y[nzidx].reshape(1, -1) @ xproj[nzidx] + np.sum(f)
class SequentialSelector:
def __init__(self, model, n_features_to_select=None, direction='forward', scoring=None):
"""
Sequential feature selection for neural models
Parameters
----------
model : brainbox.modeling.neural_model.NeuralModel
Any class which inherits NeuralModel and has already been instantiated.
n_features_to_select : int, optional
Number of covariates to select. When None, will sequentially fit all parameters and
store the associated scores. By default None
direction : str, optional
Direction of sequential selection. 'forward' indicates model will be built from 1
            regressor up, while 'backward' indicates regressors will be removed one at a time until
n_features_to_select is reached or 1 regressor remains. By default 'forward'
scoring : str, optional
Scoring function to use. Must be a valid argument to the subclass of NeuralModel passed
to SequentialSelector. By default None
"""
self.model = model
self.design = model.design
if n_features_to_select:
self.n_features_to_select = int(n_features_to_select)
else:
self.n_features_to_select = len(self.design.covar)
self.direction = direction
self.scoring = scoring
self.delta_scores = pd.DataFrame(index=self.model.clu_ids)
self.trlabels = self.design.trlabels
self.train = np.isin(self.trlabels, self.model.traininds).flatten()
self.test = ~self.train
self.features = np.array(list(self.design.covar.keys()))
def fit(self, progress=False):
"""
Fit the sequential feature selection
Parameters
----------
progress : bool, optional
Whether to show a progress bar, by default False
"""
n_features = len(self.features)
maskdf = pd.DataFrame(index=self.model.clu_ids, columns=self.features, dtype=bool)
maskdf.loc[:, :] = False
seqdf = pd.DataFrame(index=self.model.clu_ids, columns=range(self.n_features_to_select))
scoredf = pd.DataFrame(index=self.model.clu_ids, columns=range(self.n_features_to_select))
if not 0 < self.n_features_to_select <= n_features:
raise ValueError('n_features_to_select is not a valid number in the context'
' of the model.')
n_iterations = (
self.n_features_to_select if self.direction == 'forward'
else n_features - self.n_features_to_select
)
for i in tqdm(range(n_iterations), desc='step', leave=False, disable=not progress):
masks_set = maskdf.groupby(self.features.tolist()).groups
for current_mask in tqdm(masks_set, desc='feature subset', leave=False):
cells = masks_set[current_mask]
new_feature_idx, nf_score = self._get_best_new_feature(current_mask, cells)
for cell in cells:
maskdf.at[cell, self.features[new_feature_idx.loc[cell]]] = True
seqdf.loc[cell, i] = self.features[new_feature_idx.loc[cell]]
scoredf.loc[cell, i] = nf_score.loc[cell]
self.support_ = maskdf
self.sequences_ = seqdf
self.scores_ = scoredf
def _get_best_new_feature(self, mask, cells):
mask = np.array(mask)
candidate_features = np.flatnonzero(~mask)
cell_idxs = np.argwhere(np.isin(self.model.clu_ids, cells)).flatten()
my = self.model.binnedspikes[np.ix_(self.train, cell_idxs)]
scores = pd.DataFrame(index=cells, columns=candidate_features, dtype=float)
for feature_idx in candidate_features:
candidate_mask = mask.copy()
candidate_mask[feature_idx] = True
if self.direction == 'backward':
candidate_mask = ~candidate_mask
fitfeatures = self.features[candidate_mask]
feat_idx = np.hstack([self.design.covar[feat]['dmcol_idx'] for feat in fitfeatures])
mdm = self.design[np.ix_(self.train, feat_idx)]
coefs, intercepts = self.model._fit(mdm, my, cells=cells)
for i, cell in enumerate(cells):
scores.at[cell, feature_idx] = self.model._scorer(coefs.loc[cell],
intercepts.loc[cell],
mdm, my[:, i])
return scores.idxmax(axis=1), scores.max(axis=1)
|
430237
|
import torch
import torch.nn as nn
import math
from ltr.models.backbone.resnet import BasicBlock
from ltr.models.layers.blocks import conv_block
from ltr.models.lwl.utils import interpolate
class ResidualDS16SW(nn.Module):
""" Outputs the few-shot learner label and spatial importance weights given the segmentation mask """
def __init__(self, layer_dims, use_bn=True):
super().__init__()
self.conv_block = conv_block(1, layer_dims[0], kernel_size=3, stride=2, padding=1, batch_norm=use_bn)
self.pool = nn.MaxPool2d(kernel_size=3, stride=2, padding=1)
ds1 = nn.Conv2d(layer_dims[0], layer_dims[1], kernel_size=3, padding=1, stride=2)
self.res1 = BasicBlock(layer_dims[0], layer_dims[1], stride=2, downsample=ds1, use_bn=use_bn)
ds2 = nn.Conv2d(layer_dims[1], layer_dims[2], kernel_size=3, padding=1, stride=2)
self.res2 = BasicBlock(layer_dims[1], layer_dims[2], stride=2, downsample=ds2, use_bn=use_bn)
self.label_pred = conv_block(layer_dims[2], layer_dims[3], kernel_size=3, stride=1, padding=1,
relu=True, batch_norm=use_bn)
self.samp_w_pred = nn.Conv2d(layer_dims[2], layer_dims[3], kernel_size=3, padding=1, stride=1)
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(1)
m.bias.data.zero_()
self.samp_w_pred.weight.data.fill_(0)
self.samp_w_pred.bias.data.fill_(1)
def forward(self, label_mask, feature=None):
# label_mask: frames, seq, h, w
assert label_mask.dim() == 4
label_shape = label_mask.shape
label_mask = label_mask.view(-1, 1, *label_mask.shape[-2:])
out = self.pool(self.conv_block(label_mask))
out = self.res2(self.res1(out))
label_enc = self.label_pred(out)
sample_w = self.samp_w_pred(out)
label_enc = label_enc.view(label_shape[0], label_shape[1], *label_enc.shape[-3:])
sample_w = sample_w.view(label_shape[0], label_shape[1], *sample_w.shape[-3:])
# Out dim is (num_seq, num_frames, layer_dims[-1], h, w)
return label_enc, sample_w
class ResidualDS16FeatSWBoxCatMultiBlock(nn.Module):
def __init__(self, layer_dims, feat_dim, use_final_relu=True, use_gauss=True, use_bn=True,
non_default_init=True, init_bn=1, gauss_scale=0.25, final_bn=True):
super().__init__()
in_layer_dim = (feat_dim+1,) + tuple(list(layer_dims)[:-2])
out_layer_dim = tuple(list(layer_dims)[:-1])
self.use_gauss = use_gauss
res = []
for in_d, out_d in zip(in_layer_dim, out_layer_dim):
ds = nn.Conv2d(in_d, out_d, kernel_size=3, padding=1, stride=1)
res.append(BasicBlock(in_d, out_d, stride=1, downsample=ds, use_bn=use_bn))
self.res = nn.Sequential(*res)
self.label_pred = conv_block(layer_dims[-2], layer_dims[-1], kernel_size=3, stride=1, padding=1,
relu=use_final_relu, batch_norm=final_bn)
self.gauss_scale = gauss_scale
if non_default_init:
for m in self.modules():
if isinstance(m, nn.Conv2d):
n = m.kernel_size[0] * m.kernel_size[1] * m.out_channels
m.weight.data.normal_(0, math.sqrt(2. / n))
elif isinstance(m, nn.BatchNorm2d):
m.weight.data.fill_(init_bn)
m.bias.data.zero_()
    def bbox_to_mask(self, bbox, sz):
        """Rasterize (x1, y1, w, h) boxes into binary masks of spatial size sz."""
        mask = torch.zeros((bbox.shape[0],1,*sz), dtype=torch.float32, device=bbox.device)
        for i, bb in enumerate(bbox):
            # round box coordinates to the nearest integer pixel
            x1, y1, w, h = [int(v + 0.5) for v in bb]
            mask[i, :, y1:(y1+h), x1:(x1+w)] = 1.0
        return mask
def bbox_to_gauss(self, bbox, sz):
mask = torch.zeros((bbox.shape[0],1,*sz), dtype=torch.float32, device=bbox.device)
x_max, y_max = sz[-1], sz[-2]
for i, bb in enumerate(bbox):
x1, y1, w, h = list(map(int, bb))
cx, cy = x1+w/2, y1+h/2
xcoords = torch.arange(0, x_max).unsqueeze(dim=0).to(bbox.device).float()
ycoords = torch.arange(0, y_max).unsqueeze(dim=0).T.to(bbox.device).float()
d_xcoords = xcoords - cx
d_ycoords = ycoords - cy
dtotsqr = d_xcoords**2/(self.gauss_scale*w)**2 + d_ycoords**2/(self.gauss_scale*h)**2
mask[i,0] = torch.exp(-0.5*dtotsqr)
return mask
def forward(self, bb, feat, sz):
if self.use_gauss:
label_mask = self.bbox_to_gauss(bb, sz[-2:])
else:
label_mask = self.bbox_to_mask(bb, sz[-2:])
label_shape = label_mask.shape
label_mask = label_mask.view(-1, 1, *label_mask.shape[-2:])
feat = feat.view(-1, *feat.shape[-3:])
feat_mask_enc = torch.cat([feat, interpolate(label_mask, feat.shape[-2:])], dim=1)
out = self.res(feat_mask_enc)
label_enc = self.label_pred(out)
label_enc = label_enc.view(label_shape[0], label_shape[1], *label_enc.shape[-3:])
return label_enc
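# Shape sketch (illustrative): ResidualDS16SW downsamples the mask by 16x in each
# spatial dimension and returns layer_dims[3] channels.
#
#   enc = ResidualDS16SW(layer_dims=(16, 32, 64, 16))
#   mask = torch.rand(2, 3, 256, 256)     # (frames, seqs, H, W)
#   label_enc, w = enc(mask)              # both (2, 3, 16, 16, 16)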
|
430298
|
import argparse
import os.path as osp
from glob import glob
from multiprocessing import Pool
import cv2
import mmcv
from tqdm import tqdm
def parse_args():
parser = argparse.ArgumentParser()
parser.add_argument("--img_pattern", default="/data/test/*jpg")
parser.add_argument("--output_path", default="/data/coco_test.json")
parser.add_argument("--n_jobs", type=int, default=16)
return parser.parse_args()
def convert(id_path):
img_id, img_path = id_path
image = cv2.imread(img_path)
h, w = image.shape[:2]
return {"id": img_id, "height": h, "width": w, "file_name": osp.basename(img_path)}
def main(img_pattern, output_path, n_jobs=16):
img_paths = glob(img_pattern)
with Pool(n_jobs) as p:
coco_images = list(
tqdm(
iterable=p.imap_unordered(convert, enumerate(img_paths)), total=len(img_paths), desc="Images to COCO..."
)
)
mmcv.dump(
{
"annotations": [],
"images": coco_images,
"categories": [{"supercategory": "wheat", "name": "wheat", "id": 1}],
},
output_path,
)
if __name__ == "__main__":
main(**vars(parse_args()))
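# Example invocation (the script filename is a placeholder):
#   python images_to_coco.py --img_pattern "/data/test/*jpg" --output_path /data/coco_test.json --n_jobs 16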
|
430318
|
from docutils import nodes
from docutils.parsers import Parser as BaseParser
class Parser(BaseParser):
supported = ('foo',)
def parse(self, input, document):
section = nodes.section(ids=['id1'])
section += nodes.title('Generated section', 'Generated section')
document += section
def get_transforms(self):
return []
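# Hedged usage sketch: wiring the parser into docutils' publisher. The 'html'
# writer choice is an assumption; the parser ignores its input and emits one section.
#
#   from docutils.core import publish_string
#   html = publish_string('any input', parser=Parser(), writer_name='html')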
|
430335
|
import os
from kafka import SimpleProducer, KafkaConsumer, KafkaClient
from kafka.common import LeaderNotAvailableError
import logging
logging.basicConfig()
logger = logging.getLogger('sample-bolt')
# Input stream topics
in_streams = ['fortune-cookie', 'topic-b']
# IP:PORT of a Kafka broker. The typical port is 9092.
KAFKA_BROKER_IP_PORT = os.getenv('KAFKA_BROKER', '192.168.86.10:9092')
print "KAFKA BROKER: " + KAFKA_BROKER_IP_PORT
logger.warn("KAFKA BROKER: " + KAFKA_BROKER_IP_PORT)
kafka = KafkaClient(KAFKA_BROKER_IP_PORT)
producer = SimpleProducer(kafka)
consumer = KafkaConsumer('fortune-cookie', group_id="my_group", metadata_broker_list=[KAFKA_BROKER_IP_PORT])
def execute(message):
    # IMPLEMENT THIS
    print(message)
    emit("bolt-out", "out")
def emit(topic, msg):
"""
topic : string
msg : string
"""
try:
producer.send_messages(topic, msg)
except LeaderNotAvailableError:
logger.warning('Caught a LeaderNotAvailableError. This seems to happen when auto-creating a new topic.')
print('Caught a LeaderNotAvailableError. This seems to happen when auto-creating a new topic.')
def close():
kafka.close()
if __name__=="__main__":
for message in consumer:
# message is raw byte string -- decode if necessary!
# e.g., for unicode: `message.decode('utf-8')`
execute(message)
# kafka.close()
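# Smoke test (script name illustrative; assumes a reachable broker, e.g. via
# the KAFKA_BROKER env var, and that the 'fortune-cookie' topic exists):
#   KAFKA_BROKER=localhost:9092 python sample_bolt.py
# Each consumed message is printed and re-emitted to the 'bolt-out' topic.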
|
430346
|
from sympy import zeros, eye, Matrix
from .extra_dyn import frictionforce, driveinertiaterm
from ..utils import sym_skew as skew
from ..utils import identity
def rne_khalil_forward(rbtdef, geom, ifunc=None):
'''RNE forward pass.'''
if not ifunc:
ifunc = identity
w = list(range(0, rbtdef.dof + 1))
dw = list(range(0, rbtdef.dof + 1))
dV = list(range(0, rbtdef.dof + 1))
U = list(range(0, rbtdef.dof + 1))
w[-1] = zeros(3, 1)
dw[-1] = zeros(3, 1)
dV[-1] = -rbtdef.gravityacc
U[-1] = zeros(3, 3)
z = Matrix([0, 0, 1])
# Forward
for i in range(rbtdef.dof):
s = rbtdef._links_sigma[i]
ns = 1 - s
w_pj = geom.Rdh[i].T * w[i - 1]
w[i] = w_pj + ns * rbtdef.dq[i] * z
w[i] = ifunc(w[i])
dw[i] = geom.Rdh[i].T * dw[i - 1] + ns * \
(rbtdef.ddq[i] * z + w_pj.cross(rbtdef.dq[i] * z).reshape(3, 1))
dw[i] = ifunc(dw[i])
dV[i] = geom.Rdh[i].T * (dV[i - 1] + U[i - 1] * geom.pdh[i]) + s * (
rbtdef.ddq[i] * z + 2 * w_pj.cross(rbtdef.dq[i] * z).reshape(3, 1))
dV[i] = ifunc(dV[i])
U[i] = skew(dw[i]) + skew(w[i]) ** 2
U[i] = ifunc(U[i])
return w, dw, dV, U
def rne_khalil_backward(rbtdef, geom, fw_results, ifunc=None):
'''RNE backward pass.'''
w, dw, dV, U = fw_results
if not ifunc:
ifunc = identity
    # extend Rdh so that Rdh[dof] returns the identity
Rdh = geom.Rdh + [eye(3)]
    # extend pdh so that pdh[dof] returns zero
pdh = geom.pdh + [zeros(3, 1)]
F = list(range(rbtdef.dof))
M = list(range(rbtdef.dof))
f = list(range(rbtdef.dof + 1))
m = list(range(rbtdef.dof + 1))
f[rbtdef.dof] = zeros(3, 1)
m[rbtdef.dof] = zeros(3, 1)
z = Matrix([0, 0, 1])
tau = zeros(rbtdef.dof, 1)
fric = frictionforce(rbtdef)
Idrive = driveinertiaterm(rbtdef)
# Backward
for i in range(rbtdef.dof - 1, -1, -1):
s = rbtdef._links_sigma[i]
ns = 1 - s
F[i] = rbtdef.m[i] * dV[i] + U[i] * Matrix(rbtdef.l[i])
F[i] = ifunc(F[i])
M[i] = rbtdef.L[i] * dw[i] + w[i].cross(
rbtdef.L[i] * w[i]).reshape(3, 1) + \
Matrix(rbtdef.l[i]).cross(dV[i]).reshape(3, 1)
M[i] = ifunc(M[i])
f_nj = Rdh[i + 1] * f[i + 1]
f[i] = F[i] + f_nj # + f_e[i]
f[i] = ifunc(f[i])
m[i] = M[i] + Rdh[i + 1] * m[i + 1] + \
pdh[i + 1].cross(f_nj).reshape(3, 1) # + m_e[i]
m[i] = ifunc(m[i])
tau[i] = ifunc(((s * f[i] + ns * m[i]).T * z)[0] + fric[i] + Idrive[i])
return tau
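# How the two passes compose (illustrative; `rbtdef` and `geom` come from the
# surrounding package and are not constructed here):
#   fw_results = rne_khalil_forward(rbtdef, geom)
#   tau = rne_khalil_backward(rbtdef, geom, fw_results)
# `tau` is the dof x 1 vector of joint forces/torques, including the friction
# and drive-inertia terms added in the backward pass.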
|
430349
|
import pylab as p
import numpy as np
from collections import defaultdict
p.rcParams.update({'font.size': 18}) # makes the default text size larger
fname='exoplanet.eu_catalog.csv'
f=open(fname,'r')
r=f.readlines()
f.close()
fields=r[0][1:].split(',')
for i in range(len(fields)):
fields[i]=fields[i].strip()
i=fields.index('discovered')
data=defaultdict(lambda:0)
for line in r[1:]:
try:
date=int(line.split(',')[i].strip())
data[date]+=1
    except ValueError:
        print(line.split(',')[i].strip())
x = sorted(data.keys())
y=[data[i] for i in x]
for i in range(1,len(y)):
y[i]+=y[i-1]
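# (With numpy already imported, the running total above is equivalent to
# y = list(np.cumsum(y)).)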
x=[x[0]-1]+x+[x[-1]]
y=[0]+y+[0]
p.fill(x,y)
p.xlim(min(x),max(x))
p.xlabel('Year')
p.ylabel('Exoplanets Discovered')
p.show()
|
430399
|
from typing import Iterator
from jivago.wsgi.annotations import Resource
from jivago.wsgi.methods import GET
from jivago.wsgi.request.headers import Headers
from jivago.wsgi.request.response import Response
from jivago.wsgi.request.streaming_response_body import StreamingResponseBody
@Resource("/stream")
class MyStreamingResource(object):
@GET
def get_stream(self) -> StreamingResponseBody:
# Returning the body object automatically sets the status code to 200 OK
return StreamingResponseBody(self.generate_bytes())
    @GET
    def get_stream_with_custom_status(self) -> Response:
        # A Response object can also be created manually for finer control
        # over status code, headers and other transport parameters.
        return Response(202, Headers(), StreamingResponseBody(self.generate_bytes()))
    def generate_bytes(self) -> Iterator[bytes]:
for i in range(0, 5):
yield b"my bytes"
|
430403
|
import logging
from collections import namedtuple
# addHandler() returns None, so bind the logger first and attach the handler
# separately.
log = logging.getLogger(__name__)
log.addHandler(logging.NullHandler())
METADATA_PARENT_ID = "parent_id"
METADATA_KEY_PROV_TYPE = "prov_type"
METADATA_KEY_IDENTIFIER = "identifier"
METADATA_KEY_IDENTIFIER_ORIGINAL = "identifier_original"
METADATA_KEY_NAMESPACES = "namespaces"
METADATA_KEY_TYPE_MAP = "type_map"
# Return types for adapter classes
DbDocument = namedtuple("DbDocument", "document, bundles")
DbBundle = namedtuple("DbBundle", "records, bundle_record")
DbRecord = namedtuple("DbRecord", "attributes, metadata")
DbRelation = namedtuple("DbRelation", "attributes, metadata")
class BaseAdapter:
"""
Interface class for a prov database adapter
"""
def __init__(self, *args, **kwargs):
pass
def connect(self, authentication_info):
"""
        Establish the database connection / log in to the database
:param authentication_info: a custom dict with credentials
:type authentication_info: dict
:return: Indicate whether the connection was successful
:rtype: boolean
:raise InvalidOptionsException:
"""
raise NotImplementedError("Abstract method")
def save_element(self, attributes, metadata):
"""
        Saves an element (entity, activity or agent) into the database
        :param attributes: Attributes as dict for the record. Note that you have to encode the dict yourself
        :type attributes: dict
        :param metadata: Metadata as dict for the record. You have to encode the dict yourself, but you can be sure that all meta keys are always present
        :type metadata: dict
:return: Record id
:rtype: str
"""
raise NotImplementedError("Abstract method")
def save_relation(self, from_node, to_node, attributes, metadata):
"""
Create a relation between 2 nodes
        :param from_node: The identifier of the origin node
        :type from_node: str
        :param to_node: The identifier of the destination node
        :type to_node: str
        :param attributes: Attributes as dict for the record. Note that you have to encode the dict yourself
        :type attributes: dict
        :param metadata: Metadata as dict for the record. You have to encode the dict yourself, but you can be sure that all meta keys are always present
        :type metadata: dict
:return: Record id
:rtype: str
"""
raise NotImplementedError("Abstract method")
def get_records_by_filter(self, attributes_dict=None, metadata_dict=None):
"""
        Returns all records (nodes and relations) that match a filter dict.
        The filter dicts form an AND combination, and only the start node must fulfill the conditions.
        The result should contain all associated relations and nodes together
:param attributes_dict:
:type attributes_dict: dict
:param metadata_dict:
:type metadata_dict: dict
:return: list of relations and nodes
:rtype: list
"""
raise NotImplementedError("Abstract method")
def get_records_tail(self, attributes_dict=None, metadata_dict=None, depth=None):
"""
        Returns all connected nodes and relations based on a filter.
        The filter is an AND combination and applies only to the origin nodes.
:param attributes_dict:
:type attributes_dict: dict
:param metadata_dict:
:type metadata_dict: dict
:param depth:
:type depth: int
:return: a list of relations and nodes
:rtype: list
"""
raise NotImplementedError("Abstract method")
def get_bundle_records(self, bundle_identifier):
"""
        Returns the relations and nodes for a specific bundle identifier.
        Use the bundle association to get all bundle nodes.
        A relation belongs to the bundle only if both its start AND end node also belong to the bundle,
        except for prov:Mention (see the W3C documentation on bundle links).
:param bundle_identifier: The bundle identifier
:type bundle_identifier: str
        :return: list of nodes and relations
:rtype: list
"""
raise NotImplementedError("Abstract method")
def get_record(self, record_id):
"""
Return a single record
:param record_id: The id
:type record_id: str
:return: DbRecord
:rtype: DbRecord
"""
raise NotImplementedError("Abstract method")
def get_relation(self, relation_id):
"""
Returns a single relation
:param relation_id: The id
:type relation_id: str
:return: DbRelation
:rtype: DbRelation
"""
raise NotImplementedError("Abstract method")
def delete_records_by_filter(self, attributes_dict, metadata_dict):
"""
Delete records by filter
:param attributes_dict:
:type attributes_dict: dict
:param metadata_dict:
:type metadata_dict: dict
:return: Indicates whether the deletion was successful
:rtype: boolean
:raise NotFoundException:
"""
raise NotImplementedError("Abstract method")
def delete_record(self, record_id):
"""
Delete a single record
:param record_id:
:type record_id: str
:return: Indicates whether the deletion was successful
:rtype: boolean
:raise NotFoundException:
"""
raise NotImplementedError("Abstract method")
def delete_relation(self, relation_id):
"""
Delete a single relation
:param relation_id:
:type relation_id: str
:return: Indicates whether the deletion was successful
:rtype: boolean
:raise NotFoundException:
"""
raise NotImplementedError("Abstract method")
|
430427
|
from math import sqrt, log
def log_loss(results):
    # Clamp values away from 0 and 1 so the logarithms below stay finite.
    predicted = [min(max(float(x[0]), 1e-15), 1 - 1e-15) for x in results]
    target = [min(max(float(x[1]), 1e-15), 1 - 1e-15) for x in results]
    return -(1.0 / len(target)) * sum([target[i] * log(predicted[i]) + (1.0 - target[i]) * log(1.0 - predicted[i]) for i in range(len(target))])
def rmse(results):
return (sum(map(lambda x: (x[1] - x[0]) ** 2, results)) / float(len(results))) ** 0.5
def percent_correct(results, threshold=0.5):
return sum(map(lambda x: x[1] == (0 if x[0] < threshold else 1), results)) / float(len(results))
def true_positives(results, threshold=0.5):
return sum(map(lambda x: x[0] >= threshold, filter(lambda x: x[1] == 1, results)))
def true_negatives(results, threshold=0.5):
return sum(map(lambda x: x[0] < threshold, filter(lambda x: x[1] == 0, results)))
def false_negatives(results, threshold=0.5):
return sum(map(lambda x: x[0] < threshold, filter(lambda x: x[1] == 1, results)))
def false_positives(results, threshold=0.5):
return sum(map(lambda x: x[0] >= threshold, filter(lambda x: x[1] == 0, results)))
def confusion_matrix(results, threshold=0.5):
return {
'TP': true_positives(results, threshold=threshold),
'TN': true_negatives(results, threshold=threshold),
'FP': false_positives(results, threshold=threshold),
'FN': false_negatives(results, threshold=threshold)
}
def tpr(results, threshold=0.5):
tpc = true_positives(results, threshold=threshold)
fnc = false_negatives(results, threshold=threshold)
if tpc + fnc <= 0:
return 0.0
else:
return tpc / float(tpc + fnc)
def sensitivity(results, threshold=0.5):
return tpr(results, threshold=threshold)
def recall(results, threshold=0.5):
return tpr(results, threshold=threshold)
def tnr(results, threshold=0.5):
tnc = true_negatives(results, threshold=threshold)
fpc = false_positives(results, threshold=threshold)
if tnc + fpc <= 0:
return 0.0
else:
return tnc / float(tnc + fpc)
def specificity(results, threshold=0.5):
return tnr(results, threshold=threshold)
def fnr(results, threshold=0.5):
fnc = false_negatives(results, threshold=threshold)
tpc = true_positives(results, threshold=threshold)
if tpc + fnc <= 0:
return 0.0
else:
return fnc / float(tpc + fnc)
def fpr(results, threshold=0.5):
fpc = false_positives(results, threshold=threshold)
tnc = true_negatives(results, threshold=threshold)
if fpc + tnc <= 0:
return 0.0
else:
return fpc / float(fpc + tnc)
def precision(results, threshold=0.5):
tpc = true_positives(results, threshold=threshold)
fpc = false_positives(results, threshold=threshold)
return tpc / max(float((tpc + fpc)), 1.0)
def f_score(results, threshold=0.5, beta=1):
precision_value = precision(results, threshold=threshold)
recall_value = recall(results, threshold=threshold)
return (1 + pow(beta, 2)) * ((precision_value * recall_value) / max((pow(beta, 2) * precision_value) + recall_value, 0.000001))
def mcc(results, threshold=0.5):
tpc = true_positives(results, threshold=threshold)
tnc = true_negatives(results, threshold=threshold)
fpc = false_positives(results, threshold=threshold)
fnc = false_negatives(results, threshold=threshold)
return ((tpc * tnc) - (fpc * fnc)) / sqrt(float(max((tpc + fpc) * (tpc + fnc) * (tnc + fpc) * (tnc + fnc), 1.0)))
def average_accuracy(results, threshold=0.5):
tpc = true_positives(results, threshold=threshold)
tnc = true_negatives(results, threshold=threshold)
fpc = false_positives(results, threshold=threshold)
fnc = false_negatives(results, threshold=threshold)
    # Guard the denominators, consistent with tpr/tnr above.
    return 0.5 * ((tpc / float(max(tpc + fnc, 1))) + (tnc / float(max(tnc + fpc, 1))))
def auc(results):
def _tied_rank(x):
sorted_x = sorted(zip(x,range(len(x))))
r = [0 for k in x]
cur_val = sorted_x[0][0]
last_rank = 0
for i in range(len(sorted_x)):
if cur_val != sorted_x[i][0]:
cur_val = sorted_x[i][0]
for j in range(last_rank, i):
r[sorted_x[j][1]] = float(last_rank+1+i)/2.0
last_rank = i
if i==len(sorted_x)-1:
for j in range(last_rank, i+1):
r[sorted_x[j][1]] = float(last_rank+i+2)/2.0
return r
def _auc(actual, posterior):
r = _tied_rank(posterior)
num_positive = len([0 for x in actual if x==1])
num_negative = len(actual)-num_positive
sum_positive = sum([r[i] for i in range(len(r)) if actual[i]==1])
auc = ((sum_positive - num_positive*(num_positive+1)/2.0) /
(num_negative*num_positive))
return auc
    # Materialize as lists; _auc needs len() and indexing.
    preds = [x[0] for x in results]
    actuals = [x[1] for x in results]
return _auc(actuals, preds)
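# Quick self-check of the helpers above; `results` is a sequence of
# (predicted_probability, actual_label) pairs, the shape every function in
# this module expects.
if __name__ == '__main__':
    results = [(0.9, 1), (0.2, 0), (0.7, 1), (0.4, 0), (0.6, 0)]
    print(confusion_matrix(results))  # {'TP': 2, 'TN': 2, 'FP': 1, 'FN': 0}
    print(round(precision(results), 3))  # 0.667
    print(auc(results))  # 1.0 -- every positive outranks every negative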
|
430443
|
from . import geocoding_utils
class TableGeocodingLock:
def __init__(self, execute_query, table_name):
self._execute_query = execute_query
text_id = 'carto-geocoder-{table_name}'.format(table_name=table_name)
self.lock_id = geocoding_utils.hash_as_big_int(text_id)
self.locked = False
def __enter__(self):
self.locked = geocoding_utils.lock(self._execute_query, self.lock_id)
return self.locked
    def __exit__(self, exc_type, exc_value, traceback):
if self.locked:
geocoding_utils.unlock(self._execute_query, self.lock_id)
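# Hypothetical usage -- `execute_query` is whatever callable the surrounding
# code uses to run SQL against the database:
#   with TableGeocodingLock(execute_query, 'my_table') as locked:
#       if not locked:
#           raise RuntimeError('table is already being geocoded')
#       ...  # safe to geocode here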
|